TensorRT-LLM/cpp/tensorrt_llm/kernels/contextFusedMultiHeadAttention/pagedKVCubin
(all entries last updated by commit "Update TensorRT-LLM (#524)", 2023-12-01 22:27:51 +08:00)

fmha_cubin.h

fmha_v2_flash_attention_bf16_64_16_S_160_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_bf16_64_16_S_160_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_bf16_64_16_S_160_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_bf16_64_16_S_160_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_bf16_64_16_S_256_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_bf16_64_16_S_256_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_bf16_64_16_S_256_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_bf16_64_16_S_256_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_bf16_64_32_S_40_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_bf16_64_32_S_40_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_bf16_64_32_S_40_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_bf16_64_32_S_40_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_bf16_64_32_S_64_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_bf16_64_32_S_64_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_bf16_64_32_S_64_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_bf16_64_32_S_64_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_bf16_64_32_S_80_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_bf16_64_32_S_80_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_bf16_64_32_S_80_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_bf16_64_32_S_80_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_bf16_64_32_S_128_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_bf16_64_32_S_128_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_bf16_64_32_S_128_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_bf16_64_32_S_128_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_bf16_64_64_S_16_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_bf16_64_64_S_16_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_bf16_64_64_S_16_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_bf16_64_64_S_16_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_bf16_64_64_S_32_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_bf16_64_64_S_32_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_bf16_64_64_S_32_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_bf16_64_64_S_32_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_bf16_64_64_S_256_pagedKV_tma_ws_sm90.cubin.cpp
fmha_v2_flash_attention_bf16_64_128_S_32_pagedKV_tma_ws_sm90.cubin.cpp
fmha_v2_flash_attention_bf16_64_128_S_64_pagedKV_tma_ws_sm90.cubin.cpp
fmha_v2_flash_attention_bf16_64_128_S_80_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_bf16_64_128_S_80_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_bf16_64_128_S_80_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_bf16_64_128_S_80_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_bf16_64_128_S_128_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_bf16_64_128_S_128_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_bf16_64_128_S_128_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_bf16_64_128_S_128_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_bf16_64_128_S_128_pagedKV_tma_ws_sm90.cubin.cpp
fmha_v2_flash_attention_bf16_64_128_S_160_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_bf16_64_128_S_160_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_bf16_64_128_S_160_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_bf16_64_128_S_160_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_bf16_64_128_S_256_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_bf16_64_128_S_256_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_bf16_64_128_S_256_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_bf16_64_128_S_256_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_bf16_128_128_S_16_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_bf16_128_128_S_16_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_bf16_128_128_S_16_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_bf16_128_128_S_16_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_bf16_128_128_S_32_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_bf16_128_128_S_32_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_bf16_128_128_S_32_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_bf16_128_128_S_32_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_bf16_128_128_S_40_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_bf16_128_128_S_40_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_bf16_128_128_S_40_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_bf16_128_128_S_40_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_bf16_128_128_S_64_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_bf16_128_128_S_64_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_bf16_128_128_S_64_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_bf16_128_128_S_64_pagedKV_sm90.cubin.cpp

fmha_v2_flash_attention_fp16_64_16_S_160_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_64_16_S_160_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_64_16_S_160_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_64_16_S_160_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_64_16_S_256_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_64_16_S_256_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_64_16_S_256_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_64_16_S_256_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_64_32_S_40_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_64_32_S_40_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_64_32_S_40_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_64_32_S_40_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_64_32_S_64_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_64_32_S_64_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_64_32_S_64_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_64_32_S_64_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_64_32_S_80_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_64_32_S_80_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_64_32_S_80_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_64_32_S_80_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_64_32_S_128_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_64_32_S_128_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_64_32_S_128_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_64_32_S_128_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_64_64_S_16_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_64_64_S_16_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_64_64_S_16_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_64_64_S_16_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_64_64_S_32_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_64_64_S_32_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_64_64_S_32_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_64_64_S_32_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_64_64_S_256_pagedKV_tma_ws_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_64_128_S_32_pagedKV_tma_ws_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_64_128_S_64_pagedKV_tma_ws_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_64_128_S_80_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_64_128_S_80_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_64_128_S_80_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_64_128_S_80_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_64_128_S_128_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_64_128_S_128_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_64_128_S_128_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_64_128_S_128_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_64_128_S_128_pagedKV_tma_ws_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_64_128_S_160_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_64_128_S_160_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_64_128_S_160_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_64_128_S_160_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_64_128_S_256_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_64_128_S_256_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_64_128_S_256_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_64_128_S_256_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_128_128_S_16_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_128_128_S_16_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_128_128_S_16_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_128_128_S_16_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_128_128_S_32_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_128_128_S_32_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_128_128_S_32_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_128_128_S_32_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_128_128_S_40_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_128_128_S_40_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_128_128_S_40_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_128_128_S_40_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_128_128_S_64_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_128_128_S_64_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_128_128_S_64_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_128_128_S_64_pagedKV_sm90.cubin.cpp

fmha_v2_flash_attention_fp16_fp32_64_16_S_160_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_16_S_160_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_16_S_160_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_16_S_160_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_16_S_256_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_16_S_256_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_16_S_256_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_16_S_256_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_32_S_40_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_32_S_40_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_32_S_40_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_32_S_40_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_32_S_64_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_32_S_64_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_32_S_64_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_32_S_64_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_32_S_80_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_32_S_80_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_32_S_80_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_32_S_80_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_32_S_128_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_32_S_128_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_32_S_128_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_32_S_128_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_64_S_16_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_64_S_16_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_64_S_16_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_64_S_16_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_64_S_32_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_64_S_32_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_64_S_32_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_64_S_32_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_64_S_256_pagedKV_tma_ws_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_128_S_32_pagedKV_tma_ws_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_128_S_64_pagedKV_tma_ws_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_128_S_80_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_128_S_80_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_128_S_80_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_128_S_80_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_128_S_128_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_128_S_128_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_128_S_128_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_128_S_128_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_128_S_128_pagedKV_tma_ws_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_128_S_160_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_128_S_160_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_128_S_160_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_128_S_160_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_128_S_256_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_128_S_256_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_128_S_256_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_64_128_S_256_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_128_128_S_16_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_128_128_S_16_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_128_128_S_16_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_128_128_S_16_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_128_128_S_32_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_128_128_S_32_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_128_128_S_32_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_128_128_S_32_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_128_128_S_40_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_128_128_S_40_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_128_128_S_40_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_128_128_S_40_pagedKV_sm90.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_128_128_S_64_pagedKV_sm80.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_128_128_S_64_pagedKV_sm86.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_128_128_S_64_pagedKV_sm89.cubin.cpp
fmha_v2_flash_attention_fp16_fp32_128_128_S_64_pagedKV_sm90.cubin.cpp
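Every kernel above follows one naming pattern, which appears to encode the kernel configuration: fmha_v2_flash_attention_<dtype>_<tileQ>_<tileKV>_S_<headSize>_pagedKV[_tma_ws]_<sm>.cubin.cpp. Reading the names, <dtype> is the input precision (fp16_fp32 presumably meaning fp16 inputs with fp32 accumulation), the two integers look like per-CTA tile sizes along the query and key/value sequence dimensions, S_<n> is the attention head size, _tma_ws marks the Hopper TMA warp-specialized variants, and sm<n> is the target compute capability. A minimal C++ sketch of a parser for this convention (the field interpretations are inferred from the names and are assumptions, not documented in this directory):

```cpp
// Sketch: parse a pagedKV cubin filename into its configuration fields.
// The field meanings are inferred from the naming pattern above; they are
// assumptions, not an official TensorRT-LLM API.
#include <iostream>
#include <optional>
#include <regex>
#include <string>

struct FmhaCubinInfo {
    std::string dtype;        // "bf16", "fp16", or "fp16_fp32" (assumed fp32 accumulation)
    int tileQ = 0;            // assumed per-CTA tile size along the query dim
    int tileKV = 0;           // assumed per-CTA tile size along the key/value dim
    int headSize = 0;         // attention head size (the S_<n> field)
    bool tmaWarpSpec = false; // Hopper TMA warp-specialized variant (_tma_ws)
    int smVersion = 0;        // target SM, e.g. 80, 86, 89, 90
};

std::optional<FmhaCubinInfo> parseFmhaCubinName(const std::string& name) {
    // fp16_fp32 is listed before fp16 so the longer alternative is tried first.
    static const std::regex re(
        R"(fmha_v2_flash_attention_(bf16|fp16_fp32|fp16)_(\d+)_(\d+)_S_(\d+)_pagedKV(_tma_ws)?_sm(\d+)\.cubin\.cpp)");
    std::smatch m;
    if (!std::regex_match(name, m, re)) {
        return std::nullopt;
    }
    FmhaCubinInfo info;
    info.dtype = m[1];
    info.tileQ = std::stoi(m[2]);
    info.tileKV = std::stoi(m[3]);
    info.headSize = std::stoi(m[4]);
    info.tmaWarpSpec = m[5].matched;
    info.smVersion = std::stoi(m[6]);
    return info;
}

int main() {
    auto info = parseFmhaCubinName(
        "fmha_v2_flash_attention_bf16_64_128_S_128_pagedKV_tma_ws_sm90.cubin.cpp");
    if (info) {
        std::cout << "dtype=" << info->dtype << " tileQ=" << info->tileQ
                  << " tileKV=" << info->tileKV << " S=" << info->headSize
                  << " tma_ws=" << info->tmaWarpSpec
                  << " sm=" << info->smVersion << "\n";
    }
}
```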
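Each .cubin.cpp translation unit embeds one precompiled kernel image as a byte array, and fmha_cubin.h collects the corresponding declarations so the runtime can pick the kernel matching the current GPU and head size and load it through the CUDA driver API. A minimal sketch of that embed-and-load pattern, with hypothetical symbol and kernel names (the repository's actual identifiers and selection logic live in fmha_cubin.h and the attention runner, not shown here):

```cpp
// Sketch of the embed-and-load pattern for precompiled cubins.
#include <cuda.h>
#include <cstdio>

// Stand-in for the embedded image: in the real <kernel>.cubin.cpp files this
// array holds the full compiled cubin bytes. Symbol names are hypothetical.
const unsigned char example_fmha_cubin[] = {0x00};
const unsigned int example_fmha_cubin_len = sizeof(example_fmha_cubin);

void launchFromEmbeddedCubin() {
    cuInit(0);
    CUdevice dev;
    cuDeviceGet(&dev, 0);
    CUcontext ctx;
    cuCtxCreate(&ctx, 0, dev);

    // Load the in-memory cubin image and look up the kernel entry point.
    CUmodule mod;
    if (cuModuleLoadData(&mod, example_fmha_cubin) != CUDA_SUCCESS) {
        std::fprintf(stderr, "failed to load embedded cubin\n");
        cuCtxDestroy(ctx);
        return;
    }
    CUfunction fn;
    cuModuleGetFunction(&fn, mod, "example_fmha_kernel"); // hypothetical name
    // ... set up kernel parameters and call cuLaunchKernel(fn, ...) ...
    cuModuleUnload(mod);
    cuCtxDestroy(ctx);
}
```

Shipping cubins this way keeps the attention kernels independent of the end user's CUDA toolchain: only the matching byte array for the detected SM version is loaded at runtime, which is presumably why each configuration appears once per target architecture in the listing above.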