| File | Latest commit | Last updated |
|------|---------------|--------------|
| deep_gemm_tests.py | [perf] Reduce the workspace size of FP4 activation scales for MoE (#4303) | 2025-05-30 09:03:52 +08:00 |
| test_causal_conv1d_op.py | [ci] parallelize torch unittests (#5714) | 2025-07-09 11:05:57 +03:00 |
| test_cublas_mm.py | [fix] Remove stale cublas heuristics (#4326) | 2025-05-14 17:35:51 -07:00 |
| test_dsv3_fused_a_gemm.py | Feat/ds r1 min latency opt round3, add router gemm, fused a gemm, PDL (#4560) | 2025-06-14 17:36:22 +08:00 |
| test_dsv3_router_gemm.py | Feat/ds r1 min latency opt round3, add router gemm, fused a gemm, PDL (#4560) | 2025-06-14 17:36:22 +08:00 |
| test_fp4_bmm_quantize.py | [feat] Support torch compile for attention dp (#5086) | 2025-07-01 13:48:52 -04:00 |
| test_fp4_gemm_quantize.py | feat: Add w4a8_mxfp4_fp8 quantization recipe. (#4867) | 2025-06-16 11:30:57 +08:00 |
| test_fp4_linear.py | [feat] Support torch compile for attention dp (#5086) | 2025-07-01 13:48:52 -04:00 |
| test_fp4_swizzle.py | perf: Optimize swizzle_sf, unswizzle_sf, reswizzle_sf (#5318) | 2025-06-26 14:03:56 +08:00 |
| test_fp8_block_scale_gemm.py | ci: [nvbugs/5280806] Unwaive unittests/_torch. (#4951) | 2025-06-09 19:04:11 +08:00 |
| test_fp8_linear.py | chore: reorganize some unit tests of PyTorch (#3780) | 2025-04-23 11:19:10 -07:00 |
| test_fp8_quantize.py | chore: reorganize some unit tests of PyTorch (#3780) | 2025-04-23 11:19:10 -07:00 |
| test_fp8_rowwise_linear.py | [TRTLLM-5812][feat] support FP8 row-wise dense GEMM in torch flow (#5615) | 2025-07-07 18:04:57 +08:00 |
| test_fused_qk_norm_rope.py | perf: Add fused q_norm/k_norm/RoPE for Qwen3. (#4482) | 2025-05-23 15:31:04 +08:00 |
| test_logits_bitmask_op.py | Update (#2978) | 2025-03-23 16:39:35 +08:00 |
| test_mamba2_chunk_ss_update.py | [CI] reduce mamba2 ssm test parameterization (#5571) | 2025-06-29 15:56:23 +03:00 |
| test_mamba_conv1d_op.py | [ci] parallelize torch unittests (#5714) | 2025-07-09 11:05:57 +03:00 |
| test_moe_alltoall.py | [ci] parallelize torch unittests (#5714) | 2025-07-09 11:05:57 +03:00 |
| test_moe.py | [TRTLLM-5881] feat: Integrate TRT-LLM Gen FP4 block scale MoE with Pytorch workflow kernel autotuner (#5764) | 2025-07-09 08:21:58 +01:00 |
| test_noaux_tc.py | Clean up modeling_deepseek.py (#3640) | 2025-04-18 17:54:33 -07:00 |
| test_scaled_mm.py | test: fix cublas_scaled_mm with aligned workspace size (#3600) | 2025-04-21 14:51:42 +08:00 |
| test_selective_scan_op.py | ci: [nvbugs/5280806] Unwaive unittests/_torch. (#4951) | 2025-06-09 19:04:11 +08:00 |
| test_tllmg_bmm.py | [TRTLLM-5589] feat: Integrate TRT-LLM Gen FP8 Batched GEMM with Pytorch workflow kernel autotuner (#4872) | 2025-06-09 11:02:48 +01:00 |
| test_w4a8_mxfp4_mxfp8_gemm.py | [feat] Support torch compile for attention dp (#5086) | 2025-07-01 13:48:52 -04:00 |
| test_w4a16_gemm.py | feat: W4A16 GEMM (#4232) | 2025-07-01 10:36:05 +03:00 |
| test_w4a16_linear.py | feat: W4A16 GEMM (#4232) | 2025-07-01 10:36:05 +03:00 |