| Name | Latest commit | Date |
| --- | --- | --- |
| `checkpoints/` | [TRTLLM-7136][feat] Update load_weights method to include mapping parameter in checkpoint loaders (#9583) | 2025-12-05 16:07:20 +01:00 |
| `__init__.py` | [TRTLLM-7967][feat] Adding Starcoder2 PyTorch Backend Support (#8923) | 2025-11-24 11:23:22 -08:00 |
| `modeling_auto.py` | [TRTLLM-6746][feat] Enable two-model spec dec for MTP Eagle (#7001) | 2025-09-18 12:05:36 -04:00 |
| `modeling_bert.py` | [None][chore] replace print_colored_debug with logger_debug (#8417) | 2025-10-22 17:54:38 +08:00 |
| `modeling_clip.py` | [None][feat] Support kv_cahce_reuse for HyperCLOVAX-Vision model (#7789) | 2025-10-21 11:11:24 +09:00 |
| `modeling_deepseekv3.py` | [TRTLLM-9506][fix] Fix AR for DeepSeek-R1 2 model path (#9661) | 2025-12-08 10:12:32 -05:00 |
| `modeling_exaone4.py` | [https://nvbugs/5569713][fix] Disable fp8 deep gemm for EXAONE-4.0-32B-FP8 (#8429) | 2025-11-20 12:43:13 -05:00 |
| `modeling_gemma3.py` | [#6186][feat] Introduce QKNormRoPEAttention module (#6830) | 2025-09-05 14:04:41 +02:00 |
| `modeling_gemma3vl.py` | [None][fix] Multimodal InputProcessor dummy builder fix (#8916) | 2025-11-19 22:32:21 -08:00 |
| `modeling_glm.py` | [None][feat] Support Glm4MoeForCausalLM (#8256) | 2025-11-18 09:43:21 +08:00 |
| `modeling_gpt_oss.py` | [https://nvbugs/5552132][fix] Enable LoRa for GPT OSS Torch (#8253) | 2025-12-03 15:42:15 +01:00 |
| `modeling_hunyuan_dense.py` | [None][feat] Add Tencent HunYuanDenseV1 model support (#7081) | 2025-09-23 09:27:29 +08:00 |
| `modeling_hunyuan_moe.py` | [TRTLLM-8958][feat] and [TRTLLM-8960]: create ConfigurableMoE and support TRTLLMGenFusedMoE as backend (#9486) | 2025-12-01 08:37:07 +08:00 |
| `modeling_hyperclovax.py` | [None][fix] Multimodal InputProcessor dummy builder fix (#8916) | 2025-11-19 22:32:21 -08:00 |
| `modeling_llama_min_latency.py` | [TRTLLM-9293][feat] Enable partial weight loading to support streaming update weights (#9224) | 2025-11-26 10:59:06 +08:00 |
| `modeling_llama.py` | [https://nvbugs/5608489][fix] Fix output unpack issues for Llama3/4 NVFP4 models. (#8679) | 2025-11-20 12:43:13 -05:00 |
| `modeling_llava_next.py` | [None][fix] Multimodal InputProcessor dummy builder fix (#8916) | 2025-11-19 22:32:21 -08:00 |
| `modeling_mistral.py` | [None][fix] Multimodal InputProcessor dummy builder fix (#8916) | 2025-11-19 22:32:21 -08:00 |
| `modeling_mixtral.py` | [TRTLLM-7408][feat] Wrap MOE with custom op. (#7277) | 2025-09-09 12:18:56 -04:00 |
| `modeling_mllama.py` | feat : support duplicate_kv_weight for qwen3 blockwise scale (#5459) | 2025-06-30 11:49:22 +08:00 |
| `modeling_multimodal_encoder.py` | [None][chore] update torch_dtype -> dtype in 'transformers' (#8263) | 2025-10-15 17:09:30 +09:00 |
| `modeling_multimodal_utils.py` | [None][fix] fix error when processing batches containing both text and mm data (#8381) | 2025-12-04 14:28:24 +09:00 |
| `modeling_nemotron_h.py` | [FMDL-1328][feat] Add support for nano-v3 and super-v3 with pytorch backend (#9261) | 2025-12-02 13:40:20 +08:00 |
| `modeling_nemotron_nano.py` | [None][fix] Multimodal InputProcessor dummy builder fix (#8916) | 2025-11-19 22:32:21 -08:00 |
| `modeling_nemotron_nas.py` | [None][feat] add specdec to nemotron nas (#8985) | 2025-11-19 19:28:35 +01:00 |
| `modeling_nemotron.py` | feat: Remove not used padding_idx in models (#5385) | 2025-06-25 17:19:59 +08:00 |
| `modeling_phi3.py` | [https://nvbugs/5540752][fix] Support quantized Phi4 MM models (#8190) | 2025-10-20 06:36:09 -04:00 |
| `modeling_phi4mm.py` | [None][fix] Multimodal InputProcessor dummy builder fix (#8916) | 2025-11-19 22:32:21 -08:00 |
| `modeling_pixtral.py` | [TRTLLM-7442][model] Remove unnecessary D2H copies (#7273) | 2025-09-03 23:14:20 -04:00 |
| `modeling_qwen2vl.py` | [None][fix] Multimodal InputProcessor dummy builder fix (#8916) | 2025-11-19 22:32:21 -08:00 |
| `modeling_qwen3_moe.py` | [None][fix] Skip Allreduce init for Attention DP (#9542) | 2025-12-01 21:24:40 +08:00 |
| `modeling_qwen3_next.py` | [TRTLLM-9431][perf] Enable multistream for Linear Attention in Qwen3-… (#9696) | 2025-12-08 13:39:12 +08:00 |
| `modeling_qwen3.py` | [None][fix] Skip Allreduce init for Attention DP (#9542) | 2025-12-01 21:24:40 +08:00 |
| `modeling_qwen_moe.py` | [TRTLLM-7408][feat] Wrap MOE with custom op. (#7277) | 2025-09-09 12:18:56 -04:00 |
| `modeling_qwen.py` | [None][feat] Support Yarn on QwQ-32B model (#9059) | 2025-11-25 07:27:28 +08:00 |
| `modeling_radio.py` | [TRTLLM-8579][feat] Support quantized model for nano-v2-vlm (#8304) | 2025-10-16 09:44:11 +08:00 |
| `modeling_seedoss.py` | [None][feat] Support Seed-OSS model in pytorch backend (#7496) | 2025-09-24 03:57:12 -07:00 |
| `modeling_siglip.py` | [None][feat] Support kv_cahce_reuse for HyperCLOVAX-Vision model (#7789) | 2025-10-21 11:11:24 +09:00 |
| `modeling_speculative.py` | [TRTLLM-9506][fix] Fix AR for DeepSeek-R1 2 model path (#9661) | 2025-12-08 10:12:32 -05:00 |
| `modeling_starcoder2.py` | [TRTLLM-7967][feat] Adding Starcoder2 PyTorch Backend Support (#8923) | 2025-11-24 11:23:22 -08:00 |
| `modeling_utils.py` | [TRTLLM-7073][feat] Support torch compile for PP for Llama and DeepSeekV3 (#7838) | 2025-12-04 13:32:11 +08:00 |
| `modeling_vila.py` | [None][fix] Multimodal InputProcessor dummy builder fix (#8916) | 2025-11-19 22:32:21 -08:00 |