| File | Last commit message | Last commit date |
|---|---|---|
| __init__.py | Update TensorRT-LLM (#2755) | 2025-02-11 03:01:00 +00:00 |
| _util.py | [None][chore] Validate features combination (#7630) | 2025-09-25 08:01:13 +08:00 |
| config_utils.py | [None][fix] fix hunyuan_moe init bug (#7502) | 2025-09-04 03:06:00 -04:00 |
| config.py | [TRTLLM-7728][feat] batched sampling by strategy (supersedes enable_mixed_sampler, cf. TRTLLM-7156) (#7294) | 2025-09-23 16:05:05 -07:00 |
| cuda_graph_runner.py | [TRTLLM-7330][feat] Eagle3 cuda graph support for the first draft model inference (#7363) | 2025-09-26 11:28:05 +08:00 |
| executor_request_queue.py | [None][opt] Balance the request based on number of tokens in AttentionDP (#7183) | 2025-08-27 11:16:12 +08:00 |
| finish_reason.py | [TRTLLM-5974][feat] Support disaggregated serving in TRTLLM Sampler (#5328) | 2025-06-25 17:41:36 +02:00 |
| grammar_matcher.py | [TRTLLM-8209][feat] Support new structural tag API (upgrade XGrammar to 0.1.25) (#7893) | 2025-09-23 09:10:09 +08:00 |
| guided_decoder.py | [None][fix] Disable torch.compile for CapturableGuidedDecoder (#7871) | 2025-09-22 10:04:30 +08:00 |
| handle_logits.py | [TRTLLM-7155][feat] Unify sampler handle logits implementation. (#6867) | 2025-08-22 08:09:30 +02:00 |
| kv_cache_connector.py | [None][chore] add TorchLlmArgs to the connector api (#7493) | 2025-09-09 09:05:59 -04:00 |
| kv_cache_transceiver.py | [TRTLLM-7361][feat] KV cache transfer for uneven pp (#7117) | 2025-09-08 13:37:46 -04:00 |
| layerwise_nvtx_marker.py | Update TensorRT-LLM (#2849) | 2025-03-04 18:44:00 +08:00 |
| llm_request.py | [TRTLLM-7330][feat] Eagle3 cuda graph support for the first draft model inference (#7363) | 2025-09-26 11:28:05 +08:00 |
| make_decoding_batch_input_output.py | feat: Optimize TRTLLM Sampler perf single beam single step (#5550) | 2025-07-07 15:44:47 +02:00 |
| mamba_cache_manager.py | [None][chore] Mamba cache in separate file (#6796) | 2025-08-15 13:42:51 +03:00 |
| model_engine.py | [TRTLLM-7330][feat] Eagle3 cuda graph support for the first draft model inference (#7363) | 2025-09-26 11:28:05 +08:00 |
| model_loader.py | [None][chore] extract weights loading related logic to model loader (#7579) | 2025-09-25 10:19:22 -07:00 |
| py_executor_creator.py | [TRTLLM-7330][feat] Eagle3 cuda graph support for the first draft model inference (#7363) | 2025-09-26 11:28:05 +08:00 |
| py_executor.py | [https://nvbugs/5528405][fix] Set up draft_tokens before scheduling (#7903) | 2025-09-24 09:56:17 +08:00 |
| resource_manager.py | [TRTLLM-7385][feat] Optimize Qwen2/2.5-VL performance (#7250) | 2025-09-22 03:40:02 -07:00 |
| sampler.py | [None][fix] Revert "[None][feat] Return topk logprobs in torch backend (#7756)" (#7969) | 2025-09-24 15:36:38 -07:00 |
| scheduler.py | [https://nvbugs/5528405][fix] Set up draft_tokens before scheduling (#7903) | 2025-09-24 09:56:17 +08:00 |
| seq_slot_manager.py | [https://nvbugs/5394392][fix] Enlarge scheduler capacity under disagg bs == 1 (#6537) | 2025-08-15 09:52:06 -07:00 |