| File | Latest commit | Last updated |
| --- | --- | --- |
| __init__.py | Update TensorRT-LLM (#2755) | 2025-02-11 03:01:00 +00:00 |
| _util.py | [https://nvbugs/5717993][fix] Add execution_stream across PyExecutor, KVCacheManager, PeftCacheManager to ensure proper CUDA stream synchronization between KV cache transfer operations and model forward kernels. (#10060) | 2025-12-31 09:22:54 -08:00 |
| config_utils.py | [None][feat] Support Mistral Large3 LLM part (#9820) | 2025-12-13 11:44:27 +08:00 |
| cuda_graph_runner.py | [TRTLLM-8310][feat] Add Qwen3-VL-MoE (#9689) | 2025-12-15 20:05:20 -08:00 |
| executor_request_queue.py | [TRTLLM-9467][fix] Fix PP+CP combination with helix parallelism (#10312) | 2026-01-01 13:42:53 -05:00 |
| finish_reason.py | [TRTLLM-5974][feat] Support disaggregated serving in TRTLLM Sampler (#5328) | 2025-06-25 17:41:36 +02:00 |
| grammar_matcher.py | [TRTLLM-8763][chore] Deprecate pybind based GuidedDecodingConfig usage in torch backend (#8717) | 2025-10-29 20:37:14 +08:00 |
| guided_decoder.py | [None][feat] Graceful Error Handling for Guided Decoder (#9078) | 2025-12-13 19:57:59 +08:00 |
| handle_additional_outputs.py | [TRTLLM-8831][feat] Enable early exit with overlap scheduler (#8587) | 2025-11-17 18:07:13 +01:00 |
| handle_logits.py | [TRTLLM-8831][feat] Enable early exit with overlap scheduler (#8587) | 2025-11-17 18:07:13 +01:00 |
| kv_cache_connector.py | [None][feat] Support KV Connector with Disagg Prefill Worker (#8246) | 2025-10-24 11:09:06 -07:00 |
| kv_cache_transceiver.py | [None][feat] Support Mooncake transfer engine as a cache transceiver backend (#8309) | 2025-12-19 10:09:51 +08:00 |
| layerwise_nvtx_marker.py | Update TensorRT-LLM (#2849) | 2025-03-04 18:44:00 +08:00 |
| llm_request.py | [None][fix] Fix request_id for best_of/n case (#8368) | 2025-12-26 22:20:24 +01:00 |
| make_decoding_batch_input_output.py | [None][refactor] decoding inputs, part 2 (#5799) | 2025-11-18 14:38:51 +01:00 |
| mamba_cache_manager.py | [https://nvbugs/5717993][fix] Add execution_stream across PyExecutor, KVCacheManager, PeftCacheManager to ensure proper CUDA stream synchronization between KV cache transfer operations and model forward kernels. (#10060) | 2025-12-31 09:22:54 -08:00 |
| model_engine.py | [https://nvbugs/5707359][fix] Unwaive OOM case that should be fixed by #9446 (#10334) | 2025-12-31 10:41:39 +08:00 |
| model_loader.py | [TRTLLM-9660][feat] Convert cuteDSL GEMM to opt-in feature (#9682) | 2025-12-06 02:24:51 -08:00 |
| py_executor_creator.py | [https://nvbugs/5717993][fix] Add execution_stream across PyExecutor, KVCacheManager, PeftCacheManager to ensure proper CUDA stream synchronization between KV cache transfer operations and model forward kernels. (#10060) | 2025-12-31 09:22:54 -08:00 |
| py_executor.py | [https://nvbugs/5717993][fix] Add execution_stream across PyExecutor, KVCacheManager, PeftCacheManager to ensure proper CUDA stream synchronization between KV cache transfer operations and model forward kernels. (#10060) | 2025-12-31 09:22:54 -08:00 |
| resource_manager.py | [https://nvbugs/5717993][fix] Add execution_stream across PyExecutor, KVCacheManager, PeftCacheManager to ensure proper CUDA stream synchronization between KV cache transfer operations and model forward kernels. (#10060) | 2025-12-31 09:22:54 -08:00 |
| sampler.py | [None][fix] avoid implicit cudaStreamSynchronize in sample_async. (#10120) | 2025-12-23 10:15:40 +08:00 |
| sampling_utils_flashinfer.py | [TRTLLM-6756][feat] Update BeamSearch for TorchSampler (#9660) | 2025-12-09 10:44:01 +01:00 |
| sampling_utils.py | [TRTLLM-6756][feat] Update BeamSearch for TorchSampler (#9660) | 2025-12-09 10:44:01 +01:00 |
| scheduler.py | [https://nvbugs/5677746][fix] Use first PP rank's schedule result in other PP ranks to fix PP hang (#9659) | 2025-12-08 18:43:52 -08:00 |
| seq_slot_manager.py | [https://nvbugs/5394392][fix] Enlarge scheduler capacity under disagg bs == 1 (#6537) | 2025-08-15 09:52:06 -07:00 |
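
The most recent commit touching several of these files (#10060) describes threading a dedicated execution stream through PyExecutor, KVCacheManager, and PeftCacheManager so that KV-cache transfers are ordered before the forward kernels that consume them, without a full device synchronization. Below is a minimal sketch of that stream-ordering pattern in plain PyTorch; the function and argument names (`forward_with_kv_transfer`, `kv_blocks`) are hypothetical and this is not the TensorRT-LLM implementation, only an illustration of the general technique.

```python
import torch


def forward_with_kv_transfer(model, inputs, kv_blocks):
    """Illustrative only: order a KV-cache copy before dependent forward kernels."""
    assert torch.cuda.is_available()
    compute_stream = torch.cuda.current_stream()
    transfer_stream = torch.cuda.Stream()  # dedicated stream for KV-cache copies

    # Enqueue the KV-cache transfer on its own stream so it can overlap with
    # unrelated work already queued on the compute stream.
    with torch.cuda.stream(transfer_stream):
        staged = [block.to("cuda", non_blocking=True) for block in kv_blocks]

    # Make the compute stream wait on the transfer stream, so the forward
    # kernels enqueued next see the copied blocks, without cudaDeviceSynchronize.
    compute_stream.wait_stream(transfer_stream)

    with torch.no_grad():
        return model(inputs), staged
```

The key point of the pattern is that `wait_stream` inserts an ordering dependency between the two streams on the GPU instead of blocking the host, which is consistent with the related sampler fix (#10120) that avoids an implicit `cudaStreamSynchronize`.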