| File | Last commit | Last updated |
| --- | --- | --- |
| __init__.py | Update TensorRT-LLM (#2755) | 2025-02-11 03:01:00 +00:00 |
| _util.py | refactor: Introduce ResourceManagerType enum for resource management (#5246) | 2025-06-18 09:55:59 +02:00 |
| config_utils.py | feat: support kv cache reuse for MLA (#3571) | 2025-05-15 15:22:21 +08:00 |
| config.py | refactor [BREAKING CHANGE]: remove the redundant use_kv_cache field from PytorchConfig (#5031) | 2025-06-13 16:34:24 +08:00 |
| cuda_graph_runner.py | [TRTLLM-4983] feat: enable overlap scheduler between draft forwards (#4802) | 2025-06-15 23:09:16 +08:00 |
| grammar_matcher.py | feat: TRTLLM-5941 Upgrade xgrammar to 0.1.18 (#5364) | 2025-06-25 14:10:50 +08:00 |
| guided_decoder.py | feat: Add LLGuidance Support for PyTorch Backend (#5214) | 2025-06-18 19:33:34 +08:00 |
| handle_context_logits.py | refactor: Update decoder buffer and logits management (#4450) | 2025-06-18 08:10:32 +08:00 |
| handle_generation_logits.py | refactor: Update decoder buffer and logits management (#4450) | 2025-06-18 08:10:32 +08:00 |
| kv_cache_transceiver.py | Solve underallocation in VSWA+/VGQA (#4667) | 2025-06-12 12:12:46 +08:00 |
| layerwise_nvtx_marker.py | Update TensorRT-LLM (#2849) | 2025-03-04 18:44:00 +08:00 |
| llm_request.py | update LlmRequest.is_dummy property (#5283) | 2025-06-18 10:52:13 +08:00 |
| make_decoding_batch_input_output.py | refactor: Update decoder buffer and logits management (#4450) | 2025-06-18 08:10:32 +08:00 |
| model_engine.py | refactor: Introduce ResourceManagerType enum for resource management (#5246) | 2025-06-18 09:55:59 +02:00 |
| py_executor_creator.py | refactor: Introduce ResourceManagerType enum for resource management (#5246) | 2025-06-18 09:55:59 +02:00 |
| py_executor.py | refactor: Introduce ResourceManagerType enum for resource management (#5246) | 2025-06-18 09:55:59 +02:00 |
| resource_manager.py | refactor: Introduce ResourceManagerType enum for resource management (#5246) | 2025-06-18 09:55:59 +02:00 |
| sampler.py | refactor: Update decoder buffer and logits management (#4450) | 2025-06-18 08:10:32 +08:00 |
| scheduler.py | fix: max_num_sequences calculation with overlap scheduling (#4532) | 2025-06-03 09:31:22 +02:00 |
| seq_slot_manager.py | fix: skip add new slot if request has slot 0 (#3991) | 2025-05-06 07:46:39 +02:00 |