| Name | Last commit | Last commit date |
|------|-------------|------------------|
| apps | [https://nvbugs/5837281][fix] Fix trtllm-serve guided decoding test (#11101) | 2026-01-30 16:59:55 +08:00 |
| __init__.py | test: reorganize tests folder hierarchy (#2996) | 2025-03-27 12:07:53 +08:00 |
| _run_mpi_comm_task.py | [https://nvbugs/5783876][fix] fix hmac launch (#10434) | 2026-01-22 23:20:53 +08:00 |
| _run_multi_llm_tasks.py | [https://nvbugs/5437384][test] CHERRY-PICK: fix trtllm-llmapi-launch multi tests (#8567) | 2025-11-01 06:49:33 -07:00 |
| _run_multi_mpi_comm_tasks.py | [https://nvbugs/5783876][fix] fix hmac launch (#10434) | 2026-01-22 23:20:53 +08:00 |
| _test_remote_mpi_session.sh | fix [nvbug/5351244]: address remote mpi session submit (#5664) | 2025-07-22 12:48:00 +08:00 |
| fake.sh | doc: fix path after examples migration (#3814) | 2025-04-24 02:36:45 +08:00 |
| lora_test_utils.py | [https://nvbugs/5322131][feat] Multi-LoRA serving with CUDA Graph (#8279) | 2026-01-22 14:01:18 +01:00 |
| run_llm_exit.py | Update TensorRT-LLM (#2936) | 2025-03-18 21:25:19 +08:00 |
| run_llm_with_postproc.py | [nvbug 5327706][fix] fix mgmn postprocess error (#5835) | 2025-07-14 17:17:30 +08:00 |
| run_llm.py | [TRTLLM-5208][BREAKING CHANGE] chore: make pytorch LLM the default (#5312) | 2025-06-20 03:01:10 +08:00 |
| test_additional_model_outputs.py | [None][fix] Additional model outputs for pipeline parallelism (#9794) | 2025-12-09 10:41:22 +01:00 |
| test_async_llm.py | [TRTLLM-9737][chore] Add rl perf reproduce script and enhance the robustness of Ray tests (#9939) | 2025-12-24 15:27:01 +08:00 |
| test_build_cache.py | Update TensorRT-LLM (#2936) | 2025-03-18 21:25:19 +08:00 |
| test_config_database.py | [None][fix] enable KV cache reuse for config database (#10094) | 2025-12-19 15:16:56 -08:00 |
| test_executor.py | [None][refactor] simplify get_stats and get_kvcache_events with rpc (#9980) | 2025-12-22 18:23:43 +08:00 |
| test_gc_utils.py | [nvbug 5273941] fix: broken cyclic reference detect (#5417) | 2025-07-01 20:12:55 +08:00 |
| test_grpc.py | [None][feat] Add gRPC server for high-performance external router integration (#11037) | 2026-01-30 07:48:27 +08:00 |
| test_llm_args.py | [TRTC-122][feat] Eagle3 Specdec UX improvements (#10124) | 2026-01-22 07:24:11 -08:00 |
| test_llm_download.py | [TRTLLM-5208][BREAKING CHANGE] chore: make pytorch LLM the default (#5312) | 2025-06-20 03:01:10 +08:00 |
| test_llm_kv_cache_events.py | [TRTLLM-9601][feat] Expose mmKeys for multimodal to integrate with dynamo. (#9604) | 2025-12-15 08:42:30 +08:00 |
| test_llm_models.py | [https://nvbugs/5371480][fix] Enable test_phi3_small_8k (#6938) | 2025-08-19 09:42:35 +08:00 |
| test_llm_multi_gpu_pytorch.py | [https://nvbugs/5322131][feat] Multi-LoRA serving with CUDA Graph (#8279) | 2026-01-22 14:01:18 +01:00 |
| test_llm_multi_gpu.py | [None][refactor] simplify get_stats and get_kvcache_events with rpc (#9980) | 2025-12-22 18:23:43 +08:00 |
| test_llm_pytorch.py | [https://nvbugs/5322131][feat] Multi-LoRA serving with CUDA Graph (#8279) | 2026-01-22 14:01:18 +01:00 |
| test_llm_quant.py | [https://nvbugs/5558117][fix] Allow per-layer quant config from hf_quant_config.json (#8617) | 2025-10-31 04:41:44 -07:00 |
| test_llm_utils.py | [TRTLLM-8189][chore] enhance GenerationExecutor with RPC (part1) (#5543) | 2025-10-05 17:28:20 +08:00 |
| test_llm.py | [None][feat] Auto download speculative models from HF for pytorch backend, add speculative_model field alias (#10099) | 2026-01-14 21:06:07 -08:00 |
| test_memory_profiling.py | [https://nvbugs/5717993][fix] Add execution_stream across PyExecutor, KVCacheManager, PeftCacheManager to ensure proper CUDA stream synchronization between KV cache transfer operations and model forward kernels. (#10060) | 2025-12-31 09:22:54 -08:00 |
| test_mpi_session.py | [https://nvbugs/5437384][test] CHERRY-PICK: fix trtllm-llmapi-launch multi tests (#8567) | 2025-11-01 06:49:33 -07:00 |
| test_reasoning_parser.py | [None][feat] Update reasoning parser for nano-v3 (#9944) | 2025-12-15 05:39:37 -08:00 |
| test_serialization.py | [TRTLLM-8682][chore] Remove auto_parallel module (#8329) | 2025-10-22 20:53:08 -04:00 |
| test_utils.py | [https://nvbugs/5322131][feat] Multi-LoRA serving with CUDA Graph (#8279) | 2026-01-22 14:01:18 +01:00 |