| Name | Last commit message | Last commit date |
|------|---------------------|------------------|
| __init__.py | test: reorganize tests folder hierarchy (#2996) | 2025-03-27 12:07:53 +08:00 |
| _test_llm_chat.py | Add thread leak check and fix thread/memory leak issues. (#3270) | 2025-04-08 19:03:18 +08:00 |
| _test_llm_server.py | Add thread leak check and fix thread/memory leak issues. (#3270) | 2025-04-08 19:03:18 +08:00 |
| _test_openai_chat_harmony.py | [TRTLLM-7207][feat] Chat completions API for gpt-oss (#7261) | 2025-08-28 10:22:06 +08:00 |
| _test_openai_chat_json.py | [https://nvbugs/5375594][fix] fix oom issue on structural_tag test case (#6838) | 2025-09-01 11:02:31 +08:00 |
| _test_openai_chat_multimodal.py | [TRTLLM-7157][feat] BREAKING CHANGE Introduce sampler_type, detect sampler according to options (#6831) | 2025-08-16 00:27:24 -04:00 |
| _test_openai_chat_structural_tag.py | [https://nvbugs/5375594][fix] fix oom issue on structural_tag test case (#6838) | 2025-09-01 11:02:31 +08:00 |
| _test_openai_chat.py | [TRTLLM-7157][feat] BREAKING CHANGE Introduce sampler_type, detect sampler according to options (#6831) | 2025-08-16 00:27:24 -04:00 |
| _test_openai_completions.py | [TRTLLM-7157][feat] BREAKING CHANGE Introduce sampler_type, detect sampler according to options (#6831) | 2025-08-16 00:27:24 -04:00 |
| _test_openai_consistent_chat.py | [TRTLLM-5208][BREAKING CHANGE] chore: make pytorch LLM the default (#5312) | 2025-06-20 03:01:10 +08:00 |
| _test_openai_lora.py | [https://nvbugs/5390853][fix] Fix _test_openai_lora.py - disable cuda graph (#6965) | 2025-08-17 16:56:16 +03:00 |
| _test_openai_metrics.py | [BREAKING CHANGE]: change default backend to PyTorch in trtllm-serve (#5717) | 2025-07-21 21:09:43 +08:00 |
| _test_openai_misc.py | [TRTLLM-6785][feat] BREAKING CHANGE Enable TRTLLM sampler by default (#6216) | 2025-08-07 22:19:37 -04:00 |
| _test_openai_mmencoder.py | [TRTLLM-7326][feat] Add standalone multimodal encoder (#6743) | 2025-08-19 21:42:50 -07:00 |
| _test_openai_multi_chat.py | [TRTLLM-5208][BREAKING CHANGE] chore: make pytorch LLM the default (#5312) | 2025-06-20 03:01:10 +08:00 |
| _test_openai_multi_gpu.py | [BREAKING CHANGE]: change default backend to PyTorch in trtllm-serve (#5717) | 2025-07-21 21:09:43 +08:00 |
| _test_openai_multi_nodes.py | [BREAKING CHANGE]: change default backend to PyTorch in trtllm-serve (#5717) | 2025-07-21 21:09:43 +08:00 |
| _test_openai_perf_metrics.py | [TRTLLM-6549][feat] add perf metrics endpoint to openai server and openai disagg server (#6985) | 2025-08-26 15:34:44 +08:00 |
| _test_openai_prometheus.py | [None][feat] Core Metrics Implementation (#5785) | 2025-08-09 02:48:53 -04:00 |
| _test_openai_reasoning.py | [BREAKING CHANGE]: change default backend to PyTorch in trtllm-serve (#5717) | 2025-07-21 21:09:43 +08:00 |
| _test_trtllm_serve_benchmark.py | tests: [TRTQA-2906] add benchmark serving tests (#4901) | 2025-06-05 14:33:03 +08:00 |
| _test_trtllm_serve_duplicated_args.py | chore: update trtllm-serve usage doc by removing backend parameter when it use torch as backend. (#6419) | 2025-07-30 11:11:06 -04:00 |
| _test_trtllm_serve_example.py | [None][chore] Enhance trtllm-serve example test (#6604) | 2025-08-06 20:30:35 +08:00 |
| _test_trtllm_serve_lora.py | [5830][feat] Improve LoRA cache memory control (#6220) | 2025-07-31 09:26:38 +03:00 |
| _test_trtllm_serve_multimodal_benchmark.py | [TRTLLM-6772][feat] Multimodal benchmark_serving support (#6622) | 2025-08-12 19:34:02 -07:00 |
| _test_trtllm_serve_multimodal_example.py | [TRTLLM-7157][feat] BREAKING CHANGE Introduce sampler_type, detect sampler according to options (#6831) | 2025-08-16 00:27:24 -04:00 |
| openai_server.py | feat: support abort disconnected requests (#3214) | 2025-04-07 16:14:58 +08:00 |
| README.md | Update TensorRT-LLM (#2936) | 2025-03-18 21:25:19 +08:00 |
| utils.py | [TRTLLM-7157][feat] BREAKING CHANGE Introduce sampler_type, detect sampler according to options (#6831) | 2025-08-16 00:27:24 -04:00 |