| File | Last commit | Last updated |
|---|---|---|
| `__init__.py` | test: reorganize tests folder hierarchy (#2996) | 2025-03-27 12:07:53 +08:00 |
| `_test_disagg_serving_multi_nodes_service_discovery.py` | [TRTLLM-9181][feat] improve disagg-server prometheus metrics; synchronize workers' clocks when workers are dynamic (#9726) | 2025-12-16 05:16:32 -08:00 |
| `_test_disagg_serving_multi_nodes.py` | [TRTLLM-9840][test] switch ucx backend to default backend (#10101) | 2025-12-18 18:54:15 -08:00 |
| `_test_llm_chat.py` | Add thread leak check and fix thread/memory leak issues. (#3270) | 2025-04-08 19:03:18 +08:00 |
| `_test_llm_server.py` | Add thread leak check and fix thread/memory leak issues. (#3270) | 2025-04-08 19:03:18 +08:00 |
| `_test_openai_cache_salt.py` | [TRTLLM-7398][feat] Support KV cache salting for secure KV cache reuse (#7106) | 2025-09-06 17:58:32 -04:00 |
| `_test_openai_chat_guided_decoding.py` | [TRTLLM-9295][fix] use greedy decoding in test_openai_compatible_json_schema (#9305) | 2025-11-20 08:32:23 +01:00 |
| `_test_openai_chat_harmony.py` | [https://nvbugs/5633700][fix] Cache tiktoken vocab for gpt-oss (#10219) | 2025-12-26 18:39:03 +08:00 |
| `_test_openai_chat_multimodal.py` | [https://nvbugs/5685428][fix] fix test_openai_chat_multimodal.py (#9406) | 2025-11-24 16:56:33 -08:00 |
| `_test_openai_chat.py` | [TRTLLM-8598][feat] enable n > 1 in OpenAI API with PyTorch backend (#8951) | 2025-11-07 17:47:35 -08:00 |
| `_test_openai_completions.py` | [TRTLLM-8598][feat] enable n > 1 in OpenAI API with PyTorch backend (#8951) | 2025-11-07 17:47:35 -08:00 |
| `_test_openai_consistent_chat.py` | [TRTLLM-8682][chore] Remove auto_parallel module (#8329) | 2025-10-22 20:53:08 -04:00 |
| `_test_openai_lora.py` | [https://nvbugs/5390853][fix] Fix _test_openai_lora.py - disable cuda graph (#6965) | 2025-08-17 16:56:16 +03:00 |
| `_test_openai_metrics.py` | [TRTLLM-8274][feat] Check if executor is shutdown in /health entrypoint (#9057) | 2025-12-04 13:49:40 +08:00 |
| `_test_openai_misc.py` | [https://nvbugs/5718004][fix] Add warmup for cancellation test (#9860) | 2025-12-11 12:20:33 +08:00 |
| `_test_openai_mmencoder.py` | [https://nvbugs/5747911][fix] Use offline data path for the unit test of mmencoder server (#10135) | 2025-12-18 15:19:23 -08:00 |
| `_test_openai_multi_chat.py` | [TRTLLM-8682][chore] Remove auto_parallel module (#8329) | 2025-10-22 20:53:08 -04:00 |
| `_test_openai_multi_gpu.py` | [BREAKING CHANGE]: change default backend to PyTorch in trtllm-serve (#5717) | 2025-07-21 21:09:43 +08:00 |
| `_test_openai_multi_nodes.py` | [BREAKING CHANGE]: change default backend to PyTorch in trtllm-serve (#5717) | 2025-07-21 21:09:43 +08:00 |
| `_test_openai_perf_metrics.py` | [TRTLLM-6549][feat] add perf metrics endpoint to openai server and openai disagg server (#6985) | 2025-08-26 15:34:44 +08:00 |
| `_test_openai_prometheus.py` | [None][feat] Add trtllm_ prefix for exposed metrics (#8845) | 2025-11-06 15:27:18 +08:00 |
| `_test_openai_reasoning.py` | [None][feat] Support Qwen3 reasoning parser (#8000) | 2025-10-21 14:08:39 +08:00 |
| `_test_openai_responses.py` | [https://nvbugs/5753250][fix] Fix undefined local variable in responses utils (#10154) | 2025-12-28 06:59:32 +08:00 |
| `_test_openai_tool_call.py` | [TRTLLM-8214][feat] Support Qwen3 tool parser (#8216) | 2025-10-29 15:48:29 +08:00 |
| `_test_trtllm_serve_benchmark.py` | [TRTLLM-7070][feat] add gpt-oss chunked prefill tests (#7779) | 2025-09-22 00:12:43 -07:00 |
| `_test_trtllm_serve_duplicated_args.py` | chore: update trtllm-serve usage doc by removing backend parameter when it use torch as backend. (#6419) | 2025-07-30 11:11:06 -04:00 |
| `_test_trtllm_serve_example.py` | [https://nvbugs/5747938][fix] Use local tokenizer (#10230) | 2025-12-26 22:08:10 +08:00 |
| `_test_trtllm_serve_lora.py` | [5830][feat] Improve LoRA cache memory control (#6220) | 2025-07-31 09:26:38 +03:00 |
| `_test_trtllm_serve_multimodal_benchmark.py` | [https://nvbugs/5494698][fix] skip gemma3 27b on blackwell (#7505) | 2025-09-10 21:09:27 +08:00 |
| `_test_trtllm_serve_multimodal_example.py` | [TRTLLM-9091] [feat] Replace GenAI-Perf with AIPerf (#9310) | 2025-12-23 13:25:55 +08:00 |
| `_test_trtllm_serve_top_logprobs.py` | [None][chore] Remove logprobs constraint on trtllm-serve pytorch backend (#9911) | 2025-12-22 21:37:22 +08:00 |
| `openai_server.py` | [TRTLLM-9181][feat] improve disagg-server prometheus metrics; synchronize workers' clocks when workers are dynamic (#9726) | 2025-12-16 05:16:32 -08:00 |
| `README.md` | [TRTLLM-8214][feat] Support Qwen3 tool parser (#8216) | 2025-10-29 15:48:29 +08:00 |
| `test_chat_utils.py` | [None][feat] Support custom chat template for tool calling (#9297) | 2025-11-25 22:07:04 +08:00 |
| `test_disagg_serving_perf_metrics.py` | [https://nvbugs/5726066][fix] fix auto-scaling related failures (#9845) | 2025-12-18 16:37:48 -05:00 |
| `test_harmony_channel_validation.py` | [https://nvbugs/5521799][fix] add harmony channel validation (#8837) | 2025-11-03 02:31:54 -08:00 |
| `test_tool_parsers.py` | [TRTLLM-9677][feat] Support DeepSeek-V3.2 tool parser (#10126) | 2025-12-23 08:46:47 +08:00 |
| `utils.py` | [TRTLLM-9181][feat] improve disagg-server prometheus metrics; synchronize workers' clocks when workers are dynamic (#9726) | 2025-12-16 05:16:32 -08:00 |