mirror of
https://github.com/NVIDIA/TensorRT-LLM.git
synced 2026-01-14 06:27:45 +08:00
* Add test case for kv memory estimation * Dump running log into file and parse kv cache memory size from file * Set bigger peak memory size for mixed precision case and test_ptp_quickstart_advanced_eagle3 case * Revert change to usage of fraction * use context manager to guard temp files Signed-off-by: Hui Gao <huig@nvidia.com> |
||
|---|---|---|
| .. | ||
| __init__.py | ||
| _util.py | ||
| config.py | ||
| cuda_graph_runner.py | ||
| decoder.py | ||
| guided_decoder.py | ||
| kv_cache_transceiver.py | ||
| layerwise_nvtx_marker.py | ||
| llm_request.py | ||
| model_engine.py | ||
| py_executor_creator.py | ||
| py_executor.py | ||
| resource_manager.py | ||
| scheduler.py | ||
| seq_slot_manager.py | ||