# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
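"""Generate the deployment-guide config table (RST) from the recipe database.

Reads the recipe lookup YAML under examples/configs/database and emits one
Sphinx list-table per model into docs/source/deployment-guide/config_table.rst.
"""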
import os
import sys
from collections import defaultdict
from pathlib import Path

SCRIPT_DIR = Path(__file__).parent.resolve()
REPO_ROOT = SCRIPT_DIR.parent

# Put the repo root on sys.path so the `examples` package resolves no matter
# which working directory the script is launched from.
sys.path.insert(0, str(REPO_ROOT))

from examples.configs.database.database import DATABASE_LIST_PATH, RecipeList
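
# Display names and Hugging Face links for models known to the database;
# models not listed here fall back to their raw model ID as the section title.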
MODEL_INFO = {
    "deepseek-ai/DeepSeek-R1-0528": {
        "display_name": "DeepSeek-R1",
        "url": "https://huggingface.co/deepseek-ai/DeepSeek-R1-0528",
    },
    "nvidia/DeepSeek-R1-0528-FP4-v2": {
        "display_name": "DeepSeek-R1 (NVFP4)",
        "url": "https://huggingface.co/nvidia/DeepSeek-R1-0528-FP4-v2",
    },
    "openai/gpt-oss-120b": {
        "display_name": "gpt-oss-120b",
        "url": "https://huggingface.co/openai/gpt-oss-120b",
    },
}
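
# Concurrency cut-offs used to classify a group that contains only a single
# recipe (multi-recipe groups are ranked by position within the group instead).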
LOW_LATENCY_CONCURRENCY_THRESHOLD = 8
HIGH_THROUGHPUT_CONCURRENCY_THRESHOLD = 32


def generate_rst(yaml_path, output_file=None):
    """Generate RST table from YAML config database.

    Args:
        yaml_path: Path to lookup.yaml (str or Path).
        output_file: Optional output file path. If None, prints to stdout.
    """
    recipe_list = RecipeList.from_yaml(Path(yaml_path))

    # Group by model -> (gpu, isl, osl) -> list of recipes
    model_groups = defaultdict(lambda: defaultdict(list))
    for recipe in recipe_list:
        key = (recipe.gpu, recipe.isl, recipe.osl)
        model_groups[recipe.model][key].append(recipe)
    lines = []

    # Include note_sections.rst at the top (relative include for Sphinx)
    lines.append(".. include:: note_sections.rst")
    lines.append("   :start-after: .. start-note-traffic-patterns")
    lines.append("   :end-before: .. end-note-traffic-patterns")
    lines.append("")
    sorted_models = sorted(model_groups.keys())
    for model in sorted_models:
        lines.append(f".. start-{model}")
        lines.append("")
        if model in MODEL_INFO:
            info = MODEL_INFO[model]
            title_text = f"`{info['display_name']} <{info['url']}>`_"
        else:
            title_text = model
        lines.append(f".. _{model}:")
        lines.append("")
        lines.append(title_text)
        lines.append("^" * len(title_text))
        lines.append("")
        lines.append(".. list-table::")
        lines.append("   :width: 100%")
        lines.append("   :header-rows: 1")
        lines.append("   :widths: 12 15 15 13 20 25")
        lines.append("")
        lines.append("   * - GPU")
        lines.append("     - Performance Profile")
        lines.append("     - ISL / OSL")
        lines.append("     - Concurrency")
        lines.append("     - Config")
        lines.append("     - Command")
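
        # Rows are grouped by (gpu, isl, osl) and ordered by concurrency, so
        # each group reads from the lowest-latency to the highest-throughput
        # recipe.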
        subgroups = model_groups[model]
        sorted_keys = sorted(
            subgroups.keys(), key=lambda k: (str(k[0]), int(k[1] or 0), int(k[2] or 0))
        )
        for key in sorted_keys:
            entries = subgroups[key]
            entries.sort(key=lambda x: x.concurrency)
            n = len(entries)
            for idx, entry in enumerate(entries):
                gpu = entry.gpu
                num_gpus = entry.num_gpus
                gpu_display = f"{num_gpus}x{gpu}" if num_gpus and num_gpus > 1 else gpu
                isl = entry.isl
                osl = entry.osl
                conc = entry.concurrency
                config_path = entry.config_path
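                # Label each entry by its position in the concurrency-sorted
                # group: a lone entry is classified by the absolute thresholds;
                # otherwise the extremes get "Min Latency" / "Max Throughput"
                # and the middle entries are graded between them.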
                if n == 1:
                    if conc <= LOW_LATENCY_CONCURRENCY_THRESHOLD:
                        profile = "Low Latency"
                    elif conc >= HIGH_THROUGHPUT_CONCURRENCY_THRESHOLD:
                        profile = "High Throughput"
                    else:
                        profile = "Balanced"
                elif idx == 0:
                    profile = "Min Latency"
                elif idx == n - 1:
                    profile = "Max Throughput"
                elif idx in ((n - 1) // 2, n // 2):
                    profile = "Balanced"
                elif idx < n // 2:
                    profile = "Low Latency"
                else:
                    profile = "High Throughput"
                full_config_path = config_path
                command = f"trtllm-serve {model} --extra_llm_api_options ${{TRTLLM_DIR}}/{full_config_path}"
                config_filename = os.path.basename(full_config_path)
                github_url = f"https://github.com/NVIDIA/TensorRT-LLM/blob/main/{full_config_path}"
                config_link = f"`{config_filename} <{github_url}>`_"
                lines.append(f"   * - {gpu_display}")
                lines.append(f"     - {profile}")
                lines.append(f"     - {isl} / {osl}")
                lines.append(f"     - {conc}")
                lines.append(f"     - {config_link}")
                lines.append(f"     - ``{command}``")
        lines.append("")
        lines.append(f".. end-{model}")
        lines.append("")
    output_text = "\n".join(lines)
    if output_file:
        with open(output_file, "w") as f:
            f.write(output_text)
        print(f"Generated table written to: {output_file}", file=sys.stderr)
    else:
        print(output_text)
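

# Entry point: regenerate the checked-in table under docs/ from the recipe
# database bundled with the repo.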
if __name__ == "__main__":
    yaml_path = DATABASE_LIST_PATH
    if not yaml_path.exists():
        print(f"Error: YAML file not found at {yaml_path}", file=sys.stderr)
        sys.exit(1)
    output_path = REPO_ROOT / "docs/source/deployment-guide/config_table.rst"
    generate_rst(yaml_path, output_file=output_path)