From c713eb57991929689ab785a6bc492f6b38eb4029 Mon Sep 17 00:00:00 2001
From: Venky <23023424+venkywonka@users.noreply.github.com>
Date: Thu, 22 May 2025 13:07:33 -0700
Subject: [PATCH] test(perf): Add `Llama-3_1-Nemotron-Ultra-253B-v1` perf
 tests (cpp) (#4446)

ultra

Signed-off-by: Venky Ganesh <23023424+venkywonka@users.noreply.github.com>
---
 tests/integration/defs/perf/test_perf.py        | 4 ++++
 .../test_lists/qa/trt_llm_release_perf_test.yml | 7 +++++++
 2 files changed, 11 insertions(+)

diff --git a/tests/integration/defs/perf/test_perf.py b/tests/integration/defs/perf/test_perf.py
index 5258f923f5..c155c13c7e 100644
--- a/tests/integration/defs/perf/test_perf.py
+++ b/tests/integration/defs/perf/test_perf.py
@@ -58,6 +58,8 @@ MODEL_PATH_DICT = {
     "llama_v3.1_nemotron_nano_8b": "Llama-3.1-Nemotron-Nano-8B-v1",
     "llama_v3.3_nemotron_super_49b":
     "nemotron-nas/Llama-3_3-Nemotron-Super-49B-v1",
+    "llama_v3.1_nemotron_ultra_253b":
+    "nemotron-nas/Llama-3_1-Nemotron-Ultra-253B-v1",
     # "llama_30b": "llama-models/llama-30b-hf",
     "mixtral_8x7b_v0.1": "Mixtral-8x7B-v0.1",
     "mixtral_8x7b_v0.1_instruct": "Mixtral-8x7B-Instruct-v0.1",
@@ -106,6 +108,8 @@ HF_MODEL_PATH = {
     "llama_v3.1_nemotron_nano_8b_hf": "nvidia/Llama-3.1-Nemotron-Nano-8B-v1",
     "llama_v3.3_nemotron_super_49b_hf":
     "nvidia/Llama-3_3-Nemotron-Super-49B-v1",
+    "llama_v3.1_nemotron_ultra_253b_hf":
+    "nvidia/Llama-3_1-Nemotron-Ultra-253B-v1",
     "mixtral_8x7b_v0.1_hf": "mistralai/Mixtral-8x7B-v0.1",
     "mixtral_8x7b_v0.1_instruct_hf": "mistralai/Mixtral-8x7B-Instruct-v0.1",
     "mistral_7b_v0.1_hf": "mistralai/Mistral-7B-v0.1",
diff --git a/tests/integration/test_lists/qa/trt_llm_release_perf_test.yml b/tests/integration/test_lists/qa/trt_llm_release_perf_test.yml
index f13e5907df..ff85799973 100644
--- a/tests/integration/test_lists/qa/trt_llm_release_perf_test.yml
+++ b/tests/integration/test_lists/qa/trt_llm_release_perf_test.yml
@@ -180,6 +180,13 @@ trt_llm_release_perf_test:
   - perf/test_perf.py::test_perf[llama_v3.3_70b_instruct_fp8-bench-pytorch-float8-input_output_len:128,128-gpus:8]
   - perf/test_perf.py::test_perf[gpt_20b-bench-float16-maxbs:8-input_output_len:128,128-reqs:80-gpus:8]
   - perf/test_perf.py::test_perf[gpt_20b-bench-float16-maxbs:8-input_output_len:512,32-reqs:80-gpus:8]
+  # Llama-3_1-Nemotron-Ultra-253B-v1
+  # all cpp backend, bf16->fp8 post-quantized
+  - perf/test_perf.py::test_perf[llama_v3.1_nemotron_ultra_253b-bench-bfloat16-maxbs:64-input_output_len:5000,500-quant:fp8-reqs:8-con:1-tp:8-gpus:8]
+  - perf/test_perf.py::test_perf[llama_v3.1_nemotron_ultra_253b-bench-bfloat16-maxbs:64-input_output_len:500,2000-quant:fp8-reqs:8-con:1-tp:8-gpus:8]
+  - perf/test_perf.py::test_perf[llama_v3.1_nemotron_ultra_253b-bench-bfloat16-maxbs:64-input_output_len:5000,500-quant:fp8-reqs:250-con:250-tp:8-gpus:8]
+  - perf/test_perf.py::test_perf[llama_v3.1_nemotron_ultra_253b-bench-bfloat16-maxbs:64-input_output_len:500,2000-quant:fp8-reqs:250-con:250-tp:8-gpus:8]
+
 - condition:
     ranges:
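
Note for reviewers: each new YAML entry encodes all of its knobs in the pytest
parameter name. As an illustration of that grammar only, the sketch below
decodes one of the new cpp-backend names into its fields. The helper
decode_perf_test_name is hypothetical and written for this note; the project's
actual parsing lives in tests/integration/defs/perf/test_perf.py.

def decode_perf_test_name(name: str) -> dict:
    """Decode a perf test name of the form
    '<model>-bench-<dtype>-<key>:<value>-...' into a config dict.

    Hypothetical helper for illustration only; it covers the cpp-backend
    names added in this patch, not every name in the test list.
    """
    parts = name.split("-")
    config = {"model": parts[0], "harness": parts[1], "dtype": parts[2]}
    for knob in parts[3:]:
        key, _, value = knob.partition(":")
        config[key] = value
    return config


if __name__ == "__main__":
    name = ("llama_v3.1_nemotron_ultra_253b-bench-bfloat16-maxbs:64-"
            "input_output_len:5000,500-quant:fp8-reqs:8-con:1-tp:8-gpus:8")
    print(decode_perf_test_name(name))
    # -> {'model': 'llama_v3.1_nemotron_ultra_253b', 'harness': 'bench',
    #     'dtype': 'bfloat16', 'maxbs': '64',
    #     'input_output_len': '5000,500', 'quant': 'fp8',
    #     'reqs': '8', 'con': '1', 'tp': '8', 'gpus': '8'}

Read this way, the four entries sweep two load points (reqs:8 at con:1, i.e.
single-stream, and reqs:250 at con:250, i.e. high concurrency) across a
long-input shape (5000,500) and a long-output shape (500,2000), each on the
bf16->fp8 post-quantized checkpoint with tp:8 across 8 GPUs.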