Subject: [PATCH] [https://nvbugspro.nvidia.com/bug/5270564][test] skip pre-hopper for llama4 (#4211) skip pre-hopper for llama4