diff --git a/tests/integration/defs/accuracy/references/mmmu.yaml b/tests/integration/defs/accuracy/references/mmmu.yaml
index 9cc3753656..0eb3934abd 100644
--- a/tests/integration/defs/accuracy/references/mmmu.yaml
+++ b/tests/integration/defs/accuracy/references/mmmu.yaml
@@ -4,7 +4,7 @@ Qwen/Qwen2.5-VL-7B-Instruct:
   - accuracy: 51.22
 nvidia/Nano-v2-VLM:
   - accuracy: 43.78
-llava-hf/llava-v1.6-mistral-7b:
+llava-hf/llava-v1.6-mistral-7b-hf:
   - accuracy: 35.33
 Efficient-Large-Model/NVILA-8B:
   - accuracy: 47.77
diff --git a/tests/integration/defs/accuracy/test_llm_api_pytorch_multimodal.py b/tests/integration/defs/accuracy/test_llm_api_pytorch_multimodal.py
index 4dbc7db31d..0b9ad7ac55 100644
--- a/tests/integration/defs/accuracy/test_llm_api_pytorch_multimodal.py
+++ b/tests/integration/defs/accuracy/test_llm_api_pytorch_multimodal.py
@@ -80,8 +80,8 @@ class TestNano_V2_VLM(LlmapiAccuracyTestHarness):


 class TestLlava_V1_6_Mistral_7B(LlmapiAccuracyTestHarness):
-    MODEL_NAME = "llava-hf/llava-v1.6-mistral-7b"
-    MODEL_PATH = f"{llm_models_root()}/llava-v1.6-mistral-7b"
+    MODEL_NAME = "llava-hf/llava-v1.6-mistral-7b-hf"
+    MODEL_PATH = f"{llm_models_root()}/llava-v1.6-mistral-7b-hf"
     MAX_NUM_TOKENS = 16384

     # NOTE: MMMU adds <|endoftext|> to the stop token.
diff --git a/tests/integration/defs/test_e2e.py b/tests/integration/defs/test_e2e.py
index 81267671de..09d0f40ccc 100644
--- a/tests/integration/defs/test_e2e.py
+++ b/tests/integration/defs/test_e2e.py
@@ -2658,7 +2658,7 @@ def test_ptp_quickstart_multimodal_kv_cache_reuse(llm_root, llm_venv,
                 ],
             ] * num_same_requests,
         },
-        "phi4-multimodal-instruct-fp8": {
+        "phi4-multimodal-instruct": {
             "image": [
                 [
                     "image", "depicts", "natural", "environment", "ocean",