From e30e0c86932accf1d5689275ab4900daf18c16f9 Mon Sep 17 00:00:00 2001
From: Xiwen Yu <13230610+VALLIS-NERIA@users.noreply.github.com>
Date: Mon, 8 Sep 2025 21:02:35 +0800
Subject: [PATCH] waive

Signed-off-by: Xiwen Yu <13230610+VALLIS-NERIA@users.noreply.github.com>
---
 .../unit/singlegpu/test_ad_build_small_single.py | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/tests/unittest/_torch/auto_deploy/unit/singlegpu/test_ad_build_small_single.py b/tests/unittest/_torch/auto_deploy/unit/singlegpu/test_ad_build_small_single.py
index 3a226eee42..50b07acdbc 100644
--- a/tests/unittest/_torch/auto_deploy/unit/singlegpu/test_ad_build_small_single.py
+++ b/tests/unittest/_torch/auto_deploy/unit/singlegpu/test_ad_build_small_single.py
@@ -64,11 +64,12 @@ def _check_ad_config(experiment_config: ExperimentConfig, llm_args: LlmArgs):
             attn_backend="triton",
             compile_backend="torch-simple",
         ),
-        get_small_model_config(
-            "meta-llama/Llama-4-Scout-17B-16E-Instruct",
-            attn_backend="flashinfer",
-            compile_backend="torch-simple",
-        ),
+        # disabled due to https://nvbugspro.nvidia.com/bug/5505835
+        # get_small_model_config(
+        #     "meta-llama/Llama-4-Scout-17B-16E-Instruct",
+        #     attn_backend="flashinfer",
+        #     compile_backend="torch-simple",
+        # ),
         get_small_model_config(
             "deepseek-ai/DeepSeek-V3",
             attn_backend="triton",
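
Note (not part of the patch): a minimal, hypothetical sketch of an alternative way to waive a single parametrized configuration, using pytest.param with a skip marker so the disabled entry still shows up as "skipped" in test reports instead of being commented out. The CONFIGS list, the placeholder config dicts, the ids, and the test body below are illustrative assumptions only; the real file builds its parameter list from get_small_model_config entries as shown in the hunk above.

import pytest

# Hypothetical parameter list; the real test file uses get_small_model_config(...)
# entries rather than plain dicts.
CONFIGS = [
    pytest.param(
        {"model": "deepseek-ai/DeepSeek-V3", "attn_backend": "triton"},
        id="deepseek-v3",
    ),
    pytest.param(
        {"model": "meta-llama/Llama-4-Scout-17B-16E-Instruct", "attn_backend": "flashinfer"},
        id="llama-4-scout",
        # Waive this config but keep it visible in reports, with the bug as the reason.
        marks=pytest.mark.skip(reason="https://nvbugspro.nvidia.com/bug/5505835"),
    ),
]

@pytest.mark.parametrize("experiment_config", CONFIGS)
def test_build_ad(experiment_config):
    # Placeholder body for illustration; the actual test builds the small model
    # through AutoDeploy and checks the resulting LlmArgs.
    assert "model" in experiment_config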