fix deepseek multi gpu tests timeout (#3285)

Signed-off-by: junq <22017000+QiJune@users.noreply.github.com>
This commit is contained in:
QI JUN 2025-04-04 16:19:02 +08:00 committed by GitHub
parent d96c4e3379
commit 059a34468c
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@@ -15,8 +15,12 @@ l0_dgx_h100:
   tests:
   # ------------- PyTorch tests ---------------
   - unittest/_torch/multi_gpu
-  - unittest/_torch/multi_gpu_modeling -k "deepseek and not (tp1 and pp1) and nextn0"
-  - unittest/_torch/multi_gpu_modeling -k "deepseek and not (tp1 and pp1) and not nextn0"
+  - unittest/_torch/multi_gpu_modeling -k "deepseek and tp4_pp1_ep1_nextn0"
+  - unittest/_torch/multi_gpu_modeling -k "deepseek and tp4_pp1_ep1_nextn2"
+  - unittest/_torch/multi_gpu_modeling -k "deepseek and tp4_pp1_ep4_nextn0"
+  - unittest/_torch/multi_gpu_modeling -k "deepseek and tp4_pp1_ep4_nextn2"
+  - unittest/_torch/multi_gpu_modeling -k "deepseek and tp2_pp2_ep1_nextn0"
+  - unittest/_torch/multi_gpu_modeling -k "deepseek and tp2_pp2_ep1_nextn2"
   - unittest/_torch/multi_gpu_modeling -k "llama and not (tp1 and pp1)"
   - unittest/_torch/auto_deploy/unit/multigpu
   - disaggregated/test_disaggregated.py::test_disaggregated_multi_gpu_with_mpirun[TinyLlama-1.1B-Chat-v1.0]