Revert "[infra] Unwaive unittests/_torch" (#4950)

Author: QI JUN (committed by GitHub)
Date: 2025-06-05 17:21:07 +08:00
Parent: 743fb0a159
Commit: d5a8079eb6
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in database)
2 changed files with 1 addition and 7 deletions


@@ -380,14 +380,9 @@ full:B200/examples/test_qwen.py::test_llm_qwen_7b_multi_gpus_summary[qwen2.5_7b_
 accuracy/test_cli_flow.py::TestMixtral8x22B::test_int8_plugin_tp8[renormalize-tensor_parallel] SKIP (https://nvbugs/5273695)
 examples/test_gpt.py::test_starcoder_fp8_quantization_2gpu[starcoder] SKIP (https://nvbugs/5144931)
 examples/test_gpt.py::test_starcoder_fp8_quantization_2gpu[starcoderplus] SKIP (https://nvbugs/5144931)
+unittest/_torch -k "not (modeling or multi_gpu or auto_deploy)" SKIP (https://nvbugs/5280806)
 examples/test_whisper.py::test_llm_whisper_general[large-v3-disable_gemm_plugin-disable_attention_plugin-disable_weight_only-float16-nb:1-use_python_runtime] SKIP (https://nvbugs/5244570)
 unittest/_torch/speculative/test_eagle3.py SKIP (https://nvbugs/5280806)
-unittest/_torch/modules/test_fused_moe.py SKIP (https://nvbugspro.nvidia.com/bug/5324229)
-unittest/_torch/modules/test_moe_load_balancer.py SKIP (https://nvbugspro.nvidia.com/bug/5324229)
-unittest/_torch/speculative/test_ngram.py SKIP (https://nvbugspro.nvidia.com/bug/5324239)
-unittest/_torch/test_pytorch_model_engine.py SKIP (https://nvbugspro.nvidia.com/bug/5324248)
-unittest/_torch/test_resource_manager.py SKIP (https://nvbugspro.nvidia.com/bug/5324252)
-unittest/_torch/thop/test_selective_scan_op.py SKIP (https://nvbugspro.nvidia.com/bug/5324258)
 triton_server/test_triton_rcca.py::test_mistral_beam_search[rcca_4714407-True-10---False-True-False-0-128-disableDecoupleMode-inflight_fused_batching-disableTrtOverlap--guaranteed_no_evict---1-1-1-False-ensemble] SKIP (https://nvbugs/5240060)
 triton_server/test_triton.py::test_triton_extensive[triton-extensive] SKIP
 triton_server/test_triton.py::test_gpt_speculative_decoding[gpt-speculative-decoding] SKIP
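
The re-added blanket waiver relies on pytest's -k keyword expression: it skips everything collected under unittest/_torch except tests whose IDs mention "modeling", "multi_gpu", or "auto_deploy". A minimal sketch of that selection logic, assuming plain substring matching as an approximation of pytest's keyword matcher (matches_blanket_waiver and the example test IDs are illustrative only, not part of the waive machinery):

def matches_blanket_waiver(test_id: str) -> bool:
    """True if test_id falls under the re-added unittest/_torch waiver."""
    excluded = ("modeling", "multi_gpu", "auto_deploy")
    # The waiver targets the unittest/_torch tree only.
    if not test_id.startswith("unittest/_torch"):
        return False
    # -k "not (modeling or multi_gpu or auto_deploy)" keeps tests whose
    # IDs contain none of the excluded keywords; those are the ones waived.
    return not any(keyword in test_id for keyword in excluded)

# Waived: no excluded keyword appears in the test ID.
assert matches_blanket_waiver("unittest/_torch/test_attention.py::test_fwd")
# Not waived: "modeling" matches, so this test still runs despite the waiver.
assert not matches_blanket_waiver("unittest/_torch/modeling/test_llama.py::test_fwd")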


@@ -17,7 +17,6 @@ from utils.llm_data import llm_models_root
 [[True, "TRTLLM"], [False, "TRTLLM"],
  [True, "FLASHINFER"], [False, "FLASHINFER"]])
 def test_llama_eagle3(use_cuda_graph: bool, attn_backend: str):
-    pytest.skip("Test is hanging")
     total_mem_gb = torch.cuda.get_device_properties(0).total_memory / 1e9
     if total_mem_gb < 35:
         pytest.skip("Not enough memory to load target + draft model")