TensorRT-LLM/tests/integration/defs/cpp/test_unit_tests.py

import os as _os

import defs.cpp.cpp_common as _cpp
import pytest


@pytest.mark.parametrize("build_google_tests", ["80", "86", "89", "90"],
                         indirect=True)
@pytest.mark.parametrize("test_group", [
    "batch_manager", "common", "executor", "kernels", "layers", "runtime",
    "thop", "utils"
])
def test_unit_tests(build_google_tests, test_group, build_dir, lora_setup):
    xml_name = f"results-unit-tests-{test_group}.xml"

    # Discover and run the actual gtests for the requested test group.
    ctest_command = [
        "ctest",
        "--output-on-failure",
        "--test-dir",
        f"{build_dir}/tests/unit_tests/{test_group}",
        "--output-junit",
        f"{build_dir}/{xml_name}",
    ]

    # Default ctest parallelism, overridable via LLM_TEST_PARALLEL_OVERRIDE.
    parallel = _cpp.default_test_parallel
    if parallel_override := _os.environ.get("LLM_TEST_PARALLEL_OVERRIDE", None):
        parallel = int(parallel_override)

    cpp_env = {**_os.environ}
    _cpp.parallel_run_ctest(ctest_command,
                            cwd=build_dir,
                            env=cpp_env,
                            timeout=2700,
                            parallel=parallel)
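
A note on the indirect parametrization used above: with indirect=True, pytest does not pass the values "80", "86", "89", "90" straight into the test function. Each value is routed to the build_google_tests fixture (defined in the suite's conftest) as request.param, so the fixture can prepare the gtest binaries for that SM architecture before the test body runs. The snippet below is a minimal, self-contained sketch of that pattern only; the fixture body is hypothetical and is not the repository's actual conftest implementation.

import pytest


@pytest.fixture
def build_google_tests(request):
    # With indirect=True, each parametrized value arrives here as
    # request.param before the test body executes.
    cuda_arch = request.param
    # A real fixture would build or locate the gtest binaries for this
    # architecture; this sketch simply passes the value through.
    return cuda_arch


@pytest.mark.parametrize("build_google_tests", ["90"], indirect=True)
def test_indirect_parametrization_example(build_google_tests):
    assert build_google_tests == "90"

For example, with test_group set to "kernels", test_unit_tests assembles roughly "ctest --output-on-failure --test-dir <build_dir>/tests/unit_tests/kernels --output-junit <build_dir>/results-unit-tests-kernels.xml", run with cpp_common.default_test_parallel workers unless LLM_TEST_PARALLEL_OVERRIDE is set in the environment.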