Mirror of https://github.com/NVIDIA/TensorRT-LLM.git (synced 2026-02-04 02:02:01 +08:00)
Signed-off-by: qqiao <qqiao@nvidia.com>
Signed-off-by: Yanchao Lu <yanchaol@nvidia.com>
Co-authored-by: Emma Qiao <qqiao@nvidia.com>
This commit is contained in:
parent 7bf4dd9f63
commit 0096b50ba0
@@ -11,8 +11,8 @@ This branch is a prototype and not stable for production use. PRs are not accepted.
 [](https://nvidia.github.io/TensorRT-LLM/)
-[](https://www.python.org/downloads/release/python-3123/)
+[](https://www.python.org/downloads/release/python-31012/)
-[](https://developer.nvidia.com/cuda-downloads)
-[](https://pytorch.org)
+[](https://developer.nvidia.com/cuda-downloads)
+[](https://pytorch.org)
 [](https://github.com/NVIDIA/TensorRT-LLM/blob/main/tensorrt_llm/version.py)
 [](https://github.com/NVIDIA/TensorRT-LLM/blob/main/LICENSE)
@@ -9,15 +9,15 @@
 Before the pre-built Python wheel can be installed via `pip`, a few
 prerequisites must be put into place:
 
-Install CUDA Toolkit 13.0 following the [CUDA Installation Guide for Linux](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/)
+Install CUDA Toolkit 13.1 following the [CUDA Installation Guide for Linux](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/)
 and make sure the `CUDA_HOME` environment variable is properly set.
 
-The `cuda-compat-13-0` package may be required depending on your system's NVIDIA GPU
+The `cuda-compat-13-1` package may be required depending on your system's NVIDIA GPU
 driver version. For additional information, refer to [CUDA Forward Compatibility](https://docs.nvidia.com/deploy/cuda-compatibility/forward-compatibility.html).
 
 ```bash
 # By default, the PyTorch CUDA 12.8 package is installed. Install the PyTorch CUDA 13.0 package to align with the CUDA version used for building TensorRT LLM wheels.
-pip3 install torch==2.9.0 torchvision --index-url https://download.pytorch.org/whl/cu130
+pip3 install torch==2.9.1 torchvision --index-url https://download.pytorch.org/whl/cu130
 
 sudo apt-get -y install libopenmpi-dev
 
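A quick way to confirm that the prerequisites above are in place before installing the wheel. This is a minimal sketch, assuming the default toolkit layout under `/usr/local/cuda`; adjust the path for your system:

```bash
# Sanity-check the CUDA Toolkit and PyTorch setup described above.
# Assumes the default install location /usr/local/cuda; adjust as needed.
export CUDA_HOME=${CUDA_HOME:-/usr/local/cuda}
"${CUDA_HOME}/bin/nvcc" --version                      # expect release 13.1
nvidia-smi --query-gpu=driver_version --format=csv,noheader
python3 -c "import torch; print(torch.__version__, torch.version.cuda)"  # expect 2.9.1 and a 13.x CUDA build
```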
@@ -39,6 +39,9 @@
 ```bash
 pip3 install --upgrade pip setuptools && pip3 install tensorrt_llm
 ```
 
+> **Note:** The TensorRT LLM wheel on PyPI is built with PyTorch 2.9.1. This version may be incompatible with the NVIDIA NGC PyTorch 25.12 container, which uses a more recent PyTorch build from the main branch. If you are using this container or a similar environment, please install the pre-built wheel located at `/app/tensorrt_llm` inside the TensorRT LLM NGC Release container instead.
+
 **This project will download and install additional third-party open source software projects. Review the license terms of these open source projects before use.**
 
 2. Sanity check the installation by running the following in Python (tested on Python 3.12):
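The sanity-check snippet referenced in step 2 is not part of this hunk. As a minimal stand-in, importing the package and printing its version confirms the wheel is usable; the documented check may exercise more of the API:

```bash
# Minimal import check; the full sanity check in the docs may do more.
python3 -c "import tensorrt_llm; print(tensorrt_llm.__version__)"
```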
@@ -154,9 +154,9 @@ The following table shows the supported software for TensorRT-LLM.
    * -
      - Software Compatibility
    * - Container
-     - [25.10](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html)
+     - [25.12](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html)
    * - TensorRT
-     - [10.13](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/index.html)
+     - [10.14](https://docs.nvidia.com/deeplearning/tensorrt/release-notes/index.html)
    * - Precision
      -
      - Blackwell (SM100/SM103/SM120) - FP32, FP16, BF16, FP8, FP4, INT8, INT4
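To compare a running environment against this support matrix, the installed component versions can be queried directly. A small sketch, assuming the `tensorrt` and `torch` Python packages are importable inside the container:

```bash
# Report installed versions for comparison with the support matrix.
python3 -c "import tensorrt; print('TensorRT', tensorrt.__version__)"   # matrix lists 10.14
python3 -c "import torch; print('PyTorch', torch.__version__)"
grep PRETTY_NAME /etc/os-release                                        # container OS
```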
@@ -454,7 +454,7 @@ def runLLMBuild(pipeline, buildFlags, tarName, is_linux_x86_64)
     def llmPath = sh (script: "realpath ${LLM_ROOT}", returnStdout: true).trim()
     // TODO: Remove after the cmake version is upgraded to 3.31.8
     // Get triton tag from docker/dockerfile.multi
-    def tritonShortTag = "r25.10"
+    def tritonShortTag = "r25.12"
     sh "cd ${LLM_ROOT}/triton_backend/inflight_batcher_llm && mkdir build && cd build && cmake .. -DTRTLLM_DIR=${llmPath} -DTRITON_COMMON_REPO_TAG=${tritonShortTag} -DTRITON_CORE_REPO_TAG=${tritonShortTag} -DTRITON_THIRD_PARTY_REPO_TAG=${tritonShortTag} -DTRITON_BACKEND_REPO_TAG=${tritonShortTag} -DUSE_CXX11_ABI=ON && make -j${buildJobs} install"
 
     // Step 3: packaging wheels into tarfile
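The Groovy comment above notes that the Triton tag should come from docker/dockerfile.multi rather than being hardcoded. A hedged shell sketch of that lookup; the ARG name `TRITON_IMAGE_TAG` and the tag format are assumptions, not taken from the repository:

```bash
# Hypothetical lookup of the Triton short tag from docker/dockerfile.multi,
# instead of hardcoding "r25.12". The ARG name TRITON_IMAGE_TAG is assumed.
tag=$(grep -m1 -oE 'TRITON_IMAGE_TAG=[0-9]{2}\.[0-9]{2}' docker/dockerfile.multi | cut -d= -f2)
tritonShortTag="r${tag}"
echo "${tritonShortTag}"   # e.g. r25.12
```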
@@ -1988,7 +1988,7 @@ def launchTestListCheck(pipeline)
         def llmPath = sh (script: "realpath .", returnStdout: true).trim()
         def llmSrc = "${llmPath}/TensorRT-LLM/src"
         trtllm_utils.llmExecStepWithRetry(pipeline, script: "pip3 install -r ${llmSrc}/requirements-dev.txt")
-        sh "NVIDIA_TRITON_SERVER_VERSION=25.10 LLM_ROOT=${llmSrc} LLM_BACKEND_ROOT=${llmSrc}/triton_backend python3 ${llmSrc}/scripts/check_test_list.py --l0 --qa --waive"
+        sh "NVIDIA_TRITON_SERVER_VERSION=25.12 LLM_ROOT=${llmSrc} LLM_BACKEND_ROOT=${llmSrc}/triton_backend python3 ${llmSrc}/scripts/check_test_list.py --l0 --qa --waive"
     } catch (InterruptedException e) {
         throw e
     } catch (Exception e) {
@@ -3431,7 +3431,9 @@ def launchTestJobs(pipeline, testFilter)
     ]
 
     aarch64SanityCheckConfigs = [
+        /* // Disable PY312-UB2404 temporarily due to the lack of an official PyTorch build for CUDA 13.1.
+        // Workaround for the PyTorch 2.9.1 vs. 2.10.0a0 incompatibility issue. Once resolved, change back to:
+        // 1. DLFW_IMAGE -> UBUNTU_24_04_IMAGE
+        // 2. Extra PyTorch CUDA install: false -> true
         "PY312-UB2404": [
             LLM_DOCKER_IMAGE,
             "GH200",
@@ -3440,7 +3442,7 @@ def launchTestJobs(pipeline, testFilter)
             "",
             DLFW_IMAGE,
             false, // Extra PyTorch CUDA 13.0 install
-        ],
+        ],*/
         "PY312-DLFW": [
             LLM_DOCKER_IMAGE,
             "GH200",
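The disabled config above works around a mismatch between the release PyTorch 2.9.1 wheel and the 2.10.0a0 pre-release build shipped in the DLFW image. A quick way to tell which build a given container carries; a sketch with no repo-specific assumptions:

```bash
# Distinguish a release PyTorch (e.g. 2.9.1) from an NGC pre-release
# build off main (e.g. 2.10.0a0+...), which this config is disabled for.
python3 - <<'EOF'
import torch
v = torch.__version__
kind = "pre-release" if ("a0" in v or "dev" in v) else "release"
print(f"PyTorch {v} ({kind} build)")
EOF
```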