[TRTLLM-10561][infra] Fix jaraco-context and wheel vulnerability (#10901)

Signed-off-by: Yiqing Yan <yiqingy@nvidia.com>
This commit is contained in:
Yiqing Yan 2026-02-03 09:54:11 +08:00 committed by GitHub
parent 897eb0df2b
commit 13420178fc
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
6 changed files with 28 additions and 13 deletions

View File

@@ -1,4 +1,6 @@
# These vulnerabilities were inherited from the base image (pytorch:25.10-py3) and should be removed when the base image
# These vulnerabilities were inherited from the base image (pytorch:25.12-py3) and should be removed when the base image
# is updated.
# WAR against https://github.com/advisories/GHSA-38jv-5279-wg99
urllib3>=2.6.3
# WAR against https://github.com/advisories/GHSA-8rrh-rw8j-w5fx
wheel>=0.46.2

View File

@@ -75,7 +75,7 @@ RUN GITHUB_MIRROR=${GITHUB_MIRROR} \
COPY constraints.txt /tmp/constraints.txt
RUN pip3 install --no-cache-dir -r /tmp/constraints.txt && rm /tmp/constraints.txt
# Remove nbconvert to avoid the nbconvert vulnerability issue in the base NGC PyTorch image.
# Remove nbconvert to avoid https://github.com/advisories/GHSA-xm59-rqc7-hhvf in the base NGC PyTorch image.
RUN pip3 uninstall -y nbconvert || true
# Install UCX, NIXL, etcd
@@ -85,7 +85,13 @@ RUN GITHUB_MIRROR=${GITHUB_MIRROR} bash ./install_ucx.sh && \
bash ./install_etcd.sh && \
rm install_ucx.sh && \
rm install_nixl.sh && \
rm install_etcd.sh
rm install_etcd.sh && \
rm -rf /root/.cache/pip && \
rm -rf /root/.cache/uv/archive-v0 && \
# WAR against https://github.com/advisories/GHSA-58pv-8j8x-9vj2
rm -rf /usr/local/lib/python3.12/dist-packages/setuptools/_vendor/jaraco.context-5.3.0.dist-info && \
# WAR against https://github.com/advisories/GHSA-8rrh-rw8j-w5fx
rm -rf /usr/local/lib/python3.12/dist-packages/setuptools/_vendor/wheel-0.45.1.dist-info
# Generate OSS attribution file for devel image
ARG TRT_LLM_VER
@@ -178,7 +184,12 @@ RUN chmod -R a+w examples && \
benchmarks/cpp/gptManagerBenchmark.cpp \
benchmarks/cpp/disaggServerBenchmark.cpp \
benchmarks/cpp/CMakeLists.txt && \
rm -rf /root/.cache/pip
rm -rf /root/.cache/pip && \
rm -rf /root/.cache/uv/archive-v0 && \
# WAR against https://github.com/advisories/GHSA-58pv-8j8x-9vj2
rm -rf /usr/local/lib/python3.12/dist-packages/setuptools/_vendor/jaraco.context-5.3.0.dist-info && \
# WAR against https://github.com/advisories/GHSA-8rrh-rw8j-w5fx
rm -rf /usr/local/lib/python3.12/dist-packages/setuptools/_vendor/wheel-0.45.1.dist-info
ARG GIT_COMMIT
ARG TRT_LLM_VER

View File

@@ -37,7 +37,7 @@
Once all prerequisites are in place, TensorRT LLM can be installed as follows:
```bash
pip3 install --upgrade pip setuptools && pip3 install tensorrt_llm
pip3 install --ignore-installed pip setuptools wheel && pip3 install tensorrt_llm
```
> **Note:** The TensorRT LLM wheel on PyPI is built with PyTorch 2.9.1. This version may be incompatible with the NVIDIA NGC PyTorch 25.12 container, which uses a more recent PyTorch build from the main branch. If you are using this container or a similar environment, please install the pre-built wheel located at `/app/tensorrt_llm` inside the TensorRT LLM NGC Release container instead.
@@ -75,5 +75,5 @@ There are some known limitations when you pip install pre-built TensorRT LLM whe
```bash
CURRENT_TORCH_VERSION=$(python3 -c "import torch; print(torch.__version__)")
echo "torch==$CURRENT_TORCH_VERSION" > /tmp/torch-constraint.txt
pip3 install --upgrade pip setuptools && pip3 install tensorrt_llm -c /tmp/torch-constraint.txt
pip3 install --ignore-installed pip setuptools wheel && pip3 install tensorrt_llm -c /tmp/torch-constraint.txt
```

View File

@@ -13,7 +13,7 @@
# images are adopted from PostMerge pipelines, the abbreviated commit hash is used instead.
IMAGE_NAME=urm.nvidia.com/sw-tensorrt-docker/tensorrt-llm
LLM_DOCKER_IMAGE=urm.nvidia.com/sw-tensorrt-docker/tensorrt-llm:pytorch-25.12-py3-x86_64-ubuntu24.04-trt10.14.1.48-skip-tritondevel-202601281024-10117
LLM_SBSA_DOCKER_IMAGE=urm.nvidia.com/sw-tensorrt-docker/tensorrt-llm:pytorch-25.12-py3-aarch64-ubuntu24.04-trt10.14.1.48-skip-tritondevel-202601281024-10117
LLM_ROCKYLINUX8_PY310_DOCKER_IMAGE=urm.nvidia.com/sw-tensorrt-docker/tensorrt-llm:cuda-13.1.0-devel-rocky8-x86_64-rocky8-py310-trt10.14.1.48-skip-tritondevel-202601281024-10117
LLM_ROCKYLINUX8_PY312_DOCKER_IMAGE=urm.nvidia.com/sw-tensorrt-docker/tensorrt-llm:cuda-13.1.0-devel-rocky8-x86_64-rocky8-py312-trt10.14.1.48-skip-tritondevel-202601281024-10117
LLM_DOCKER_IMAGE=urm.nvidia.com/sw-tensorrt-docker/tensorrt-llm:pytorch-25.12-py3-x86_64-ubuntu24.04-trt10.14.1.48-skip-tritondevel-202602011118-10901
LLM_SBSA_DOCKER_IMAGE=urm.nvidia.com/sw-tensorrt-docker/tensorrt-llm:pytorch-25.12-py3-aarch64-ubuntu24.04-trt10.14.1.48-skip-tritondevel-202602011118-10901
LLM_ROCKYLINUX8_PY310_DOCKER_IMAGE=urm.nvidia.com/sw-tensorrt-docker/tensorrt-llm:cuda-13.1.0-devel-rocky8-x86_64-rocky8-py310-trt10.14.1.48-skip-tritondevel-202602011118-10901
LLM_ROCKYLINUX8_PY312_DOCKER_IMAGE=urm.nvidia.com/sw-tensorrt-docker/tensorrt-llm:cuda-13.1.0-devel-rocky8-x86_64-rocky8-py312-trt10.14.1.48-skip-tritondevel-202602011118-10901

View File

@@ -37,7 +37,6 @@ pydantic>=2.9.1
pydantic-settings[yaml]
omegaconf
pillow
wheel<=0.45.1
optimum
# evaluate needs datasets>=2.0.0 which triggers datasets>3.1.0 which is not stable: https://github.com/huggingface/datasets/issues/7467
datasets==3.1.0

View File

@@ -183,8 +183,11 @@ def test_pip_install():
subprocess.check_call("apt-get -y install libzmq3-dev", shell=True)
subprocess.check_call("apt-get -y install python3-pip", shell=True)
subprocess.check_call("pip3 install --upgrade pip || true", shell=True)
subprocess.check_call("pip3 install --upgrade setuptools || true",
subprocess.check_call("pip3 install --ignore-installed pip || true",
shell=True)
subprocess.check_call("pip3 install --ignore-installed setuptools || true",
shell=True)
subprocess.check_call("pip3 install --ignore-installed wheel || true",
shell=True)
download_wheel(args)