Mirror of https://github.com/NVIDIA/TensorRT-LLM.git (synced 2026-01-14 06:27:45 +08:00)
chore: remove extra PYTHONPATH (#4453)
Signed-off-by: Aurelien Chartier <2567591+achartier@users.noreply.github.com>
parent e1b42be3d1
commit 1681e9fd1e
@@ -15,7 +15,6 @@ nvidia-smi
 pushd $LLM_BACKEND_ROOT
 source tools/utils.sh
 TRITON_REPO="triton_repo"
-export PYTHONPATH=$(pwd):$PYTHONPATH

 kill_triton_server () {
     pkill -9 -f trtllmExecutorWorker || true
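Note: the dropped line prepended the backend checkout to PYTHONPATH even when that directory was already importable, which is what makes the export "extra". The following minimal sketch (not part of this commit) shows how to check whether a directory is already covered by sys.path:

# Illustrative sketch, not part of this change: report whether a directory is
# already importable, i.e. whether prepending it to PYTHONPATH adds anything.
import os
import sys

def already_importable(directory: str) -> bool:
    target = os.path.realpath(directory)
    return any(os.path.realpath(entry or ".") == target for entry in sys.path)

if __name__ == "__main__":
    # If this prints True, an extra `export PYTHONPATH=$(pwd):$PYTHONPATH`
    # would only duplicate an existing entry.
    print(already_importable(os.getcwd()))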
@@ -487,8 +487,7 @@ def test_triton_extensive(tritonserver_test_root, test_name, llm_root):
     backend_path = os.path.join(llm_root, "triton_backend")
     run_shell_command(
         f"cd {backend_path}/ci/L0_backend_trtllm && "
-        f"PYTHONPATH={backend_path}:$PYTHONPATH BACKEND_ROOT={backend_path} bash -ex test.sh",
-        llm_root)
+        f"BACKEND_ROOT={backend_path} bash -ex test.sh", llm_root)


 @pytest.mark.parametrize("test_name", ["llmapi-unit-tests"], indirect=True)
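Note: the run_shell_command helper itself is not shown in this diff. As a hedged illustration of what the updated call does, the sketch below assumes a plain subprocess-based equivalent, where the inline BACKEND_ROOT=... prefix sets the variable only for test.sh:

# Illustrative sketch only; the repository's actual run_shell_command helper
# is not shown in this diff and may differ.
import subprocess

def run_shell_command_sketch(command: str, cwd: str) -> None:
    # shell=True lets the inline `BACKEND_ROOT=...` prefix set the variable
    # for test.sh without exporting it into the caller's environment.
    subprocess.run(command, shell=True, cwd=cwd, check=True)

# Hypothetical usage mirroring the updated call site:
# run_shell_command_sketch(
#     f"cd {backend_path}/ci/L0_backend_trtllm && "
#     f"BACKEND_ROOT={backend_path} bash -ex test.sh", llm_root)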
@@ -35,7 +35,6 @@ from contextlib import asynccontextmanager
 import numpy as np
 import triton_python_backend_utils as pb_utils
 import yaml
-# PYTHONPATH needs to include the root directory of all_models to import all_models.helper.helpers properly
 from helpers import (get_input_tensor_by_name, get_output_config_from_request,
                      get_sampling_params_from_request,
                      get_streaming_from_request)
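Note: the removed comment tied `from helpers import ...` to an externally exported PYTHONPATH. This diff does not show which mechanism now makes helpers importable; one common pattern, given purely as an illustrative sketch, is to put the module's own directory on sys.path at runtime:

# Illustrative sketch only; not taken from this repository. It assumes a
# sibling helpers.py next to the importing module.
import os
import sys

_here = os.path.dirname(os.path.abspath(__file__))
if _here not in sys.path:
    sys.path.insert(0, _here)

# With the module's own directory on sys.path, the import no longer depends on
# an external PYTHONPATH export:
# from helpers import get_input_tensor_by_name, get_sampling_params_from_request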