From ee588a73ac273e38e8b034167343d9ab3e00f671 Mon Sep 17 00:00:00 2001
From: chinamaoge <32806476+chinamaoge@users.noreply.github.com>
Date: Thu, 16 Oct 2025 13:37:43 +0800
Subject: [PATCH] [None][fix] Fix the error where checkpoint_dir is assigned
 as NONE wh… (#8401)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: maoge
Co-authored-by: maoge
---
 tensorrt_llm/_torch/model_config.py | 25 ++++++++++++++++---------
 1 file changed, 16 insertions(+), 9 deletions(-)

diff --git a/tensorrt_llm/_torch/model_config.py b/tensorrt_llm/_torch/model_config.py
index aede5b94c2..9590000980 100644
--- a/tensorrt_llm/_torch/model_config.py
+++ b/tensorrt_llm/_torch/model_config.py
@@ -414,16 +414,23 @@ class ModelConfig(Generic[TConfig]):
         # Use file lock to prevent race conditions when multiple processes
         # try to import/cache the same remote model config file
         with config_file_lock():
-            pretrained_config = transformers.AutoConfig.from_pretrained(
-                checkpoint_dir,
-                trust_remote_code=trust_remote_code,
-            )
+            # When model_format is TLLM_ENGINE, checkpoint_dir may be None;
+            # guard here to avoid repeated requests to an invalid (None) URL.
+            if checkpoint_dir is not None:
+                pretrained_config = transformers.AutoConfig.from_pretrained(
+                    checkpoint_dir,
+                    trust_remote_code=trust_remote_code,
+                )
 
-            # Find the cache path by looking for the config.json file which should be in all
-            # huggingface models
-            model_dir = Path(
-                transformers.utils.hub.cached_file(checkpoint_dir,
-                                                   'config.json')).parent
+                # Find the cache path by looking for the config.json file,
+                # which should exist in all Hugging Face models.
+                model_dir = Path(
+                    transformers.utils.hub.cached_file(checkpoint_dir,
+                                                       'config.json')).parent
+            else:
+                raise ValueError(
+                    "checkpoint_dir is None. Cannot load model config without a valid checkpoint directory."
+                )
 
         quant_config = QuantConfig()
         layer_quant_config = None
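
For illustration, a minimal, self-contained sketch of the guarded loading path
this hunk produces. The standalone function name `load_pretrained_config` is
hypothetical; in the tree this logic lives inside `ModelConfig` and runs under
`config_file_lock()`.

    from pathlib import Path

    import transformers


    def load_pretrained_config(checkpoint_dir, trust_remote_code=False):
        # Guard first: with a TLLM_ENGINE checkpoint there is no Hugging Face
        # checkpoint directory, and passing None onward would trigger
        # repeated requests to an invalid URL.
        if checkpoint_dir is None:
            raise ValueError(
                "checkpoint_dir is None. Cannot load model config without a "
                "valid checkpoint directory.")

        pretrained_config = transformers.AutoConfig.from_pretrained(
            checkpoint_dir, trust_remote_code=trust_remote_code)

        # config.json exists in every Hugging Face model, so the cached
        # file's parent directory is the local snapshot directory.
        model_dir = Path(
            transformers.utils.hub.cached_file(checkpoint_dir,
                                               'config.json')).parent
        return pretrained_config, model_dir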