Mirror of https://github.com/NVIDIA/TensorRT-LLM.git (synced 2026-01-13 22:18:36 +08:00)

commit 5494710593
parent 0777969c17

    init
@@ -412,7 +412,7 @@ class RayExecutor(RpcExecutorMixin, GenerationExecutor):
             else:
                 raise ValueError(f"No global placement group is found.")
 
-        def _get_from_default(tp_size):
+        def _get_default(tp_size):
             head_tag = f"node:{self.master_address}"
             nodes = ray.nodes()
             gpus_per_node = int(nodes[0]["Resources"].get(
@@ -453,7 +453,7 @@ class RayExecutor(RpcExecutorMixin, GenerationExecutor):
         if bundle_indices := os.getenv("TRTLLM_RAY_BUNDLE_INDICES", None):
            return _get_from_env(bundle_indices)
         # path 2
-        return _get_from_default(tp_size)
+        return _get_default(tp_size)
 
     @property
     def enable_postprocess_parallel(self) -> bool:
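
For context, here is a minimal, self-contained sketch of the two-path bundle-index resolution that the second hunk touches: path 1 reads explicit indices from the TRTLLM_RAY_BUNDLE_INDICES environment variable via _get_from_env, and path 2 falls back to the helper this commit renames from _get_from_default to _get_default. The comma-separated env-var format, the "first tp_size bundles" default, and the wrapper resolve_bundle_indices are assumptions for illustration only; the real RayExecutor method also inspects ray.nodes(), the head-node tag, and per-node GPU counts, which are omitted here.

```python
import os


def _get_from_env(bundle_indices: str) -> list[int]:
    # Path 1: explicit override; assumed to be comma-separated, e.g. "0,1,2,3".
    return [int(i) for i in bundle_indices.split(",")]


def _get_default(tp_size: int) -> list[int]:
    # Path 2: assumed default of the first tp_size bundles in the placement group
    # (the actual implementation derives this from the Ray cluster layout).
    return list(range(tp_size))


def resolve_bundle_indices(tp_size: int) -> list[int]:
    # Hypothetical wrapper mirroring the control flow shown in the diff above.
    if bundle_indices := os.getenv("TRTLLM_RAY_BUNDLE_INDICES", None):
        return _get_from_env(bundle_indices)
    # path 2
    return _get_default(tp_size)


if __name__ == "__main__":
    print(resolve_bundle_indices(4))                      # [0, 1, 2, 3]
    os.environ["TRTLLM_RAY_BUNDLE_INDICES"] = "4,5,6,7"
    print(resolve_bundle_indices(4))                      # [4, 5, 6, 7]
```

Under these assumptions, the rename is purely cosmetic: both paths still return a list of placement-group bundle indices, and the environment variable continues to take precedence over the tp_size-derived default.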