# TensorRT-LLM/tests/unittest/api_stability/references/llm.yaml

methods:
  __init__:
    parameters:
      # Parallelism
      gpus_per_node:
        annotation: Optional[int]
        default: null
      moe_cluster_parallel_size:
        annotation: Optional[int]
        default: null
      enable_attention_dp:
        annotation: bool
        default: False
      cp_config:
        annotation: Optional[dict]
        default: null
      # Stats
      iter_stats_max_iterations:
        annotation: Optional[int]
        default: null
      request_stats_max_iterations:
        annotation: Optional[int]
        default: null
      # Bindings and mirrored configs
      peft_cache_config:
        annotation: Optional[tensorrt_llm.llmapi.llm_args.PeftCacheConfig]
        default: null
      scheduler_config:
        annotation: tensorrt_llm.llmapi.llm_args.SchedulerConfig
        default: null
      cache_transceiver_config:
        annotation: Optional[tensorrt_llm.llmapi.llm_args.CacheTransceiverConfig]
        default: null
      batching_type:
        annotation: Optional[tensorrt_llm.llmapi.llm_args.BatchingType]
        default: null
      normalize_log_probs:
        annotation: bool
        default: False
      gather_generation_logits:
        annotation: bool
        default: False
      num_postprocess_workers:
        annotation: int
        default: 0
      postprocess_tokenizer_dir:
        annotation: Optional[str]
        default: null
      stream_interval:
        annotation: int
        default: 1
      # Reasoning
      reasoning_parser:
        annotation: Optional[str]
        default: null
      # Runtime behavior
      fail_fast_on_attention_window_too_large:
        annotation: bool
        default: False
      garbage_collection_gen0_threshold:
        annotation: int
        default: 20000
      # Misc
      backend:
        annotation: Optional[str]
        default: null
      build_config:
        annotation: Optional[tensorrt_llm.llmapi.llm_args.BuildConfig]
        default: null
      cuda_graph_config:
        annotation: Optional[tensorrt_llm.llmapi.llm_args.CudaGraphConfig]
        default: null
      checkpoint_loader:
        annotation: Optional[tensorrt_llm._torch.BaseCheckpointLoader]
        default: null
      checkpoint_format:
        annotation: Optional[str]
        default: null
      disable_overlap_scheduler:
        annotation: bool
        default: False
      moe_config:
        annotation: tensorrt_llm.llmapi.llm_args.MoeConfig
        default: null
      attn_backend:
        annotation: str
        default: TRTLLM
      enable_mixed_sampler:
        annotation: bool
        default: False
      enable_trtllm_sampler:
        annotation: bool
        default: False
      kv_cache_dtype:
        annotation: str
        default: auto
      enable_iter_perf_stats:
        annotation: bool
        default: False
      enable_iter_req_stats:
        annotation: bool
        default: False
      print_iter_log:
        annotation: bool
        default: False
      torch_compile_config:
        annotation: Optional[tensorrt_llm.llmapi.llm_args.TorchCompileConfig]
        default: null
      enable_autotuner:
        annotation: bool
        default: True
      enable_layerwise_nvtx_marker:
        annotation: bool
        default: False
      enable_min_latency:
        annotation: bool
        default: False
      force_dynamic_quantization:
        annotation: bool
        default: False
      allreduce_strategy:
        annotation: Optional[Literal['AUTO', 'NCCL', 'UB', 'MINLATENCY', 'ONESHOT', 'TWOSHOT', 'LOWPRECISION', 'MNNVL']]
        default: AUTO
    return_annotation: None
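# Illustrative construction sketch (not part of the API-stability reference).
# It shows how a few of the keyword arguments pinned above map onto LLM(); the
# `model` argument and the "pytorch" backend value are assumptions taken from
# the public examples, not from this file.
#
#   from tensorrt_llm import LLM
#
#   llm = LLM(
#       model="/path/to/checkpoint",   # assumed; `model` is not pinned in this file
#       backend="pytorch",             # Optional[str], default null above; value assumed
#       attn_backend="TRTLLM",         # str, default TRTLLM above
#       kv_cache_dtype="auto",         # str, default auto above
#       enable_attention_dp=False,     # bool, default False above
#   )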
  generate:
    parameters:
      disaggregated_params:
        annotation: Union[tensorrt_llm.disaggregated_params.DisaggregatedParams, Sequence[tensorrt_llm.disaggregated_params.DisaggregatedParams], NoneType]
        default: null
      kv_cache_retention_config:
        annotation: Union[tensorrt_llm.bindings.executor.KvCacheRetentionConfig, Sequence[tensorrt_llm.bindings.executor.KvCacheRetentionConfig], NoneType]
        default: null
    return_annotation: Union[tensorrt_llm.llmapi.llm.RequestOutput, List[tensorrt_llm.llmapi.llm.RequestOutput]]
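# Illustrative sketch of a blocking generate() call, reusing the llm instance
# from the construction sketch above. The prompt list, SamplingParams, and the
# .outputs[0].text access follow the public examples and are assumptions; only
# disaggregated_params and kv_cache_retention_config are pinned by this file.
#
#   from tensorrt_llm import SamplingParams
#
#   outputs = llm.generate(
#       ["Hello, my name is"],
#       sampling_params=SamplingParams(max_tokens=32),
#   )
#   for out in outputs:                # List[RequestOutput] per the annotation above
#       print(out.outputs[0].text)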
  generate_async:
    parameters:
      disaggregated_params:
        annotation: Optional[tensorrt_llm.disaggregated_params.DisaggregatedParams]
        default: null
      kv_cache_retention_config:
        annotation: Optional[tensorrt_llm.bindings.executor.KvCacheRetentionConfig]
        default: null
    return_annotation: tensorrt_llm.llmapi.llm.RequestOutput
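# Illustrative sketch of the non-blocking path. generate_async() returns a
# single RequestOutput (see return_annotation above); awaiting it inside an
# asyncio coroutine follows the public async examples and is an assumption,
# not something pinned by this file.
#
#   import asyncio
#
#   async def main() -> None:
#       output = await llm.generate_async("Hello, my name is")
#       print(output.outputs[0].text)
#
#   asyncio.run(main())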
  get_kv_cache_events:
    parameters:
      timeout:
        annotation: Optional[float]
        default: 2
    return_annotation: List[dict]
  get_kv_cache_events_async:
    parameters:
      timeout:
        annotation: Optional[float]
        default: 2
    return_annotation: tensorrt_llm.executor.result.IterationResult
  get_stats:
    parameters:
      timeout:
        annotation: Optional[float]
        default: 2
    return_annotation: List[dict]
  get_stats_async:
    parameters:
      timeout:
        annotation: Optional[float]
        default: 2
    return_annotation: tensorrt_llm.executor.result.IterationResult
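# Illustrative sketch of polling runtime statistics and KV-cache events after
# some requests have completed. The timeout keyword and the List[dict] return
# types come from the entries above; that the stats are only populated when
# enable_iter_perf_stats / enable_iter_req_stats are set at construction time
# is an assumption.
#
#   stats = llm.get_stats(timeout=2)                 # List[dict], one entry per iteration
#   events = llm.get_kv_cache_events(timeout=2)      # List[dict] of KV-cache events
#   for record in stats:
#       print(record)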
  shutdown:
    parameters: {}
    return_annotation: None
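# Illustrative lifecycle sketch. shutdown() takes no arguments and returns
# None (see above), so wrapping the generation calls in try/finally is enough
# to release engine resources deterministically; the prompt literal is arbitrary.
#
#   try:
#       llm.generate(["Hello"])
#   finally:
#       llm.shutdown()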
properties:
  llm_id:
    annotation: str
    default: inspect._empty
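# Illustrative sketch: llm_id is a str property on the instance (see above);
# printing it is shown only as an example.
#
#   print(llm.llm_id)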