TensorRT-LLMs/tensorrt_llm/llmapi/__init__.py

from ..disaggregated_params import DisaggregatedParams
from ..executor import CompletionOutput, RequestError
from ..sampling_params import GuidedDecodingParams, SamplingParams
from .build_cache import BuildCacheConfig
from .llm import LLM, RequestOutput
# yapf: disable
from .llm_args import (BatchingType, CacheTransceiverConfig, CalibConfig,
                       CapacitySchedulerPolicy, ContextChunkingPolicy,
                       DraftTargetDecodingConfig, DynamicBatchConfig,
                       EagleDecodingConfig, ExtendedRuntimePerfKnobConfig,
                       KvCacheConfig, LlmArgs, LookaheadDecodingConfig,
                       MedusaDecodingConfig, MTPDecodingConfig,
                       NGramDecodingConfig, SchedulerConfig, TorchCompileConfig,
                       TorchLlmArgs, TrtLlmArgs)
# yapf: enable
from .llm_utils import (BuildConfig, KvCacheRetentionConfig, QuantAlgo,
                        QuantConfig)
from .mpi_session import MpiCommSession

__all__ = [
    'LLM',
    'CompletionOutput',
    'RequestOutput',
    'GuidedDecodingParams',
    'SamplingParams',
    'DisaggregatedParams',
    'KvCacheConfig',
    'KvCacheRetentionConfig',
    'LookaheadDecodingConfig',
    'MedusaDecodingConfig',
    'EagleDecodingConfig',
    'MTPDecodingConfig',
    'SchedulerConfig',
    'CapacitySchedulerPolicy',
    'BuildConfig',
    'QuantConfig',
    'QuantAlgo',
    'CalibConfig',
    'BuildCacheConfig',
    'RequestError',
    'MpiCommSession',
    'ExtendedRuntimePerfKnobConfig',
    'BatchingType',
    'ContextChunkingPolicy',
    'DynamicBatchConfig',
    'CacheTransceiverConfig',
    'NGramDecodingConfig',
    'TorchCompileConfig',
    'DraftTargetDecodingConfig',
    'LlmArgs',
    'TorchLlmArgs',
    'TrtLlmArgs',
]
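
# Example: a minimal usage sketch of the public API re-exported above. The
# model name and sampling values are illustrative assumptions, not part of
# this module.
#
#     from tensorrt_llm.llmapi import LLM, SamplingParams
#
#     llm = LLM(model="TinyLlama/TinyLlama-1.1B-Chat-v1.0")
#     sampling_params = SamplingParams(max_tokens=32, temperature=0.8)
#     for output in llm.generate(["Hello, my name is"], sampling_params):
#         # Each RequestOutput pairs the prompt with its CompletionOutput list.
#         print(output.prompt, output.outputs[0].text)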