TensorRT-LLMs/tensorrt_llm/inputs/__init__.py

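# Prompt input types (text and token prompts) and the prompt_inputs helper.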
from .data import PromptInputs, TextPrompt, TokensPrompt, prompt_inputs
from .multimodal import MultimodalInput
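# Input-processor interface and registry hooks for model-specific input preparation.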
from .registry import (ExtraProcessedInputs, InputProcessor,
                       MultimodalPlaceholderMetadata,
                       MultimodalPlaceholderPlacement, create_input_processor,
                       create_input_processor_with_hash,
                       register_input_processor)
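# Supported multimodal model lists plus media loading and placeholder utilities.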
from .utils import (ALL_SUPPORTED_AUDIO_MODELS, ALL_SUPPORTED_IMAGE_MODELS,
                    ALL_SUPPORTED_MULTIMODAL_MODELS, ALL_SUPPORTED_VIDEO_MODELS,
                    ConversationMessage, MultimodalData, MultimodalDataTracker,
                    add_multimodal_placeholders, async_load_audio,
                    async_load_image, async_load_video,
                    default_multimodal_input_loader,
                    encode_base64_content_from_url, load_image, load_video)

__all__ = [
    "ALL_SUPPORTED_MULTIMODAL_MODELS",
    "ALL_SUPPORTED_IMAGE_MODELS",
    "ALL_SUPPORTED_VIDEO_MODELS",
    "ALL_SUPPORTED_AUDIO_MODELS",
    "PromptInputs",
    "prompt_inputs",
    "TextPrompt",
    "TokensPrompt",
    "InputProcessor",
    "create_input_processor",
    "create_input_processor_with_hash",
    "register_input_processor",
    "ExtraProcessedInputs",
    "MultimodalPlaceholderMetadata",
    "MultimodalPlaceholderPlacement",
    "ConversationMessage",
    "MultimodalDataTracker",
    "MultimodalData",
    "MultimodalInput",
    "async_load_audio",
    "async_load_image",
    "async_load_video",
    "add_multimodal_placeholders",
    "default_multimodal_input_loader",
    "encode_base64_content_from_url",
    "load_image",
    "load_video",
]