# tensorrt_llm/inputs/__init__.py
#
# feat: TRTLLM-5574 Add phi-4-multimodal pytorch-backend support (#5644)
# Author: Wanli Jiang (commit 2d2b8bae32, 2025-07-17 06:30:58 +08:00)
# Signed-off-by: Wanli Jiang <35160485+Wanli-Jiang@users.noreply.github.com>
from .data import PromptInputs, TextPrompt, TokensPrompt, prompt_inputs
from .multimodal import MultimodalInput
from .registry import (ExtraProcessedInputs, InputProcessor,
create_input_processor, create_input_processor_with_hash,
register_input_processor)
from .utils import (ALL_SUPPORTED_MULTIMODAL_MODELS, ConversationMessage,
MultimodalData, MultimodalDataTracker,
add_multimodal_placeholders, async_load_audio,
async_load_image, async_load_video,
default_multimodal_input_loader,
encode_base64_content_from_url, load_image, load_video)
# Public surface of ``tensorrt_llm.inputs``: every name re-exported above is
# listed here (grouped by the submodule it originates from) so that
# ``from tensorrt_llm.inputs import *`` exposes exactly this set.
__all__ = [
    # .data
    "PromptInputs", "prompt_inputs", "TextPrompt", "TokensPrompt",
    # .registry
    "InputProcessor", "create_input_processor",
    "create_input_processor_with_hash", "register_input_processor",
    "ExtraProcessedInputs",
    # .utils and .multimodal
    "ALL_SUPPORTED_MULTIMODAL_MODELS", "ConversationMessage",
    "MultimodalDataTracker", "MultimodalData", "MultimodalInput",
    "async_load_audio", "async_load_image", "async_load_video",
    "add_multimodal_placeholders", "default_multimodal_input_loader",
    "encode_base64_content_from_url", "load_image", "load_video",
]