mirror of
https://github.com/NVIDIA/TensorRT-LLM.git
synced 2026-01-14 06:27:45 +08:00
* refactoring the multimodal input prep Signed-off-by: Rakib Hasan <rhasan@nvidia.com> * adding out-of-tree override option Signed-off-by: Rakib Hasan <rhasan@nvidia.com> * adding exceptional case for llava-next Signed-off-by: Rakib Hasan <rhasan@nvidia.com> * fixing typo Signed-off-by: Rakib Hasan <rhasan@nvidia.com> * addressing review comments, adding placement option, handling tokenizer variations Signed-off-by: Rakib Hasan <rhasan@nvidia.com> * addressing pytest-asyncio behavior change Signed-off-by: Rakib Hasan <rhasan@nvidia.com> --------- Signed-off-by: Rakib Hasan <rhasan@nvidia.com>
31 lines
1.0 KiB
Python
31 lines
1.0 KiB
Python
"""Public input-processing API for this package.

Re-exports the prompt data types (``.data``), the input-processor
registry (``.registry``), and the multimodal helper utilities
(``.utils``) so callers can import everything from the package root.
"""

from .data import PromptInputs, TextPrompt, TokensPrompt, prompt_inputs
from .registry import (ExtraProcessedInputs, InputProcessor,
                       create_input_processor, register_input_processor)
from .utils import (ALL_SUPPORTED_MULTIMODAL_MODELS, ConversationMessage,
                    MultimodalData, MultimodalDataTracker,
                    add_multimodal_placeholders, async_load_image,
                    async_load_video, default_multimodal_input_loader,
                    encode_base64_content_from_url, load_image, load_video)

# Explicit public API: keep in sync with the re-exports above.
__all__ = [
    "PromptInputs",
    "prompt_inputs",
    "TextPrompt",
    "TokensPrompt",
    "InputProcessor",
    "create_input_processor",
    "register_input_processor",
    "ExtraProcessedInputs",
    "ALL_SUPPORTED_MULTIMODAL_MODELS",
    "ConversationMessage",
    "MultimodalDataTracker",
    "MultimodalData",
    "async_load_image",
    "async_load_video",
    "add_multimodal_placeholders",
    "default_multimodal_input_loader",
    "encode_base64_content_from_url",
    "load_image",
    "load_video",
]