#! /usr/bin/env bash

# SINGLE IMAGE INFERENCE
curl http://localhost:8000/v1/chat/completions \
    -H "Content-Type: application/json" \
    -d '{
        "model": "Qwen2.5-VL-3B-Instruct",
        "messages": [{
            "role": "system",
            "content": "You are a helpful assistant."
        }, {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": "Describe the natural environment in the image."
                },
                {
                    "type": "image_url",
                    "image_url": {
                        "url": "https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/seashore.png"
                    }
                }
            ]
        }],
        "max_tokens": 64,
        "temperature": 0
    }'
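
# SINGLE IMAGE INFERENCE (BASE64 DATA URL)
# A minimal sketch, not part of the original script: per the feature notes,
# the endpoint also accepts images inlined as base64 data URLs (the form
# genai-perf sends), following the OpenAI chat completions convention.
# Assumes a local file named seashore.png; "base64 -w 0" is the GNU
# coreutils form (use "base64 -i seashore.png" on macOS).
IMAGE_B64=$(base64 -w 0 seashore.png)
curl http://localhost:8000/v1/chat/completions \
    -H "Content-Type: application/json" \
    -d '{
        "model": "Qwen2.5-VL-3B-Instruct",
        "messages": [{
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": "Describe the natural environment in the image."
                },
                {
                    "type": "image_url",
                    "image_url": {
                        "url": "data:image/png;base64,'"${IMAGE_B64}"'"
                    }
                }
            ]
        }],
        "max_tokens": 64,
        "temperature": 0
    }'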

# MULTI IMAGE INFERENCE
curl http://localhost:8000/v1/chat/completions \
    -H "Content-Type: application/json" \
    -d '{
        "model": "Qwen2.5-VL-3B-Instruct",
        "messages": [{
            "role": "system",
            "content": "You are a helpful assistant."
        }, {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": "Tell me the difference between the two images."
                },
                {
                    "type": "image_url",
                    "image_url": {
                        "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png"
                    }
                },
                {
                    "type": "image_url",
                    "image_url": {
                        "url": "https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/seashore.png"
                    }
                }
            ]
        }],
        "max_tokens": 64,
        "temperature": 0
    }'

# SINGLE VIDEO INFERENCE
curl http://localhost:8000/v1/chat/completions \
    -H "Content-Type: application/json" \
    -d '{
        "model": "Qwen2.5-VL-3B-Instruct",
        "messages": [{
            "role": "system",
            "content": "You are a helpful assistant."
        }, {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": "Tell me what you see in the video briefly."
                },
                {
                    "type": "video_url",
                    "video_url": {
                        "url": "https://huggingface.co/datasets/Efficient-Large-Model/VILA-inference-demos/resolve/main/OAI-sora-tokyo-walk.mp4"
                    }
                }
            ]
        }],
        "max_tokens": 64,
        "temperature": 0
    }'
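
# TEXT-ONLY INFERENCE
# A sketch, not part of the original script: per the feature notes, models
# served through the multimodal path (currently Qwen2-VL and Qwen2.5-VL)
# also accept plain-text requests with no image or video attached. The
# prompt below is an arbitrary placeholder.
curl http://localhost:8000/v1/chat/completions \
    -H "Content-Type: application/json" \
    -d '{
        "model": "Qwen2.5-VL-3B-Instruct",
        "messages": [{
            "role": "system",
            "content": "You are a helpful assistant."
        }, {
            "role": "user",
            "content": "Where is New York?"
        }],
        "max_tokens": 64,
        "temperature": 0
    }'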