Mirror of https://github.com/NVIDIA/TensorRT-LLM.git
* feat: enhance trtllm serve multimodal
  1. made load_image and load_video asynchronous
  2. added image_encoded input support to be compatible with genai-perf
  3. added text-only support on multimodal models (currently Qwen2-VL & Qwen2.5-VL)
* add test
* fix bandit
* trimming utils
* trimming for test
* genai-perf command fix
* command fix
* refactor chat_utils
* stress test genai-perf command
Signed-off-by: yechank <161688079+yechank-nvidia@users.noreply.github.com>
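As a rough illustration of the image_encoded input path mentioned above, the request below follows the OpenAI-style chat completions payload that genai-perf's multimodal endpoint type emits, with the image inlined as a base64 data URL instead of a fetchable link. The model name, prompt, and exact payload shape here are assumptions for illustration, not taken from this commit.

#! /usr/bin/env bash
# Sketch only: send one inline (base64-encoded) image to an OpenAI-compatible
# /v1/chat/completions endpoint, which is the format genai-perf generates for
# --endpoint-type multimodal. Paths and model name are placeholders.
IMAGE_B64=$(base64 < image.png | tr -d '\n')

curl -s http://localhost:8000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d @- <<EOF
{
  "model": "Qwen2.5-VL-3B-Instruct",
  "messages": [
    {
      "role": "user",
      "content": [
        {"type": "text", "text": "Describe this image."},
        {"type": "image_url",
         "image_url": {"url": "data:image/png;base64,${IMAGE_B64}"}}
      ]
    }
  ],
  "max_tokens": 128
}
EOF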
20 lines
543 B
Bash
#! /usr/bin/env bash

genai-perf profile \
    -m Qwen2.5-VL-3B-Instruct \
    --tokenizer Qwen/Qwen2.5-VL-3B-Instruct \
    --endpoint-type multimodal \
    --random-seed 123 \
    --image-width-mean 64 \
    --image-height-mean 64 \
    --image-format png \
    --synthetic-input-tokens-mean 128 \
    --synthetic-input-tokens-stddev 0 \
    --output-tokens-mean 128 \
    --output-tokens-stddev 0 \
    --request-count 5 \
    --request-rate 1 \
    --profile-export-file my_profile_export.json \
    --url localhost:8000 \
    --streaming
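For the profile run above to produce results, a server must already be listening on the address passed to --url. A typical launch is sketched below; the exact trtllm-serve flags and defaults are assumptions and may differ across TensorRT-LLM releases, so check `trtllm-serve --help` for the installed version.

#! /usr/bin/env bash
# Sketch only: start an OpenAI-compatible trtllm-serve instance for the model
# being profiled, on the same host/port the genai-perf command targets.
trtllm-serve Qwen/Qwen2.5-VL-3B-Instruct --host 0.0.0.0 --port 8000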