TensorRT-LLM/tests/llmapi/run_llm.py
#!/usr/bin/env python3
import os

import click

from tensorrt_llm.llmapi import LLM, KvCacheConfig, SamplingParams


@click.command()
@click.option("--model_dir", type=str, required=True)
@click.option("--tp_size", type=int, required=True)
@click.option("--engine_dir", type=str, default=None)
def main(model_dir: str, tp_size: int, engine_dir: str):
    # Load the model with the requested tensor-parallel size, allotting 40% of
    # free GPU memory to the KV cache.
    llm = LLM(model_dir,
              tensor_parallel_size=tp_size,
              kv_cache_config=KvCacheConfig(free_gpu_memory_fraction=0.4))

    # Persist the built engine only when a distinct output directory is given.
    if engine_dir is not None and os.path.abspath(
            engine_dir) != os.path.abspath(model_dir):
        llm.save(engine_dir)

    # Generate from raw token ids; end_id=-1 never matches a real token id, so
    # the run always produces max_tokens tokens.
    sampling_params = SamplingParams(max_tokens=10, end_id=-1)

    prompt_token_ids = [45, 12, 13]
    for output in llm.generate([prompt_token_ids],
                               sampling_params=sampling_params):
        print(output)


if __name__ == '__main__':
    main()
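
For context, a minimal sketch of the same llmapi flow driven by a text prompt instead of raw token ids is shown below; the model path is a placeholder and the output.outputs[0].text access assumes the standard RequestOutput structure, so treat it as illustrative rather than part of this test script.

# Illustrative sketch (not part of run_llm.py): text-prompt generation with the
# same llmapi entry points. "/path/to/hf_model" is a placeholder path.
from tensorrt_llm.llmapi import LLM, KvCacheConfig, SamplingParams

llm = LLM("/path/to/hf_model",
          kv_cache_config=KvCacheConfig(free_gpu_memory_fraction=0.4))
sampling_params = SamplingParams(max_tokens=10)
for output in llm.generate(["Hello, my name is"],
                           sampling_params=sampling_params):
    # RequestOutput is assumed to expose the generated text via outputs[0].text.
    print(output.outputs[0].text)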