mirror of
https://github.com/NVIDIA/TensorRT-LLM.git
synced 2026-02-12 22:14:03 +08:00
* Update TensorRT-LLM --------- Co-authored-by: RunningLeon <mnsheng@yeah.net> Co-authored-by: Tlntin <TlntinDeng01@Gmail.com> Co-authored-by: ZHENG, Zhen <zhengzhen.z@qq.com> Co-authored-by: Pham Van Ngoan <ngoanpham1196@gmail.com> Co-authored-by: Nathan Price <nathan@abridge.com> Co-authored-by: Tushar Goel <tushar.goel.ml@gmail.com> Co-authored-by: Mati <132419219+matichon-vultureprime@users.noreply.github.com>
18 lines
480 B
Python
18 lines
480 B
Python
from tensorrt_llm.hlapi import LLM, ModelConfig
|
|
from tensorrt_llm.hlapi.utils import download_hf_model
|
|
|
|
# Minimal shared prompt list used by the generation smoke test below.
prompts = ["A B C"]
|
|
|
|
|
|
def test_download_hf_model():
    """Download TinyLlama-1.1B-Chat from the Hugging Face hub and verify
    that the returned local directory exists.

    Network-dependent smoke test; no assertions on the directory contents.
    """
    # Renamed from `dir` to avoid shadowing the builtin `dir()`.
    model_dir = download_hf_model("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
    assert model_dir.exists()
    print(f"Downloaded model to {model_dir}")
|
|
|
|
|
|
def test_llm_with_model_downloaded():
    """Instantiate an LLM from a hub model id and run generation over the
    module-level ``prompts``, printing each output.

    Smoke test only — no assertions on the generated text.
    """
    model_config = ModelConfig(model="TinyLlama/TinyLlama-1.1B-Chat-v1.0")
    llm = LLM(model_config)
    outputs = llm.generate(prompts)
    for generated in outputs:
        print(generated)
|