TensorRT-LLM/examples/high-level-api/run_quant_examples.py
#!/usr/bin/env python
"""Run the high-level API quantization examples.

Usage:
    python run_quant_examples.py <llama_model_dir> [examples_root]

Invokes llm_examples.py (expected under ``examples_root``) once per
quantization type, using the given Hugging Face LLaMA checkpoint.
"""
import os
import subprocess
import sys

PROMPT = "Tell a story"
LLAMA_MODEL_DIR = sys.argv[1]
# Optional second argument: directory containing llm_examples.py
# (defaults to the current working directory when omitted).
EXAMPLES_ROOT = sys.argv[2] if len(sys.argv) > 2 else ""
LLM_EXAMPLES = os.path.join(EXAMPLES_ROOT, 'llm_examples.py')

# Run the quantization example once for INT4 AWQ and once for FP8.
for quant_type in ("int4_awq", "fp8"):
    run_cmd = [
        sys.executable, LLM_EXAMPLES, "--task=run_llm_with_quantization",
        f"--prompt={PROMPT}", f"--hf_model_dir={LLAMA_MODEL_DIR}",
        f"--quant_type={quant_type}"
    ]
    subprocess.run(run_cmd, check=True)
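
# For reference, the loop above is equivalent to running commands like the
# following by hand (the model path shown is a placeholder, not a path from
# this repository):
#
#   python llm_examples.py --task=run_llm_with_quantization \
#       --prompt="Tell a story" --hf_model_dir=/path/to/llama-hf-model \
#       --quant_type=int4_awq
#   python llm_examples.py --task=run_llm_with_quantization \
#       --prompt="Tell a story" --hf_model_dir=/path/to/llama-hf-model \
#       --quant_type=fp8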