# Common Customizations

## Quantization
TensorRT-LLM can quantize a Hugging Face model automatically by setting the appropriate flags on the `LLM` instance. For example, the following code performs INT4 AWQ quantization. Refer to the complete list of [supported flags](https://nvidia.github.io/TensorRT-LLM/_modules/tensorrt_llm/quantization/mode.html#QuantAlgo) and acceptable values.
```python
from tensorrt_llm.hlapi import QuantConfig, QuantAlgo

quant_config = QuantConfig(quant_algo=QuantAlgo.W4A16_AWQ)

llm = LLM(<model-dir>, quant_config=quant_config)
```
## Sampling
`SamplingParams` customizes the sampling strategy used to control the generated responses, such as beam search, temperature, and [others](https://github.com/NVIDIA/TensorRT-LLM/blob/main/tensorrt_llm/hlapi/utils.py#L55-L76).

As an example, to enable beam search with a beam size of 4, set the `sampling_params` as follows:
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="kn">from</span> <span class="nn">tensorrt_llm.hlapi</span> <span class="kn">import</span> <span class="n">LLM</span><span class="p">,</span> <span class="n">SamplingParams</span><span class="p">,</span> <span class="n">BuildConfig</span>
|
||
|
||
<span class="n">build_config</span> <span class="o">=</span> <span class="n">BuildConfig</span><span class="p">()</span>
|
||
<span class="n">build_config</span><span class="o">.</span><span class="n">max_beam_width</span> <span class="o">=</span> <span class="mi">4</span>
|
||
|
||
<span class="n">llm</span> <span class="o">=</span> <span class="n">LLM</span><span class="p">(</span><span class="o"><</span><span class="n">llama_model_path</span><span class="o">></span><span class="p">,</span> <span class="n">build_config</span><span class="o">=</span><span class="n">build_config</span><span class="p">)</span>
|
||
<span class="c1"># Let the LLM object generate text with the default sampling strategy, or</span>
|
||
<span class="c1"># you can create a SamplingParams object as well with several fields set manually</span>
|
||
<span class="n">sampling_params</span> <span class="o">=</span> <span class="n">SamplingParams</span><span class="p">(</span><span class="n">beam_width</span><span class="o">=</span><span class="mi">4</span><span class="p">)</span> <span class="c1"># current limitation: beam_width should be equal to max_beam_width</span>
|
||
|
||
<span class="k">for</span> <span class="n">output</span> <span class="ow">in</span> <span class="n">llm</span><span class="o">.</span><span class="n">generate</span><span class="p">(</span><span class="o"><</span><span class="n">prompt</span><span class="o">></span><span class="p">,</span> <span class="n">sampling_params</span><span class="o">=</span><span class="n">sampling_params</span><span class="p">):</span>
|
||
<span class="nb">print</span><span class="p">(</span><span class="n">output</span><span class="p">)</span>
|
||
</pre></div>
|
||
</div>
|
||
`SamplingParams` manages and dispatches fields to C++ classes including:

- [SamplingConfig](https://nvidia.github.io/TensorRT-LLM/_cpp_gen/runtime.html#_CPPv4N12tensorrt_llm7runtime14SamplingConfigE)
- [OutputConfig](https://nvidia.github.io/TensorRT-LLM/_cpp_gen/executor.html#_CPPv4N12tensorrt_llm8executor12OutputConfigE)
Refer to the [class documentation](https://nvidia.github.io/TensorRT-LLM/llm-api/index.html#tensorrt_llm.hlapi.SamplingParams) for more details.
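As an illustrative sketch (not taken from the official examples), several commonly used fields can be set in one place; the exact field names used here (`max_new_tokens`, `temperature`, `top_k`, `top_p`) are assumptions, so verify them against the class documentation above.

```python
from tensorrt_llm.hlapi import SamplingParams

# Hypothetical combination of sampling fields; SamplingParams dispatches them
# to the underlying C++ SamplingConfig / OutputConfig.
sampling_params = SamplingParams(
    max_new_tokens=64,   # assumed field: cap on the number of generated tokens
    temperature=0.8,     # softmax temperature
    top_k=40,            # sample only from the 40 most likely tokens
    top_p=0.95,          # nucleus sampling threshold
)

# Then pass it to generation as in the beam-search example above:
# for output in llm.generate(<prompt>, sampling_params=sampling_params):
#     print(output)
```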
## Build Configuration
Apart from the arguments mentioned above, you can also customize the build configuration with the `BuildConfig` class (passed as the `build_config` argument) and other arguments borrowed from the `trtllm-build` CLI. These build configuration options provide flexibility in building engines for the target hardware and use cases. Refer to the following example:
```python
llm = LLM(<model-path>,
          build_config=BuildConfig(
              max_new_tokens=4096,
              max_batch_size=128,
              max_beam_width=4))
```
Refer to the [BuildConfig documentation](https://github.com/NVIDIA/TensorRT-LLM/blob/main/tensorrt_llm/builder.py#L470-L501) for more details.
## Runtime Customization
Similar to `build_config`, you can also customize the runtime configuration with the `runtime_config`, `peft_cache_config`, or other [arguments](https://github.com/NVIDIA/TensorRT-LLM/blob/main/tensorrt_llm/hlapi/llm_utils.py#L186-L223) borrowed from the lower-level APIs. These runtime configuration options provide additional flexibility with respect to KV cache management, GPU memory allocation, and so on. Refer to the following example:
```python
from tensorrt_llm.hlapi import LLM, KvCacheConfig

llm = LLM(<llama_model_path>,
          kv_cache_config=KvCacheConfig(
              max_new_tokens=128,
              free_gpu_memory_fraction=0.8))
```
## Tokenizer Customization
By default, the high-level API uses transformers' `AutoTokenizer`. You can override it with your own tokenizer by passing it when creating the LLM object. Refer to the following example:
```python
llm = LLM(<llama_model_path>, tokenizer=<my_faster_one>)
```
The `LLM()` workflow will then use your tokenizer instead.
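For example, here is a minimal sketch that loads a Hugging Face tokenizer from a separate directory and hands it over; the directory names are placeholders, and whether a raw `AutoTokenizer` instance is accepted directly is an assumption to verify against the LLM API reference.

```python
from transformers import AutoTokenizer

from tensorrt_llm.hlapi import LLM

# Load a (possibly faster or customized) tokenizer from its own directory.
my_tokenizer = AutoTokenizer.from_pretrained("<my_tokenizer_dir>", use_fast=True)

# Hand the tokenizer to the LLM instead of the one bundled with the model.
llm = LLM("<llama_model_path>", tokenizer=my_tokenizer)
```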
It is also possible to input token IDs directly without a tokenizer, as in the following code. The outputs then contain only token IDs, with no text, because the tokenizer is not used.
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="n">llm</span> <span class="o">=</span> <span class="n">LLM</span><span class="p">(</span><span class="o"><</span><span class="n">llama_model_path</span><span class="o">></span><span class="p">)</span>
|
||
|
||
<span class="k">for</span> <span class="n">output</span> <span class="ow">in</span> <span class="n">llm</span><span class="o">.</span><span class="n">generate</span><span class="p">([</span><span class="mi">32</span><span class="p">,</span> <span class="mi">12</span><span class="p">]):</span>
|
||
<span class="o">...</span>
|
||
</pre></div>
|
||
</div>
|
||
<section id="disable-tokenizer">
|
||
<h3>Disable Tokenizer<a class="headerlink" href="#disable-tokenizer" title="Link to this heading"></a></h3>
|
||
For performance considerations, you can disable the tokenizer by passing `skip_tokenizer_init=True` when creating the `LLM`. In this case, `LLM.generate` and `LLM.generate_async` expect prompt token IDs as input. Refer to the following example:
```python
llm = LLM(<llama_model_path>)
for output in llm.generate([[32, 12]], skip_tokenizer_init=True):
    print(output)
```
You will get something like:
```python
RequestOutput(request_id=1, prompt=None, prompt_token_ids=[1, 15043, 29892, 590, 1024, 338], outputs=[CompletionOutput(index=0, text='', token_ids=[518, 10858, 4408, 29962, 322, 306, 626, 263, 518, 10858, 20627, 29962, 472, 518, 10858, 6938, 1822, 306, 626, 5007, 304, 4653, 590, 4066, 297, 278, 518, 11947, 18527, 29962, 2602, 472], cumulative_logprob=None, logprobs=[])], finished=True)
```
Note that the `text` field in `CompletionOutput` is empty since the tokenizer is deactivated.
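If you still need the text, one option (a sketch, assuming the tokenizer files live alongside the model) is to decode the returned token IDs yourself with the Hugging Face tokenizer:

```python
from transformers import AutoTokenizer

# The tokenizer was skipped inside the LLM, so decode the generated
# CompletionOutput.token_ids separately to recover the text.
tokenizer = AutoTokenizer.from_pretrained("<llama_model_path>")

token_ids = [518, 10858, 4408, 29962, 322, 306, 626, 263]  # e.g. CompletionOutput.token_ids
print(tokenizer.decode(token_ids, skip_special_tokens=True))
```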
## Generation

### Asyncio-Based Generation
With the LLM API, you can also perform asynchronous generation with the `generate_async` method. Refer to the following example:
```python
llm = LLM(model=<llama_model_path>)

async for output in llm.generate_async(<prompt>, streaming=True):
    print(output)
```
When the `streaming` flag is set to `True`, the `generate_async` method returns a generator that yields each token as soon as it is available. Otherwise, it returns a generator that waits for and yields only the final result.
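Because `generate_async` is consumed with `async for`, it has to be driven from inside a coroutine. Here is a minimal sketch of how that might look with `asyncio`; the model path and prompt are placeholders.

```python
import asyncio

from tensorrt_llm.hlapi import LLM


async def main():
    llm = LLM(model="<llama_model_path>")
    # Stream partial outputs as soon as tokens become available.
    async for output in llm.generate_async("<prompt>", streaming=True):
        print(output)


asyncio.run(main())
```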
### Future-Style Generation
The result of the `generate_async` method is a [Future-like](https://docs.python.org/3/library/asyncio-future.html#asyncio.Future) object; it doesn't block the thread unless `.result()` is called.
```python
# This will not block the main thread
generation = llm.generate_async(<prompt>)
# Do something else here
# call .result() to explicitly block the main thread and wait for the result when needed
output = generation.result()
```
The `.result()` method works like the [result](https://docs.python.org/3/library/asyncio-future.html#asyncio.Future.result) method of a Python Future; you can specify a timeout to wait for the result.
```python
output = generation.result(timeout=10)
```
There is also an async version, which uses `.aresult()`:
```python
generation = llm.generate_async(<prompt>)
output = await generation.aresult()
```