Update GitHub pages in root to v0.21.0rc2

Kaiyu Xie 2025-06-18 05:57:03 +00:00
parent f176d4d421
commit abcebb8ffa
192 changed files with 4969 additions and 1892 deletions

@@ -1,4 +1,4 @@
# Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: 6d408ca198781361fe3feb19254966dc
+config: 8e10976759c98fbc1fa1e519991f5ea4
tags: 645f666f9bcd5a90fca523b33c5a78b7

@@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
-DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
+DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@@ -449,6 +449,7 @@
<li class="toctree-l1"><a class="reference internal" href="../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@@ -1991,6 +1992,28 @@
<span id="_CPPv3N12tensorrt_llm8executor13Serialization14serializedSizeERK18PromptTuningConfig"></span><span id="_CPPv2N12tensorrt_llm8executor13Serialization14serializedSizeERK18PromptTuningConfig"></span><span id="tensorrt_llm::executor::Serialization::serializedSize__PromptTuningConfigCR"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1Serialization_1a64114e901f6976ad2ede341a4ce46623"></span><span class="k"><span class="pre">static</span></span><span class="w"> </span><span class="n"><span class="pre">size_t</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">serializedSize</span></span></span><span class="sig-paren">(</span><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor18PromptTuningConfigE" title="tensorrt_llm::executor::PromptTuningConfig"><span class="n"><span class="pre">PromptTuningConfig</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">config</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv4N12tensorrt_llm8executor13Serialization14serializedSizeERK18PromptTuningConfig" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm8executor13Serialization26deserializeMultimodalInputERNSt7istreamE">
<span id="_CPPv3N12tensorrt_llm8executor13Serialization26deserializeMultimodalInputERNSt7istreamE"></span><span id="_CPPv2N12tensorrt_llm8executor13Serialization26deserializeMultimodalInputERNSt7istreamE"></span><span id="tensorrt_llm::executor::Serialization::deserializeMultimodalInput__isR"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1Serialization_1ac0e4bdab5f93ebfb2b738106cbc337c9"></span><span class="k"><span class="pre">static</span></span><span class="w"> </span><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor15MultimodalInputE" title="tensorrt_llm::executor::MultimodalInput"><span class="n"><span class="pre">MultimodalInput</span></span></a><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">deserializeMultimodalInput</span></span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">istream</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">is</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv4N12tensorrt_llm8executor13Serialization26deserializeMultimodalInputERNSt7istreamE" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm8executor13Serialization9serializeERK15MultimodalInputRNSt7ostreamE">
<span id="_CPPv3N12tensorrt_llm8executor13Serialization9serializeERK15MultimodalInputRNSt7ostreamE"></span><span id="_CPPv2N12tensorrt_llm8executor13Serialization9serializeERK15MultimodalInputRNSt7ostreamE"></span><span id="tensorrt_llm::executor::Serialization::serialize__MultimodalInputCR.osR"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1Serialization_1a4000bf5bdd80377efb5d22068e08f822"></span><span class="k"><span class="pre">static</span></span><span class="w"> </span><span class="kt"><span class="pre">void</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">serialize</span></span></span><span class="sig-paren">(</span>
<dl>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor15MultimodalInputE" title="tensorrt_llm::executor::MultimodalInput"><span class="n"><span class="pre">MultimodalInput</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">multimodalInput</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">ostream</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">os</span></span></em>,</dd>
</dl>
<span class="sig-paren">)</span><a class="headerlink" href="#_CPPv4N12tensorrt_llm8executor13Serialization9serializeERK15MultimodalInputRNSt7ostreamE" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm8executor13Serialization14serializedSizeERK15MultimodalInput">
<span id="_CPPv3N12tensorrt_llm8executor13Serialization14serializedSizeERK15MultimodalInput"></span><span id="_CPPv2N12tensorrt_llm8executor13Serialization14serializedSizeERK15MultimodalInput"></span><span id="tensorrt_llm::executor::Serialization::serializedSize__MultimodalInputCR"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1Serialization_1aefd676a92e5574e4ac379ce5c074e053"></span><span class="k"><span class="pre">static</span></span><span class="w"> </span><span class="n"><span class="pre">size_t</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">serializedSize</span></span></span><span class="sig-paren">(</span><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor15MultimodalInputE" title="tensorrt_llm::executor::MultimodalInput"><span class="n"><span class="pre">MultimodalInput</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">multimodalInput</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv4N12tensorrt_llm8executor13Serialization14serializedSizeERK15MultimodalInput" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm8executor13Serialization22deserializeMropeConfigERNSt7istreamE">
<span id="_CPPv3N12tensorrt_llm8executor13Serialization22deserializeMropeConfigERNSt7istreamE"></span><span id="_CPPv2N12tensorrt_llm8executor13Serialization22deserializeMropeConfigERNSt7istreamE"></span><span id="tensorrt_llm::executor::Serialization::deserializeMropeConfig__isR"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1Serialization_1ab8a512a4577e8df9c91b4fab9bed3ed5"></span><span class="k"><span class="pre">static</span></span><span class="w"> </span><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor11MropeConfigE" title="tensorrt_llm::executor::MropeConfig"><span class="n"><span class="pre">MropeConfig</span></span></a><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">deserializeMropeConfig</span></span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">istream</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">is</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv4N12tensorrt_llm8executor13Serialization22deserializeMropeConfigERNSt7istreamE" title="Link to this definition">#</a><br /></dt>
@@ -8265,6 +8288,74 @@
</div>
</dd></dl>
<dl class="cpp class">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm8executor15MultimodalInputE">
<span id="_CPPv3N12tensorrt_llm8executor15MultimodalInputE"></span><span id="_CPPv2N12tensorrt_llm8executor15MultimodalInputE"></span><span id="tensorrt_llm::executor::MultimodalInput"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1MultimodalInput"></span><span class="k"><span class="pre">class</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">MultimodalInput</span></span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm8executor15MultimodalInputE" title="Link to this definition">#</a><br /></dt>
<dd><div class="docutils container">
<em>#include &lt;executor.h&gt;</em></div>
<p>Multimodal input data class. </p>
<div class="breathe-sectiondef docutils container">
<p class="breathe-sectiondef-title rubric" id="breathe-section-title-public-functions">Public Functions</p>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm8executor15MultimodalInput15MultimodalInputENSt6vectorINSt6vectorI10SizeType32EEEENSt6vectorI10SizeType32EENSt6vectorI10SizeType32EE">
<span id="_CPPv3N12tensorrt_llm8executor15MultimodalInput15MultimodalInputENSt6vectorINSt6vectorI10SizeType32EEEENSt6vectorI10SizeType32EENSt6vectorI10SizeType32EE"></span><span id="_CPPv2N12tensorrt_llm8executor15MultimodalInput15MultimodalInputENSt6vectorINSt6vectorI10SizeType32EEEENSt6vectorI10SizeType32EENSt6vectorI10SizeType32EE"></span><span id="tensorrt_llm::executor::MultimodalInput::MultimodalInput__std::vector:std::vector:SizeType32::.std::vector:SizeType32:.std::vector:SizeType32:"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1MultimodalInput_1ae2fdf1d8dd612dfe4193d692c175292a"></span><span class="k"><span class="pre">explicit</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">MultimodalInput</span></span></span><span class="sig-paren">(</span>
<dl>
<dd><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">vector</span></span><span class="p"><span class="pre">&lt;</span></span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">vector</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor10SizeType32E" title="tensorrt_llm::executor::SizeType32"><span class="n"><span class="pre">SizeType32</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">multimodalHashes</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">vector</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor10SizeType32E" title="tensorrt_llm::executor::SizeType32"><span class="n"><span class="pre">SizeType32</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">multimodalPositions</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">vector</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor10SizeType32E" title="tensorrt_llm::executor::SizeType32"><span class="n"><span class="pre">SizeType32</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">multimodalLengths</span></span></em>,</dd>
</dl>
<span class="sig-paren">)</span><a class="headerlink" href="#_CPPv4N12tensorrt_llm8executor15MultimodalInput15MultimodalInputENSt6vectorINSt6vectorI10SizeType32EEEENSt6vectorI10SizeType32EENSt6vectorI10SizeType32EE" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4NK12tensorrt_llm8executor15MultimodalInput19getMultimodalHashesEv">
<span id="_CPPv3NK12tensorrt_llm8executor15MultimodalInput19getMultimodalHashesEv"></span><span id="_CPPv2NK12tensorrt_llm8executor15MultimodalInput19getMultimodalHashesEv"></span><span id="tensorrt_llm::executor::MultimodalInput::getMultimodalHashesC"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1MultimodalInput_1a69a1998b196e3234d40f73cb77f08dcb"></span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">vector</span></span><span class="p"><span class="pre">&lt;</span></span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">vector</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor10SizeType32E" title="tensorrt_llm::executor::SizeType32"><span class="n"><span class="pre">SizeType32</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">getMultimodalHashes</span></span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><span class="w"> </span><span class="k"><span class="pre">const</span></span><a class="headerlink" href="#_CPPv4NK12tensorrt_llm8executor15MultimodalInput19getMultimodalHashesEv" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4NK12tensorrt_llm8executor15MultimodalInput22getMultimodalPositionsEv">
<span id="_CPPv3NK12tensorrt_llm8executor15MultimodalInput22getMultimodalPositionsEv"></span><span id="_CPPv2NK12tensorrt_llm8executor15MultimodalInput22getMultimodalPositionsEv"></span><span id="tensorrt_llm::executor::MultimodalInput::getMultimodalPositionsC"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1MultimodalInput_1affbf7a499a907a777819bba94a08e3a8"></span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">vector</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor10SizeType32E" title="tensorrt_llm::executor::SizeType32"><span class="n"><span class="pre">SizeType32</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">getMultimodalPositions</span></span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><span class="w"> </span><span class="k"><span class="pre">const</span></span><a class="headerlink" href="#_CPPv4NK12tensorrt_llm8executor15MultimodalInput22getMultimodalPositionsEv" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4NK12tensorrt_llm8executor15MultimodalInput20getMultimodalLengthsEv">
<span id="_CPPv3NK12tensorrt_llm8executor15MultimodalInput20getMultimodalLengthsEv"></span><span id="_CPPv2NK12tensorrt_llm8executor15MultimodalInput20getMultimodalLengthsEv"></span><span id="tensorrt_llm::executor::MultimodalInput::getMultimodalLengthsC"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1MultimodalInput_1a966bc4554314a394e144e2f1f89d9349"></span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">vector</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor10SizeType32E" title="tensorrt_llm::executor::SizeType32"><span class="n"><span class="pre">SizeType32</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">getMultimodalLengths</span></span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><span class="w"> </span><span class="k"><span class="pre">const</span></span><a class="headerlink" href="#_CPPv4NK12tensorrt_llm8executor15MultimodalInput20getMultimodalLengthsEv" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
</div>
<div class="breathe-sectiondef docutils container">
<p class="breathe-sectiondef-title rubric" id="breathe-section-title-private-members">Private Members</p>
<dl class="cpp var">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm8executor15MultimodalInput17mMultimodalHashesE">
<span id="_CPPv3N12tensorrt_llm8executor15MultimodalInput17mMultimodalHashesE"></span><span id="_CPPv2N12tensorrt_llm8executor15MultimodalInput17mMultimodalHashesE"></span><span id="tensorrt_llm::executor::MultimodalInput::mMultimodalHashes__std::vector:std::vector:SizeType32::"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1MultimodalInput_1a00014fd00f0c80a12a74fd1fa5f9fe20"></span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">vector</span></span><span class="p"><span class="pre">&lt;</span></span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">vector</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor10SizeType32E" title="tensorrt_llm::executor::SizeType32"><span class="n"><span class="pre">SizeType32</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">mMultimodalHashes</span></span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm8executor15MultimodalInput17mMultimodalHashesE" title="Link to this definition">#</a><br /></dt>
<dd><p>The multimodal hashes. </p>
</dd></dl>
<dl class="cpp var">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm8executor15MultimodalInput20mMultimodalPositionsE">
<span id="_CPPv3N12tensorrt_llm8executor15MultimodalInput20mMultimodalPositionsE"></span><span id="_CPPv2N12tensorrt_llm8executor15MultimodalInput20mMultimodalPositionsE"></span><span id="tensorrt_llm::executor::MultimodalInput::mMultimodalPositions__std::vector:SizeType32:"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1MultimodalInput_1af8d670fddf552e181d5f2cbd8aedcb8d"></span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">vector</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor10SizeType32E" title="tensorrt_llm::executor::SizeType32"><span class="n"><span class="pre">SizeType32</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">mMultimodalPositions</span></span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm8executor15MultimodalInput20mMultimodalPositionsE" title="Link to this definition">#</a><br /></dt>
<dd><p>The multimodal positions. </p>
</dd></dl>
<dl class="cpp var">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm8executor15MultimodalInput18mMultimodalLengthsE">
<span id="_CPPv3N12tensorrt_llm8executor15MultimodalInput18mMultimodalLengthsE"></span><span id="_CPPv2N12tensorrt_llm8executor15MultimodalInput18mMultimodalLengthsE"></span><span id="tensorrt_llm::executor::MultimodalInput::mMultimodalLengths__std::vector:SizeType32:"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1MultimodalInput_1ab19a53dd86441f78f7e9adb865038fea"></span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">vector</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor10SizeType32E" title="tensorrt_llm::executor::SizeType32"><span class="n"><span class="pre">SizeType32</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">mMultimodalLengths</span></span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm8executor15MultimodalInput18mMultimodalLengthsE" title="Link to this definition">#</a><br /></dt>
<dd><p>The multimodal lengths. </p>
</dd></dl>
</div>
<div class="breathe-sectiondef docutils container">
<p class="breathe-sectiondef-title rubric" id="breathe-section-title-friends">Friends</p>
<dl>
<dt class="sig sig-object cpp">
<em class="property"><span class="pre">friend</span> <span class="pre">class</span></em> <span class="pre">Serialization</span></dt>
</dl>
</div>
</dd></dl>
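In short, MultimodalInput is a plain value type: three vectors in, three by-value getters out, with Serialization granted friend access for the stream support shown earlier. A hedged usage sketch (reading positions and lengths as token offsets and token counts follows from the names only; these docs do not state their semantics):

```cpp
#include <vector>
#include <executor.h> // header as named above; the full install path may differ

using tensorrt_llm::executor::MultimodalInput;
using tensorrt_llm::executor::SizeType32;

MultimodalInput makeExample()
{
    // One multimodal chunk: a row of hash words plus, by this sketch's
    // reading of the names, its start offset and span in the token stream.
    std::vector<std::vector<SizeType32>> hashes{{17, 23, 31, 47}};
    std::vector<SizeType32> positions{5};
    std::vector<SizeType32> lengths{16};

    MultimodalInput mm(hashes, positions, lengths);

    auto h = mm.getMultimodalHashes();    // each getter returns a copy
    auto p = mm.getMultimodalPositions(); // of the stored vector
    auto l = mm.getMultimodalLengths();
    (void)h; (void)p; (void)l;
    return mm;
}
```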
<dl class="cpp class">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm8executor18OrchestratorConfigE">
<span id="_CPPv3N12tensorrt_llm8executor18OrchestratorConfigE"></span><span id="_CPPv2N12tensorrt_llm8executor18OrchestratorConfigE"></span><span id="tensorrt_llm::executor::OrchestratorConfig"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1OrchestratorConfig"></span><span class="k"><span class="pre">class</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">OrchestratorConfig</span></span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm8executor18OrchestratorConfigE" title="Link to this definition">#</a><br /></dt>
@@ -8845,8 +8936,8 @@
<div class="breathe-sectiondef docutils container">
<p class="breathe-sectiondef-title rubric" id="breathe-section-title-public-functions">Public Functions</p>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm8executor7Request7RequestE9VecTokens10SizeType32bRK14SamplingConfigRK12OutputConfigRKNSt8optionalI10SizeType32EERKNSt8optionalI10SizeType32EENSt8optionalINSt6vectorI10SizeType32EEEENSt8optionalINSt4listI9VecTokensEEEENSt8optionalINSt4listI9VecTokensEEEENSt8optionalI6TensorEENSt8optionalI25ExternalDraftTokensConfigEENSt8optionalI18PromptTuningConfigEENSt8optionalI6TensorEENSt8optionalI11MropeConfigEENSt8optionalI10LoraConfigEENSt8optionalI23LookaheadDecodingConfigEENSt8optionalI22KvCacheRetentionConfigEENSt8optionalINSt6stringEEENSt8optionalI19LogitsPostProcessorEENSt8optionalI9VecTokensEENSt8optionalI6IdTypeEEb12PriorityType11RequestTypeNSt8optionalI18ContextPhaseParamsEENSt8optionalI6TensorEENSt8optionalI10SizeType32EENSt8optionalI6TensorEE10SizeType32NSt8optionalI11EagleConfigEENSt8optionalI6TensorEENSt8optionalI20GuidedDecodingParamsEENSt8optionalI10SizeType32EENSt8optionalI16MillisecondsTypeEE">
<span id="_CPPv3N12tensorrt_llm8executor7Request7RequestE9VecTokens10SizeType32bRK14SamplingConfigRK12OutputConfigRKNSt8optionalI10SizeType32EERKNSt8optionalI10SizeType32EENSt8optionalINSt6vectorI10SizeType32EEEENSt8optionalINSt4listI9VecTokensEEEENSt8optionalINSt4listI9VecTokensEEEENSt8optionalI6TensorEENSt8optionalI25ExternalDraftTokensConfigEENSt8optionalI18PromptTuningConfigEENSt8optionalI6TensorEENSt8optionalI11MropeConfigEENSt8optionalI10LoraConfigEENSt8optionalI23LookaheadDecodingConfigEENSt8optionalI22KvCacheRetentionConfigEENSt8optionalINSt6stringEEENSt8optionalI19LogitsPostProcessorEENSt8optionalI9VecTokensEENSt8optionalI6IdTypeEEb12PriorityType11RequestTypeNSt8optionalI18ContextPhaseParamsEENSt8optionalI6TensorEENSt8optionalI10SizeType32EENSt8optionalI6TensorEE10SizeType32NSt8optionalI11EagleConfigEENSt8optionalI6TensorEENSt8optionalI20GuidedDecodingParamsEENSt8optionalI10SizeType32EENSt8optionalI16MillisecondsTypeEE"></span><span id="_CPPv2N12tensorrt_llm8executor7Request7RequestE9VecTokens10SizeType32bRK14SamplingConfigRK12OutputConfigRKNSt8optionalI10SizeType32EERKNSt8optionalI10SizeType32EENSt8optionalINSt6vectorI10SizeType32EEEENSt8optionalINSt4listI9VecTokensEEEENSt8optionalINSt4listI9VecTokensEEEENSt8optionalI6TensorEENSt8optionalI25ExternalDraftTokensConfigEENSt8optionalI18PromptTuningConfigEENSt8optionalI6TensorEENSt8optionalI11MropeConfigEENSt8optionalI10LoraConfigEENSt8optionalI23LookaheadDecodingConfigEENSt8optionalI22KvCacheRetentionConfigEENSt8optionalINSt6stringEEENSt8optionalI19LogitsPostProcessorEENSt8optionalI9VecTokensEENSt8optionalI6IdTypeEEb12PriorityType11RequestTypeNSt8optionalI18ContextPhaseParamsEENSt8optionalI6TensorEENSt8optionalI10SizeType32EENSt8optionalI6TensorEE10SizeType32NSt8optionalI11EagleConfigEENSt8optionalI6TensorEENSt8optionalI20GuidedDecodingParamsEENSt8optionalI10SizeType32EENSt8optionalI16MillisecondsTypeEE"></span><span id="tensorrt_llm::executor::Request::Request__VecTokens.SizeType32.b.SamplingConfigCR.OutputConfigCR.std::optional:SizeType32:CR.std::optional:SizeType32:CR.std::optional:std::vector:SizeType32::.std::optional:std::list:VecTokens::.std::optional:std::list:VecTokens::.std::optional:Tensor:.std::optional:ExternalDraftTokensConfig:.std::optional:PromptTuningConfig:.std::optional:Tensor:.std::optional:MropeConfig:.std::optional:LoraConfig:.std::optional:LookaheadDecodingConfig:.std::optional:KvCacheRetentionConfig:.std::optional:ss:.std::optional:LogitsPostProcessor:.std::optional:VecTokens:.std::optional:IdType:.b.PriorityType.RequestType.std::optional:ContextPhaseParams:.std::optional:Tensor:.std::optional:SizeType32:.std::optional:Tensor:.SizeType32.std::optional:EagleConfig:.std::optional:Tensor:.std::optional:GuidedDecodingParams:.std::optional:SizeType32:.std::optional:MillisecondsType:"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1Request_1a8d17dbdbbc0062aa074e3f63b9c6db5d"></span><span class="sig-name descname"><span class="n"><span class="pre">Request</span></span></span><span class="sig-paren">(</span>
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm8executor7Request7RequestE9VecTokens10SizeType32bRK14SamplingConfigRK12OutputConfigRKNSt8optionalI10SizeType32EERKNSt8optionalI10SizeType32EENSt8optionalINSt6vectorI10SizeType32EEEENSt8optionalINSt4listI9VecTokensEEEENSt8optionalINSt4listI9VecTokensEEEENSt8optionalI6TensorEENSt8optionalI25ExternalDraftTokensConfigEENSt8optionalI18PromptTuningConfigEENSt8optionalI15MultimodalInputEENSt8optionalI6TensorEENSt8optionalI11MropeConfigEENSt8optionalI10LoraConfigEENSt8optionalI23LookaheadDecodingConfigEENSt8optionalI22KvCacheRetentionConfigEENSt8optionalINSt6stringEEENSt8optionalI19LogitsPostProcessorEENSt8optionalI9VecTokensEENSt8optionalI6IdTypeEEb12PriorityType11RequestTypeNSt8optionalI18ContextPhaseParamsEENSt8optionalI6TensorEENSt8optionalI10SizeType32EENSt8optionalI6TensorEE10SizeType32NSt8optionalI11EagleConfigEENSt8optionalI6TensorEENSt8optionalI20GuidedDecodingParamsEENSt8optionalI10SizeType32EENSt8optionalI16MillisecondsTypeEE">
<span id="_CPPv3N12tensorrt_llm8executor7Request7RequestE9VecTokens10SizeType32bRK14SamplingConfigRK12OutputConfigRKNSt8optionalI10SizeType32EERKNSt8optionalI10SizeType32EENSt8optionalINSt6vectorI10SizeType32EEEENSt8optionalINSt4listI9VecTokensEEEENSt8optionalINSt4listI9VecTokensEEEENSt8optionalI6TensorEENSt8optionalI25ExternalDraftTokensConfigEENSt8optionalI18PromptTuningConfigEENSt8optionalI15MultimodalInputEENSt8optionalI6TensorEENSt8optionalI11MropeConfigEENSt8optionalI10LoraConfigEENSt8optionalI23LookaheadDecodingConfigEENSt8optionalI22KvCacheRetentionConfigEENSt8optionalINSt6stringEEENSt8optionalI19LogitsPostProcessorEENSt8optionalI9VecTokensEENSt8optionalI6IdTypeEEb12PriorityType11RequestTypeNSt8optionalI18ContextPhaseParamsEENSt8optionalI6TensorEENSt8optionalI10SizeType32EENSt8optionalI6TensorEE10SizeType32NSt8optionalI11EagleConfigEENSt8optionalI6TensorEENSt8optionalI20GuidedDecodingParamsEENSt8optionalI10SizeType32EENSt8optionalI16MillisecondsTypeEE"></span><span id="_CPPv2N12tensorrt_llm8executor7Request7RequestE9VecTokens10SizeType32bRK14SamplingConfigRK12OutputConfigRKNSt8optionalI10SizeType32EERKNSt8optionalI10SizeType32EENSt8optionalINSt6vectorI10SizeType32EEEENSt8optionalINSt4listI9VecTokensEEEENSt8optionalINSt4listI9VecTokensEEEENSt8optionalI6TensorEENSt8optionalI25ExternalDraftTokensConfigEENSt8optionalI18PromptTuningConfigEENSt8optionalI15MultimodalInputEENSt8optionalI6TensorEENSt8optionalI11MropeConfigEENSt8optionalI10LoraConfigEENSt8optionalI23LookaheadDecodingConfigEENSt8optionalI22KvCacheRetentionConfigEENSt8optionalINSt6stringEEENSt8optionalI19LogitsPostProcessorEENSt8optionalI9VecTokensEENSt8optionalI6IdTypeEEb12PriorityType11RequestTypeNSt8optionalI18ContextPhaseParamsEENSt8optionalI6TensorEENSt8optionalI10SizeType32EENSt8optionalI6TensorEE10SizeType32NSt8optionalI11EagleConfigEENSt8optionalI6TensorEENSt8optionalI20GuidedDecodingParamsEENSt8optionalI10SizeType32EENSt8optionalI16MillisecondsTypeEE"></span><span id="tensorrt_llm::executor::Request::Request__VecTokens.SizeType32.b.SamplingConfigCR.OutputConfigCR.std::optional:SizeType32:CR.std::optional:SizeType32:CR.std::optional:std::vector:SizeType32::.std::optional:std::list:VecTokens::.std::optional:std::list:VecTokens::.std::optional:Tensor:.std::optional:ExternalDraftTokensConfig:.std::optional:PromptTuningConfig:.std::optional:MultimodalInput:.std::optional:Tensor:.std::optional:MropeConfig:.std::optional:LoraConfig:.std::optional:LookaheadDecodingConfig:.std::optional:KvCacheRetentionConfig:.std::optional:ss:.std::optional:LogitsPostProcessor:.std::optional:VecTokens:.std::optional:IdType:.b.PriorityType.RequestType.std::optional:ContextPhaseParams:.std::optional:Tensor:.std::optional:SizeType32:.std::optional:Tensor:.SizeType32.std::optional:EagleConfig:.std::optional:Tensor:.std::optional:GuidedDecodingParams:.std::optional:SizeType32:.std::optional:MillisecondsType:"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1Request_1a2fb713f2115d2bd59a9da42501707fe1"></span><span class="sig-name descname"><span class="n"><span class="pre">Request</span></span></span><span class="sig-paren">(</span>
<dl>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor9VecTokensE" title="tensorrt_llm::executor::VecTokens"><span class="n"><span class="pre">VecTokens</span></span></a><span class="w"> </span><span class="n sig-param"><span class="pre">inputTokenIds</span></span></em>,</dd>
@@ -8862,6 +8953,7 @@
<dd><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">optional</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor6TensorE" title="tensorrt_llm::executor::Tensor"><span class="n"><span class="pre">Tensor</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">embeddingBias</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">nullopt</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">optional</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor25ExternalDraftTokensConfigE" title="tensorrt_llm::executor::ExternalDraftTokensConfig"><span class="n"><span class="pre">ExternalDraftTokensConfig</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">externalDraftTokensConfig</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">nullopt</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">optional</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor18PromptTuningConfigE" title="tensorrt_llm::executor::PromptTuningConfig"><span class="n"><span class="pre">PromptTuningConfig</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">pTuningConfig</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">nullopt</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">optional</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor15MultimodalInputE" title="tensorrt_llm::executor::MultimodalInput"><span class="n"><span class="pre">MultimodalInput</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">multimodalInput</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">nullopt</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">optional</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor6TensorE" title="tensorrt_llm::executor::Tensor"><span class="n"><span class="pre">Tensor</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">multimodalEmbedding</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">nullopt</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">optional</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor11MropeConfigE" title="tensorrt_llm::executor::MropeConfig"><span class="n"><span class="pre">MropeConfig</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">mRopeConfig</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">nullopt</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">optional</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor10LoraConfigE" title="tensorrt_llm::executor::LoraConfig"><span class="n"><span class="pre">LoraConfig</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">loraConfig</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">nullopt</span></span></em>,</dd>
@@ -8886,7 +8978,7 @@
<dd><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">optional</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor16MillisecondsTypeE" title="tensorrt_llm::executor::MillisecondsType"><span class="n"><span class="pre">MillisecondsType</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">allottedTimeMs</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">nullopt</span></span></em>,</dd>
</dl>
<span class="sig-paren">)</span><a class="headerlink" href="#_CPPv4N12tensorrt_llm8executor7Request7RequestE9VecTokens10SizeType32bRK14SamplingConfigRK12OutputConfigRKNSt8optionalI10SizeType32EERKNSt8optionalI10SizeType32EENSt8optionalINSt6vectorI10SizeType32EEEENSt8optionalINSt4listI9VecTokensEEEENSt8optionalINSt4listI9VecTokensEEEENSt8optionalI6TensorEENSt8optionalI25ExternalDraftTokensConfigEENSt8optionalI18PromptTuningConfigEENSt8optionalI6TensorEENSt8optionalI11MropeConfigEENSt8optionalI10LoraConfigEENSt8optionalI23LookaheadDecodingConfigEENSt8optionalI22KvCacheRetentionConfigEENSt8optionalINSt6stringEEENSt8optionalI19LogitsPostProcessorEENSt8optionalI9VecTokensEENSt8optionalI6IdTypeEEb12PriorityType11RequestTypeNSt8optionalI18ContextPhaseParamsEENSt8optionalI6TensorEENSt8optionalI10SizeType32EENSt8optionalI6TensorEE10SizeType32NSt8optionalI11EagleConfigEENSt8optionalI6TensorEENSt8optionalI20GuidedDecodingParamsEENSt8optionalI10SizeType32EENSt8optionalI16MillisecondsTypeEE" title="Link to this definition">#</a><br /></dt>
<span class="sig-paren">)</span><a class="headerlink" href="#_CPPv4N12tensorrt_llm8executor7Request7RequestE9VecTokens10SizeType32bRK14SamplingConfigRK12OutputConfigRKNSt8optionalI10SizeType32EERKNSt8optionalI10SizeType32EENSt8optionalINSt6vectorI10SizeType32EEEENSt8optionalINSt4listI9VecTokensEEEENSt8optionalINSt4listI9VecTokensEEEENSt8optionalI6TensorEENSt8optionalI25ExternalDraftTokensConfigEENSt8optionalI18PromptTuningConfigEENSt8optionalI15MultimodalInputEENSt8optionalI6TensorEENSt8optionalI11MropeConfigEENSt8optionalI10LoraConfigEENSt8optionalI23LookaheadDecodingConfigEENSt8optionalI22KvCacheRetentionConfigEENSt8optionalINSt6stringEEENSt8optionalI19LogitsPostProcessorEENSt8optionalI9VecTokensEENSt8optionalI6IdTypeEEb12PriorityType11RequestTypeNSt8optionalI18ContextPhaseParamsEENSt8optionalI6TensorEENSt8optionalI10SizeType32EENSt8optionalI6TensorEE10SizeType32NSt8optionalI11EagleConfigEENSt8optionalI6TensorEENSt8optionalI20GuidedDecodingParamsEENSt8optionalI10SizeType32EENSt8optionalI16MillisecondsTypeEE" title="Link to this definition">#</a><br /></dt>
<dd><p>The <a class="reference internal" href="#classtensorrt__llm_1_1executor_1_1Request"><span class="std std-ref">Request</span></a> constructor. </p>
<dl class="field-list simple">
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
@@ -8904,6 +8996,7 @@
<li><p><strong>embeddingBias</strong> The embedding bias tensor. Expected shape is [vocab_size] </p></li>
<li><p><strong>externalDraftTokensConfig</strong> The speculative decoding with external draft tokens configuration </p></li>
<li><p><strong>pTuningConfig</strong> The prompt tuning configuration </p></li>
<li><p><strong>multimodalInput</strong> The multimodal input {multimodalHashes, multimodalPositions, multimodalLengths} </p></li>
<li><p><strong>multimodalEmbedding</strong> The multimodal embedding tensor. Expected shape is [num_multimodal_tokens, hidden_dim] </p></li>
<li><p><strong>mRopeConfig</strong> The mrope configuration </p></li>
<li><p><strong>loraConfig</strong> The LoRA configuration </p></li>
@@ -9026,6 +9119,11 @@
<span id="_CPPv3NK12tensorrt_llm8executor7Request21getPromptTuningConfigEv"></span><span id="_CPPv2NK12tensorrt_llm8executor7Request21getPromptTuningConfigEv"></span><span id="tensorrt_llm::executor::Request::getPromptTuningConfigC"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1Request_1add08e33a6e9719b85ab0ddfb288c62d2"></span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">optional</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor18PromptTuningConfigE" title="tensorrt_llm::executor::PromptTuningConfig"><span class="n"><span class="pre">PromptTuningConfig</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">getPromptTuningConfig</span></span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><span class="w"> </span><span class="k"><span class="pre">const</span></span><a class="headerlink" href="#_CPPv4NK12tensorrt_llm8executor7Request21getPromptTuningConfigEv" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4NK12tensorrt_llm8executor7Request18getMultimodalInputEv">
<span id="_CPPv3NK12tensorrt_llm8executor7Request18getMultimodalInputEv"></span><span id="_CPPv2NK12tensorrt_llm8executor7Request18getMultimodalInputEv"></span><span id="tensorrt_llm::executor::Request::getMultimodalInputC"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1Request_1a614eadec8b4a146f625bebc8e2d9108d"></span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">optional</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor15MultimodalInputE" title="tensorrt_llm::executor::MultimodalInput"><span class="n"><span class="pre">MultimodalInput</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">getMultimodalInput</span></span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><span class="w"> </span><span class="k"><span class="pre">const</span></span><a class="headerlink" href="#_CPPv4NK12tensorrt_llm8executor7Request18getMultimodalInputEv" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4NK12tensorrt_llm8executor7Request22getMultimodalEmbeddingEv">
<span id="_CPPv3NK12tensorrt_llm8executor7Request22getMultimodalEmbeddingEv"></span><span id="_CPPv2NK12tensorrt_llm8executor7Request22getMultimodalEmbeddingEv"></span><span id="tensorrt_llm::executor::Request::getMultimodalEmbeddingC"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1Request_1a77ea94602c1e7258176ae9999e36bf44"></span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">optional</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor6TensorE" title="tensorrt_llm::executor::Tensor"><span class="n"><span class="pre">Tensor</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">getMultimodalEmbedding</span></span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><span class="w"> </span><span class="k"><span class="pre">const</span></span><a class="headerlink" href="#_CPPv4NK12tensorrt_llm8executor7Request22getMultimodalEmbeddingEv" title="Link to this definition">#</a><br /></dt>
@@ -9217,6 +9315,11 @@
<span id="_CPPv3N12tensorrt_llm8executor7Request22setMultimodalEmbeddingERK6Tensor"></span><span id="_CPPv2N12tensorrt_llm8executor7Request22setMultimodalEmbeddingERK6Tensor"></span><span id="tensorrt_llm::executor::Request::setMultimodalEmbedding__TensorCR"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1Request_1aed1946173f299ff493f98f0fd0fd8a42"></span><span class="kt"><span class="pre">void</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">setMultimodalEmbedding</span></span></span><span class="sig-paren">(</span><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor6TensorE" title="tensorrt_llm::executor::Tensor"><span class="n"><span class="pre">Tensor</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">multimodalEmbedding</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv4N12tensorrt_llm8executor7Request22setMultimodalEmbeddingERK6Tensor" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm8executor7Request18setMultimodalInputERK15MultimodalInput">
<span id="_CPPv3N12tensorrt_llm8executor7Request18setMultimodalInputERK15MultimodalInput"></span><span id="_CPPv2N12tensorrt_llm8executor7Request18setMultimodalInputERK15MultimodalInput"></span><span id="tensorrt_llm::executor::Request::setMultimodalInput__MultimodalInputCR"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1Request_1ab9763068bbff784beb3b5a9e70b02626"></span><span class="kt"><span class="pre">void</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">setMultimodalInput</span></span></span><span class="sig-paren">(</span><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor15MultimodalInputE" title="tensorrt_llm::executor::MultimodalInput"><span class="n"><span class="pre">MultimodalInput</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">multimodalInput</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv4N12tensorrt_llm8executor7Request18setMultimodalInputERK15MultimodalInput" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
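Paired with getMultimodalInput() above, this setter lets a request attach or replace multimodal metadata after construction instead of threading it through the long constructor. A sketch under two assumptions: the second constructor parameter is the generation budget (its name, maxTokens, is not shown in the truncated signature), and the remaining leading parameters have defaults, as the `= std::nullopt` defaults in the documented signature suggest:

```cpp
#include <executor.h> // as named in these docs; the install path may differ

namespace texec = tensorrt_llm::executor;

texec::Request makeMultimodalRequest()
{
    // Token ids plus a generation budget; all other constructor
    // arguments are assumed to default (streaming, sampling, ...).
    texec::Request req(/*inputTokenIds=*/{1, 2, 3}, /*maxTokens=*/64);

    // Attach multimodal metadata after the fact via the setter.
    texec::MultimodalInput mm({{42, 43}}, /*positions=*/{0}, /*lengths=*/{2});
    req.setMultimodalInput(mm);

    if (auto attached = req.getMultimodalInput()) // std::optional round-trip
    {
        auto lengths = attached->getMultimodalLengths();
        (void)lengths;
    }
    return req;
}
```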
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm8executor7Request14setMropeConfigERK11MropeConfig">
<span id="_CPPv3N12tensorrt_llm8executor7Request14setMropeConfigERK11MropeConfig"></span><span id="_CPPv2N12tensorrt_llm8executor7Request14setMropeConfigERK11MropeConfig"></span><span id="tensorrt_llm::executor::Request::setMropeConfig__MropeConfigCR"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1Request_1ae2a2b91a7a51f369b93965cb8ca4e479"></span><span class="kt"><span class="pre">void</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">setMropeConfig</span></span></span><span class="sig-paren">(</span><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor11MropeConfigE" title="tensorrt_llm::executor::MropeConfig"><span class="n"><span class="pre">MropeConfig</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">mRopeConfig</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv4N12tensorrt_llm8executor7Request14setMropeConfigERK11MropeConfig" title="Link to this definition">#</a><br /></dt>
@@ -11391,6 +11494,9 @@
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor13Serialization29deserializePromptTuningConfigERNSt7istreamE"><code class="docutils literal notranslate"><span class="pre">deserializePromptTuningConfig()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor13Serialization9serializeERK18PromptTuningConfigRNSt7ostreamE"><code class="docutils literal notranslate"><span class="pre">serialize()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor13Serialization14serializedSizeERK18PromptTuningConfig"><code class="docutils literal notranslate"><span class="pre">serializedSize()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor13Serialization26deserializeMultimodalInputERNSt7istreamE"><code class="docutils literal notranslate"><span class="pre">deserializeMultimodalInput()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor13Serialization9serializeERK15MultimodalInputRNSt7ostreamE"><code class="docutils literal notranslate"><span class="pre">serialize()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor13Serialization14serializedSizeERK15MultimodalInput"><code class="docutils literal notranslate"><span class="pre">serializedSize()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor13Serialization22deserializeMropeConfigERNSt7istreamE"><code class="docutils literal notranslate"><span class="pre">deserializeMropeConfig()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor13Serialization9serializeERK11MropeConfigRNSt7ostreamE"><code class="docutils literal notranslate"><span class="pre">serialize()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor13Serialization14serializedSizeERK11MropeConfig"><code class="docutils literal notranslate"><span class="pre">serializedSize()</span></code></a></li>
@@ -12391,6 +12497,16 @@
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor11MropeConfig20mMRopePositionDeltasE"><code class="docutils literal notranslate"><span class="pre">mMRopePositionDeltas</span></code></a></li>
</ul>
</li>
<li class="toc-h3 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor15MultimodalInputE"><code class="docutils literal notranslate"><span class="pre">tensorrt_llm::executor::MultimodalInput</span></code></a><ul class="nav section-nav flex-column">
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor15MultimodalInput15MultimodalInputENSt6vectorINSt6vectorI10SizeType32EEEENSt6vectorI10SizeType32EENSt6vectorI10SizeType32EE"><code class="docutils literal notranslate"><span class="pre">MultimodalInput()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm8executor15MultimodalInput19getMultimodalHashesEv"><code class="docutils literal notranslate"><span class="pre">getMultimodalHashes()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm8executor15MultimodalInput22getMultimodalPositionsEv"><code class="docutils literal notranslate"><span class="pre">getMultimodalPositions()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm8executor15MultimodalInput20getMultimodalLengthsEv"><code class="docutils literal notranslate"><span class="pre">getMultimodalLengths()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor15MultimodalInput17mMultimodalHashesE"><code class="docutils literal notranslate"><span class="pre">mMultimodalHashes</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor15MultimodalInput20mMultimodalPositionsE"><code class="docutils literal notranslate"><span class="pre">mMultimodalPositions</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor15MultimodalInput18mMultimodalLengthsE"><code class="docutils literal notranslate"><span class="pre">mMultimodalLengths</span></code></a></li>
</ul>
</li>
<li class="toc-h3 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor18OrchestratorConfigE"><code class="docutils literal notranslate"><span class="pre">tensorrt_llm::executor::OrchestratorConfig</span></code></a><ul class="nav section-nav flex-column">
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor18OrchestratorConfig18OrchestratorConfigEbNSt6stringENSt10shared_ptrIN3mpi7MpiCommEEEb"><code class="docutils literal notranslate"><span class="pre">OrchestratorConfig()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm8executor18OrchestratorConfig17getIsOrchestratorEv"><code class="docutils literal notranslate"><span class="pre">getIsOrchestrator()</span></code></a></li>
@@ -12482,7 +12598,7 @@
</ul>
</li>
<li class="toc-h3 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor7RequestE"><code class="docutils literal notranslate"><span class="pre">tensorrt_llm::executor::Request</span></code></a><ul class="nav section-nav flex-column">
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor7Request7RequestE9VecTokens10SizeType32bRK14SamplingConfigRK12OutputConfigRKNSt8optionalI10SizeType32EERKNSt8optionalI10SizeType32EENSt8optionalINSt6vectorI10SizeType32EEEENSt8optionalINSt4listI9VecTokensEEEENSt8optionalINSt4listI9VecTokensEEEENSt8optionalI6TensorEENSt8optionalI25ExternalDraftTokensConfigEENSt8optionalI18PromptTuningConfigEENSt8optionalI6TensorEENSt8optionalI11MropeConfigEENSt8optionalI10LoraConfigEENSt8optionalI23LookaheadDecodingConfigEENSt8optionalI22KvCacheRetentionConfigEENSt8optionalINSt6stringEEENSt8optionalI19LogitsPostProcessorEENSt8optionalI9VecTokensEENSt8optionalI6IdTypeEEb12PriorityType11RequestTypeNSt8optionalI18ContextPhaseParamsEENSt8optionalI6TensorEENSt8optionalI10SizeType32EENSt8optionalI6TensorEE10SizeType32NSt8optionalI11EagleConfigEENSt8optionalI6TensorEENSt8optionalI20GuidedDecodingParamsEENSt8optionalI10SizeType32EENSt8optionalI16MillisecondsTypeEE"><code class="docutils literal notranslate"><span class="pre">Request()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor7Request7RequestE9VecTokens10SizeType32bRK14SamplingConfigRK12OutputConfigRKNSt8optionalI10SizeType32EERKNSt8optionalI10SizeType32EENSt8optionalINSt6vectorI10SizeType32EEEENSt8optionalINSt4listI9VecTokensEEEENSt8optionalINSt4listI9VecTokensEEEENSt8optionalI6TensorEENSt8optionalI25ExternalDraftTokensConfigEENSt8optionalI18PromptTuningConfigEENSt8optionalI15MultimodalInputEENSt8optionalI6TensorEENSt8optionalI11MropeConfigEENSt8optionalI10LoraConfigEENSt8optionalI23LookaheadDecodingConfigEENSt8optionalI22KvCacheRetentionConfigEENSt8optionalINSt6stringEEENSt8optionalI19LogitsPostProcessorEENSt8optionalI9VecTokensEENSt8optionalI6IdTypeEEb12PriorityType11RequestTypeNSt8optionalI18ContextPhaseParamsEENSt8optionalI6TensorEENSt8optionalI10SizeType32EENSt8optionalI6TensorEE10SizeType32NSt8optionalI11EagleConfigEENSt8optionalI6TensorEENSt8optionalI20GuidedDecodingParamsEENSt8optionalI10SizeType32EENSt8optionalI16MillisecondsTypeEE"><code class="docutils literal notranslate"><span class="pre">Request()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor7Request7RequestERK7Request"><code class="docutils literal notranslate"><span class="pre">Request()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor7Request7RequestERR7Request"><code class="docutils literal notranslate"><span class="pre">Request()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor7RequestaSERK7Request"><code class="docutils literal notranslate"><span class="pre">operator=()</span></code></a></li>
@ -12501,6 +12617,7 @@
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm8executor7Request16getEmbeddingBiasEv"><code class="docutils literal notranslate"><span class="pre">getEmbeddingBias()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm8executor7Request28getExternalDraftTokensConfigEv"><code class="docutils literal notranslate"><span class="pre">getExternalDraftTokensConfig()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm8executor7Request21getPromptTuningConfigEv"><code class="docutils literal notranslate"><span class="pre">getPromptTuningConfig()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm8executor7Request18getMultimodalInputEv"><code class="docutils literal notranslate"><span class="pre">getMultimodalInput()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm8executor7Request22getMultimodalEmbeddingEv"><code class="docutils literal notranslate"><span class="pre">getMultimodalEmbedding()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm8executor7Request14getMropeConfigEv"><code class="docutils literal notranslate"><span class="pre">getMropeConfig()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm8executor7Request13getLoraConfigEv"><code class="docutils literal notranslate"><span class="pre">getLoraConfig()</span></code></a></li>
@ -12535,6 +12652,7 @@
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor7Request28setExternalDraftTokensConfigERK25ExternalDraftTokensConfig"><code class="docutils literal notranslate"><span class="pre">setExternalDraftTokensConfig()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor7Request21setPromptTuningConfigERK18PromptTuningConfig"><code class="docutils literal notranslate"><span class="pre">setPromptTuningConfig()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor7Request22setMultimodalEmbeddingERK6Tensor"><code class="docutils literal notranslate"><span class="pre">setMultimodalEmbedding()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor7Request18setMultimodalInputERK15MultimodalInput"><code class="docutils literal notranslate"><span class="pre">setMultimodalInput()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor7Request14setMropeConfigERK11MropeConfig"><code class="docutils literal notranslate"><span class="pre">setMropeConfig()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor7Request13setLoraConfigERK10LoraConfig"><code class="docutils literal notranslate"><span class="pre">setLoraConfig()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor7Request18setLookaheadConfigERK23LookaheadDecodingConfig"><code class="docutils literal notranslate"><span class="pre">setLookaheadConfig()</span></code></a></li>
@ -12925,9 +13043,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>
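The hunks above add `MultimodalInput` plumbing to `tensorrt_llm::executor::Request`: a `getMultimodalInput()` accessor and a `setMultimodalInput()` setter alongside the existing multimodal-embedding pair. A minimal sketch of the round trip, assuming the common two-argument `Request(tokens, maxNewTokens)` constructor form with defaulted options and an optional-style getter; the header path and placeholder values are illustrative, not verified API:

```cpp
#include <utility>

#include "tensorrt_llm/executor/executor.h"

namespace tle = tensorrt_llm::executor;

// Attach multimodal metadata (hashes/positions/lengths) to a request and
// read it back through the matching getter added in the same change.
tle::Request makeMultimodalRequest(tle::VecTokens tokens, tle::MultimodalInput multimodalInput)
{
    tle::Request request(std::move(tokens), /*maxNewTokens=*/64);
    request.setMultimodalInput(multimodalInput);          // setter added in this diff
    auto const roundTrip = request.getMultimodalInput();  // optional-style accessor (assumed)
    return request;
}
```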
View File
@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -449,6 +449,7 @@
<li class="toctree-l1"><a class="reference internal" href="../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -3025,6 +3026,18 @@
<span class="sig-paren">)</span><span class="w"> </span><span class="k"><span class="pre">const</span></span><a class="headerlink" href="#_CPPv4NK12tensorrt_llm7runtime11ModelConfig21getNumKvHeadsPerLayerEv" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4NK12tensorrt_llm7runtime11ModelConfig27getNumKvHeadsForGivenLayersERKNSt6vectorI10SizeType32EEb">
<span id="_CPPv3NK12tensorrt_llm7runtime11ModelConfig27getNumKvHeadsForGivenLayersERKNSt6vectorI10SizeType32EEb"></span><span id="_CPPv2NK12tensorrt_llm7runtime11ModelConfig27getNumKvHeadsForGivenLayersERKNSt6vectorI10SizeType32EEb"></span><span id="tensorrt_llm::runtime::ModelConfig::getNumKvHeadsForGivenLayers__std::vector:SizeType32:CR.bC"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1ModelConfig_1a6db144b9d47e4800bf01c4e10cd6de30"></span><span class="k"><span class="pre">inline</span></span><span class="w"> </span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">vector</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime10SizeType32E" title="tensorrt_llm::runtime::SizeType32"><span class="n"><span class="pre">SizeType32</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">getNumKvHeadsForGivenLayers</span></span></span><span class="sig-paren">(</span>
<dl>
<dd><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">vector</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime10SizeType32E" title="tensorrt_llm::runtime::SizeType32"><span class="n"><span class="pre">SizeType32</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">layers</span></span></em>,</dd>
<dd><em class="sig-param"><span class="kt"><span class="pre">bool</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">isCrossAttention</span></span></em>,</dd>
</dl>
<span class="sig-paren">)</span><span class="w"> </span><span class="k"><span class="pre">const</span></span><a class="headerlink" href="#_CPPv4NK12tensorrt_llm7runtime11ModelConfig27getNumKvHeadsForGivenLayersERKNSt6vectorI10SizeType32EEb" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
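The new `ModelConfig::getNumKvHeadsForGivenLayers()` overload returns per-layer KV head counts for an arbitrary subset of layer indices. A minimal sketch under the signature shown above, assuming a populated `runtime::ModelConfig` and the header path used by the runtime sources:

```cpp
#include <vector>

#include "tensorrt_llm/runtime/modelConfig.h"

using tensorrt_llm::runtime::ModelConfig;
using tensorrt_llm::runtime::SizeType32;

// Query self-attention KV head counts for a chosen subset of layers.
std::vector<SizeType32> kvHeadsForLayers(ModelConfig const& modelConfig)
{
    std::vector<SizeType32> const layers{0, 1, 2, 3};
    return modelConfig.getNumKvHeadsForGivenLayers(layers, /*isCrossAttention=*/false);
}
```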
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4NK12tensorrt_llm7runtime11ModelConfig31getNumKvHeadsPerLayerLocalRangeE10SizeType3210SizeType32b">
<span id="_CPPv3NK12tensorrt_llm7runtime11ModelConfig31getNumKvHeadsPerLayerLocalRangeE10SizeType3210SizeType32b"></span><span id="_CPPv2NK12tensorrt_llm7runtime11ModelConfig31getNumKvHeadsPerLayerLocalRangeE10SizeType3210SizeType32b"></span><span id="tensorrt_llm::runtime::ModelConfig::getNumKvHeadsPerLayerLocalRange__SizeType32.SizeType32.bC"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1ModelConfig_1a05ea49becb87faba3755e95a98484aac"></span><span class="k"><span class="pre">inline</span></span><span class="w"> </span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">pair</span></span><span class="p"><span class="pre">&lt;</span></span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">vector</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime10SizeType32E" title="tensorrt_llm::runtime::SizeType32"><span class="n"><span class="pre">SizeType32</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">const_iterator</span></span><span class="p"><span class="pre">,</span></span><span class="w"> </span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">vector</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime10SizeType32E" title="tensorrt_llm::runtime::SizeType32"><span class="n"><span class="pre">SizeType32</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">const_iterator</span></span><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">getNumKvHeadsPerLayerLocalRange</span></span></span><span class="sig-paren">(</span>
@ -3060,19 +3073,6 @@
<span class="sig-paren">)</span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime11ModelConfig26setNumKvHeadsPerCrossLayerERKNSt6vectorI10SizeType32EE" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4NK12tensorrt_llm7runtime11ModelConfig18getSumLocalKvHeadsE10SizeType3210SizeType32b">
<span id="_CPPv3NK12tensorrt_llm7runtime11ModelConfig18getSumLocalKvHeadsE10SizeType3210SizeType32b"></span><span id="_CPPv2NK12tensorrt_llm7runtime11ModelConfig18getSumLocalKvHeadsE10SizeType3210SizeType32b"></span><span id="tensorrt_llm::runtime::ModelConfig::getSumLocalKvHeads__SizeType32.SizeType32.bC"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1ModelConfig_1a639038d8052a0789c54b0d6c7f1ee040"></span><span class="k"><span class="pre">inline</span></span><span class="w"> </span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime10SizeType32E" title="tensorrt_llm::runtime::SizeType32"><span class="n"><span class="pre">SizeType32</span></span></a><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">getSumLocalKvHeads</span></span></span><span class="sig-paren">(</span>
<dl>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime10SizeType32E" title="tensorrt_llm::runtime::SizeType32"><span class="n"><span class="pre">SizeType32</span></span></a><span class="w"> </span><span class="n sig-param"><span class="pre">pipelineParallelism</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="m"><span class="pre">1</span></span></em>,</dd>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime10SizeType32E" title="tensorrt_llm::runtime::SizeType32"><span class="n"><span class="pre">SizeType32</span></span></a><span class="w"> </span><span class="n sig-param"><span class="pre">pipelineParallelismRank</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="m"><span class="pre">0</span></span></em>,</dd>
<dd><em class="sig-param"><span class="kt"><span class="pre">bool</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">isCrossAttention</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="k"><span class="pre">false</span></span></em>,</dd>
</dl>
<span class="sig-paren">)</span><span class="w"> </span><span class="k"><span class="pre">const</span></span><a class="headerlink" href="#_CPPv4NK12tensorrt_llm7runtime11ModelConfig18getSumLocalKvHeadsE10SizeType3210SizeType32b" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
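With `getSumLocalKvHeads()` removed in this diff, the same total can be recovered from the retained `getNumKvHeadsPerLayerLocalRange()`, which returns an iterator pair over this pipeline rank's local layers. A sketch, assuming that range semantics; the accumulation itself is standard:

```cpp
#include <numeric>

#include "tensorrt_llm/runtime/modelConfig.h"

using tensorrt_llm::runtime::ModelConfig;
using tensorrt_llm::runtime::SizeType32;

// Sum KV heads over this rank's local layers, replacing the removed
// getSumLocalKvHeads() convenience method.
SizeType32 sumLocalKvHeads(ModelConfig const& modelConfig,
    SizeType32 pipelineParallelism, SizeType32 pipelineParallelismRank)
{
    auto const [first, last] = modelConfig.getNumKvHeadsPerLayerLocalRange(
        pipelineParallelism, pipelineParallelismRank, /*isCrossAttention=*/false);
    return std::accumulate(first, last, SizeType32{0});
}
```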
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4NK12tensorrt_llm7runtime11ModelConfig19skipCrossAttnBlocksEv">
<span id="_CPPv3NK12tensorrt_llm7runtime11ModelConfig19skipCrossAttnBlocksEv"></span><span id="_CPPv2NK12tensorrt_llm7runtime11ModelConfig19skipCrossAttnBlocksEv"></span><span id="tensorrt_llm::runtime::ModelConfig::skipCrossAttnBlocksCCE"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1ModelConfig_1a144f4f5b81ad7d6d42159033418f1a94"></span><span class="k"><span class="pre">inline</span></span><span class="w"> </span><span class="kt"><span class="pre">bool</span></span><span class="w"> </span><span class="k"><span class="pre">constexpr</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">skipCrossAttnBlocks</span></span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="k"><span class="pre">noexcept</span></span><a class="headerlink" href="#_CPPv4NK12tensorrt_llm7runtime11ModelConfig19skipCrossAttnBlocksEv" title="Link to this definition">#</a><br /></dt>
@ -5655,8 +5655,8 @@
<div class="breathe-sectiondef docutils container">
<p class="breathe-sectiondef-title rubric" id="breathe-section-title-public-functions">Public Functions</p>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime10GptDecoder10GptDecoderERKN8executor12DecodingModeE6size_t6size_t6size_t6size_t6size_tRK13CudaStreamPtrNSt10shared_ptrIK25SpeculativeDecodingModuleEE">
<span id="_CPPv3N12tensorrt_llm7runtime10GptDecoder10GptDecoderERKN8executor12DecodingModeE6size_t6size_t6size_t6size_t6size_tRK13CudaStreamPtrNSt10shared_ptrIK25SpeculativeDecodingModuleEE"></span><span id="_CPPv2N12tensorrt_llm7runtime10GptDecoder10GptDecoderERKN8executor12DecodingModeE6size_t6size_t6size_t6size_t6size_tRK13CudaStreamPtrNSt10shared_ptrIK25SpeculativeDecodingModuleEE"></span><span id="tensorrt_llm::runtime::GptDecoder::GptDecoder__executor::DecodingModeCR.s.s.s.s.s.CudaStreamPtrCR.std::shared_ptr:SpeculativeDecodingModuleC:"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1GptDecoder_1a32447c408fdde4dade4948f894f754cb"></span><span class="sig-name descname"><span class="n"><span class="pre">GptDecoder</span></span></span><span class="sig-paren">(</span>
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime10GptDecoder10GptDecoderERKN8executor12DecodingModeE6size_t6size_t6size_t6size_tRK13CudaStreamPtrNSt10shared_ptrIK25SpeculativeDecodingModuleEE">
<span id="_CPPv3N12tensorrt_llm7runtime10GptDecoder10GptDecoderERKN8executor12DecodingModeE6size_t6size_t6size_t6size_tRK13CudaStreamPtrNSt10shared_ptrIK25SpeculativeDecodingModuleEE"></span><span id="_CPPv2N12tensorrt_llm7runtime10GptDecoder10GptDecoderERKN8executor12DecodingModeE6size_t6size_t6size_t6size_tRK13CudaStreamPtrNSt10shared_ptrIK25SpeculativeDecodingModuleEE"></span><span id="tensorrt_llm::runtime::GptDecoder::GptDecoder__executor::DecodingModeCR.s.s.s.s.CudaStreamPtrCR.std::shared_ptr:SpeculativeDecodingModuleC:"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1GptDecoder_1a549dc4039d82696e00bf04d127ff3deb"></span><span class="sig-name descname"><span class="n"><span class="pre">GptDecoder</span></span></span><span class="sig-paren">(</span>
<dl>
<dd><em class="sig-param"><a class="reference internal" href="executor.html#_CPPv4N12tensorrt_llm8executorE" title="tensorrt_llm::executor"><span class="n"><span class="pre">executor</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="executor.html#_CPPv4N12tensorrt_llm8executor12DecodingModeE" title="tensorrt_llm::executor::DecodingMode"><span class="n"><span class="pre">DecodingMode</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">mode</span></span></em>,</dd>
@ -5664,28 +5664,34 @@
<dd><em class="sig-param"><span class="n"><span class="pre">size_t</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">maxBeamWidth</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">size_t</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">vocabSize</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">size_t</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">vocabSizePadded</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">size_t</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">maxSequenceLength</span></span></em>,</dd>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime10GptDecoder13CudaStreamPtrE" title="tensorrt_llm::runtime::GptDecoder::CudaStreamPtr"><span class="n"><span class="pre">CudaStreamPtr</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">stream</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">shared_ptr</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime25SpeculativeDecodingModuleE" title="tensorrt_llm::runtime::SpeculativeDecodingModule"><span class="n"><span class="pre">SpeculativeDecodingModule</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">speculativeDecodingModule</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="k"><span class="pre">nullptr</span></span></em>,</dd>
</dl>
<span class="sig-paren">)</span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime10GptDecoder10GptDecoderERKN8executor12DecodingModeE6size_t6size_t6size_t6size_t6size_tRK13CudaStreamPtrNSt10shared_ptrIK25SpeculativeDecodingModuleEE" title="Link to this definition">#</a><br /></dt>
<span class="sig-paren">)</span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime10GptDecoder10GptDecoderERKN8executor12DecodingModeE6size_t6size_t6size_t6size_tRK13CudaStreamPtrNSt10shared_ptrIK25SpeculativeDecodingModuleEE" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
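The constructor hunk above drops `maxSequenceLength` from `GptDecoder`'s argument list. A sketch of the trimmed call, assuming the `float` specialization, the header path, and that the first `size_t` (cut off by the hunk boundary) is `maxBatchSize`; the numeric values are placeholders:

```cpp
#include <memory>

#include "tensorrt_llm/runtime/gptDecoder.h"

namespace tlr = tensorrt_llm::runtime;
namespace tle = tensorrt_llm::executor;

// Construct a decoder with the trimmed argument list; maxSequenceLength is
// no longer passed, and speculativeDecodingModule keeps its nullptr default.
std::unique_ptr<tlr::GptDecoder<float>> makeDecoder(
    tle::DecodingMode const& mode, tlr::GptDecoder<float>::CudaStreamPtr const& stream)
{
    return std::make_unique<tlr::GptDecoder<float>>(mode,
        /*maxBatchSize=*/8, /*maxBeamWidth=*/1,
        /*vocabSize=*/32000, /*vocabSizePadded=*/32000, stream);
}
```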
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime10GptDecoder5setupERK14SamplingConfig6size_tRK14TensorConstPtrRKNSt8optionalI14DecodingOutputEERKNSt8optionalIKNSt6vectorIN13decoder_batch7RequestEEEEE">
<span id="_CPPv3N12tensorrt_llm7runtime10GptDecoder5setupERK14SamplingConfig6size_tRK14TensorConstPtrRKNSt8optionalI14DecodingOutputEERKNSt8optionalIKNSt6vectorIN13decoder_batch7RequestEEEEE"></span><span id="_CPPv2N12tensorrt_llm7runtime10GptDecoder5setupERK14SamplingConfig6size_tRK14TensorConstPtrRKNSt8optionalI14DecodingOutputEERKNSt8optionalIKNSt6vectorIN13decoder_batch7RequestEEEEE"></span><span id="tensorrt_llm::runtime::GptDecoder::setup__SamplingConfigCR.s.TensorConstPtrCR.std::optional:DecodingOutput:CR.std::optional:std::vector:decoder_batch::Request:C:CR"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1GptDecoder_1a5f4053070f354e9a1a4a55b35553a980"></span><span class="k"><span class="pre">virtual</span></span><span class="w"> </span><span class="kt"><span class="pre">void</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">setup</span></span></span><span class="sig-paren">(</span>
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime10GptDecoder5setupERK14SamplingConfig6size_tRK14TensorConstPtrRKNSt8optionalI14DecodingOutputEENSt8optionalIN8nvinfer18DataTypeEEERKNSt8optionalINSt6vectorI14TensorConstPtrEEEERKNSt8optionalINSt6vectorIN8executor23LookaheadDecodingConfigEEEEE">
<span id="_CPPv3N12tensorrt_llm7runtime10GptDecoder5setupERK14SamplingConfig6size_tRK14TensorConstPtrRKNSt8optionalI14DecodingOutputEENSt8optionalIN8nvinfer18DataTypeEEERKNSt8optionalINSt6vectorI14TensorConstPtrEEEERKNSt8optionalINSt6vectorIN8executor23LookaheadDecodingConfigEEEEE"></span><span id="_CPPv2N12tensorrt_llm7runtime10GptDecoder5setupERK14SamplingConfig6size_tRK14TensorConstPtrRKNSt8optionalI14DecodingOutputEENSt8optionalIN8nvinfer18DataTypeEEERKNSt8optionalINSt6vectorI14TensorConstPtrEEEERKNSt8optionalINSt6vectorIN8executor23LookaheadDecodingConfigEEEEE"></span><span id="tensorrt_llm::runtime::GptDecoder::setup__SamplingConfigCR.s.TensorConstPtrCR.std::optional:DecodingOutput:CR.std::optional:nvinfer1::DataType:.std::optional:std::vector:TensorConstPtr::CR.std::optional:std::vector:executor::LookaheadDecodingConfig::CR"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1GptDecoder_1a9556613b0918a30a169081da8dade1e3"></span><span class="k"><span class="pre">virtual</span></span><span class="w"> </span><span class="kt"><span class="pre">void</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">setup</span></span></span><span class="sig-paren">(</span>
<dl>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime14SamplingConfigE" title="tensorrt_llm::runtime::SamplingConfig"><span class="n"><span class="pre">SamplingConfig</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">samplingConfig</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">size_t</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">batchSize</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">TensorConstPtr</span></span><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">batchSlots</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">optional</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime14DecodingOutputE" title="tensorrt_llm::runtime::DecodingOutput"><span class="n"><span class="pre">DecodingOutput</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">output</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">nullopt</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">optional</span></span><span class="p"><span class="pre">&lt;</span></span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">vector</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batchE" title="tensorrt_llm::runtime::decoder_batch"><span class="n"><span class="pre">decoder_batch</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch7RequestE" title="tensorrt_llm::runtime::decoder_batch::Request"><span class="n"><span class="pre">Request</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">requests</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">nullopt</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">optional</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv48nvinfer1" title="nvinfer1"><span class="n"><span class="pre">nvinfer1</span></span></a><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">DataType</span></span><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">explicitDraftTokensDType</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">nullopt</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">optional</span></span><span class="p"><span class="pre">&lt;</span></span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">vector</span></span><span class="p"><span class="pre">&lt;</span></span><span class="n"><span class="pre">TensorConstPtr</span></span><span class="p"><span class="pre">&gt;</span></span><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">lookaheadPrompt</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">nullopt</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">optional</span></span><span class="p"><span class="pre">&lt;</span></span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">vector</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="executor.html#_CPPv4N12tensorrt_llm8executorE" title="tensorrt_llm::executor"><span class="n"><span class="pre">executor</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="executor.html#_CPPv4N12tensorrt_llm8executor23LookaheadDecodingConfigE" title="tensorrt_llm::executor::LookaheadDecodingConfig"><span class="n"><span class="pre">LookaheadDecodingConfig</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">lookaheadAlgoConfigs</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">nullopt</span></span></em>,</dd>
</dl>
<span class="sig-paren">)</span><span class="w"> </span><span class="k"><span class="pre">override</span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime10GptDecoder5setupERK14SamplingConfig6size_tRK14TensorConstPtrRKNSt8optionalI14DecodingOutputEERKNSt8optionalIKNSt6vectorIN13decoder_batch7RequestEEEEE" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
<span class="sig-paren">)</span><span class="w"> </span><span class="k"><span class="pre">override</span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime10GptDecoder5setupERK14SamplingConfig6size_tRK14TensorConstPtrRKNSt8optionalI14DecodingOutputEENSt8optionalIN8nvinfer18DataTypeEEERKNSt8optionalINSt6vectorI14TensorConstPtrEEEERKNSt8optionalINSt6vectorIN8executor23LookaheadDecodingConfigEEEEE" title="Link to this definition">#</a><br /></dt>
<dd><dl class="field-list simple">
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
<dd class="field-odd"><p><strong>explicitDraftTokensDType</strong> is only used by the ExplicitDraftTokens model to work around (WAR) the lack of a bf16 decoder.</p>
</dd>
</dl>
</dd></dl>
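The reworked `setup()` replaces the old `decoder_batch::Request` vector with three optionals: `explicitDraftTokensDType`, `lookaheadPrompt`, and `lookaheadAlgoConfigs`. A sketch calling it through the `IGptDecoder` interface it overrides, assuming `samplingConfig` and `batchSlots` are prepared elsewhere; the batch size and dtype are placeholders:

```cpp
#include <optional>

#include "tensorrt_llm/runtime/gptDecoder.h"

namespace tlr = tensorrt_llm::runtime;

// Pass the new explicitDraftTokensDType argument while leaving both
// lookahead options at their std::nullopt defaults.
void setupDecoder(tlr::IGptDecoder& decoder,
    tlr::SamplingConfig const& samplingConfig,
    tlr::IGptDecoder::TensorConstPtr const& batchSlots)
{
    decoder.setup(samplingConfig, /*batchSize=*/8, batchSlots,
        /*output=*/std::nullopt,
        /*explicitDraftTokensDType=*/nvinfer1::DataType::kHALF,
        /*lookaheadPrompt=*/std::nullopt,
        /*lookaheadAlgoConfigs=*/std::nullopt);
}
```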
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime10GptDecoder12forwardAsyncER14DecodingOutputRK13DecodingInput">
@ -5800,19 +5806,26 @@
<dd></dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime11IGptDecoder5setupERK14SamplingConfig6size_tRK14TensorConstPtrRKNSt8optionalI14DecodingOutputEERKNSt8optionalIKNSt6vectorIN13decoder_batch7RequestEEEEE">
<span id="_CPPv3N12tensorrt_llm7runtime11IGptDecoder5setupERK14SamplingConfig6size_tRK14TensorConstPtrRKNSt8optionalI14DecodingOutputEERKNSt8optionalIKNSt6vectorIN13decoder_batch7RequestEEEEE"></span><span id="_CPPv2N12tensorrt_llm7runtime11IGptDecoder5setupERK14SamplingConfig6size_tRK14TensorConstPtrRKNSt8optionalI14DecodingOutputEERKNSt8optionalIKNSt6vectorIN13decoder_batch7RequestEEEEE"></span><span id="tensorrt_llm::runtime::IGptDecoder::setup__SamplingConfigCR.s.TensorConstPtrCR.std::optional:DecodingOutput:CR.std::optional:std::vector:decoder_batch::Request:C:CR"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1IGptDecoder_1af15d3c58f50e58cac3f44d4580e6db84"></span><span class="k"><span class="pre">virtual</span></span><span class="w"> </span><span class="kt"><span class="pre">void</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">setup</span></span></span><span class="sig-paren">(</span>
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime11IGptDecoder5setupERK14SamplingConfig6size_tRK14TensorConstPtrRKNSt8optionalI14DecodingOutputEENSt8optionalIN8nvinfer18DataTypeEEERKNSt8optionalINSt6vectorI14TensorConstPtrEEEERKNSt8optionalINSt6vectorIN8executor23LookaheadDecodingConfigEEEEE">
<span id="_CPPv3N12tensorrt_llm7runtime11IGptDecoder5setupERK14SamplingConfig6size_tRK14TensorConstPtrRKNSt8optionalI14DecodingOutputEENSt8optionalIN8nvinfer18DataTypeEEERKNSt8optionalINSt6vectorI14TensorConstPtrEEEERKNSt8optionalINSt6vectorIN8executor23LookaheadDecodingConfigEEEEE"></span><span id="_CPPv2N12tensorrt_llm7runtime11IGptDecoder5setupERK14SamplingConfig6size_tRK14TensorConstPtrRKNSt8optionalI14DecodingOutputEENSt8optionalIN8nvinfer18DataTypeEEERKNSt8optionalINSt6vectorI14TensorConstPtrEEEERKNSt8optionalINSt6vectorIN8executor23LookaheadDecodingConfigEEEEE"></span><span id="tensorrt_llm::runtime::IGptDecoder::setup__SamplingConfigCR.s.TensorConstPtrCR.std::optional:DecodingOutput:CR.std::optional:nvinfer1::DataType:.std::optional:std::vector:TensorConstPtr::CR.std::optional:std::vector:executor::LookaheadDecodingConfig::CR"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1IGptDecoder_1a2574cb482ead5325a6ee30003455c188"></span><span class="k"><span class="pre">virtual</span></span><span class="w"> </span><span class="kt"><span class="pre">void</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">setup</span></span></span><span class="sig-paren">(</span>
<dl>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime14SamplingConfigE" title="tensorrt_llm::runtime::SamplingConfig"><span class="n"><span class="pre">SamplingConfig</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">samplingConfig</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">size_t</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">batchSize</span></span></em>,</dd>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime11IGptDecoder14TensorConstPtrE" title="tensorrt_llm::runtime::IGptDecoder::TensorConstPtr"><span class="n"><span class="pre">TensorConstPtr</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">batchSlots</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">optional</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime14DecodingOutputE" title="tensorrt_llm::runtime::DecodingOutput"><span class="n"><span class="pre">DecodingOutput</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">output</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">nullopt</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">optional</span></span><span class="p"><span class="pre">&lt;</span></span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">vector</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batchE" title="tensorrt_llm::runtime::decoder_batch"><span class="n"><span class="pre">decoder_batch</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch7RequestE" title="tensorrt_llm::runtime::decoder_batch::Request"><span class="n"><span class="pre">Request</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">requests</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">nullopt</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">optional</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv48nvinfer1" title="nvinfer1"><span class="n"><span class="pre">nvinfer1</span></span></a><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">DataType</span></span><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">explicitDraftTokensDType</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">nullopt</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">optional</span></span><span class="p"><span class="pre">&lt;</span></span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">vector</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime11IGptDecoder14TensorConstPtrE" title="tensorrt_llm::runtime::IGptDecoder::TensorConstPtr"><span class="n"><span class="pre">TensorConstPtr</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">lookaheadPrompt</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">nullopt</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">optional</span></span><span class="p"><span class="pre">&lt;</span></span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">vector</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="executor.html#_CPPv4N12tensorrt_llm8executorE" title="tensorrt_llm::executor"><span class="n"><span class="pre">executor</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="executor.html#_CPPv4N12tensorrt_llm8executor23LookaheadDecodingConfigE" title="tensorrt_llm::executor::LookaheadDecodingConfig"><span class="n"><span class="pre">LookaheadDecodingConfig</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">lookaheadAlgoConfigs</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">nullopt</span></span></em>,</dd>
</dl>
<span class="sig-paren">)</span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="m"><span class="pre">0</span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime11IGptDecoder5setupERK14SamplingConfig6size_tRK14TensorConstPtrRKNSt8optionalI14DecodingOutputEERKNSt8optionalIKNSt6vectorIN13decoder_batch7RequestEEEEE" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
<span class="sig-paren">)</span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="m"><span class="pre">0</span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime11IGptDecoder5setupERK14SamplingConfig6size_tRK14TensorConstPtrRKNSt8optionalI14DecodingOutputEENSt8optionalIN8nvinfer18DataTypeEEERKNSt8optionalINSt6vectorI14TensorConstPtrEEEERKNSt8optionalINSt6vectorIN8executor23LookaheadDecodingConfigEEEEE" title="Link to this definition">#</a><br /></dt>
<dd><dl class="field-list simple">
<dt class="field-odd">Parameters<span class="colon">:</span></dt>
<dd class="field-odd"><p><strong>explicitDraftTokensDType</strong> is only used by the ExplicitDraftTokens model to work around (WAR) the lack of a bf16 decoder.</p>
</dd>
</dl>
</dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime11IGptDecoder12forwardAsyncER14DecodingOutputRK13DecodingInput">
@ -5860,8 +5873,8 @@
<div class="breathe-sectiondef docutils container">
<p class="breathe-sectiondef-title rubric" id="breathe-section-title-public-static-functions">Public Static Functions</p>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime11IGptDecoder6createERKN8executor12DecodingModeEN8nvinfer18DataTypeE6size_t6size_t6size_t6size_t6size_tRKN13BufferManager13CudaStreamPtrERKNSt10shared_ptrIK25SpeculativeDecodingModuleEE">
<span id="_CPPv3N12tensorrt_llm7runtime11IGptDecoder6createERKN8executor12DecodingModeEN8nvinfer18DataTypeE6size_t6size_t6size_t6size_t6size_tRKN13BufferManager13CudaStreamPtrERKNSt10shared_ptrIK25SpeculativeDecodingModuleEE"></span><span id="_CPPv2N12tensorrt_llm7runtime11IGptDecoder6createERKN8executor12DecodingModeEN8nvinfer18DataTypeE6size_t6size_t6size_t6size_t6size_tRKN13BufferManager13CudaStreamPtrERKNSt10shared_ptrIK25SpeculativeDecodingModuleEE"></span><span id="tensorrt_llm::runtime::IGptDecoder::create__executor::DecodingModeCR.nvinfer1::DataType.s.s.s.s.s.BufferManager::CudaStreamPtrCR.std::shared_ptr:SpeculativeDecodingModuleC:CR"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1IGptDecoder_1a16c5211cd06c17f8100edacde78c6477"></span><span class="k"><span class="pre">static</span></span><span class="w"> </span><span class="k"><span class="pre">inline</span></span><span class="w"> </span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">unique_ptr</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime11IGptDecoderE" title="tensorrt_llm::runtime::IGptDecoder"><span class="n"><span class="pre">IGptDecoder</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">create</span></span></span><span class="sig-paren">(</span>
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime11IGptDecoder6createERKN8executor12DecodingModeEN8nvinfer18DataTypeE6size_t6size_t6size_t6size_tRKN13BufferManager13CudaStreamPtrERKNSt10shared_ptrIK25SpeculativeDecodingModuleEE">
<span id="_CPPv3N12tensorrt_llm7runtime11IGptDecoder6createERKN8executor12DecodingModeEN8nvinfer18DataTypeE6size_t6size_t6size_t6size_tRKN13BufferManager13CudaStreamPtrERKNSt10shared_ptrIK25SpeculativeDecodingModuleEE"></span><span id="_CPPv2N12tensorrt_llm7runtime11IGptDecoder6createERKN8executor12DecodingModeEN8nvinfer18DataTypeE6size_t6size_t6size_t6size_tRKN13BufferManager13CudaStreamPtrERKNSt10shared_ptrIK25SpeculativeDecodingModuleEE"></span><span id="tensorrt_llm::runtime::IGptDecoder::create__executor::DecodingModeCR.nvinfer1::DataType.s.s.s.s.BufferManager::CudaStreamPtrCR.std::shared_ptr:SpeculativeDecodingModuleC:CR"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1IGptDecoder_1a5af03bad9aa78a2159ae16bfe470106c"></span><span class="k"><span class="pre">static</span></span><span class="w"> </span><span class="k"><span class="pre">inline</span></span><span class="w"> </span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">unique_ptr</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime11IGptDecoderE" title="tensorrt_llm::runtime::IGptDecoder"><span class="n"><span class="pre">IGptDecoder</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">create</span></span></span><span class="sig-paren">(</span>
<dl>
<dd><em class="sig-param"><a class="reference internal" href="executor.html#_CPPv4N12tensorrt_llm8executorE" title="tensorrt_llm::executor"><span class="n"><span class="pre">executor</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="executor.html#_CPPv4N12tensorrt_llm8executor12DecodingModeE" title="tensorrt_llm::executor::DecodingMode"><span class="n"><span class="pre">DecodingMode</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">mode</span></span></em>,</dd>
@ -5870,12 +5883,11 @@
<dd><em class="sig-param"><span class="n"><span class="pre">size_t</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">maxBeamWidth</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">size_t</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">vocabSize</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">size_t</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">vocabSizePadded</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">size_t</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">maxSequenceLength</span></span></em>,</dd>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime13BufferManagerE" title="tensorrt_llm::runtime::BufferManager"><span class="n"><span class="pre">BufferManager</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime13BufferManager13CudaStreamPtrE" title="tensorrt_llm::runtime::BufferManager::CudaStreamPtr"><span class="n"><span class="pre">CudaStreamPtr</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">stream</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">shared_ptr</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime25SpeculativeDecodingModuleE" title="tensorrt_llm::runtime::SpeculativeDecodingModule"><span class="n"><span class="pre">SpeculativeDecodingModule</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">speculativeDecodingModule</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="k"><span class="pre">nullptr</span></span></em>,</dd>
</dl>
<span class="sig-paren">)</span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime11IGptDecoder6createERKN8executor12DecodingModeEN8nvinfer18DataTypeE6size_t6size_t6size_t6size_t6size_tRKN13BufferManager13CudaStreamPtrERKNSt10shared_ptrIK25SpeculativeDecodingModuleEE" title="Link to this definition">#</a><br /></dt>
<span class="sig-paren">)</span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime11IGptDecoder6createERKN8executor12DecodingModeEN8nvinfer18DataTypeE6size_t6size_t6size_t6size_tRKN13BufferManager13CudaStreamPtrERKNSt10shared_ptrIK25SpeculativeDecodingModuleEE" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
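The `IGptDecoder::create()` factory loses `maxSequenceLength` in the same way. A sketch of the revised call, assuming the first `size_t` after `dtype` (hidden by the hunk boundary) is `maxBatchSize`; placeholder sizes again:

```cpp
#include <memory>

#include "tensorrt_llm/runtime/gptDecoder.h"

namespace tlr = tensorrt_llm::runtime;
namespace tle = tensorrt_llm::executor;

// The dtype argument selects the decoder specialization;
// speculativeDecodingModule defaults to nullptr.
std::unique_ptr<tlr::IGptDecoder> createDecoder(tle::DecodingMode const& mode,
    tlr::BufferManager::CudaStreamPtr const& stream)
{
    return tlr::IGptDecoder::create(mode, nvinfer1::DataType::kFLOAT,
        /*maxBatchSize=*/8, /*maxBeamWidth=*/1,
        /*vocabSize=*/32000, /*vocabSizePadded=*/32000, stream);
}
```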
</div>
@ -9589,20 +9601,19 @@ one more than decoding draft tokens for prediction from primary head </p>
<div class="breathe-sectiondef docutils container">
<p class="breathe-sectiondef-title rubric" id="breathe-section-title-public-functions">Public Functions</p>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime18IGptDecoderBatched5setupERKN8executor12DecodingModeE10SizeType3210SizeType3210SizeType32N8nvinfer18DataTypeERK11ModelConfigRK11WorldConfig">
<span id="_CPPv3N12tensorrt_llm7runtime18IGptDecoderBatched5setupERKN8executor12DecodingModeE10SizeType3210SizeType3210SizeType32N8nvinfer18DataTypeERK11ModelConfigRK11WorldConfig"></span><span id="_CPPv2N12tensorrt_llm7runtime18IGptDecoderBatched5setupERKN8executor12DecodingModeE10SizeType3210SizeType3210SizeType32N8nvinfer18DataTypeERK11ModelConfigRK11WorldConfig"></span><span id="tensorrt_llm::runtime::IGptDecoderBatched::setup__executor::DecodingModeCR.SizeType32.SizeType32.SizeType32.nvinfer1::DataType.ModelConfigCR.WorldConfigCR"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1IGptDecoderBatched_1a5ef9dff42e3a44389c190c14914b8458"></span><span class="k"><span class="pre">virtual</span></span><span class="w"> </span><span class="kt"><span class="pre">void</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">setup</span></span></span><span class="sig-paren">(</span>
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime18IGptDecoderBatched5setupERKN8executor12DecodingModeE10SizeType3210SizeType32N8nvinfer18DataTypeERK11ModelConfigRK11WorldConfig">
<span id="_CPPv3N12tensorrt_llm7runtime18IGptDecoderBatched5setupERKN8executor12DecodingModeE10SizeType3210SizeType32N8nvinfer18DataTypeERK11ModelConfigRK11WorldConfig"></span><span id="_CPPv2N12tensorrt_llm7runtime18IGptDecoderBatched5setupERKN8executor12DecodingModeE10SizeType3210SizeType32N8nvinfer18DataTypeERK11ModelConfigRK11WorldConfig"></span><span id="tensorrt_llm::runtime::IGptDecoderBatched::setup__executor::DecodingModeCR.SizeType32.SizeType32.nvinfer1::DataType.ModelConfigCR.WorldConfigCR"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1IGptDecoderBatched_1a8b5d621dae01ad7a3b4262a41e2d0916"></span><span class="k"><span class="pre">virtual</span></span><span class="w"> </span><span class="kt"><span class="pre">void</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">setup</span></span></span><span class="sig-paren">(</span>
<dl>
<dd><em class="sig-param"><a class="reference internal" href="executor.html#_CPPv4N12tensorrt_llm8executorE" title="tensorrt_llm::executor"><span class="n"><span class="pre">executor</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="executor.html#_CPPv4N12tensorrt_llm8executor12DecodingModeE" title="tensorrt_llm::executor::DecodingMode"><span class="n"><span class="pre">DecodingMode</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">mode</span></span></em>,</dd>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime10SizeType32E" title="tensorrt_llm::runtime::SizeType32"><span class="n"><span class="pre">SizeType32</span></span></a><span class="w"> </span><span class="n sig-param"><span class="pre">maxBatchSize</span></span></em>,</dd>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime10SizeType32E" title="tensorrt_llm::runtime::SizeType32"><span class="n"><span class="pre">SizeType32</span></span></a><span class="w"> </span><span class="n sig-param"><span class="pre">maxBeamWidth</span></span></em>,</dd>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime10SizeType32E" title="tensorrt_llm::runtime::SizeType32"><span class="n"><span class="pre">SizeType32</span></span></a><span class="w"> </span><span class="n sig-param"><span class="pre">maxSequenceLength</span></span></em>,</dd>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv48nvinfer1" title="nvinfer1"><span class="n"><span class="pre">nvinfer1</span></span></a><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">DataType</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">dtype</span></span></em>,</dd>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime11ModelConfigE" title="tensorrt_llm::runtime::ModelConfig"><span class="n"><span class="pre">ModelConfig</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">modelConfig</span></span></em>,</dd>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime11WorldConfigE" title="tensorrt_llm::runtime::WorldConfig"><span class="n"><span class="pre">WorldConfig</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">worldConfig</span></span></em>,</dd>
</dl>
<span class="sig-paren">)</span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="m"><span class="pre">0</span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime18IGptDecoderBatched5setupERKN8executor12DecodingModeE10SizeType3210SizeType3210SizeType32N8nvinfer18DataTypeERK11ModelConfigRK11WorldConfig" title="Link to this definition">#</a><br /></dt>
<span class="sig-paren">)</span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="m"><span class="pre">0</span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime18IGptDecoderBatched5setupERKN8executor12DecodingModeE10SizeType3210SizeType32N8nvinfer18DataTypeERK11ModelConfigRK11WorldConfig" title="Link to this definition">#</a><br /></dt>
<dd><p>Set up the decoder before calling <code class="docutils literal notranslate"><a class="reference internal" href="#classtensorrt__llm_1_1runtime_1_1IGptDecoderBatched_1ab71a988f92d801a763c8b7b960fd0769"><span class="std std-ref"><span class="pre">forward()</span></span></a></code></p>
</dd></dl>
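The call order implied here: construct the decoder, call setup() once with the engine-level limits, then drive per-step decoding through forwardAsync()/forward(). Below is a minimal C++ sketch of that order against the updated signature (maxSequenceLength is no longer a parameter); the include path, the DecodingMode factory name, and the numeric limits are illustrative assumptions, not taken from this page:

// Sketch only: setup() must run before any forward()/forwardAsync() call.
#include "tensorrt_llm/runtime/gptDecoderBatched.h" // assumed include path

using namespace tensorrt_llm;

void initDecoder(runtime::ModelConfig const& modelConfig,
                 runtime::WorldConfig const& worldConfig,
                 runtime::GptDecoderBatched::CudaStreamPtr const& stream)
{
    runtime::GptDecoderBatched decoder{stream};
    decoder.setup(executor::DecodingMode::TopKTopP(), // factory name assumed
                  /*maxBatchSize=*/8, /*maxBeamWidth=*/1,
                  nvinfer1::DataType::kHALF, modelConfig, worldConfig);
    // ... per-step decoding then goes through forwardAsync()/forward() ...
}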
@ -10000,20 +10011,19 @@ one more than decoding draft tokens for prediction from primary head </p>
<dd></dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched5setupERKN8executor12DecodingModeE10SizeType3210SizeType3210SizeType32N8nvinfer18DataTypeERK11ModelConfigRK11WorldConfig">
<span id="_CPPv3N12tensorrt_llm7runtime17GptDecoderBatched5setupERKN8executor12DecodingModeE10SizeType3210SizeType3210SizeType32N8nvinfer18DataTypeERK11ModelConfigRK11WorldConfig"></span><span id="_CPPv2N12tensorrt_llm7runtime17GptDecoderBatched5setupERKN8executor12DecodingModeE10SizeType3210SizeType3210SizeType32N8nvinfer18DataTypeERK11ModelConfigRK11WorldConfig"></span><span id="tensorrt_llm::runtime::GptDecoderBatched::setup__executor::DecodingModeCR.SizeType32.SizeType32.SizeType32.nvinfer1::DataType.ModelConfigCR.WorldConfigCR"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1GptDecoderBatched_1a8977d359344bba9f572e60c556b9a890"></span><span class="k"><span class="pre">virtual</span></span><span class="w"> </span><span class="kt"><span class="pre">void</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">setup</span></span></span><span class="sig-paren">(</span>
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched5setupERKN8executor12DecodingModeE10SizeType3210SizeType32N8nvinfer18DataTypeERK11ModelConfigRK11WorldConfig">
<span id="_CPPv3N12tensorrt_llm7runtime17GptDecoderBatched5setupERKN8executor12DecodingModeE10SizeType3210SizeType32N8nvinfer18DataTypeERK11ModelConfigRK11WorldConfig"></span><span id="_CPPv2N12tensorrt_llm7runtime17GptDecoderBatched5setupERKN8executor12DecodingModeE10SizeType3210SizeType32N8nvinfer18DataTypeERK11ModelConfigRK11WorldConfig"></span><span id="tensorrt_llm::runtime::GptDecoderBatched::setup__executor::DecodingModeCR.SizeType32.SizeType32.nvinfer1::DataType.ModelConfigCR.WorldConfigCR"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1GptDecoderBatched_1a2cd9e672353c234e41d31cb7dbdb103a"></span><span class="k"><span class="pre">virtual</span></span><span class="w"> </span><span class="kt"><span class="pre">void</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">setup</span></span></span><span class="sig-paren">(</span>
<dl>
<dd><em class="sig-param"><a class="reference internal" href="executor.html#_CPPv4N12tensorrt_llm8executorE" title="tensorrt_llm::executor"><span class="n"><span class="pre">executor</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="executor.html#_CPPv4N12tensorrt_llm8executor12DecodingModeE" title="tensorrt_llm::executor::DecodingMode"><span class="n"><span class="pre">DecodingMode</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">mode</span></span></em>,</dd>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime10SizeType32E" title="tensorrt_llm::runtime::SizeType32"><span class="n"><span class="pre">SizeType32</span></span></a><span class="w"> </span><span class="n sig-param"><span class="pre">maxBatchSize</span></span></em>,</dd>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime10SizeType32E" title="tensorrt_llm::runtime::SizeType32"><span class="n"><span class="pre">SizeType32</span></span></a><span class="w"> </span><span class="n sig-param"><span class="pre">maxBeamWidth</span></span></em>,</dd>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime10SizeType32E" title="tensorrt_llm::runtime::SizeType32"><span class="n"><span class="pre">SizeType32</span></span></a><span class="w"> </span><span class="n sig-param"><span class="pre">maxSequenceLength</span></span></em>,</dd>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv48nvinfer1" title="nvinfer1"><span class="n"><span class="pre">nvinfer1</span></span></a><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">DataType</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">dtype</span></span></em>,</dd>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime11ModelConfigE" title="tensorrt_llm::runtime::ModelConfig"><span class="n"><span class="pre">ModelConfig</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">modelConfig</span></span></em>,</dd>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime11WorldConfigE" title="tensorrt_llm::runtime::WorldConfig"><span class="n"><span class="pre">WorldConfig</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">worldConfig</span></span></em>,</dd>
</dl>
<span class="sig-paren">)</span><span class="w"> </span><span class="k"><span class="pre">override</span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched5setupERKN8executor12DecodingModeE10SizeType3210SizeType3210SizeType32N8nvinfer18DataTypeERK11ModelConfigRK11WorldConfig" title="Link to this definition">#</a><br /></dt>
<span class="sig-paren">)</span><span class="w"> </span><span class="k"><span class="pre">override</span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched5setupERKN8executor12DecodingModeE10SizeType3210SizeType32N8nvinfer18DataTypeERK11ModelConfigRK11WorldConfig" title="Link to this definition">#</a><br /></dt>
<dd><p>Set up the decoder before calling <code class="docutils literal notranslate"><a class="reference internal" href="#classtensorrt__llm_1_1runtime_1_1GptDecoderBatched_1a41740e026890310d78a3ac98c22e3132"><span class="std std-ref"><span class="pre">forward()</span></span></a></code></p>
</dd></dl>
@ -10840,11 +10850,6 @@ one more than decoding draft tokens for prediction from primary head </p>
<span id="_CPPv3N12tensorrt_llm7runtime13decoder_batch7Request13medusaTreeIdsE"></span><span id="_CPPv2N12tensorrt_llm7runtime13decoder_batch7Request13medusaTreeIdsE"></span><span id="tensorrt_llm::runtime::decoder_batch::Request::medusaTreeIds__TensorPtr"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1decoder__batch_1_1Request_1a53cd490ea4a4acc421b66a24ede31697"></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch7Request9TensorPtrE" title="tensorrt_llm::runtime::decoder_batch::Request::TensorPtr"><span class="n"><span class="pre">TensorPtr</span></span></a><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">medusaTreeIds</span></span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch7Request13medusaTreeIdsE" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
<dl class="cpp var">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime13decoder_batch7Request5dtypeE">
<span id="_CPPv3N12tensorrt_llm7runtime13decoder_batch7Request5dtypeE"></span><span id="_CPPv2N12tensorrt_llm7runtime13decoder_batch7Request5dtypeE"></span><span id="tensorrt_llm::runtime::decoder_batch::Request::dtype__nvinfer1::DataType"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1decoder__batch_1_1Request_1a3b1b508a767e8f4af619a2553239319b"></span><a class="reference internal" href="#_CPPv48nvinfer1" title="nvinfer1"><span class="n"><span class="pre">nvinfer1</span></span></a><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">DataType</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">dtype</span></span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch7Request5dtypeE" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
<dl class="cpp var">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime13decoder_batch7Request22lookaheadRuntimeConfigE">
<span id="_CPPv3N12tensorrt_llm7runtime13decoder_batch7Request22lookaheadRuntimeConfigE"></span><span id="_CPPv2N12tensorrt_llm7runtime13decoder_batch7Request22lookaheadRuntimeConfigE"></span><span id="tensorrt_llm::runtime::decoder_batch::Request::lookaheadRuntimeConfig__std::optional:executor::LookaheadDecodingConfig:"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1decoder__batch_1_1Request_1a724413e68cfc7bea981a1b1b334a1704"></span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">optional</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="executor.html#_CPPv4N12tensorrt_llm8executorE" title="tensorrt_llm::executor"><span class="n"><span class="pre">executor</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="executor.html#_CPPv4N12tensorrt_llm8executor23LookaheadDecodingConfigE" title="tensorrt_llm::executor::LookaheadDecodingConfig"><span class="n"><span class="pre">LookaheadDecodingConfig</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">lookaheadRuntimeConfig</span></span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch7Request22lookaheadRuntimeConfigE" title="Link to this definition">#</a><br /></dt>
@ -11005,36 +11010,6 @@ one more than decoding draft tokens for prediction from primary head </p>
<dd><p>Set up buffers for speculative decoding. </p>
</dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState24setupExplicitDraftTokensEN26ExplicitDraftTokensBuffers6InputsE">
<span id="_CPPv3NK12tensorrt_llm7runtime7decoder12DecoderState24setupExplicitDraftTokensEN26ExplicitDraftTokensBuffers6InputsE"></span><span id="_CPPv2NK12tensorrt_llm7runtime7decoder12DecoderState24setupExplicitDraftTokensEN26ExplicitDraftTokensBuffers6InputsE"></span><span id="tensorrt_llm::runtime::decoder::DecoderState::setupExplicitDraftTokens__ExplicitDraftTokensBuffers::InputsC"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1decoder_1_1DecoderState_1a0a23ac6c04636c4b581b0d5b86dceecf"></span><span class="kt"><span class="pre">void</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">setupExplicitDraftTokens</span></span></span><span class="sig-paren">(</span>
<dl>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime26ExplicitDraftTokensBuffersE" title="tensorrt_llm::runtime::ExplicitDraftTokensBuffers"><span class="n"><span class="pre">ExplicitDraftTokensBuffers</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime26ExplicitDraftTokensBuffers6InputsE" title="tensorrt_llm::runtime::ExplicitDraftTokensBuffers::Inputs"><span class="n"><span class="pre">Inputs</span></span></a><span class="w"> </span><span class="n sig-param"><span class="pre">explicitDraftTokensBuffers</span></span></em>,</dd>
</dl>
<span class="sig-paren">)</span><span class="w"> </span><span class="k"><span class="pre">const</span></span><a class="headerlink" href="#_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState24setupExplicitDraftTokensEN26ExplicitDraftTokensBuffers6InputsE" title="Link to this definition">#</a><br /></dt>
<dd><p>Set up buffers for ExplicitDraftTokens decoding. </p>
</dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState14setupLookaheadE24LookaheadDecodingBuffers">
<span id="_CPPv3NK12tensorrt_llm7runtime7decoder12DecoderState14setupLookaheadE24LookaheadDecodingBuffers"></span><span id="_CPPv2NK12tensorrt_llm7runtime7decoder12DecoderState14setupLookaheadE24LookaheadDecodingBuffers"></span><span id="tensorrt_llm::runtime::decoder::DecoderState::setupLookahead__LookaheadDecodingBuffersC"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1decoder_1_1DecoderState_1a98617f124fbe9bd6a6b25a9880cc84a2"></span><span class="kt"><span class="pre">void</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">setupLookahead</span></span></span><span class="sig-paren">(</span>
<dl>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime24LookaheadDecodingBuffersE" title="tensorrt_llm::runtime::LookaheadDecodingBuffers"><span class="n"><span class="pre">LookaheadDecodingBuffers</span></span></a><span class="w"> </span><span class="n sig-param"><span class="pre">lookaheadDecodingBuffers</span></span></em>,</dd>
</dl>
<span class="sig-paren">)</span><span class="w"> </span><span class="k"><span class="pre">const</span></span><a class="headerlink" href="#_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState14setupLookaheadE24LookaheadDecodingBuffers" title="Link to this definition">#</a><br /></dt>
<dd><p>Set up buffers for Lookahead decoding. </p>
</dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState10setupEagleEN12EagleBuffers6InputsE">
<span id="_CPPv3NK12tensorrt_llm7runtime7decoder12DecoderState10setupEagleEN12EagleBuffers6InputsE"></span><span id="_CPPv2NK12tensorrt_llm7runtime7decoder12DecoderState10setupEagleEN12EagleBuffers6InputsE"></span><span id="tensorrt_llm::runtime::decoder::DecoderState::setupEagle__EagleBuffers::InputsC"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1decoder_1_1DecoderState_1a234f3ff6476354e26bc1d3ab150b1e65"></span><span class="kt"><span class="pre">void</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">setupEagle</span></span></span><span class="sig-paren">(</span><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime12EagleBuffersE" title="tensorrt_llm::runtime::EagleBuffers"><span class="n"><span class="pre">EagleBuffers</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime12EagleBuffers6InputsE" title="tensorrt_llm::runtime::EagleBuffers::Inputs"><span class="n"><span class="pre">Inputs</span></span></a><span class="w"> </span><span class="n sig-param"><span class="pre">eagleBuffers</span></span></em><span class="sig-paren">)</span><span class="w"> </span><span class="k"><span class="pre">const</span></span><a class="headerlink" href="#_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState10setupEagleEN12EagleBuffers6InputsE" title="Link to this definition">#</a><br /></dt>
<dd><p>Set up buffers for Eagle decoding. </p>
</dd></dl>
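These three setup calls are the write side of DecoderState's speculative-decoding plumbing: each hands a pre-allocated buffer bundle to the state object for the corresponding mode. A hedged sketch of the dispatch follows; the SpeculativeDecodingMode predicate names (isExplicitDraftTokens() and friends), the include path, and the assumption that the bundles are allocated elsewhere are mine, not confirmed by this page:

// Sketch: wire pre-allocated buffers into DecoderState based on the active mode.
#include "tensorrt_llm/runtime/decoderState.h" // assumed include path

void wireSpeculativeBuffers(
    tensorrt_llm::runtime::decoder::DecoderState const& state,
    tensorrt_llm::runtime::ExplicitDraftTokensBuffers::Inputs const& edtInputs,
    tensorrt_llm::runtime::LookaheadDecodingBuffers const& lookaheadBuffers,
    tensorrt_llm::runtime::EagleBuffers::Inputs const& eagleInputs)
{
    auto const mode = state.getSpeculativeDecodingMode();
    if (mode.isExplicitDraftTokens()) // predicate names assumed
        state.setupExplicitDraftTokens(edtInputs);
    else if (mode.isLookaheadDecoding())
        state.setupLookahead(lookaheadBuffers);
    else if (mode.isEagle())
        state.setupEagle(eagleInputs);
}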
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime7decoder12DecoderState16disableLookaheadERK13RequestVector">
<span id="_CPPv3N12tensorrt_llm7runtime7decoder12DecoderState16disableLookaheadERK13RequestVector"></span><span id="_CPPv2N12tensorrt_llm7runtime7decoder12DecoderState16disableLookaheadERK13RequestVector"></span><span id="tensorrt_llm::runtime::decoder::DecoderState::disableLookahead__RequestVectorCR"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1decoder_1_1DecoderState_1a3cdb65ce4c92a02193e39f6d6cd73606"></span><span class="kt"><span class="pre">void</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">disableLookahead</span></span></span><span class="sig-paren">(</span><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime7decoder12DecoderState13RequestVectorE" title="tensorrt_llm::runtime::decoder::DecoderState::RequestVector"><span class="n"><span class="pre">RequestVector</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">genRequests</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime7decoder12DecoderState16disableLookaheadERK13RequestVector" title="Link to this definition">#</a><br /></dt>
@ -11325,7 +11300,31 @@ one more than decoding draft tokens for prediction from primary head </p>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState26getSpeculativeDecodingModeEv">
<span id="_CPPv3NK12tensorrt_llm7runtime7decoder12DecoderState26getSpeculativeDecodingModeEv"></span><span id="_CPPv2NK12tensorrt_llm7runtime7decoder12DecoderState26getSpeculativeDecodingModeEv"></span><span id="tensorrt_llm::runtime::decoder::DecoderState::getSpeculativeDecodingModeC"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1decoder_1_1DecoderState_1ae4cc9e8d67a255be108af23fec4a60bf"></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime23SpeculativeDecodingModeE" title="tensorrt_llm::runtime::SpeculativeDecodingMode"><span class="n"><span class="pre">SpeculativeDecodingMode</span></span></a><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">getSpeculativeDecodingMode</span></span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><span class="w"> </span><span class="k"><span class="pre">const</span></span><a class="headerlink" href="#_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState26getSpeculativeDecodingModeEv" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
<dd><p>Get the speculative decoding mode. </p>
</dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState29getExplicitDraftTokensBuffersEv">
<span id="_CPPv3NK12tensorrt_llm7runtime7decoder12DecoderState29getExplicitDraftTokensBuffersEv"></span><span id="_CPPv2NK12tensorrt_llm7runtime7decoder12DecoderState29getExplicitDraftTokensBuffersEv"></span><span id="tensorrt_llm::runtime::decoder::DecoderState::getExplicitDraftTokensBuffersC"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1decoder_1_1DecoderState_1aedbc8cfee155e5552e8ce838aa82f6d2"></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime26ExplicitDraftTokensBuffersE" title="tensorrt_llm::runtime::ExplicitDraftTokensBuffers"><span class="n"><span class="pre">ExplicitDraftTokensBuffers</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime26ExplicitDraftTokensBuffers6InputsE" title="tensorrt_llm::runtime::ExplicitDraftTokensBuffers::Inputs"><span class="n"><span class="pre">Inputs</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="sig-name descname"><span class="n"><span class="pre">getExplicitDraftTokensBuffers</span></span></span><span class="sig-paren">(</span>
<dl>
</dl>
<span class="sig-paren">)</span><span class="w"> </span><span class="k"><span class="pre">const</span></span><a class="headerlink" href="#_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState29getExplicitDraftTokensBuffersEv" title="Link to this definition">#</a><br /></dt>
<dd><p>Get the explicit draft tokens buffers. </p>
</dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState15getEagleBuffersEv">
<span id="_CPPv3NK12tensorrt_llm7runtime7decoder12DecoderState15getEagleBuffersEv"></span><span id="_CPPv2NK12tensorrt_llm7runtime7decoder12DecoderState15getEagleBuffersEv"></span><span id="tensorrt_llm::runtime::decoder::DecoderState::getEagleBuffersC"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1decoder_1_1DecoderState_1a4a0cce0aa607216165923c9a7b376e29"></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime12EagleBuffersE" title="tensorrt_llm::runtime::EagleBuffers"><span class="n"><span class="pre">EagleBuffers</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime12EagleBuffers6InputsE" title="tensorrt_llm::runtime::EagleBuffers::Inputs"><span class="n"><span class="pre">Inputs</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="sig-name descname"><span class="n"><span class="pre">getEagleBuffers</span></span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><span class="w"> </span><span class="k"><span class="pre">const</span></span><a class="headerlink" href="#_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState15getEagleBuffersEv" title="Link to this definition">#</a><br /></dt>
<dd><p>Get the Eagle buffers. </p>
</dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState19getLookaheadBuffersEv">
<span id="_CPPv3NK12tensorrt_llm7runtime7decoder12DecoderState19getLookaheadBuffersEv"></span><span id="_CPPv2NK12tensorrt_llm7runtime7decoder12DecoderState19getLookaheadBuffersEv"></span><span id="tensorrt_llm::runtime::decoder::DecoderState::getLookaheadBuffersC"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1decoder_1_1DecoderState_1a6dee18bf1de594bf7ed1d94ec739178f"></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime24LookaheadDecodingBuffersE" title="tensorrt_llm::runtime::LookaheadDecodingBuffers"><span class="n"><span class="pre">LookaheadDecodingBuffers</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="sig-name descname"><span class="n"><span class="pre">getLookaheadBuffers</span></span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><span class="w"> </span><span class="k"><span class="pre">const</span></span><a class="headerlink" href="#_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState19getLookaheadBuffersEv" title="Link to this definition">#</a><br /></dt>
<dd><p>Get the lookahead buffers. </p>
</dd></dl>
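The getExplicitDraftTokensBuffers()/getEagleBuffers()/getLookaheadBuffers() accessors are the read side of the setup calls above; a caller typically checks the mode first, then fetches only the matching bundle. A short sketch under the same assumed predicate names:

// Sketch: fetch the buffer bundle that matches the active mode.
void inspectLookahead(tensorrt_llm::runtime::decoder::DecoderState const& state)
{
    if (state.getSpeculativeDecodingMode().isLookaheadDecoding())
    {
        auto const& buffers = state.getLookaheadBuffers();
        static_cast<void>(buffers); // inspect or pass to the runtime as needed
    }
}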
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState20getBeamSearchBuffersEv">
@ -12275,10 +12274,10 @@ one more than decoding draft tokens for prediction from primary head </p>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime11ModelConfig12getModelNameEv"><code class="docutils literal notranslate"><span class="pre">getModelName()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime11ModelConfig12setModelNameERKNSt6stringE"><code class="docutils literal notranslate"><span class="pre">setModelName()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime11ModelConfig21getNumKvHeadsPerLayerEv"><code class="docutils literal notranslate"><span class="pre">getNumKvHeadsPerLayer()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime11ModelConfig27getNumKvHeadsForGivenLayersERKNSt6vectorI10SizeType32EEb"><code class="docutils literal notranslate"><span class="pre">getNumKvHeadsForGivenLayers()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime11ModelConfig31getNumKvHeadsPerLayerLocalRangeE10SizeType3210SizeType32b"><code class="docutils literal notranslate"><span class="pre">getNumKvHeadsPerLayerLocalRange()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime11ModelConfig21setNumKvHeadsPerLayerERKNSt6vectorI10SizeType32EE"><code class="docutils literal notranslate"><span class="pre">setNumKvHeadsPerLayer()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime11ModelConfig26setNumKvHeadsPerCrossLayerERKNSt6vectorI10SizeType32EE"><code class="docutils literal notranslate"><span class="pre">setNumKvHeadsPerCrossLayer()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime11ModelConfig18getSumLocalKvHeadsE10SizeType3210SizeType32b"><code class="docutils literal notranslate"><span class="pre">getSumLocalKvHeads()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime11ModelConfig19skipCrossAttnBlocksEv"><code class="docutils literal notranslate"><span class="pre">skipCrossAttnBlocks()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime11ModelConfig22setSkipCrossAttnBlocksEb"><code class="docutils literal notranslate"><span class="pre">setSkipCrossAttnBlocks()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime11ModelConfig15getNumLanguagesEv"><code class="docutils literal notranslate"><span class="pre">getNumLanguages()</span></code></a></li>
@ -12667,8 +12666,8 @@ one more than decoding draft tokens for prediction from primary head </p>
<li class="toc-h3 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4I0EN12tensorrt_llm7runtime10GptDecoderE"><code class="docutils literal notranslate"><span class="pre">tensorrt_llm::runtime::GptDecoder</span></code></a><ul class="nav section-nav flex-column">
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime10GptDecoder13CudaStreamPtrE"><code class="docutils literal notranslate"><span class="pre">CudaStreamPtr</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime10GptDecoder9TensorPtrE"><code class="docutils literal notranslate"><span class="pre">TensorPtr</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime10GptDecoder10GptDecoderERKN8executor12DecodingModeE6size_t6size_t6size_t6size_t6size_tRK13CudaStreamPtrNSt10shared_ptrIK25SpeculativeDecodingModuleEE"><code class="docutils literal notranslate"><span class="pre">GptDecoder()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime10GptDecoder5setupERK14SamplingConfig6size_tRK14TensorConstPtrRKNSt8optionalI14DecodingOutputEERKNSt8optionalIKNSt6vectorIN13decoder_batch7RequestEEEEE"><code class="docutils literal notranslate"><span class="pre">setup()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime10GptDecoder10GptDecoderERKN8executor12DecodingModeE6size_t6size_t6size_t6size_tRK13CudaStreamPtrNSt10shared_ptrIK25SpeculativeDecodingModuleEE"><code class="docutils literal notranslate"><span class="pre">GptDecoder()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime10GptDecoder5setupERK14SamplingConfig6size_tRK14TensorConstPtrRKNSt8optionalI14DecodingOutputEENSt8optionalIN8nvinfer18DataTypeEEERKNSt8optionalINSt6vectorI14TensorConstPtrEEEERKNSt8optionalINSt6vectorIN8executor23LookaheadDecodingConfigEEEEE"><code class="docutils literal notranslate"><span class="pre">setup()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime10GptDecoder12forwardAsyncER14DecodingOutputRK13DecodingInput"><code class="docutils literal notranslate"><span class="pre">forwardAsync()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime10GptDecoder11forwardSyncER14DecodingOutputRK13DecodingInput"><code class="docutils literal notranslate"><span class="pre">forwardSync()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime10GptDecoder17getSamplingConfigEv"><code class="docutils literal notranslate"><span class="pre">getSamplingConfig()</span></code></a></li>
@ -12687,12 +12686,12 @@ one more than decoding draft tokens for prediction from primary head </p>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime11IGptDecoder9TensorPtrE"><code class="docutils literal notranslate"><span class="pre">TensorPtr</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime11IGptDecoder14TensorConstPtrE"><code class="docutils literal notranslate"><span class="pre">TensorConstPtr</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime11IGptDecoderD0Ev"><code class="docutils literal notranslate"><span class="pre">~IGptDecoder()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime11IGptDecoder5setupERK14SamplingConfig6size_tRK14TensorConstPtrRKNSt8optionalI14DecodingOutputEERKNSt8optionalIKNSt6vectorIN13decoder_batch7RequestEEEEE"><code class="docutils literal notranslate"><span class="pre">setup()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime11IGptDecoder5setupERK14SamplingConfig6size_tRK14TensorConstPtrRKNSt8optionalI14DecodingOutputEENSt8optionalIN8nvinfer18DataTypeEEERKNSt8optionalINSt6vectorI14TensorConstPtrEEEERKNSt8optionalINSt6vectorIN8executor23LookaheadDecodingConfigEEEEE"><code class="docutils literal notranslate"><span class="pre">setup()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime11IGptDecoder12forwardAsyncER14DecodingOutputRK13DecodingInput"><code class="docutils literal notranslate"><span class="pre">forwardAsync()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime11IGptDecoder11forwardSyncER14DecodingOutputRK13DecodingInput"><code class="docutils literal notranslate"><span class="pre">forwardSync()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime11IGptDecoder17getSamplingConfigEv"><code class="docutils literal notranslate"><span class="pre">getSamplingConfig()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime11IGptDecoder16disableLookaheadERKNSt8optionalI14SamplingConfigEE10SizeType3214TensorConstPtr"><code class="docutils literal notranslate"><span class="pre">disableLookahead()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime11IGptDecoder6createERKN8executor12DecodingModeEN8nvinfer18DataTypeE6size_t6size_t6size_t6size_t6size_tRKN13BufferManager13CudaStreamPtrERKNSt10shared_ptrIK25SpeculativeDecodingModuleEE"><code class="docutils literal notranslate"><span class="pre">create()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime11IGptDecoder6createERKN8executor12DecodingModeEN8nvinfer18DataTypeE6size_t6size_t6size_t6size_tRKN13BufferManager13CudaStreamPtrERKNSt10shared_ptrIK25SpeculativeDecodingModuleEE"><code class="docutils literal notranslate"><span class="pre">create()</span></code></a></li>
</ul>
</li>
</ul>
@ -13261,7 +13260,7 @@ one more than decoding draft tokens for prediction from primary head </p>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime18IGptDecoderBatched13LlmRequestPtrE"><code class="docutils literal notranslate"><span class="pre">LlmRequestPtr</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime18IGptDecoderBatched13RequestVectorE"><code class="docutils literal notranslate"><span class="pre">RequestVector</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime18IGptDecoderBatched9TensorPtrE"><code class="docutils literal notranslate"><span class="pre">TensorPtr</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime18IGptDecoderBatched5setupERKN8executor12DecodingModeE10SizeType3210SizeType3210SizeType32N8nvinfer18DataTypeERK11ModelConfigRK11WorldConfig"><code class="docutils literal notranslate"><span class="pre">setup()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime18IGptDecoderBatched5setupERKN8executor12DecodingModeE10SizeType3210SizeType32N8nvinfer18DataTypeERK11ModelConfigRK11WorldConfig"><code class="docutils literal notranslate"><span class="pre">setup()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime18IGptDecoderBatched16disableLookaheadERK13RequestVectorRK9TensorPtr"><code class="docutils literal notranslate"><span class="pre">disableLookahead()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime18IGptDecoderBatched12forwardAsyncERKN7decoder12DecoderStateERN13decoder_batch6OutputERKN13decoder_batch5InputE"><code class="docutils literal notranslate"><span class="pre">forwardAsync()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime18IGptDecoderBatched7forwardERKN7decoder12DecoderStateERN13decoder_batch6OutputERKN13decoder_batch5InputE"><code class="docutils literal notranslate"><span class="pre">forward()</span></code></a></li>
@ -13330,7 +13329,7 @@ one more than decoding draft tokens for prediction from primary head </p>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched13RequestVectorE"><code class="docutils literal notranslate"><span class="pre">RequestVector</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched9TensorPtrE"><code class="docutils literal notranslate"><span class="pre">TensorPtr</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched17GptDecoderBatchedE13CudaStreamPtr"><code class="docutils literal notranslate"><span class="pre">GptDecoderBatched()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched5setupERKN8executor12DecodingModeE10SizeType3210SizeType3210SizeType32N8nvinfer18DataTypeERK11ModelConfigRK11WorldConfig"><code class="docutils literal notranslate"><span class="pre">setup()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched5setupERKN8executor12DecodingModeE10SizeType3210SizeType32N8nvinfer18DataTypeERK11ModelConfigRK11WorldConfig"><code class="docutils literal notranslate"><span class="pre">setup()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched16disableLookaheadERK13RequestVectorRK9TensorPtr"><code class="docutils literal notranslate"><span class="pre">disableLookahead()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched12forwardAsyncERKN7decoder12DecoderStateERN13decoder_batch6OutputERKN13decoder_batch5InputE"><code class="docutils literal notranslate"><span class="pre">forwardAsync()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched7forwardERKN7decoder12DecoderStateERN13decoder_batch6OutputERKN13decoder_batch5InputE"><code class="docutils literal notranslate"><span class="pre">forward()</span></code></a></li>
@ -13469,7 +13468,6 @@ one more than decoding draft tokens for prediction from primary head </p>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch7Request11draftLogitsE"><code class="docutils literal notranslate"><span class="pre">draftLogits</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch7Request11medusaPathsE"><code class="docutils literal notranslate"><span class="pre">medusaPaths</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch7Request13medusaTreeIdsE"><code class="docutils literal notranslate"><span class="pre">medusaTreeIds</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch7Request5dtypeE"><code class="docutils literal notranslate"><span class="pre">dtype</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch7Request22lookaheadRuntimeConfigE"><code class="docutils literal notranslate"><span class="pre">lookaheadRuntimeConfig</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch7Request11eagleConfigE"><code class="docutils literal notranslate"><span class="pre">eagleConfig</span></code></a></li>
</ul>
@ -13495,9 +13493,6 @@ one more than decoding draft tokens for prediction from primary head </p>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime7decoder12DecoderState34allocateSpeculativeDecodingBuffersE23SpeculativeDecodingModeN8nvinfer18DataTypeERK13BufferManager"><code class="docutils literal notranslate"><span class="pre">allocateSpeculativeDecodingBuffers()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime7decoder12DecoderState5setupE10SizeType3210SizeType3210SizeType3210SizeType3210SizeType32RK11ModelConfigRK11WorldConfigRK13BufferManager"><code class="docutils literal notranslate"><span class="pre">setup()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime7decoder12DecoderState24setupSpeculativeDecodingERK23SpeculativeDecodingMode10SizeType32RK11ModelConfigRK11WorldConfigRK13BufferManager"><code class="docutils literal notranslate"><span class="pre">setupSpeculativeDecoding()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState24setupExplicitDraftTokensEN26ExplicitDraftTokensBuffers6InputsE"><code class="docutils literal notranslate"><span class="pre">setupExplicitDraftTokens()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState14setupLookaheadE24LookaheadDecodingBuffers"><code class="docutils literal notranslate"><span class="pre">setupLookahead()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState10setupEagleEN12EagleBuffers6InputsE"><code class="docutils literal notranslate"><span class="pre">setupEagle()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime7decoder12DecoderState16disableLookaheadERK13RequestVector"><code class="docutils literal notranslate"><span class="pre">disableLookahead()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState14getFinishedSumEv"><code class="docutils literal notranslate"><span class="pre">getFinishedSum()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState16getFinishReasonsEv"><code class="docutils literal notranslate"><span class="pre">getFinishReasons()</span></code></a></li>
@ -13528,6 +13523,9 @@ one more than decoding draft tokens for prediction from primary head </p>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState26getNumDecodingEngineTokensE10SizeType32"><code class="docutils literal notranslate"><span class="pre">getNumDecodingEngineTokens()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime7decoder12DecoderState26setNumDecodingEngineTokensE10SizeType3210SizeType32"><code class="docutils literal notranslate"><span class="pre">setNumDecodingEngineTokens()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState26getSpeculativeDecodingModeEv"><code class="docutils literal notranslate"><span class="pre">getSpeculativeDecodingMode()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState29getExplicitDraftTokensBuffersEv"><code class="docutils literal notranslate"><span class="pre">getExplicitDraftTokensBuffers()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState15getEagleBuffersEv"><code class="docutils literal notranslate"><span class="pre">getEagleBuffers()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState19getLookaheadBuffersEv"><code class="docutils literal notranslate"><span class="pre">getLookaheadBuffers()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState20getBeamSearchBuffersEv"><code class="docutils literal notranslate"><span class="pre">getBeamSearchBuffers()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState21getJointDecodingInputEv"><code class="docutils literal notranslate"><span class="pre">getJointDecodingInput()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState22getJointDecodingOutputEv"><code class="docutils literal notranslate"><span class="pre">getJointDecodingOutput()</span></code></a></li>
@ -13708,9 +13706,9 @@ one more than decoding draft tokens for prediction from primary head </p>
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>
View File
@ -126,7 +126,7 @@ class Attention(nn.Module):
weight_mode=WeightMode.FUSED_QKV_LINEAR),
quant_config=config.get_quant_config(),
skip_create_weights_in_init=config.skip_create_weights_in_init,
)
allreduce_strategy=config.allreduce_strategy)
self.o_lora = LoraLayer([LoraModuleType.ATTENTION_DENSE],
[self.hidden_size])
@ -140,7 +140,7 @@ class Attention(nn.Module):
quant_config=config.get_quant_config(),
skip_create_weights_in_init=config.skip_create_weights_in_init,
lora=self.o_lora,
)
allreduce_strategy=config.allreduce_strategy)
self.quant_config = config.get_quant_config()
self.attn_backend = config.attn_backend
@ -481,7 +481,8 @@ class MLA(nn.Module):
mapping=mapping,
tensor_parallel_mode=TensorParallelMode.COLUMN,
quant_config=quant_config,
skip_create_weights_in_init=config.skip_create_weights_in_init)
skip_create_weights_in_init=config.skip_create_weights_in_init,
allreduce_strategy=config.allreduce_strategy)
else:
self.fused_a = Linear(
hidden_size,
@ -501,7 +502,7 @@ class MLA(nn.Module):
tensor_parallel_mode=TensorParallelMode.COLUMN,
quant_config=quant_config,
skip_create_weights_in_init=config.skip_create_weights_in_init,
)
allreduce_strategy=config.allreduce_strategy)
self.q_b_proj = self.q_proj
self.kv_a_layernorm = RMSNorm(hidden_size=kv_lora_rank,
@ -517,7 +518,8 @@ class MLA(nn.Module):
mapping=mapping,
tensor_parallel_mode=TensorParallelMode.COLUMN,
quant_config=quant_config,
skip_create_weights_in_init=config.skip_create_weights_in_init)
skip_create_weights_in_init=config.skip_create_weights_in_init,
allreduce_strategy=config.allreduce_strategy)
# This parameter will view into self.kv_b_proj.weight after loading weights.
# For dummy weight initialization, this parameter is initialized with empty tensor.
# Used in forward_generation only
@ -538,7 +540,7 @@ class MLA(nn.Module):
tensor_parallel_mode=TensorParallelMode.ROW,
quant_config=quant_config,
skip_create_weights_in_init=config.skip_create_weights_in_init,
)
allreduce_strategy=config.allreduce_strategy)
def yarn_get_mscale(scale=1, mscale=1):
if scale <= 1:
@ -593,12 +595,14 @@ class MLA(nn.Module):
self.rope_fusion = self.mha.support_fused_rope()
self.support_fused_qkv = self.mha.support_fused_qkv()
self.rotary_emb = RotaryEmbedding(
pos_embd_params.rope,
head_dim=self.qk_rope_head_dim,
is_neox=pos_embd_params.is_neox,
)
self.rotary_emb = None
self.apply_rotary_emb = not self.rope_fusion
if self.apply_rotary_emb:
self.rotary_emb = RotaryEmbedding(
pos_embd_params.rope,
head_dim=self.qk_rope_head_dim,
is_neox=pos_embd_params.is_neox,
)
if not config.skip_create_weights_in_init:
self.create_weights()
@ -737,8 +741,7 @@ class MLA(nn.Module):
attn_output_context = self.forward_context(q_ctx, compressed_kv_ctx,
k_pe_ctx, attn_metadata,
latent_cache_ctx,
position_ids)
latent_cache_ctx)
else:
attn_output_context = None
@ -834,47 +837,15 @@ class MLA(nn.Module):
def forward_context_with_cached_kv(
self,
q: torch.Tensor,
compressed_kv: torch.Tensor,
k_pe: torch.Tensor,
latent_cache: torch.Tensor,
attn_metadata: AttentionMetadata,
position_ids: Optional[torch.IntTensor] = None,
) -> torch.Tensor:
assert latent_cache is not None
trtllm_attention = cast(TrtllmAttention, self.mha)
# split current q into q_nope and q_pe
q_nope, q_pe = q.view([
-1, self.num_heads, self.qk_nope_head_dim + self.qk_rope_head_dim
]).split([self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1)
# apply rope to current q_pe and k_pe
assert position_ids is not None
assert position_ids.dim() == 1 or (position_ids.dim() == 2
and position_ids.shape[0] == 1)
assert self.rotary_emb is not None
assert self.rotary_emb.head_dim == self.qk_rope_head_dim
assert q_pe.shape[0] == k_pe.shape[0]
q_pe = q_pe.contiguous().view(-1,
self.num_heads * self.qk_rope_head_dim)
q_pe, k_pe = self.rotary_emb(
position_ids[..., :attn_metadata.num_ctx_tokens], [q_pe, k_pe])
k_pe = k_pe.contiguous()
# build q for attention op
q_view = q.view(-1, self.num_heads,
self.qk_nope_head_dim + self.qk_rope_head_dim)
q_view[:, :,
self.qk_nope_head_dim:] = q_pe.view(-1, self.num_heads,
self.qk_rope_head_dim)
q = q_view.view(
-1,
self.num_heads * (self.qk_nope_head_dim + self.qk_rope_head_dim))
assert q.is_contiguous()
# append paged kv cache for mla
trtllm_attention.append_paged_kv_cache_for_mla(
compressed_kv,
k_pe,
attn_metadata,
)
# apply RoPE, append compressed_kv + k_pe to paged kv cache and assign q_pe to q
trtllm_attention.mla_rope_append_paged_kv_assign_q(
q, latent_cache, attn_metadata)
# copy full_compressed_kv and full_k_pe from paged kv cache
full_compressed_kv, full_k_pe = trtllm_attention.load_paged_kv_cache_for_mla(
@ -901,10 +872,10 @@ class MLA(nn.Module):
self.qk_nope_head_dim)
full_v = full_v.view(-1, self.num_heads, self.v_head_dim)
# build full_k and full_v
# build paged_full_kv
tokens_per_block = attn_metadata.kv_cache_manager.tokens_per_block
# paged kv cache should be initialized to 0 to avoid NaN
paged_full_kv = torch.zeros([
# paged_full_kv will be initialized to 0 in the kernel to avoid NaN
paged_full_kv = torch.empty([
attn_metadata.num_contexts, 2,
(attn_metadata.max_ctx_kv_len + tokens_per_block - 1) //
tokens_per_block, self.num_heads, tokens_per_block,
@ -920,6 +891,13 @@ class MLA(nn.Module):
attn_metadata,
)
# release pytorch activation memory
full_compressed_kv = None
full_k_pe = None
full_kv = None
full_k_nope = None
full_v = None
# out_scale = getattr(self.o_proj, "inv_input_scale", None)
out_scale = None # Currently we use BF16 MHA for context phase
@ -945,14 +923,13 @@ class MLA(nn.Module):
k_pe: torch.Tensor,
attn_metadata: AttentionMetadata,
latent_cache: Optional[torch.Tensor] = None,
position_ids: Optional[torch.IntTensor] = None,
) -> torch.Tensor:
if isinstance(self.mha, TrtllmAttention):
assert isinstance(attn_metadata, TrtllmAttentionMetadata)
trtllm_attention = cast(TrtllmAttention, self.mha)
if trtllm_attention.has_cached_kv_for_mla_context(attn_metadata):
return self.forward_context_with_cached_kv(
q, compressed_kv, k_pe, attn_metadata, position_ids)
q, latent_cache, attn_metadata)
return self.forward_context_default(q, compressed_kv, k_pe,
attn_metadata, latent_cache)
View File
@ -1,3 +1,4 @@
import copy
import json
import math
import os
@ -752,7 +753,7 @@ class BaseLlmArgs(BaseModel):
"""
model_config = {
"arbitrary_types_allowed": True,
"extra": "allow",
"extra": "forbid",
}
# Explicit arguments
@ -800,7 +801,9 @@ class BaseLlmArgs(BaseModel):
description="The context parallel size.")
gpus_per_node: Optional[int] = Field(
default=None, description="The number of GPUs per node.")
default=None,
description="The number of GPUs per node.",
validate_default=True)
moe_cluster_parallel_size: Optional[int] = Field(
default=None,
@ -855,7 +858,7 @@ class BaseLlmArgs(BaseModel):
# Quantization and calibration configurations
quant_config: Optional[QuantConfig] = Field(
default=None, description="Quantization config.")
default=None, description="Quantization config.", validate_default=True)
# Several options from ExecutorConfig, expanded here for less hierarchy
kv_cache_config: KvCacheConfig = Field(default_factory=KvCacheConfig,
@ -908,40 +911,35 @@ class BaseLlmArgs(BaseModel):
description="The maximum batch size.")
# generation constraints
max_input_len: int = Field(default=1024,
description="The maximum input length.")
max_input_len: Optional[int] = Field(
default=None, description="The maximum input length.")
max_seq_len: Optional[int] = Field(
default=None, description="The maximum sequence length.")
max_beam_width: int = Field(default=1,
description="The maximum beam width.")
max_beam_width: Optional[int] = Field(default=None,
description="The maximum beam width.")
max_num_tokens: Optional[int] = Field(
default=None, description="The maximum number of tokens.")
backend: Optional[str] = Field(default=None,
description="The backend to use.",
exclude=True)
gather_generation_logits: bool = Field(
default=False, description="Gather generation logits.")
# private fields those are unstable and just for internal use
num_postprocess_workers: int = Field(
default=0,
description="The number of postprocess worker processes.",
alias="_num_postprocess_workers")
description=
"The number of processes used for postprocessing the generated tokens, including detokenization."
)
postprocess_tokenizer_dir: Optional[str] = Field(
default=None,
description="The postprocess tokenizer directory.",
alias="_postprocess_tokenizer_dir")
description="The path to the tokenizer directory for postprocessing.")
reasoning_parser: Optional[str] = Field(
default=None,
description="The parser to separate reasoning content from output.",
alias="_reasoning_parser")
description="The parser to separate reasoning content from output.")
# TODO[Superjomn]: To deprecate this config.
decoding_config: Optional[object] = Field(
@ -955,51 +953,37 @@ class BaseLlmArgs(BaseModel):
default=None,
description="The optional MPI session to use for this LLM instance.",
json_schema_extra={"type": "Optional[MpiSession]"},
exclude=True, # exclude from serialization
exclude=True,
alias="_mpi_session")
@print_traceback_on_error
def model_post_init(self, __context: Any):
backend: Optional[str] = Field(
default=None,
description="The backend to use for this LLM instance.",
exclude_json_schema=True, # hide from API references
validate_default=True,
)
if self.skip_tokenizer_init:
self.tokenizer = None
else:
self.tokenizer = tokenizer_factory(
self.tokenizer,
trust_remote_code=self.trust_remote_code,
use_fast=self.tokenizer_mode != 'slow')
_parallel_config: Optional[object] = PrivateAttr(default=None)
_model_format: Optional[_ModelFormatKind] = PrivateAttr(default=None)
_speculative_model: Optional[str] = PrivateAttr(default=None)
_speculative_model_format: Optional[_ModelFormatKind] = PrivateAttr(
default=None)
if torch.cuda.get_device_properties(0).major < 8:
if self.dtype == 'auto':
self.dtype = 'float16'
if self.dtype == 'bfloat16':
raise RuntimeError("Pre SM 80 GPUs do not support bfloat16")
@property
def parallel_config(self) -> _ParallelConfig:
return self._parallel_config
if self.gpus_per_node is None:
logger.warning(
f"Using default gpus_per_node: {torch.cuda.device_count()}")
self.gpus_per_node = torch.cuda.device_count()
assert self.gpus_per_node is not None
@property
def model_format(self) -> _ModelFormatKind:
return self._model_format
if self.moe_cluster_parallel_size is None:
self.moe_cluster_parallel_size = -1
@property
def speculative_model(self) -> Optional[_ModelFormatKind]:
return self._speculative_model
if self.moe_tensor_parallel_size is None:
self.moe_tensor_parallel_size = -1
if self.moe_expert_parallel_size is None:
self.moe_expert_parallel_size = -1
self.parallel_config = _ParallelConfig(
tp_size=self.tensor_parallel_size,
pp_size=self.pipeline_parallel_size,
cp_size=self.context_parallel_size,
gpus_per_node=self.gpus_per_node,
moe_cluster_size=self.moe_cluster_parallel_size,
moe_tp_size=self.moe_tensor_parallel_size,
moe_ep_size=self.moe_expert_parallel_size,
enable_attention_dp=self.enable_attention_dp,
cp_config=self.cp_config)
@property
def speculative_model_format(self) -> _ModelFormatKind:
return self._speculative_model_format
@classmethod
def from_kwargs(cls, **kwargs: Any) -> "BaseLlmArgs":
@ -1011,9 +995,9 @@ class BaseLlmArgs(BaseModel):
Returns:
tensorrt_llm.llmapi.llm_utils.BaseLlmArgs: The `BaseLlmArgs` instance.
"""
kwargs = BaseLlmArgs._maybe_update_config_for_consistency(dict(kwargs))
kwargs = BaseLlmArgs._check_consistency(dict(kwargs))
ret = cls(**kwargs)
ret._setup()
return ret
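
Illustrative only: a minimal sketch of driving the constructor path above. The import path, model path, and option values are placeholders, not part of the diff.

    from tensorrt_llm.llmapi.llm_args import TrtLlmArgs  # import path assumed

    args = TrtLlmArgs.from_kwargs(
        model="/path/to/hf_checkpoint",  # placeholder checkpoint path
        max_batch_size=8,
        max_num_tokens=4096,
    )
    print(args.to_dict()["max_batch_size"])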
def to_dict(self) -> dict:
@ -1022,11 +1006,13 @@ class BaseLlmArgs(BaseModel):
Returns:
dict: The dict that contains all fields of the `LlmArgs` instance.
"""
return self.model_dump()
model_dict = self.model_dump(mode='json')
# TODO: the BuildConfig.to_dict and from_dict don't work well with pydantic
model_dict['build_config'] = copy.deepcopy(self.build_config)
return model_dict
@staticmethod
def _maybe_update_config_for_consistency(
kwargs_dict: Dict[str, Any]) -> Dict[str, Any]:
def _check_consistency(kwargs_dict: Dict[str, Any]) -> Dict[str, Any]:
        # max_beam_width is excluded here because its behavior is ill-defined
        # without support for dynamic beam width during generation
black_list = set(["max_beam_width"])
@ -1041,57 +1027,99 @@ class BaseLlmArgs(BaseModel):
llm_args_attr
), f"New options found in underlying ExecutorConfig: {llm_args_attr - executor_config_attrs}"
# ensure build_config and LlmArgsBase consistency
if kwargs_dict.get("backend") != "pytorch" and kwargs_dict.get(
"build_config"):
# TODO: move this to _perform_config_arbitration() once it's default-on.
for field_name in [
"max_input_len", "max_seq_len", "max_beam_width"
]:
build_val = getattr(kwargs_dict["build_config"], field_name,
None)
llmargs_val = kwargs_dict.get(
field_name) or BaseLlmArgs.model_fields[field_name]
if build_val != llmargs_val:
logger.warning(
f"Overriding LlmArgsBase.{field_name} ({llmargs_val}) with build_config.{field_name} ({build_val})."
)
kwargs_dict[field_name] = build_val
return kwargs_dict
def _setup(self):
        ''' This method sets up the configs right before building the model. '''
@field_validator("dtype")
@classmethod
def validate_dtype(cls, v, info):
if torch.cuda.get_device_properties(0).major < 8:
if v == 'auto':
v = 'float16'
if v == 'bfloat16':
raise RuntimeError("Pre SM 80 GPUs do not support bfloat16")
return v
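
The dtype rule above, restated as a standalone function for illustration (resolve_dtype is not part of the diff):

    def resolve_dtype(dtype: str, sm_major: int) -> str:
        # Mirror of validate_dtype: pre-SM80 GPUs get float16 instead of
        # 'auto' and reject bfloat16 outright.
        if sm_major < 8:
            if dtype == 'auto':
                return 'float16'
            if dtype == 'bfloat16':
                raise RuntimeError("Pre SM 80 GPUs do not support bfloat16")
        return dtype

    assert resolve_dtype('auto', sm_major=7) == 'float16'
    assert resolve_dtype('bfloat16', sm_major=9) == 'bfloat16'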
is_trt_llm_args = isinstance(self, TrtLlmArgs)
@field_validator("quant_config", mode='before')
@classmethod
def validate_quant_config(cls, v, info):
if v is None:
v = QuantConfig()
return v
assert isinstance(self.model,
(str, Path)), f"Invalid model: {self.model}"
@field_validator("gpus_per_node", mode='before')
@classmethod
def validate_gpus_per_node(cls, v, info):
if v is None:
logger.warning(
f"Using default gpus_per_node: {torch.cuda.device_count()}")
v = torch.cuda.device_count()
return v
if is_trt_llm_args:
self._setup_embedding_parallel_mode()
@field_validator("model")
@classmethod
def validate_model(cls, v, info):
if not isinstance(v, (str, Path)):
raise ValueError(f"Invalid model: {v}")
return v
if is_trt_llm_args and self.enable_build_cache:
self.enable_build_cache = BuildCacheConfig() if isinstance(
self.enable_build_cache, bool) else self.enable_build_cache
if not isinstance(self.enable_build_cache, BuildCacheConfig):
raise ValueError(
f"Invalid build_cache_config: {self.enable_build_cache}")
@model_validator(mode="after")
def validate_parallel_config(self):
if self.moe_cluster_parallel_size is None:
self.moe_cluster_parallel_size = -1
if self.moe_tensor_parallel_size is None:
self.moe_tensor_parallel_size = -1
if self.moe_expert_parallel_size is None:
self.moe_expert_parallel_size = -1
self._parallel_config = _ParallelConfig(
tp_size=self.tensor_parallel_size,
pp_size=self.pipeline_parallel_size,
cp_size=self.context_parallel_size,
gpus_per_node=self.gpus_per_node,
moe_cluster_size=self.moe_cluster_parallel_size,
moe_tp_size=self.moe_tensor_parallel_size,
moe_ep_size=self.moe_expert_parallel_size,
enable_attention_dp=self.enable_attention_dp,
cp_config=self.cp_config)
return self
@model_validator(mode="after")
def set_default_max_input_len(self):
if self.max_input_len is None:
self.max_input_len = 1024
return self
@model_validator(mode="after")
def validate_and_init_tokenizer(self):
"""Initialize tokenizer based on configuration."""
if self.skip_tokenizer_init:
self.tokenizer = None
else:
self.tokenizer = tokenizer_factory(
self.tokenizer,
trust_remote_code=self.trust_remote_code,
use_fast=self.tokenizer_mode != 'slow')
return self
@model_validator(mode="after")
def validate_model_format_misc(self):
'''
Load the model format, and do the following:
        1. Load the build_config if an engine is given.
        2. Load the parallel_config if a checkpoint is given.
'''
model_obj = _ModelWrapper(self.model)
self.speculative_model = getattr(self.speculative_config,
"speculative_model", None)
speculative_model_obj = _ModelWrapper(
self.speculative_model
) if self.speculative_model is not None else None
if model_obj.is_local_model and self.backend not in [
'pytorch', '_autodeploy'
]:
# Load parallel_config from the engine.
self.model_format = get_model_format(self.model)
model_format = get_model_format(self.model)
if self.model_format is _ModelFormatKind.TLLM_ENGINE:
if model_format is _ModelFormatKind.TLLM_ENGINE:
if self.build_config is not None:
logger.warning(
"The build_config is ignored for model format of TLLM_ENGINE."
@ -1103,65 +1131,128 @@ class BaseLlmArgs(BaseModel):
runtime_defaults)
# Load parallel_config from the checkpoint.
elif self.model_format is _ModelFormatKind.TLLM_CKPT:
elif model_format is _ModelFormatKind.TLLM_CKPT:
# We need to create a temporary instance to call _load_config_from_ckpt
self._load_config_from_ckpt(model_obj.model_dir)
else:
self.model_format = _ModelFormatKind.HF
model_format = _ModelFormatKind.HF
if self.speculative_model and speculative_model_obj.is_local_model:
self.speculative_model_format = _ModelFormatKind.HF
# Store the model format in the values
self._model_format = model_format
return self
self.quant_config = self.quant_config or QuantConfig()
@model_validator(mode="after")
def init_build_config(self):
"""
        Create a default BuildConfig if none is provided.
"""
if self.build_config is None:
kwargs = {}
if self.max_batch_size:
kwargs["max_batch_size"] = self.max_batch_size
if self.max_num_tokens:
kwargs["max_num_tokens"] = self.max_num_tokens
if self.max_seq_len:
kwargs["max_seq_len"] = self.max_seq_len
if self.max_beam_width:
kwargs["max_beam_width"] = self.max_beam_width
if self.max_input_len:
kwargs["max_input_len"] = self.max_input_len
self.build_config = BuildConfig(**kwargs)
if is_trt_llm_args:
self.calib_config = self.calib_config or CalibConfig()
assert isinstance(
self.build_config, BuildConfig
), f"build_config is not initialized: {self.build_config}"
return self
@model_validator(mode="after")
def set_runtime_knobs_from_build_config(self):
        # TODO: remove this once PyTorch becomes the default and adapts to build_config as input
assert self.build_config is not None, "build_config is not initialized"
if self.backend == "pytorch":
if self.build_config:
for key in [
"max_batch_size", "max_num_tokens", "max_seq_len",
"max_input_len", "max_beam_width"
]:
if getattr(self.build_config, key) is not None:
if (v := getattr(self, key,
None)) is not None and v != getattr(
self.build_config, key):
logger.warning(
f"overriding {key} from build_config")
setattr(self, key, getattr(self.build_config, key))
return self
@model_validator(mode="after")
def validate_build_config_with_runtime_params(self):
# Note: max_batch_size and max_num_tokens in LlmArgs are for runtime,
# which will be passed to the C++ Executor API, overwriting the values
        # from a built engine. In order to set build configuration, it is
# recommended to use build_config instead.
        if self.build_config is not None:
            if self.max_batch_size and self.build_config.max_batch_size != self.max_batch_size:
                logger.warning(
                    f"Conflict detected in LlmArgs build_config.max_batch_size "
                    f"({self.build_config.max_batch_size}) != max_batch_size ({self.max_batch_size})."
                    f"The 'max_batch_size' specified in LlmArgs is ignored at "
                    f"engine build and will override at runtime.")
            if self.max_num_tokens and self.build_config.max_num_tokens != self.max_num_tokens:
                logger.warning(
                    f"Conflict detected in LlmArgs build_config.max_num_tokens "
                    f"({self.build_config.max_num_tokens}) != max_batch_size ({self.max_num_tokens})."
                    f"The 'max_num_tokens' specified in LlmArgs is ignored at "
                    f"engine build and will override at runtime.")
        else:
            self.build_config = BuildConfig()
            if self.max_batch_size:
                self.build_config.max_batch_size = self.max_batch_size
            if self.max_num_tokens:
                self.build_config.max_num_tokens = self.max_num_tokens
        assert isinstance(
            self.build_config, BuildConfig
        ), f"build_config is not initialized: {self.build_config}"
        if self.max_batch_size is not None:
            if self.max_batch_size > self.build_config.max_batch_size:
                raise ValueError(
                    f"max_batch_size [{self.max_batch_size}] is greater than build_config.max_batch_size [{self.build_config.max_batch_size}] in build_config"
                )
        if self.max_num_tokens is not None:
            if self.max_num_tokens > self.build_config.max_num_tokens:
                raise ValueError(
                    f"max_num_tokens [{self.max_num_tokens}] is greater than build_config.max_num_tokens [{self.build_config.max_num_tokens}] in build_config"
                )
        if self.max_seq_len is not None:
            if self.max_seq_len != self.build_config.max_seq_len:
                logger.warning(
                    f"max_seq_len [{self.max_seq_len}] is overridden by build_config.max_seq_len [{self.build_config.max_seq_len}] in build_config"
                )
        if self.max_beam_width is not None:
            if self.max_beam_width != self.build_config.max_beam_width:
                logger.warning(
                    f"max_beam_width [{self.max_beam_width}] is overridden by build_config.max_beam_width [{self.build_config.max_beam_width}] in build_config"
                )
        if self.max_input_len is not None:
            if self.max_input_len != self.build_config.max_input_len:
                logger.warning(
                    f"max_input_len [{self.max_input_len}] is overridden by build_config.max_input_len [{self.build_config.max_input_len}] in build_config"
                )
return self
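
Illustrative sketch of the runtime-vs-build rule enforced above (check_runtime_limit is a hypothetical helper, not part of the diff):

    def check_runtime_limit(runtime_value: int, build_value: int, name: str) -> None:
        # Runtime limits may tighten but never exceed what the engine was built for.
        if runtime_value > build_value:
            raise ValueError(
                f"{name} [{runtime_value}] is greater than build_config.{name} "
                f"[{build_value}] in build_config")

    check_runtime_limit(8, 16, "max_batch_size")   # OK: runtime below build limit
    # check_runtime_limit(32, 16, "max_batch_size")  # would raise ValueError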
@model_validator(mode="after")
def validate_build_config_remaining(self):
is_trt_llm_args = isinstance(self, TrtLlmArgs)
        # TODO: remove the checker when manage weights supports all data types
if is_trt_llm_args and self.fast_build and (
self.quant_config.quant_algo is QuantAlgo.FP8
or self.quant_config.quant_algo is None):
if is_trt_llm_args and self.fast_build and (self.quant_config.quant_algo
is QuantAlgo.FP8):
self._update_plugin_config("manage_weights", True)
if self.parallel_config._world_size == 1:
if self.parallel_config._world_size == 1 and self.build_config:
self.build_config.plugin_config.nccl_plugin = None
self._ensure_lora_config_consistency()
if self.enable_lora and self.lora_config is None and self.backend != 'pytorch':
self.build_config.plugin_config.lora_plugin = 'auto'
if self.max_lora_rank is not None:
self.build_config.lora_config.max_lora_rank = self.max_lora_rank
self._setup_speculative_config()
if self.enable_prompt_adapter:
self.build_config.max_prompt_embedding_table_size = self.max_prompt_adapter_token * self.build_config.max_batch_size
def _setup_speculative_config(self):
if self.max_beam_width is None:
if self.build_config:
self.max_beam_width = self.build_config.max_beam_width
else:
self.max_beam_width = 1
return self
@model_validator(mode="after")
def validate_speculative_config(self):
if self.speculative_config:
if isinstance(self.speculative_config, LookaheadDecodingConfig):
lookahead_config = self.speculative_config
@ -1240,7 +1331,18 @@ class BaseLlmArgs(BaseModel):
else:
self.decoding_config = None
def _ensure_lora_config_consistency(self):
self._speculative_model = getattr(self.speculative_config,
"speculative_model", None)
speculative_model_obj = _ModelWrapper(
self._speculative_model
) if self._speculative_model is not None else None
if self._speculative_model and speculative_model_obj.is_local_model:
self._speculative_model_format = _ModelFormatKind.HF
return self
@model_validator(mode="after")
def validate_lora_config_consistency(self):
if self.lora_config:
if self.max_lora_rank is not None:
logger.warning(
@ -1276,10 +1378,7 @@ class BaseLlmArgs(BaseModel):
)
self.lora_config.lora_target_modules = list(
default_trtllm_modules_to_hf_modules.keys())
@property
def _build_config_mutable(self) -> bool:
return self.model_format is not _ModelFormatKind.TLLM_ENGINE
return self
def _update_plugin_config(self, key: str, value: Any):
setattr(self.build_config.plugin_config, key, value)
@ -1303,7 +1402,7 @@ class BaseLlmArgs(BaseModel):
raise ValueError(
f"cp_size {self.parallel_config.cp_size} is not consistent with the engine's cp_size {mapping.cp_size}"
)
self.parallel_config = _ParallelConfig(
self._parallel_config = _ParallelConfig(
tp_size=mapping.tp_size,
pp_size=mapping.pp_size,
cp_size=mapping.cp_size,
@ -1342,7 +1441,7 @@ class BaseLlmArgs(BaseModel):
f"auto parallel with world_size {self.parallel_config.world_size} does not support checkpoint with "
"world_size {world_size} > 1")
if not self.parallel_config.auto_parallel:
self.parallel_config = _ParallelConfig(
self._parallel_config = _ParallelConfig(
tp_size=tp_size,
pp_size=pp_size,
cp_size=cp_size,
@ -1351,20 +1450,6 @@ class BaseLlmArgs(BaseModel):
moe_tp_size=moe_tp_size,
moe_ep_size=moe_ep_size)
def _setup_embedding_parallel_mode(self):
if self.embedding_parallel_mode == 'NONE':
self._convert_checkpoint_options['use_parallel_embedding'] = False
elif self.embedding_parallel_mode == 'SHARDING_ALONG_VOCAB':
self._convert_checkpoint_options['use_parallel_embedding'] = True
self._convert_checkpoint_options['embedding_sharding_dim'] = 0
elif self.embedding_parallel_mode == 'SHARDING_ALONG_HIDDEN':
self._convert_checkpoint_options['use_parallel_embedding'] = True
self._convert_checkpoint_options['embedding_sharding_dim'] = 1
else:
raise ValueError(
f"Invalid embedding_parallel_mode: {self.llm_args.embedding_parallel_mode}"
)
class TrtLlmArgs(BaseLlmArgs):
@ -1385,12 +1470,6 @@ class TrtLlmArgs(BaseLlmArgs):
enable_tqdm: bool = Field(default=False,
description="Enable tqdm for progress bar.")
# BuildConfig is introduced to give users a familiar interface to configure the model building.
build_config: Optional[object] = Field(
default=None,
description="Build config.",
json_schema_extra={"type": f"Optional[{get_type_repr(BuildConfig)}]"})
workspace: Optional[str] = Field(default=None,
description="The workspace for the model.")
@ -1407,7 +1486,7 @@ class TrtLlmArgs(BaseLlmArgs):
default=None, description="Extended runtime perf knob config.")
calib_config: Optional[CalibConfig] = Field(
default=None, description="Calibration config.")
default=None, description="Calibration config.", validate_default=True)
embedding_parallel_mode: str = Field(
default='SHARDING_ALONG_VOCAB',
@ -1415,6 +1494,12 @@ class TrtLlmArgs(BaseLlmArgs):
fast_build: bool = Field(default=False, description="Enable fast build.")
# BuildConfig is introduced to give users a familiar interface to configure the model building.
build_config: Optional[object] = Field(
default=None,
description="Build config.",
json_schema_extra={"type": f"Optional[{get_type_repr(BuildConfig)}]"})
# Private attributes
_auto_parallel_config: Optional[AutoParallelConfig] = PrivateAttr(
default=None)
@ -1426,10 +1511,28 @@ class TrtLlmArgs(BaseLlmArgs):
def auto_parallel_config(self) -> AutoParallelConfig:
return self._auto_parallel_config
@print_traceback_on_error
def model_post_init(self, __context):
super().model_post_init(__context)
@field_validator('calib_config', mode='before')
@classmethod
def init_calib_config(cls, v):
if v is None:
return CalibConfig()
return v
@model_validator(mode="after")
def setup_embedding_parallel_mode(self):
if self.embedding_parallel_mode == 'NONE':
self._convert_checkpoint_options['use_parallel_embedding'] = False
elif self.embedding_parallel_mode == 'SHARDING_ALONG_VOCAB':
self._convert_checkpoint_options['use_parallel_embedding'] = True
self._convert_checkpoint_options['embedding_sharding_dim'] = 0
elif self.embedding_parallel_mode == 'SHARDING_ALONG_HIDDEN':
self._convert_checkpoint_options['use_parallel_embedding'] = True
self._convert_checkpoint_options['embedding_sharding_dim'] = 1
# No else clause needed since validation already happened
return self
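
Equivalent lookup table for the three branches above, for illustration only:

    # Sketch of the mode -> checkpoint-option mapping implemented above.
    EMBEDDING_PARALLEL_MODES = {
        'NONE': {'use_parallel_embedding': False},
        'SHARDING_ALONG_VOCAB': {'use_parallel_embedding': True,
                                 'embedding_sharding_dim': 0},
        'SHARDING_ALONG_HIDDEN': {'use_parallel_embedding': True,
                                  'embedding_sharding_dim': 1},
    }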
@model_validator(mode="after")
def validate_auto_parallel(self):
self._auto_parallel_config = AutoParallelConfig(
sharded_io_allowlist=[
"past_key_value_\\d+",
@ -1446,6 +1549,19 @@ class TrtLlmArgs(BaseLlmArgs):
if self.parallel_config.auto_parallel:
self.parallel_config.world_size = self.auto_parallel_world_size
return self
@model_validator(mode="after")
def validate_enable_build_cache(self):
if not self.enable_build_cache:
return self
self.enable_build_cache = BuildCacheConfig() if isinstance(
self.enable_build_cache, bool) else self.enable_build_cache
if not isinstance(self.enable_build_cache, BuildCacheConfig):
raise ValueError(
f"Invalid build_cache_config: {self.enable_build_cache}")
return self
LlmArgs = TrtLlmArgs
@ -1459,6 +1575,27 @@ class LoadFormat(Enum):
DUMMY = 1
class TorchCompileConfig(BaseModel):
"""
Configuration for torch.compile.
"""
torch_compile_fullgraph: bool = Field(
default=True,
description="Enable full graph compilation in torch.compile.")
torch_compile_inductor_enabled: bool = Field(
default=False, description="Enable inductor backend in torch.compile.")
torch_compile_piecewise_cuda_graph: bool = Field(
default=False,
description="Enable piecewise CUDA graph in torch.compile.")
torch_compile_enable_userbuffers: bool = Field(
default=True,
description=
"When torch compile is enabled, userbuffers is enabled by default.")
class TorchLlmArgs(BaseLlmArgs):
# Just a dummy BuildConfig to allow code reuse with the TrtLlmArgs
@ -1524,9 +1661,6 @@ class TorchLlmArgs(BaseLlmArgs):
kv_cache_dtype: str = Field(default="auto",
description="Data type for KV cache.")
use_kv_cache: bool = Field(default=True,
description="Whether to use KV cache.")
enable_iter_perf_stats: bool = Field(
default=False, description="Enable iteration performance statistics.")
@ -1539,24 +1673,8 @@ class TorchLlmArgs(BaseLlmArgs):
print_iter_log: bool = Field(default=False,
description="Print iteration logs.")
torch_compile_enabled: bool = Field(
default=False, description="Enable torch.compile optimization.")
torch_compile_fullgraph: bool = Field(
default=True,
description="Enable full graph compilation in torch.compile.")
torch_compile_inductor_enabled: bool = Field(
default=False, description="Enable inductor backend in torch.compile.")
torch_compile_piecewise_cuda_graph: bool = Field(
default=False,
description="Enable piecewise CUDA graph in torch.compile.")
torch_compile_enable_userbuffers: bool = Field(
default=True,
description=
"When torch compile is enabled, userbuffers is enabled by default.")
torch_compile_config: Optional[TorchCompileConfig] = Field(
default=None, description="Torch compile config.")
autotuner_enabled: bool = Field(
default=True,
@ -1565,12 +1683,6 @@ class TorchLlmArgs(BaseLlmArgs):
enable_layerwise_nvtx_marker: bool = Field(
default=False, description="If true, enable layerwise nvtx marker.")
auto_deploy_config: Optional[object] = Field(
default=None,
description="Auto deploy config.",
exclude_from_json=True,
json_schema_extra={"type": f"Optional[AutoDeployConfig]"})
load_format: Union[str, LoadFormat] = Field(
default=LoadFormat.AUTO,
description=
@ -1583,6 +1695,13 @@ class TorchLlmArgs(BaseLlmArgs):
"If true, enable min-latency mode. Currently only used for Llama4.",
)
# TODO: remove backend later
@field_validator('backend', mode='before')
def init_backend(cls, v):
if v is None:
return 'pytorch'
return v
@field_validator('load_format', mode='before')
@classmethod
def convert_load_format(cls, v):
@ -1609,13 +1728,9 @@ class TorchLlmArgs(BaseLlmArgs):
def extra_resource_managers(self, value: Dict[str, object]) -> None:
self._extra_resource_managers = value
@print_traceback_on_error
def model_post_init(self, __context):
@model_validator(mode="after")
def validate_moe_load_balancer(self):
from .._torch.model_config import MoeLoadBalancerConfig
super().model_post_init(__context)
self.model_format = _ModelFormatKind.HF
if isinstance(self.moe_load_balancer, str):
if not os.path.exists(self.moe_load_balancer):
raise FileNotFoundError(
@ -1630,6 +1745,7 @@ class TorchLlmArgs(BaseLlmArgs):
raise ValueError(
f"Failed to load MoE load balancer config file: {self.moe_load_balancer}"
) from e
return self
# TODO: Remove this after the PyTorch backend is fully migrated to TorchLlmArgs from ExecutorConfig
def get_pytorch_backend_config(self) -> "PyTorchConfig":
@ -1649,17 +1765,22 @@ class TorchLlmArgs(BaseLlmArgs):
mixed_sampler=self.mixed_sampler,
enable_trtllm_sampler=self.enable_trtllm_sampler,
kv_cache_dtype=self.kv_cache_dtype,
use_kv_cache=self.use_kv_cache,
enable_iter_perf_stats=self.enable_iter_perf_stats,
enable_iter_req_stats=self.enable_iter_req_stats,
print_iter_log=self.print_iter_log,
torch_compile_enabled=self.torch_compile_enabled,
torch_compile_fullgraph=self.torch_compile_fullgraph,
torch_compile_inductor_enabled=self.torch_compile_inductor_enabled,
torch_compile_piecewise_cuda_graph=self.
torch_compile_piecewise_cuda_graph,
torch_compile_enable_userbuffers=self.
torch_compile_enable_userbuffers,
torch_compile_enabled=bool(self.torch_compile_config is not None),
torch_compile_fullgraph=self.torch_compile_config.
torch_compile_fullgraph
if self.torch_compile_config is not None else True,
torch_compile_inductor_enabled=self.torch_compile_config.
torch_compile_inductor_enabled
if self.torch_compile_config is not None else False,
torch_compile_piecewise_cuda_graph=self.torch_compile_config.
torch_compile_piecewise_cuda_graph
if self.torch_compile_config is not None else False,
torch_compile_enable_userbuffers=self.torch_compile_config.
torch_compile_enable_userbuffers
if self.torch_compile_config is not None else True,
autotuner_enabled=self.autotuner_enabled,
enable_layerwise_nvtx_marker=self.enable_layerwise_nvtx_marker,
load_format=self.load_format,
@ -1673,38 +1794,6 @@ class TorchLlmArgs(BaseLlmArgs):
raise ValueError("cuda_graph_max_batch_size must be non-negative")
return v
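
A more readable equivalent of the torch_compile_config mapping in get_pytorch_backend_config above, written as a sketch (resolve_torch_compile is not a real helper in this module):

    from typing import Optional

    def resolve_torch_compile(cfg: Optional[TorchCompileConfig]) -> dict:
        # Each knob falls back to the TorchCompileConfig field default when no
        # config object is provided; compilation is enabled exactly when a
        # config is present.
        return {
            "torch_compile_enabled": cfg is not None,
            "torch_compile_fullgraph": cfg.torch_compile_fullgraph if cfg else True,
            "torch_compile_inductor_enabled":
                cfg.torch_compile_inductor_enabled if cfg else False,
            "torch_compile_piecewise_cuda_graph":
                cfg.torch_compile_piecewise_cuda_graph if cfg else False,
            "torch_compile_enable_userbuffers":
                cfg.torch_compile_enable_userbuffers if cfg else True,
        }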
@staticmethod
def _generate_cuda_graph_batch_sizes(max_batch_size: int,
padding_enabled: bool) -> List[int]:
"""Generate a list of batch sizes for CUDA graphs.
Args:
max_batch_size: Maximum batch size to generate up to
padding_enabled: Whether padding is enabled, which affects the batch size distribution
Returns:
List of batch sizes to create CUDA graphs for
"""
if padding_enabled:
batch_sizes = [1, 2, 4] + [i * 8 for i in range(1, 17)]
else:
batch_sizes = list(range(1, 32)) + [32, 64, 128]
# Add powers of 2 up to max_batch_size
batch_sizes += [
2**i for i in range(8, math.floor(math.log(max_batch_size, 2)))
]
# Filter and sort batch sizes
batch_sizes = sorted(
[size for size in batch_sizes if size <= max_batch_size])
# Add max_batch_size if not already included
if max_batch_size != batch_sizes[-1]:
batch_sizes.append(max_batch_size)
return batch_sizes
@model_validator(mode='after')
def validate_cuda_graph_config(self) -> 'TorchLlmArgs':
"""Validate CUDA graph configuration.
@ -1738,6 +1827,38 @@ class TorchLlmArgs(BaseLlmArgs):
return self
@staticmethod
def _generate_cuda_graph_batch_sizes(max_batch_size: int,
padding_enabled: bool) -> List[int]:
"""Generate a list of batch sizes for CUDA graphs.
Args:
max_batch_size: Maximum batch size to generate up to
padding_enabled: Whether padding is enabled, which affects the batch size distribution
Returns:
List of batch sizes to create CUDA graphs for
"""
if padding_enabled:
batch_sizes = [1, 2, 4] + [i * 8 for i in range(1, 17)]
else:
batch_sizes = list(range(1, 32)) + [32, 64, 128]
# Add powers of 2 up to max_batch_size
batch_sizes += [
2**i for i in range(8, math.floor(math.log(max_batch_size, 2)))
]
# Filter and sort batch sizes
batch_sizes = sorted(
[size for size in batch_sizes if size <= max_batch_size])
# Add max_batch_size if not already included
if max_batch_size != batch_sizes[-1]:
batch_sizes.append(max_batch_size)
return batch_sizes
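
Worked example of the helper above; the expected value follows from the code as written:

    # With padding enabled and max_batch_size=40, the candidates [1, 2, 4]
    # plus multiples of 8 up to 128 are filtered to <= 40, and the
    # power-of-two range(8, floor(log2(40))) is empty, giving:
    sizes = TorchLlmArgs._generate_cuda_graph_batch_sizes(40, padding_enabled=True)
    assert sizes == [1, 2, 4, 8, 16, 24, 32, 40]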
class _AutoDeployLlmArgs(TorchLlmArgs):
"""LLM arguments specifically for AutoDeploy backend.
@ -1797,6 +1918,12 @@ class _AutoDeployLlmArgs(TorchLlmArgs):
"properly passed through.",
)
checkpoint_device: Optional[str] = Field(
default=None,
description="Device on which to load the model checkpoint. "
"Defaults to the same device as the rest of the pipeline.",
)
@field_validator("free_mem_ratio")
@classmethod
def validate_free_mem_ratio(cls, v):
@ -1847,7 +1974,6 @@ def update_llm_args_with_extra_dict(
llm_args_dict: Dict,
extra_llm_api_options: Optional[str] = None) -> Dict:
from .._torch.pyexecutor.config import PyTorchConfig
field_mapping = {
"quant_config": QuantConfig,
"calib_config": CalibConfig,
@ -1860,18 +1986,19 @@ def update_llm_args_with_extra_dict(
"speculative_config": DecodingBaseConfig,
"batching_type": BatchingType,
"extended_runtime_perf_knob_config": ExtendedRuntimePerfKnobConfig,
"pytorch_backend_config": PyTorchConfig,
"cache_transceiver_config": CacheTransceiverConfig,
"lora_config": LoraConfig,
}
for field, field_type in field_mapping.items():
if field in llm_args_dict:
if field == "speculative_config":
llm_args_dict[field] = field_type.from_dict(
llm_args_dict[field])
for field_name, field_type in field_mapping.items():
if field_name in llm_args_dict:
if field_name == "speculative_config":
llm_args_dict[field_name] = field_type.from_dict(
llm_args_dict[field_name])
else:
llm_args_dict[field] = field_type(**llm_args_dict[field])
llm_args_dict[field_name] = field_type(
**llm_args_dict[field_name])
extra_llm_str = f"because it's specified in {extra_llm_api_options}" if extra_llm_api_options else ""
logger.warning(f"Overriding {field} {extra_llm_str}")
logger.warning(f"Overriding {field_name} {extra_llm_str}")
llm_args = llm_args | llm_args_dict
return llm_args
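
Hypothetical usage of update_llm_args_with_extra_dict; the YAML filename and option values are placeholders:

    # The "quant_config" dict, as loaded from an extra-options YAML, is
    # coerced into a QuantConfig before being merged over the base args.
    base_args = {"model": "/path/to/model"}
    extra = {"quant_config": {"quant_algo": "FP8"}}
    merged = update_llm_args_with_extra_dict(base_args, extra, "extra_opts.yaml")
    assert isinstance(merged["quant_config"], QuantConfig)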

View File

@ -31,6 +31,7 @@ class RootArgs(BaseModel):
task_id: int
std_out: bool
rand_task_id: Optional[Tuple[int, int]]
lora_dir: Optional[str] = None
@field_validator('tokenizer')
def get_tokenizer(cls,
@ -73,6 +74,10 @@ class RootArgs(BaseModel):
default=None,
nargs=2,
help="Random LoRA Tasks")
@click.option("--lora-dir",
type=str,
default=None,
help="Directory containing LoRA adapters")
@click.option("--log-level",
default="info",
type=click.Choice(['info', 'debug']),
@ -92,7 +97,8 @@ def cli(ctx, **kwargs):
std_out=kwargs['stdout'],
random_seed=kwargs['random_seed'],
task_id=kwargs['task_id'],
rand_task_id=kwargs['rand_task_id'])
rand_task_id=kwargs['rand_task_id'],
lora_dir=kwargs['lora_dir'])
cli.add_command(dataset)

View File

@ -558,6 +559,9 @@
        if isinstance(obj, KVCacheType):
            # For KVCacheType, convert it to string by split of 'KVCacheType.PAGED'.
            return obj.__str__().split('.')[-1]
        elif hasattr(obj, 'model_dump'):
            # Handle Pydantic models (including DecodingBaseConfig and subclasses)
            return obj.model_dump(mode='json')
        else:
            return super().default(obj)
@ -1993,9 +1997,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -517,8 +518,7 @@
@dataclass(slots=True, kw_only=True)
class DisaggregatedParams:
    """
    Disaggregated serving parameters
    """Disaggregated serving parameters.

    Args:
        request_type (str): The type of request ("context_only" or "generation_only")
@ -536,10 +536,9 @@
    def get_context_phase_params(self) -> tllme.ContextPhaseParams:
        return tllme.ContextPhaseParams(self.first_gen_tokens,
                                        self.ctx_request_id, self.opaque_state,
                                        self.draft_tokens)
        return tllme.ContextPhaseParams(
            self.first_gen_tokens, self.ctx_request_id, self.opaque_state, self.draft_tokens
        )
@ -553,7 +552,8 @@
            return tllme.RequestType.REQUEST_TYPE_CONTEXT_AND_GENERATION
        else:
            raise ValueError(
                f"Unknown request type: {self.request_type}. Must be context_only, generation_only or context_and_generation"
                f"Unknown request type: {self.request_type}. Must be context_only, generation_only or "
                "context_and_generation"
            )

View File

@ -568,6 +569,11 @@
        self._response = response
        self.logprobs = logprobs

    @property
    def _is_llm_response(self):
        response = object.__getattribute__(self, '_response')
        return isinstance(response, tllm.Response)

    def __getattr__(self, name):
        response = object.__getattribute__(self, '_response')
        return getattr(response, name)

View File

@ -656,11 +657,8 @@
def is_llm_response(instance):
    from tensorrt_llm._torch.pyexecutor.llm_request import \
        LlmResponse as PyLlmResponse

    from .result import ResponseWrapper
    return isinstance(instance, (Response, PyLlmResponse, ResponseWrapper))
    return isinstance(instance, Response) or \
        (hasattr(instance, '_is_llm_response') and instance._is_llm_response)

View File

@ -529,6 +530,7 @@
from typing import List, Optional, Sequence, Tuple, Union

import numpy as np
import torch

# isort: off
import tensorrt as trt

@ -4732,7 +4734,8 @@
    AUTO = 3
    ONESHOT = 4
    TWOSHOT = 5
    LOWPRECISION = 6
    MNNVL = 7

@ -4747,7 +4750,7 @@
    RESIDUAL_RMS_NORM_QUANT_NVFP4 = 5
    RESIDUAL_RMS_NORM_OUT_QUANT_FP8 = 6
    RESIDUAL_RMS_NORM_OUT_QUANT_NVFP4 = 7
    MOE_ALLREDUCE_RESIDUAL_RMS_NORM = 8
    MOE_FINALIZE_ALLREDUCE_RESIDUAL_RMS_NORM = 8
@ -4764,7 +4767,8 @@
                 scale: Optional[Tensor] = None,
                 norm_pre_residual_weight: Optional[Tensor] = None,
                 eps: float = 1e-06,
                 enable_allreduce: bool = True):
                 enable_allreduce: bool = True,
                 trigger_completion_at_end: bool = True):
        self.strategy = strategy
        self.fusion_op = fusion_op
        self.bias = bias
@ -4775,6 +4779,7 @@
        self.eps = eps
        # For torch path only, has no effect on TRT path
        self.enable_allreduce = enable_allreduce
        self.trigger_completion_at_end = trigger_completion_at_end

        assert fusion_op == AllReduceFusionOp.NONE.value or (residual
                                                             is not None)
@ -4806,6 +4811,51 @@
<div class="viewcode-block" id="MoEAllReduceParams">
<a class="viewcode-back" href="../../python-api/tensorrt_llm.functional.html#tensorrt_llm.functional.MoEAllReduceParams">[docs]</a>
<span class="k">class</span><span class="w"> </span><span class="nc">MoEAllReduceParams</span><span class="p">(</span><span class="n">AllReduceParams</span><span class="p">):</span>
<span class="k">def</span><span class="w"> </span><span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span>
<span class="n">device_num_experts</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Tensor</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">expert_scale_factor</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Tensor</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">expanded_idx_to_permuted_idx</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Tensor</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">shared_expert_output</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Tensor</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">bias</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Tensor</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">residual</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Tensor</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">norm_weight</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Tensor</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">scale</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Tensor</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">norm_pre_residual_weight</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Tensor</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">eps</span><span class="p">:</span> <span class="nb">float</span> <span class="o">=</span> <span class="mf">1e-06</span><span class="p">,</span>
<span class="n">enable_allreduce</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="kc">True</span><span class="p">,</span>
<span class="n">is_cutlass_min_latency</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="kc">False</span><span class="p">):</span>
<span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span>
<span class="n">bias</span><span class="o">=</span><span class="n">bias</span><span class="p">,</span>
<span class="n">residual</span><span class="o">=</span><span class="n">residual</span><span class="p">,</span>
<span class="n">norm_weight</span><span class="o">=</span><span class="n">norm_weight</span><span class="p">,</span>
<span class="n">scale</span><span class="o">=</span><span class="n">scale</span><span class="p">,</span>
<span class="n">norm_pre_residual_weight</span><span class="o">=</span><span class="n">norm_pre_residual_weight</span><span class="p">,</span>
<span class="n">eps</span><span class="o">=</span><span class="n">eps</span><span class="p">,</span>
<span class="n">enable_allreduce</span><span class="o">=</span><span class="n">enable_allreduce</span><span class="p">,</span>
<span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">device_num_experts</span> <span class="o">=</span> <span class="n">device_num_experts</span>
<span class="bp">self</span><span class="o">.</span><span class="n">expert_scale_factor</span> <span class="o">=</span> <span class="n">expert_scale_factor</span>
<span class="bp">self</span><span class="o">.</span><span class="n">expanded_idx_to_permuted_idx</span> <span class="o">=</span> <span class="n">expanded_idx_to_permuted_idx</span>
<span class="bp">self</span><span class="o">.</span><span class="n">shared_expert_output</span> <span class="o">=</span> <span class="n">shared_expert_output</span>
<span class="bp">self</span><span class="o">.</span><span class="n">is_cutlass_min_latency</span> <span class="o">=</span> <span class="n">is_cutlass_min_latency</span>
<div class="viewcode-block" id="MoEAllReduceParams.is_valid">
<a class="viewcode-back" href="../../python-api/tensorrt_llm.functional.html#tensorrt_llm.functional.MoEAllReduceParams.is_valid">[docs]</a>
<span class="k">def</span><span class="w"> </span><span class="nf">is_valid</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">is_cutlass_min_latency</span><span class="p">:</span>
<span class="k">return</span> <span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">device_num_experts</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span>
<span class="ow">and</span> <span class="bp">self</span><span class="o">.</span><span class="n">expert_scale_factor</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span>
<span class="ow">and</span> <span class="bp">self</span><span class="o">.</span><span class="n">shared_expert_output</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="k">return</span> <span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">expanded_idx_to_permuted_idx</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">)</span></div>
</div>
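For illustration, a minimal usage sketch of the two validation modes — the tensor shapes and values below are hypothetical placeholders, and it is assumed the remaining constructor arguments are optional:

import torch

from tensorrt_llm.functional import MoEAllReduceParams

# Min-latency CUTLASS path: device_num_experts, expert_scale_factor and
# shared_expert_output must all be provided (placeholder tensors).
params_min_latency = MoEAllReduceParams(
    device_num_experts=torch.tensor([8], dtype=torch.int32),
    expert_scale_factor=torch.ones(16, 8),
    shared_expert_output=torch.zeros(16, 1024),
    is_cutlass_min_latency=True)
assert params_min_latency.is_valid()

# Default path: only the expanded->permuted index mapping is required.
params_default = MoEAllReduceParams(
    expanded_idx_to_permuted_idx=torch.arange(128, dtype=torch.int32))
assert params_default.is_valid()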
<div class="viewcode-block" id="create_allreduce_plugin">
<a class="viewcode-back" href="../../python-api/tensorrt_llm.functional.html#tensorrt_llm.functional.create_allreduce_plugin">[docs]</a>
<span class="k">def</span><span class="w"> </span><span class="nf">create_allreduce_plugin</span><span class="p">(</span>
@ -5711,6 +5761,7 @@
<span class="k">return</span> <span class="n">np</span><span class="o">.</span><span class="n">random</span><span class="o">.</span><span class="n">rand</span><span class="p">(</span><span class="n">dim</span><span class="p">)</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">dtype</span><span class="p">)</span></div>
<span class="c1"># Note: When not using deepseek_yarn, make sure to set mscale_all_dim to 0.0.</span>
<div class="viewcode-block" id="RopeEmbeddingUtils.create_sinusoidal_positions_yarn">
<a class="viewcode-back" href="../../python-api/tensorrt_llm.functional.html#tensorrt_llm.functional.RopeEmbeddingUtils.create_sinusoidal_positions_yarn">[docs]</a>
<span class="nd">@staticmethod</span>
@@ -5725,24 +5776,19 @@
            mscale: float = 1.0,
            mscale_all_dim: float = 1.0,
            duplicate_data: bool = True,
-           dtype=np.float32):
+           dtype=torch.float32):
        # Copy from https://huggingface.co/deepseek-ai/DeepSeek-V2/blob/main/modeling_deepseek.py

        # Inverse dim formula to find dim based on number of rotations
-       def yarn_find_correction_dim(num_rotations,
-                                    dim,
-                                    base=10000,
-                                    max_position_embeddings=2048):
+       def yarn_find_correction_dim(num_rotations, dim, base,
+                                    max_position_embeddings):
            return (dim * math.log(max_position_embeddings /
                                   (num_rotations * 2 * math.pi))) / (
                                       2 * math.log(base))

        # Find dim range bounds based on rotations
-       def yarn_find_correction_range(low_rot,
-                                      high_rot,
-                                      dim,
-                                      base=10000,
-                                      max_position_embeddings=2048):
+       def yarn_find_correction_range(low_rot, high_rot, dim, base,
+                                      max_position_embeddings):
            low = math.floor(
                yarn_find_correction_dim(low_rot, dim, base,
                                         max_position_embeddings))
@@ -5755,7 +5801,7 @@
                high = dim - 1
            return low, high  # Clamp values just in case

-       def yarn_get_mscale(scale=1, mscale=1):
+       def yarn_get_mscale(scale, mscale):
            if scale <= 1:
                return 1.0
            return 0.1 * mscale * math.log(scale) + 1.0
@@ -5764,13 +5810,13 @@
            if min == max:
                max += 0.001  # Prevent singularity

-           linear_func = (np.arange(dim, dtype=dtype) - min) / (max - min)
-           ramp_func = np.clip(linear_func, 0, 1)
+           linear_func = (torch.arange(dim, dtype=dtype) - min) / (max - min)
+           ramp_func = torch.clamp(linear_func, 0, 1)
            return ramp_func

-       freq_extra = 1.0 / (base**(np.arange(0, dim, 2, dtype=dtype) / dim))
-       freq_inter = 1.0 / (scaling_factor *
-                           base**(np.arange(0, dim, 2, dtype=dtype) / dim))
+       pos_freqs = base**(torch.arange(0, dim, 2, dtype=dtype) / dim)
+       freq_extra = 1.0 / pos_freqs
+       freq_inter = 1.0 / (scaling_factor * pos_freqs)

        low, high = yarn_find_correction_range(
            beta_fast,
@@ -5779,28 +5825,23 @@
            base,
            original_max_position_embeddings,
        )
-       inv_freq_mask = 1.0 - yarn_linear_ramp_mask(low, high,
-                                                   dim // 2).astype(dtype)
+       inv_freq_mask = (1 - yarn_linear_ramp_mask(low, high, dim // 2))
        inv_freq = freq_inter * (1 - inv_freq_mask) + freq_extra * inv_freq_mask

-       sinusoid_inp = np.expand_dims(np.einsum("i , j -> i j",
-                                               np.arange(num_pos, dtype=dtype),
-                                               inv_freq,
-                                               dtype=dtype),
-                                     axis=-1)
+       t = torch.arange(num_pos, dtype=dtype)
+       sinusoid_inp = torch.einsum("i,j -> ij", t, inv_freq).unsqueeze(-1)
        _mscale = float(
            yarn_get_mscale(scaling_factor, mscale) /
            yarn_get_mscale(scaling_factor, mscale_all_dim))

        if duplicate_data:
-           emb = np.concatenate((sinusoid_inp, sinusoid_inp), axis=-2)
+           emb = torch.cat((sinusoid_inp, sinusoid_inp), dim=-2)
        else:
            emb = sinusoid_inp

-       concat = np.concatenate((np.cos(emb) * _mscale, np.sin(emb) * _mscale),
-                               axis=-1)
-       return inv_freq, concat.reshape((1, -1)).astype(dtype)
+       concat = torch.cat((torch.cos(emb) * _mscale, torch.sin(emb) * _mscale),
+                          dim=-1)
+       return inv_freq.numpy(), concat.reshape((1, -1)).to(dtype).numpy()
<div class="viewcode-block" id="RopeEmbeddingUtils.rotate_every_two">
@@ -8681,9 +8722,9 @@
          <div class="footer-item">
            <div class="extra_footer">
-             <p>Last updated on June 09, 2025.</p>
+             <p>Last updated on June 16, 2025.</p>
-             <p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
+             <p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
            </div></div>
View File

@@ -536,7 +537,8 @@
<span class="kn">from</span><span class="w"> </span><span class="nn">..executor.postproc_worker</span><span class="w"> </span><span class="kn">import</span> <span class="n">PostprocParams</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">..executor.utils</span><span class="w"> </span><span class="kn">import</span> <span class="p">(</span><span class="n">create_mpi_comm_session</span><span class="p">,</span>
<span class="n">get_spawn_proxy_process_env</span><span class="p">)</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">..inputs</span><span class="w"> </span><span class="kn">import</span> <span class="n">PromptInputs</span><span class="p">,</span> <span class="n">create_input_processor</span><span class="p">,</span> <span class="n">prompt_inputs</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">..inputs</span><span class="w"> </span><span class="kn">import</span> <span class="p">(</span><span class="n">PromptInputs</span><span class="p">,</span> <span class="n">create_input_processor</span><span class="p">,</span>
<span class="n">create_input_processor_with_hash</span><span class="p">,</span> <span class="n">prompt_inputs</span><span class="p">)</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">..logger</span><span class="w"> </span><span class="kn">import</span> <span class="n">logger</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">..sampling_params</span><span class="w"> </span><span class="kn">import</span> <span class="n">SamplingParams</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">.llm_args</span><span class="w"> </span><span class="kn">import</span> <span class="p">(</span><span class="n">LLMARGS_EXPLICIT_DOCSTRING</span><span class="p">,</span> <span class="n">PybindMirror</span><span class="p">,</span> <span class="n">TorchLlmArgs</span><span class="p">,</span>
@@ -643,6 +645,16 @@
        else:
            llm_args_cls = TrtLlmArgs

+       # check the kwargs and raise ValueError directly
+       valid_keys = set(
+           list(llm_args_cls.model_fields.keys()) +
+           ['_mpi_session', 'backend'])
+       for key in kwargs:
+           if key not in valid_keys:
+               raise ValueError(
+                   f"{self.__class__.__name__} got invalid argument: {key}"
+               )
        self.args = llm_args_cls.from_kwargs(
            model=model,
            tokenizer=tokenizer,
@@ -702,10 +714,10 @@
            self._build_model()
-       except Exception as e:
+       except Exception:
            if self.mpi_session is not None:
                self.mpi_session.shutdown()
-           raise e
+           raise

        exception_handler.register(self, 'shutdown')
        atexit.register(LLM._shutdown_wrapper, weakref.ref(self))
@@ -862,22 +874,38 @@
            sampling_params.add_special_tokens = False

        query_token_ids = None
+       multimodal_input = None
        multimodal_embedding = None
        mrope_config = None
        if "prompt_token_ids" in inputs:
+           # TODO: if specify prompt_token_ids, the mm hashing is not supported yet
            prompt_token_ids = inputs['prompt_token_ids']
            prompt = None
            query_token_ids = inputs.get("query_token_ids", None)
        elif "prompt" in inputs:
-           with nvtx_range_debug("input_processor"):
-               prompt_token_ids, extra_processed_inputs = self.input_processor(
-                   inputs, sampling_params)
+           if 'multi_modal_data' in inputs:
+               # TODO: The current design uses a wrapper for the existing input processor (input_processor_with_hash)
+               # to handle/add multimodal hashes, positions, and lengths. Now we only support image modality.
+               # In the future, we should refactor this to:
+               # 1. Extend support for more modalities and models
+               # 2. Decouple input processor into distinct phases (preprocessor (all preprocessing logic), vision model (fuse in model fwd), etc.)
+               input_processor_with_hash = create_input_processor_with_hash(
+                   self.input_processor)
+               with nvtx_range_debug("input_processor_with_hash"):
+                   prompt_token_ids, extra_processed_inputs = input_processor_with_hash(
+                       inputs, sampling_params)
+           else:
+               with nvtx_range_debug("input_processor"):
+                   prompt_token_ids, extra_processed_inputs = self.input_processor(
+                       inputs, sampling_params)
            prompt = inputs['prompt']
            if extra_processed_inputs is not None:
                query_token_ids = extra_processed_inputs.get('query_token_ids')
                multimodal_embedding = extra_processed_inputs.get(
                    'mm_embedding')
                mrope_config = extra_processed_inputs.get('mrope_config')
+               multimodal_input = extra_processed_inputs.get(
+                   'multimodal_input')
        else:
            raise TypeError(
                f"The inputs must be type str or list of int, but got {type(inputs)}"
@@ -897,6 +925,7 @@
            lora_request=lora_request,
            prompt_adapter_request=prompt_adapter_request,
            streaming=streaming,
+           multimodal_input=multimodal_input,
            multimodal_embedding=multimodal_embedding,
            mrope_config=mrope_config,
            kv_cache_retention_config=kv_cache_retention_config,
@@ -1124,7 +1153,7 @@
        max_num_tokens = max_num_tokens or build_config.max_num_tokens
        max_seq_len = max_seq_len or build_config.max_seq_len

-       executor_config = tllm.ExecutorConfig(
+       self._executor_config = tllm.ExecutorConfig(
            max_beam_width=self.args.max_beam_width,
            scheduler_config=PybindMirror.maybe_to_pybind(
                self.args.scheduler_config),
@@ -1136,20 +1165,20 @@
        if self.args.backend is None:
            # also set executor_config.max_seq_len in TRT workflow, to deduce default max_tokens
            if max_seq_len is not None:
-               executor_config.max_seq_len = max_seq_len
+               self._executor_config.max_seq_len = max_seq_len
            else:
                engine_config = EngineConfig.from_json_file(self._engine_dir /
                                                            "config.json")
-               executor_config.max_seq_len = engine_config.build_config.max_seq_len
+               self._executor_config.max_seq_len = engine_config.build_config.max_seq_len
        if self.args.kv_cache_config is not None:
-           executor_config.kv_cache_config = PybindMirror.maybe_to_pybind(
+           self._executor_config.kv_cache_config = PybindMirror.maybe_to_pybind(
                self.args.kv_cache_config)
        if os.getenv("FORCE_DETERMINISTIC", "0") == "1":
            # Disable KV cache reuse for deterministic mode
-           executor_config.kv_cache_config.enable_block_reuse = False
-           executor_config.kv_cache_config.enable_partial_reuse = False
+           self._executor_config.kv_cache_config.enable_block_reuse = False
+           self._executor_config.kv_cache_config.enable_partial_reuse = False
        if self.args.peft_cache_config is not None:
-           executor_config.peft_cache_config = PybindMirror.maybe_to_pybind(
+           self._executor_config.peft_cache_config = PybindMirror.maybe_to_pybind(
                self.args.peft_cache_config)
        elif self._on_trt_backend and self.args.build_config.plugin_config.lora_plugin:
            engine_config = EngineConfig.from_json_file(self._engine_dir /
@@ -1158,16 +1187,16 @@
             max_lora_rank = lora_config.max_lora_rank
             num_lora_modules = engine_config.pretrained_config.num_hidden_layers * \
                 len(lora_config.lora_target_modules + lora_config.missing_qkv_modules)
-            executor_config.peft_cache_config = tllm.PeftCacheConfig(
+            self._executor_config.peft_cache_config = tllm.PeftCacheConfig(
                 num_device_module_layer=max_lora_rank * num_lora_modules *
                 self.args.max_loras,
                 num_host_module_layer=max_lora_rank * num_lora_modules *
                 self.args.max_cpu_loras,
             )
         if self.args.decoding_config is not None:
-            executor_config.decoding_config = self.args.decoding_config
+            self._executor_config.decoding_config = self.args.decoding_config
         if self.args.guided_decoding_backend == 'xgrammar':
-            executor_config.guided_decoding_config = tllm.GuidedDecodingConfig(
+            self._executor_config.guided_decoding_config = tllm.GuidedDecodingConfig(
                 backend=tllm.GuidedDecodingConfig.GuidedDecodingBackend.
                 XGRAMMAR,
                 **_xgrammar_tokenizer_info(self.tokenizer))
@@ -1176,18 +1205,18 @@
                     f"Unrecognized guided decoding backend {self.args.guided_decoding_backend}"
                 )
-        executor_config.normalize_log_probs = self.args.normalize_log_probs
-        executor_config.enable_chunked_context = self.args.enable_chunked_prefill
-        executor_config.max_beam_width = self.args.max_beam_width or self.args.build_config.max_beam_width
+        self._executor_config.normalize_log_probs = self.args.normalize_log_probs
+        self._executor_config.enable_chunked_context = self.args.enable_chunked_prefill
+        self._executor_config.max_beam_width = self.args.max_beam_width or self.args.build_config.max_beam_width
         if self._on_trt_backend and self.args.extended_runtime_perf_knob_config is not None:
-            executor_config.extended_runtime_perf_knob_config = PybindMirror.maybe_to_pybind(
+            self._executor_config.extended_runtime_perf_knob_config = PybindMirror.maybe_to_pybind(
                 self.args.extended_runtime_perf_knob_config)
         if self.args.cache_transceiver_config is not None:
-            executor_config.cache_transceiver_config = PybindMirror.maybe_to_pybind(
+            self._executor_config.cache_transceiver_config = PybindMirror.maybe_to_pybind(
                 self.args.cache_transceiver_config)
         from tensorrt_llm._torch.pyexecutor.config import update_executor_config
         update_executor_config(
-            executor_config,
+            self._executor_config,
             backend=self.args.backend,
             pytorch_backend_config=self.args.get_pytorch_backend_config()
             if self.args.backend in ["pytorch", "_autodeploy"] else None,
@@ -1199,14 +1228,14 @@
             trt_engine_dir=self._engine_dir,
             max_input_len=self.args.max_input_len,
             max_seq_len=max_seq_len)
-        executor_config.llm_parallel_config = self.args.parallel_config
-        return_logits = self.args.gather_generation_logits or (
-            self._on_trt_backend and self.args.build_config
-            and self.args.build_config.gather_context_logits)
+        self._executor_config.llm_parallel_config = self.args.parallel_config
+        return_logits = (self.args.gather_generation_logits
+                         or (self.args.build_config
+                             and self.args.build_config.gather_context_logits))
         self._executor = self._executor_cls.create(
             self._engine_dir,
-            executor_config=executor_config,
+            executor_config=self._executor_config,
             batched_logits_processor=self.args.batched_logits_processor,
             model_world_size=self.args.parallel_config.world_size,
             mpi_session=self.mpi_session,
Source code for tensorrt_llm.llmapi.llm_args

@@ -507,7 +508,8 @@
-import json
+import copy
+import json
 import math
 import os
 from abc import ABC, abstractmethod
@@ -1333,7 +1335,7 @@
     """
     model_config = {
         "arbitrary_types_allowed": True,
-        "extra": "allow",
+        "extra": "forbid",
     }

     # Explicit arguments
@@ -1381,7 +1383,9 @@
         description="The context parallel size.")
     gpus_per_node: Optional[int] = Field(
-        default=None, description="The number of GPUs per node.")
+        default=None,
+        description="The number of GPUs per node.",
+        validate_default=True)
     moe_cluster_parallel_size: Optional[int] = Field(
         default=None,
@@ -1436,7 +1440,7 @@
     # Quantization and calibration configurations
     quant_config: Optional[QuantConfig] = Field(
-        default=None, description="Quantization config.")
+        default=None, description="Quantization config.", validate_default=True)

     # Several options from ExecutorConfig, expanded here for less hierarchy
     kv_cache_config: KvCacheConfig = Field(default_factory=KvCacheConfig,
@@ -1489,40 +1493,35 @@
                                 description="The maximum batch size.")
     # generation constraints
-    max_input_len: int = Field(default=1024,
-                               description="The maximum input length.")
+    max_input_len: Optional[int] = Field(
+        default=None, description="The maximum input length.")
     max_seq_len: Optional[int] = Field(
         default=None, description="The maximum sequence length.")
-    max_beam_width: int = Field(default=1,
-                                description="The maximum beam width.")
+    max_beam_width: Optional[int] = Field(default=None,
+                                          description="The maximum beam width.")
     max_num_tokens: Optional[int] = Field(
         default=None, description="The maximum number of tokens.")
-    backend: Optional[str] = Field(default=None,
-                                   description="The backend to use.",
-                                   exclude=True)
     gather_generation_logits: bool = Field(
         default=False, description="Gather generation logits.")

     # private fields those are unstable and just for internal use
     num_postprocess_workers: int = Field(
         default=0,
-        description="The number of postprocess worker processes.",
-        alias="_num_postprocess_workers")
+        description=
+        "The number of processes used for postprocessing the generated tokens, including detokenization."
+    )
     postprocess_tokenizer_dir: Optional[str] = Field(
         default=None,
-        description="The postprocess tokenizer directory.",
-        alias="_postprocess_tokenizer_dir")
+        description="The path to the tokenizer directory for postprocessing.")
     reasoning_parser: Optional[str] = Field(
         default=None,
-        description="The parser to separate reasoning content from output.",
-        alias="_reasoning_parser")
+        description="The parser to separate reasoning content from output.")

     # TODO[Superjomn]: To deprecate this config.
     decoding_config: Optional[object] = Field(
@@ -1536,51 +1535,37 @@
         default=None,
         description="The optional MPI session to use for this LLM instance.",
         json_schema_extra={"type": "Optional[MpiSession]"},
-        exclude=True,  # exclude from serialization
+        exclude=True,
         alias="_mpi_session")

-    @print_traceback_on_error
-    def model_post_init(self, __context: Any):
+    backend: Optional[str] = Field(
+        default=None,
+        description="The backend to use for this LLM instance.",
+        exclude_json_schema=True,  # hide from API references
+        validate_default=True,
+    )

-        if self.skip_tokenizer_init:
-            self.tokenizer = None
-        else:
-            self.tokenizer = tokenizer_factory(
-                self.tokenizer,
-                trust_remote_code=self.trust_remote_code,
-                use_fast=self.tokenizer_mode != 'slow')
+    _parallel_config: Optional[object] = PrivateAttr(default=None)
+    _model_format: Optional[_ModelFormatKind] = PrivateAttr(default=None)
+    _speculative_model: Optional[str] = PrivateAttr(default=None)
+    _speculative_model_format: Optional[_ModelFormatKind] = PrivateAttr(
+        default=None)

-        if torch.cuda.get_device_properties(0).major < 8:
-            if self.dtype == 'auto':
-                self.dtype = 'float16'
-            if self.dtype == 'bfloat16':
-                raise RuntimeError("Pre SM 80 GPUs do not support bfloat16")
+    @property
+    def parallel_config(self) -> _ParallelConfig:
+        return self._parallel_config

-        if self.gpus_per_node is None:
-            logger.warning(
-                f"Using default gpus_per_node: {torch.cuda.device_count()}")
-            self.gpus_per_node = torch.cuda.device_count()
-        assert self.gpus_per_node is not None
+    @property
+    def model_format(self) -> _ModelFormatKind:
+        return self._model_format

-        if self.moe_cluster_parallel_size is None:
-            self.moe_cluster_parallel_size = -1
+    @property
+    def speculative_model(self) -> Optional[_ModelFormatKind]:
+        return self._speculative_model

-        if self.moe_tensor_parallel_size is None:
-            self.moe_tensor_parallel_size = -1
-        if self.moe_expert_parallel_size is None:
-            self.moe_expert_parallel_size = -1
-        self.parallel_config = _ParallelConfig(
-            tp_size=self.tensor_parallel_size,
-            pp_size=self.pipeline_parallel_size,
-            cp_size=self.context_parallel_size,
-            gpus_per_node=self.gpus_per_node,
-            moe_cluster_size=self.moe_cluster_parallel_size,
-            moe_tp_size=self.moe_tensor_parallel_size,
-            moe_ep_size=self.moe_expert_parallel_size,
-            enable_attention_dp=self.enable_attention_dp,
-            cp_config=self.cp_config)
+    @property
+    def speculative_model_format(self) -> _ModelFormatKind:
+        return self._speculative_model_format
<span class="nd">@classmethod</span>
<span class="k">def</span><span class="w"> </span><span class="nf">from_kwargs</span><span class="p">(</span><span class="bp">cls</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">:</span> <span class="n">Any</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="s2">&quot;BaseLlmArgs&quot;</span><span class="p">:</span>
@ -1592,9 +1577,9 @@
<span class="sd"> Returns:</span>
<span class="sd"> tensorrt_llm.llmapi.llm_utils.BaseLlmArgs: The `BaseLlmArgs` instance.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="n">kwargs</span> <span class="o">=</span> <span class="n">BaseLlmArgs</span><span class="o">.</span><span class="n">_maybe_update_config_for_consistency</span><span class="p">(</span><span class="nb">dict</span><span class="p">(</span><span class="n">kwargs</span><span class="p">))</span>
<span class="n">kwargs</span> <span class="o">=</span> <span class="n">BaseLlmArgs</span><span class="o">.</span><span class="n">_check_consistency</span><span class="p">(</span><span class="nb">dict</span><span class="p">(</span><span class="n">kwargs</span><span class="p">))</span>
<span class="n">ret</span> <span class="o">=</span> <span class="bp">cls</span><span class="p">(</span><span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="n">ret</span><span class="o">.</span><span class="n">_setup</span><span class="p">()</span>
<span class="k">return</span> <span class="n">ret</span>
<span class="k">def</span><span class="w"> </span><span class="nf">to_dict</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="nb">dict</span><span class="p">:</span>
@@ -1603,11 +1588,13 @@
         Returns:
             dict: The dict that contains all fields of the `LlmArgs` instance.
         """
-        return self.model_dump()
+        model_dict = self.model_dump(mode='json')
+        # TODO: the BuildConfig.to_dict and from_dict don't work well with pydantic
+        model_dict['build_config'] = copy.deepcopy(self.build_config)
+        return model_dict
<span class="nd">@staticmethod</span>
<span class="k">def</span><span class="w"> </span><span class="nf">_maybe_update_config_for_consistency</span><span class="p">(</span>
<span class="n">kwargs_dict</span><span class="p">:</span> <span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">Any</span><span class="p">])</span> <span class="o">-&gt;</span> <span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">Any</span><span class="p">]:</span>
<span class="k">def</span><span class="w"> </span><span class="nf">_check_consistency</span><span class="p">(</span><span class="n">kwargs_dict</span><span class="p">:</span> <span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">Any</span><span class="p">])</span> <span class="o">-&gt;</span> <span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">Any</span><span class="p">]:</span>
<span class="c1"># max_beam_width is not included since vague behavior due to lacking the support for dynamic beam width during</span>
<span class="c1"># generation</span>
<span class="n">black_list</span> <span class="o">=</span> <span class="nb">set</span><span class="p">([</span><span class="s2">&quot;max_beam_width&quot;</span><span class="p">])</span>
@@ -1622,57 +1609,99 @@
             llm_args_attr
         ), f"New options found in underlying ExecutorConfig: {llm_args_attr - executor_config_attrs}"

         # ensure build_config and LlmArgsBase consistency
         if kwargs_dict.get("backend") != "pytorch" and kwargs_dict.get(
                 "build_config"):
             # TODO: move this to _perform_config_arbitration() once it's default-on.
             for field_name in [
                     "max_input_len", "max_seq_len", "max_beam_width"
             ]:
                 build_val = getattr(kwargs_dict["build_config"], field_name,
                                     None)
                 llmargs_val = kwargs_dict.get(
                     field_name) or BaseLlmArgs.model_fields[field_name]

                 if build_val != llmargs_val:
                     logger.warning(
                         f"Overriding LlmArgsBase.{field_name} ({llmargs_val}) with build_config.{field_name} ({build_val})."
                     )
                     kwargs_dict[field_name] = build_val
         return kwargs_dict
<span class="k">def</span><span class="w"> </span><span class="nf">_setup</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="w"> </span><span class="sd">&#39;&#39;&#39; This method will setup the configs right before building the model. &#39;&#39;&#39;</span>
<span class="nd">@field_validator</span><span class="p">(</span><span class="s2">&quot;dtype&quot;</span><span class="p">)</span>
<span class="nd">@classmethod</span>
<span class="k">def</span><span class="w"> </span><span class="nf">validate_dtype</span><span class="p">(</span><span class="bp">cls</span><span class="p">,</span> <span class="n">v</span><span class="p">,</span> <span class="n">info</span><span class="p">):</span>
<span class="k">if</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">get_device_properties</span><span class="p">(</span><span class="mi">0</span><span class="p">)</span><span class="o">.</span><span class="n">major</span> <span class="o">&lt;</span> <span class="mi">8</span><span class="p">:</span>
<span class="k">if</span> <span class="n">v</span> <span class="o">==</span> <span class="s1">&#39;auto&#39;</span><span class="p">:</span>
<span class="n">v</span> <span class="o">=</span> <span class="s1">&#39;float16&#39;</span>
<span class="k">if</span> <span class="n">v</span> <span class="o">==</span> <span class="s1">&#39;bfloat16&#39;</span><span class="p">:</span>
<span class="k">raise</span> <span class="ne">RuntimeError</span><span class="p">(</span><span class="s2">&quot;Pre SM 80 GPUs do not support bfloat16&quot;</span><span class="p">)</span>
<span class="k">return</span> <span class="n">v</span>
<span class="n">is_trt_llm_args</span> <span class="o">=</span> <span class="nb">isinstance</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">TrtLlmArgs</span><span class="p">)</span>
<span class="nd">@field_validator</span><span class="p">(</span><span class="s2">&quot;quant_config&quot;</span><span class="p">,</span> <span class="n">mode</span><span class="o">=</span><span class="s1">&#39;before&#39;</span><span class="p">)</span>
<span class="nd">@classmethod</span>
<span class="k">def</span><span class="w"> </span><span class="nf">validate_quant_config</span><span class="p">(</span><span class="bp">cls</span><span class="p">,</span> <span class="n">v</span><span class="p">,</span> <span class="n">info</span><span class="p">):</span>
<span class="k">if</span> <span class="n">v</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">v</span> <span class="o">=</span> <span class="n">QuantConfig</span><span class="p">()</span>
<span class="k">return</span> <span class="n">v</span>
<span class="k">assert</span> <span class="nb">isinstance</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">model</span><span class="p">,</span>
<span class="p">(</span><span class="nb">str</span><span class="p">,</span> <span class="n">Path</span><span class="p">)),</span> <span class="sa">f</span><span class="s2">&quot;Invalid model: </span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">model</span><span class="si">}</span><span class="s2">&quot;</span>
<span class="nd">@field_validator</span><span class="p">(</span><span class="s2">&quot;gpus_per_node&quot;</span><span class="p">,</span> <span class="n">mode</span><span class="o">=</span><span class="s1">&#39;before&#39;</span><span class="p">)</span>
<span class="nd">@classmethod</span>
<span class="k">def</span><span class="w"> </span><span class="nf">validate_gpus_per_node</span><span class="p">(</span><span class="bp">cls</span><span class="p">,</span> <span class="n">v</span><span class="p">,</span> <span class="n">info</span><span class="p">):</span>
<span class="k">if</span> <span class="n">v</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">logger</span><span class="o">.</span><span class="n">warning</span><span class="p">(</span>
<span class="sa">f</span><span class="s2">&quot;Using default gpus_per_node: </span><span class="si">{</span><span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">device_count</span><span class="p">()</span><span class="si">}</span><span class="s2">&quot;</span><span class="p">)</span>
<span class="n">v</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">device_count</span><span class="p">()</span>
<span class="k">return</span> <span class="n">v</span>
<span class="k">if</span> <span class="n">is_trt_llm_args</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_setup_embedding_parallel_mode</span><span class="p">()</span>
<span class="nd">@field_validator</span><span class="p">(</span><span class="s2">&quot;model&quot;</span><span class="p">)</span>
<span class="nd">@classmethod</span>
<span class="k">def</span><span class="w"> </span><span class="nf">validate_model</span><span class="p">(</span><span class="bp">cls</span><span class="p">,</span> <span class="n">v</span><span class="p">,</span> <span class="n">info</span><span class="p">):</span>
<span class="k">if</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">v</span><span class="p">,</span> <span class="p">(</span><span class="nb">str</span><span class="p">,</span> <span class="n">Path</span><span class="p">)):</span>
<span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;Invalid model: </span><span class="si">{</span><span class="n">v</span><span class="si">}</span><span class="s2">&quot;</span><span class="p">)</span>
<span class="k">return</span> <span class="n">v</span>
<span class="k">if</span> <span class="n">is_trt_llm_args</span> <span class="ow">and</span> <span class="bp">self</span><span class="o">.</span><span class="n">enable_build_cache</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">enable_build_cache</span> <span class="o">=</span> <span class="n">BuildCacheConfig</span><span class="p">()</span> <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">enable_build_cache</span><span class="p">,</span> <span class="nb">bool</span><span class="p">)</span> <span class="k">else</span> <span class="bp">self</span><span class="o">.</span><span class="n">enable_build_cache</span>
<span class="k">if</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">enable_build_cache</span><span class="p">,</span> <span class="n">BuildCacheConfig</span><span class="p">):</span>
<span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span>
<span class="sa">f</span><span class="s2">&quot;Invalid build_cache_config: </span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">enable_build_cache</span><span class="si">}</span><span class="s2">&quot;</span><span class="p">)</span>
<span class="nd">@model_validator</span><span class="p">(</span><span class="n">mode</span><span class="o">=</span><span class="s2">&quot;after&quot;</span><span class="p">)</span>
<span class="k">def</span><span class="w"> </span><span class="nf">validate_parallel_config</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">moe_cluster_parallel_size</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">moe_cluster_parallel_size</span> <span class="o">=</span> <span class="o">-</span><span class="mi">1</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">moe_tensor_parallel_size</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">moe_tensor_parallel_size</span> <span class="o">=</span> <span class="o">-</span><span class="mi">1</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">moe_expert_parallel_size</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">moe_expert_parallel_size</span> <span class="o">=</span> <span class="o">-</span><span class="mi">1</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_parallel_config</span> <span class="o">=</span> <span class="n">_ParallelConfig</span><span class="p">(</span>
<span class="n">tp_size</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">tensor_parallel_size</span><span class="p">,</span>
<span class="n">pp_size</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">pipeline_parallel_size</span><span class="p">,</span>
<span class="n">cp_size</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">context_parallel_size</span><span class="p">,</span>
<span class="n">gpus_per_node</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">gpus_per_node</span><span class="p">,</span>
<span class="n">moe_cluster_size</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">moe_cluster_parallel_size</span><span class="p">,</span>
<span class="n">moe_tp_size</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">moe_tensor_parallel_size</span><span class="p">,</span>
<span class="n">moe_ep_size</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">moe_expert_parallel_size</span><span class="p">,</span>
<span class="n">enable_attention_dp</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">enable_attention_dp</span><span class="p">,</span>
<span class="n">cp_config</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">cp_config</span><span class="p">)</span>
<span class="k">return</span> <span class="bp">self</span>
<span class="nd">@model_validator</span><span class="p">(</span><span class="n">mode</span><span class="o">=</span><span class="s2">&quot;after&quot;</span><span class="p">)</span>
<span class="k">def</span><span class="w"> </span><span class="nf">set_default_max_input_len</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_input_len</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">max_input_len</span> <span class="o">=</span> <span class="mi">1024</span>
<span class="k">return</span> <span class="bp">self</span>
<span class="nd">@model_validator</span><span class="p">(</span><span class="n">mode</span><span class="o">=</span><span class="s2">&quot;after&quot;</span><span class="p">)</span>
<span class="k">def</span><span class="w"> </span><span class="nf">validate_and_init_tokenizer</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;Initialize tokenizer based on configuration.&quot;&quot;&quot;</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">skip_tokenizer_init</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">tokenizer</span> <span class="o">=</span> <span class="kc">None</span>
<span class="k">else</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">tokenizer</span> <span class="o">=</span> <span class="n">tokenizer_factory</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">tokenizer</span><span class="p">,</span>
<span class="n">trust_remote_code</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">trust_remote_code</span><span class="p">,</span>
<span class="n">use_fast</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">tokenizer_mode</span> <span class="o">!=</span> <span class="s1">&#39;slow&#39;</span><span class="p">)</span>
<span class="k">return</span> <span class="bp">self</span>
<span class="nd">@model_validator</span><span class="p">(</span><span class="n">mode</span><span class="o">=</span><span class="s2">&quot;after&quot;</span><span class="p">)</span>
<span class="k">def</span><span class="w"> </span><span class="nf">validate_model_format_misc</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="w"> </span><span class="sd">&#39;&#39;&#39;</span>
<span class="sd"> Load the model format, and do the following:</span>
<span class="sd"> 1. Load the build_config if got an engine.</span>
<span class="sd"> 2. Load the parallel_config if got a checkpoint.</span>
<span class="sd"> &#39;&#39;&#39;</span>
<span class="n">model_obj</span> <span class="o">=</span> <span class="n">_ModelWrapper</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">model</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">speculative_model</span> <span class="o">=</span> <span class="nb">getattr</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">speculative_config</span><span class="p">,</span>
<span class="s2">&quot;speculative_model&quot;</span><span class="p">,</span> <span class="kc">None</span><span class="p">)</span>
<span class="n">speculative_model_obj</span> <span class="o">=</span> <span class="n">_ModelWrapper</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">speculative_model</span>
<span class="p">)</span> <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">speculative_model</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span> <span class="k">else</span> <span class="kc">None</span>
<span class="k">if</span> <span class="n">model_obj</span><span class="o">.</span><span class="n">is_local_model</span> <span class="ow">and</span> <span class="bp">self</span><span class="o">.</span><span class="n">backend</span> <span class="ow">not</span> <span class="ow">in</span> <span class="p">[</span>
<span class="s1">&#39;pytorch&#39;</span><span class="p">,</span> <span class="s1">&#39;_autodeploy&#39;</span>
<span class="p">]:</span>
<span class="c1"># Load parallel_config from the engine.</span>
<span class="bp">self</span><span class="o">.</span><span class="n">model_format</span> <span class="o">=</span> <span class="n">get_model_format</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">model</span><span class="p">)</span>
<span class="n">model_format</span> <span class="o">=</span> <span class="n">get_model_format</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">model</span><span class="p">)</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">model_format</span> <span class="ow">is</span> <span class="n">_ModelFormatKind</span><span class="o">.</span><span class="n">TLLM_ENGINE</span><span class="p">:</span>
<span class="k">if</span> <span class="n">model_format</span> <span class="ow">is</span> <span class="n">_ModelFormatKind</span><span class="o">.</span><span class="n">TLLM_ENGINE</span><span class="p">:</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">build_config</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">logger</span><span class="o">.</span><span class="n">warning</span><span class="p">(</span>
<span class="s2">&quot;The build_config is ignored for model format of TLLM_ENGINE.&quot;</span>
@ -1684,65 +1713,128 @@
<span class="n">runtime_defaults</span><span class="p">)</span>
<span class="c1"># Load parallel_config from the checkpoint.</span>
<span class="k">elif</span> <span class="bp">self</span><span class="o">.</span><span class="n">model_format</span> <span class="ow">is</span> <span class="n">_ModelFormatKind</span><span class="o">.</span><span class="n">TLLM_CKPT</span><span class="p">:</span>
<span class="k">elif</span> <span class="n">model_format</span> <span class="ow">is</span> <span class="n">_ModelFormatKind</span><span class="o">.</span><span class="n">TLLM_CKPT</span><span class="p">:</span>
<span class="c1"># We need to create a temporary instance to call _load_config_from_ckpt</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_load_config_from_ckpt</span><span class="p">(</span><span class="n">model_obj</span><span class="o">.</span><span class="n">model_dir</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">model_format</span> <span class="o">=</span> <span class="n">_ModelFormatKind</span><span class="o">.</span><span class="n">HF</span>
<span class="n">model_format</span> <span class="o">=</span> <span class="n">_ModelFormatKind</span><span class="o">.</span><span class="n">HF</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">speculative_model</span> <span class="ow">and</span> <span class="n">speculative_model_obj</span><span class="o">.</span><span class="n">is_local_model</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">speculative_model_format</span> <span class="o">=</span> <span class="n">_ModelFormatKind</span><span class="o">.</span><span class="n">HF</span>
<span class="c1"># Store the model format in the values</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_model_format</span> <span class="o">=</span> <span class="n">model_format</span>
<span class="k">return</span> <span class="bp">self</span>
<span class="bp">self</span><span class="o">.</span><span class="n">quant_config</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">quant_config</span> <span class="ow">or</span> <span class="n">QuantConfig</span><span class="p">()</span>
<span class="nd">@model_validator</span><span class="p">(</span><span class="n">mode</span><span class="o">=</span><span class="s2">&quot;after&quot;</span><span class="p">)</span>
<span class="k">def</span><span class="w"> </span><span class="nf">init_build_config</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Creating a default BuildConfig if none is provided</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">build_config</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">kwargs</span> <span class="o">=</span> <span class="p">{}</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_batch_size</span><span class="p">:</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s2">&quot;max_batch_size&quot;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_batch_size</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_num_tokens</span><span class="p">:</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s2">&quot;max_num_tokens&quot;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_num_tokens</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_seq_len</span><span class="p">:</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s2">&quot;max_seq_len&quot;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_seq_len</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_beam_width</span><span class="p">:</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s2">&quot;max_beam_width&quot;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_beam_width</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_input_len</span><span class="p">:</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s2">&quot;max_input_len&quot;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_input_len</span>
<span class="bp">self</span><span class="o">.</span><span class="n">build_config</span> <span class="o">=</span> <span class="n">BuildConfig</span><span class="p">(</span><span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="k">if</span> <span class="n">is_trt_llm_args</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">calib_config</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">calib_config</span> <span class="ow">or</span> <span class="n">CalibConfig</span><span class="p">()</span>
<span class="k">assert</span> <span class="nb">isinstance</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="p">,</span> <span class="n">BuildConfig</span>
<span class="p">),</span> <span class="sa">f</span><span class="s2">&quot;build_config is not initialized: </span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="si">}</span><span class="s2">&quot;</span>
<span class="k">return</span> <span class="bp">self</span>
<span class="nd">@model_validator</span><span class="p">(</span><span class="n">mode</span><span class="o">=</span><span class="s2">&quot;after&quot;</span><span class="p">)</span>
<span class="k">def</span><span class="w"> </span><span class="nf">set_runtime_knobs_from_build_config</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="c1"># TODO: remove this after PyT become default to adapt PyT with build_config as input</span>
<span class="k">assert</span> <span class="bp">self</span><span class="o">.</span><span class="n">build_config</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">,</span> <span class="s2">&quot;build_config is not initialized&quot;</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">backend</span> <span class="o">==</span> <span class="s2">&quot;pytorch&quot;</span><span class="p">:</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="p">:</span>
<span class="k">for</span> <span class="n">key</span> <span class="ow">in</span> <span class="p">[</span>
<span class="s2">&quot;max_batch_size&quot;</span><span class="p">,</span> <span class="s2">&quot;max_num_tokens&quot;</span><span class="p">,</span> <span class="s2">&quot;max_seq_len&quot;</span><span class="p">,</span>
<span class="s2">&quot;max_input_len&quot;</span><span class="p">,</span> <span class="s2">&quot;max_beam_width&quot;</span>
<span class="p">]:</span>
<span class="k">if</span> <span class="nb">getattr</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="p">,</span> <span class="n">key</span><span class="p">)</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="k">if</span> <span class="p">(</span><span class="n">v</span> <span class="o">:=</span> <span class="nb">getattr</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">key</span><span class="p">,</span>
<span class="kc">None</span><span class="p">))</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span> <span class="ow">and</span> <span class="n">v</span> <span class="o">!=</span> <span class="nb">getattr</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="p">,</span> <span class="n">key</span><span class="p">):</span>
<span class="n">logger</span><span class="o">.</span><span class="n">warning</span><span class="p">(</span>
<span class="sa">f</span><span class="s2">&quot;overriding </span><span class="si">{</span><span class="n">key</span><span class="si">}</span><span class="s2"> from build_config&quot;</span><span class="p">)</span>
<span class="nb">setattr</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">key</span><span class="p">,</span> <span class="nb">getattr</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="p">,</span> <span class="n">key</span><span class="p">))</span>
<span class="k">return</span> <span class="bp">self</span>
<span class="nd">@model_validator</span><span class="p">(</span><span class="n">mode</span><span class="o">=</span><span class="s2">&quot;after&quot;</span><span class="p">)</span>
<span class="k">def</span><span class="w"> </span><span class="nf">validate_build_config_with_runtime_params</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="c1"># Note: max_batch_size and max_num_tokens in LlmArgs are for runtime,</span>
<span class="c1"># which will be passed to the C++ Executor API, overwriting the values</span>
<span class="c1"># from an built engine. In order to set build configuration, it is</span>
<span class="c1"># recommended to use build_config instead.</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">build_config</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_batch_size</span> <span class="ow">and</span> <span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">max_batch_size</span> <span class="o">!=</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_batch_size</span><span class="p">:</span>
<span class="k">assert</span> <span class="nb">isinstance</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="p">,</span> <span class="n">BuildConfig</span>
<span class="p">),</span> <span class="sa">f</span><span class="s2">&quot;build_config is not initialized: </span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="si">}</span><span class="s2">&quot;</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_batch_size</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_batch_size</span> <span class="o">&gt;</span> <span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">max_batch_size</span><span class="p">:</span>
<span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span>
<span class="sa">f</span><span class="s2">&quot;max_batch_size [</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">max_batch_size</span><span class="si">}</span><span class="s2">] is greater than build_config.max_batch_size [</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">max_batch_size</span><span class="si">}</span><span class="s2">] in build_config&quot;</span>
<span class="p">)</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_num_tokens</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_num_tokens</span> <span class="o">&gt;</span> <span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">max_num_tokens</span><span class="p">:</span>
<span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span>
<span class="sa">f</span><span class="s2">&quot;max_num_tokens [</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">max_num_tokens</span><span class="si">}</span><span class="s2">] is greater than build_config.max_num_tokens [</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">max_num_tokens</span><span class="si">}</span><span class="s2">] in build_config&quot;</span>
<span class="p">)</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_seq_len</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_seq_len</span> <span class="o">!=</span> <span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">max_seq_len</span><span class="p">:</span>
<span class="n">logger</span><span class="o">.</span><span class="n">warning</span><span class="p">(</span>
<span class="sa">f</span><span class="s2">&quot;Conflict detected in LlmArgs build_config.max_batch_size &quot;</span>
<span class="sa">f</span><span class="s2">&quot;(</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">max_batch_size</span><span class="si">}</span><span class="s2">) != max_batch_size (</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">max_batch_size</span><span class="si">}</span><span class="s2">).&quot;</span>
<span class="sa">f</span><span class="s2">&quot;The &#39;max_batch_size&#39; specified in LlmArgs is ignored at &quot;</span>
<span class="sa">f</span><span class="s2">&quot;engine build and will override at runtime.&quot;</span><span class="p">)</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_num_tokens</span> <span class="ow">and</span> <span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">max_num_tokens</span> <span class="o">!=</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_num_tokens</span><span class="p">:</span>
<span class="sa">f</span><span class="s2">&quot;max_seq_len [</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">max_seq_len</span><span class="si">}</span><span class="s2">] is overridden by build_config.max_seq_len [</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">max_seq_len</span><span class="si">}</span><span class="s2">] in build_config&quot;</span>
<span class="p">)</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_beam_width</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_beam_width</span> <span class="o">!=</span> <span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">max_beam_width</span><span class="p">:</span>
<span class="n">logger</span><span class="o">.</span><span class="n">warning</span><span class="p">(</span>
<span class="sa">f</span><span class="s2">&quot;Conflict detected in LlmArgs build_config.max_num_tokens &quot;</span>
<span class="sa">f</span><span class="s2">&quot;(</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">max_num_tokens</span><span class="si">}</span><span class="s2">) != max_batch_size (</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">max_num_tokens</span><span class="si">}</span><span class="s2">).&quot;</span>
<span class="sa">f</span><span class="s2">&quot;The &#39;max_num_tokens&#39; specified in LlmArgs is ignored at &quot;</span>
<span class="sa">f</span><span class="s2">&quot;engine build and will override at runtime.&quot;</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">build_config</span> <span class="o">=</span> <span class="n">BuildConfig</span><span class="p">()</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_batch_size</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">max_batch_size</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_batch_size</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_num_tokens</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">max_num_tokens</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_num_tokens</span>
<span class="sa">f</span><span class="s2">&quot;max_beam_width [</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">max_beam_width</span><span class="si">}</span><span class="s2">] is overridden by build_config.max_beam_width [</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">max_beam_width</span><span class="si">}</span><span class="s2">] in build_config&quot;</span>
<span class="p">)</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_input_len</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_input_len</span> <span class="o">!=</span> <span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">max_input_len</span><span class="p">:</span>
<span class="n">logger</span><span class="o">.</span><span class="n">warning</span><span class="p">(</span>
<span class="sa">f</span><span class="s2">&quot;max_input_len [</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">max_input_len</span><span class="si">}</span><span class="s2">] is overridden by build_config.max_input_len [</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">max_input_len</span><span class="si">}</span><span class="s2">] in build_config&quot;</span>
<span class="p">)</span>
<span class="k">return</span> <span class="bp">self</span>
<span class="nd">@model_validator</span><span class="p">(</span><span class="n">mode</span><span class="o">=</span><span class="s2">&quot;after&quot;</span><span class="p">)</span>
<span class="k">def</span><span class="w"> </span><span class="nf">validate_build_config_remaining</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="n">is_trt_llm_args</span> <span class="o">=</span> <span class="nb">isinstance</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">TrtLlmArgs</span><span class="p">)</span>
<span class="c1"># TODO: remove the checker when manage weights support all data types</span>
<span class="k">if</span> <span class="n">is_trt_llm_args</span> <span class="ow">and</span> <span class="bp">self</span><span class="o">.</span><span class="n">fast_build</span> <span class="ow">and</span> <span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">quant_config</span><span class="o">.</span><span class="n">quant_algo</span> <span class="ow">is</span> <span class="n">QuantAlgo</span><span class="o">.</span><span class="n">FP8</span>
<span class="ow">or</span> <span class="bp">self</span><span class="o">.</span><span class="n">quant_config</span><span class="o">.</span><span class="n">quant_algo</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">):</span>
<span class="k">if</span> <span class="n">is_trt_llm_args</span> <span class="ow">and</span> <span class="bp">self</span><span class="o">.</span><span class="n">fast_build</span> <span class="ow">and</span> <span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">quant_config</span><span class="o">.</span><span class="n">quant_algo</span>
<span class="ow">is</span> <span class="n">QuantAlgo</span><span class="o">.</span><span class="n">FP8</span><span class="p">):</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_update_plugin_config</span><span class="p">(</span><span class="s2">&quot;manage_weights&quot;</span><span class="p">,</span> <span class="kc">True</span><span class="p">)</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">parallel_config</span><span class="o">.</span><span class="n">_world_size</span> <span class="o">==</span> <span class="mi">1</span><span class="p">:</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">parallel_config</span><span class="o">.</span><span class="n">_world_size</span> <span class="o">==</span> <span class="mi">1</span> <span class="ow">and</span> <span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">plugin_config</span><span class="o">.</span><span class="n">nccl_plugin</span> <span class="o">=</span> <span class="kc">None</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_ensure_lora_config_consistency</span><span class="p">()</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">enable_lora</span> <span class="ow">and</span> <span class="bp">self</span><span class="o">.</span><span class="n">lora_config</span> <span class="ow">is</span> <span class="kc">None</span> <span class="ow">and</span> <span class="bp">self</span><span class="o">.</span><span class="n">backend</span> <span class="o">!=</span> <span class="s1">&#39;pytorch&#39;</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">plugin_config</span><span class="o">.</span><span class="n">lora_plugin</span> <span class="o">=</span> <span class="s1">&#39;auto&#39;</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_lora_rank</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">lora_config</span><span class="o">.</span><span class="n">max_lora_rank</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_lora_rank</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_setup_speculative_config</span><span class="p">()</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">enable_prompt_adapter</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">max_prompt_embedding_table_size</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_prompt_adapter_token</span> <span class="o">*</span> <span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">max_batch_size</span>
<span class="k">def</span><span class="w"> </span><span class="nf">_setup_speculative_config</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_beam_width</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">max_beam_width</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">max_beam_width</span>
<span class="k">else</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">max_beam_width</span> <span class="o">=</span> <span class="mi">1</span>
<span class="k">return</span> <span class="bp">self</span>
<span class="nd">@model_validator</span><span class="p">(</span><span class="n">mode</span><span class="o">=</span><span class="s2">&quot;after&quot;</span><span class="p">)</span>
<span class="k">def</span><span class="w"> </span><span class="nf">validate_speculative_config</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">speculative_config</span><span class="p">:</span>
<span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">speculative_config</span><span class="p">,</span> <span class="n">LookaheadDecodingConfig</span><span class="p">):</span>
<span class="n">lookahead_config</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">speculative_config</span>
@ -1821,7 +1913,18 @@
<span class="k">else</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">decoding_config</span> <span class="o">=</span> <span class="kc">None</span>
<span class="k">def</span><span class="w"> </span><span class="nf">_ensure_lora_config_consistency</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_speculative_model</span> <span class="o">=</span> <span class="nb">getattr</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">speculative_config</span><span class="p">,</span>
<span class="s2">&quot;speculative_model&quot;</span><span class="p">,</span> <span class="kc">None</span><span class="p">)</span>
<span class="n">speculative_model_obj</span> <span class="o">=</span> <span class="n">_ModelWrapper</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_speculative_model</span>
<span class="p">)</span> <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">_speculative_model</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span> <span class="k">else</span> <span class="kc">None</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">_speculative_model</span> <span class="ow">and</span> <span class="n">speculative_model_obj</span><span class="o">.</span><span class="n">is_local_model</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_speculative_model_format</span> <span class="o">=</span> <span class="n">_ModelFormatKind</span><span class="o">.</span><span class="n">HF</span>
<span class="k">return</span> <span class="bp">self</span>
<span class="nd">@model_validator</span><span class="p">(</span><span class="n">mode</span><span class="o">=</span><span class="s2">&quot;after&quot;</span><span class="p">)</span>
<span class="k">def</span><span class="w"> </span><span class="nf">validate_lora_config_consistency</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">lora_config</span><span class="p">:</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_lora_rank</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">logger</span><span class="o">.</span><span class="n">warning</span><span class="p">(</span>
@ -1857,10 +1960,7 @@
<span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">lora_config</span><span class="o">.</span><span class="n">lora_target_modules</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span>
<span class="n">default_trtllm_modules_to_hf_modules</span><span class="o">.</span><span class="n">keys</span><span class="p">())</span>
<span class="nd">@property</span>
<span class="k">def</span><span class="w"> </span><span class="nf">_build_config_mutable</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="nb">bool</span><span class="p">:</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">model_format</span> <span class="ow">is</span> <span class="ow">not</span> <span class="n">_ModelFormatKind</span><span class="o">.</span><span class="n">TLLM_ENGINE</span>
<span class="k">return</span> <span class="bp">self</span>
<span class="k">def</span><span class="w"> </span><span class="nf">_update_plugin_config</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">key</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span> <span class="n">value</span><span class="p">:</span> <span class="n">Any</span><span class="p">):</span>
<span class="nb">setattr</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">plugin_config</span><span class="p">,</span> <span class="n">key</span><span class="p">,</span> <span class="n">value</span><span class="p">)</span>
@ -1884,7 +1984,7 @@
<span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span>
<span class="sa">f</span><span class="s2">&quot;cp_size </span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">parallel_config</span><span class="o">.</span><span class="n">cp_size</span><span class="si">}</span><span class="s2"> is not consistent with the engine&#39;s cp_size </span><span class="si">{</span><span class="n">mapping</span><span class="o">.</span><span class="n">cp_size</span><span class="si">}</span><span class="s2">&quot;</span>
<span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">parallel_config</span> <span class="o">=</span> <span class="n">_ParallelConfig</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_parallel_config</span> <span class="o">=</span> <span class="n">_ParallelConfig</span><span class="p">(</span>
<span class="n">tp_size</span><span class="o">=</span><span class="n">mapping</span><span class="o">.</span><span class="n">tp_size</span><span class="p">,</span>
<span class="n">pp_size</span><span class="o">=</span><span class="n">mapping</span><span class="o">.</span><span class="n">pp_size</span><span class="p">,</span>
<span class="n">cp_size</span><span class="o">=</span><span class="n">mapping</span><span class="o">.</span><span class="n">cp_size</span><span class="p">,</span>
@ -1923,7 +2023,7 @@
<span class="sa">f</span><span class="s2">&quot;auto parallel with world_size </span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">parallel_config</span><span class="o">.</span><span class="n">world_size</span><span class="si">}</span><span class="s2"> does not support checkpoint with &quot;</span>
<span class="s2">&quot;world_size </span><span class="si">{world_size}</span><span class="s2"> &gt; 1&quot;</span><span class="p">)</span>
<span class="k">if</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">parallel_config</span><span class="o">.</span><span class="n">auto_parallel</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">parallel_config</span> <span class="o">=</span> <span class="n">_ParallelConfig</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_parallel_config</span> <span class="o">=</span> <span class="n">_ParallelConfig</span><span class="p">(</span>
<span class="n">tp_size</span><span class="o">=</span><span class="n">tp_size</span><span class="p">,</span>
<span class="n">pp_size</span><span class="o">=</span><span class="n">pp_size</span><span class="p">,</span>
<span class="n">cp_size</span><span class="o">=</span><span class="n">cp_size</span><span class="p">,</span>
@ -1932,20 +2032,6 @@
<span class="n">moe_tp_size</span><span class="o">=</span><span class="n">moe_tp_size</span><span class="p">,</span>
<span class="n">moe_ep_size</span><span class="o">=</span><span class="n">moe_ep_size</span><span class="p">)</span>
<span class="k">def</span><span class="w"> </span><span class="nf">_setup_embedding_parallel_mode</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">embedding_parallel_mode</span> <span class="o">==</span> <span class="s1">&#39;NONE&#39;</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_convert_checkpoint_options</span><span class="p">[</span><span class="s1">&#39;use_parallel_embedding&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="kc">False</span>
<span class="k">elif</span> <span class="bp">self</span><span class="o">.</span><span class="n">embedding_parallel_mode</span> <span class="o">==</span> <span class="s1">&#39;SHARDING_ALONG_VOCAB&#39;</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_convert_checkpoint_options</span><span class="p">[</span><span class="s1">&#39;use_parallel_embedding&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="kc">True</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_convert_checkpoint_options</span><span class="p">[</span><span class="s1">&#39;embedding_sharding_dim&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="mi">0</span>
<span class="k">elif</span> <span class="bp">self</span><span class="o">.</span><span class="n">embedding_parallel_mode</span> <span class="o">==</span> <span class="s1">&#39;SHARDING_ALONG_HIDDEN&#39;</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_convert_checkpoint_options</span><span class="p">[</span><span class="s1">&#39;use_parallel_embedding&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="kc">True</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_convert_checkpoint_options</span><span class="p">[</span><span class="s1">&#39;embedding_sharding_dim&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="mi">1</span>
<span class="k">else</span><span class="p">:</span>
<span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span>
<span class="sa">f</span><span class="s2">&quot;Invalid embedding_parallel_mode: </span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">llm_args</span><span class="o">.</span><span class="n">embedding_parallel_mode</span><span class="si">}</span><span class="s2">&quot;</span>
<span class="p">)</span>
<div class="viewcode-block" id="TrtLlmArgs">
<a class="viewcode-back" href="../../../llm-api/reference.html#tensorrt_llm.llmapi.TrtLlmArgs">[docs]</a>
@ -1968,12 +2054,6 @@
<span class="n">enable_tqdm</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span><span class="n">default</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span><span class="s2">&quot;Enable tqdm for progress bar.&quot;</span><span class="p">)</span>
<span class="c1"># BuildConfig is introduced to give users a familiar interface to configure the model building.</span>
<span class="n">build_config</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">object</span><span class="p">]</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span><span class="s2">&quot;Build config.&quot;</span><span class="p">,</span>
<span class="n">json_schema_extra</span><span class="o">=</span><span class="p">{</span><span class="s2">&quot;type&quot;</span><span class="p">:</span> <span class="sa">f</span><span class="s2">&quot;Optional[</span><span class="si">{</span><span class="n">get_type_repr</span><span class="p">(</span><span class="n">BuildConfig</span><span class="p">)</span><span class="si">}</span><span class="s2">]&quot;</span><span class="p">})</span>
<span class="n">workspace</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span><span class="n">default</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span><span class="s2">&quot;The workspace for the model.&quot;</span><span class="p">)</span>
@@ -1990,7 +2070,7 @@
        default=None, description="Extended runtime perf knob config.")

    calib_config: Optional[CalibConfig] = Field(
        default=None, description="Calibration config.", validate_default=True)

    embedding_parallel_mode: str = Field(
        default='SHARDING_ALONG_VOCAB',
@@ -1998,6 +2078,12 @@
<span class="n">fast_build</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span><span class="n">default</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">description</span><span class="o">=</span><span class="s2">&quot;Enable fast build.&quot;</span><span class="p">)</span>
<span class="c1"># BuildConfig is introduced to give users a familiar interface to configure the model building.</span>
<span class="n">build_config</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">object</span><span class="p">]</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span><span class="s2">&quot;Build config.&quot;</span><span class="p">,</span>
<span class="n">json_schema_extra</span><span class="o">=</span><span class="p">{</span><span class="s2">&quot;type&quot;</span><span class="p">:</span> <span class="sa">f</span><span class="s2">&quot;Optional[</span><span class="si">{</span><span class="n">get_type_repr</span><span class="p">(</span><span class="n">BuildConfig</span><span class="p">)</span><span class="si">}</span><span class="s2">]&quot;</span><span class="p">})</span>
<span class="c1"># Private attributes</span>
<span class="n">_auto_parallel_config</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">AutoParallelConfig</span><span class="p">]</span> <span class="o">=</span> <span class="n">PrivateAttr</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="kc">None</span><span class="p">)</span>
@ -2009,12 +2095,36 @@
<span class="k">def</span><span class="w"> </span><span class="nf">auto_parallel_config</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">AutoParallelConfig</span><span class="p">:</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_auto_parallel_config</span>
<div class="viewcode-block" id="TrtLlmArgs.model_post_init">
<a class="viewcode-back" href="../../../llm-api/reference.html#tensorrt_llm.llmapi.TrtLlmArgs.model_post_init">[docs]</a>
<span class="nd">@print_traceback_on_error</span>
<span class="k">def</span><span class="w"> </span><span class="nf">model_post_init</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">__context</span><span class="p">):</span>
<span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="n">model_post_init</span><span class="p">(</span><span class="n">__context</span><span class="p">)</span>
<div class="viewcode-block" id="TrtLlmArgs.init_calib_config">
<a class="viewcode-back" href="../../../llm-api/reference.html#tensorrt_llm.llmapi.TrtLlmArgs.init_calib_config">[docs]</a>
<span class="nd">@field_validator</span><span class="p">(</span><span class="s1">&#39;calib_config&#39;</span><span class="p">,</span> <span class="n">mode</span><span class="o">=</span><span class="s1">&#39;before&#39;</span><span class="p">)</span>
<span class="nd">@classmethod</span>
<span class="k">def</span><span class="w"> </span><span class="nf">init_calib_config</span><span class="p">(</span><span class="bp">cls</span><span class="p">,</span> <span class="n">v</span><span class="p">):</span>
<span class="k">if</span> <span class="n">v</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
<span class="k">return</span> <span class="n">CalibConfig</span><span class="p">()</span>
<span class="k">return</span> <span class="n">v</span></div>
<div class="viewcode-block" id="TrtLlmArgs.setup_embedding_parallel_mode">
<a class="viewcode-back" href="../../../llm-api/reference.html#tensorrt_llm.llmapi.TrtLlmArgs.setup_embedding_parallel_mode">[docs]</a>
<span class="nd">@model_validator</span><span class="p">(</span><span class="n">mode</span><span class="o">=</span><span class="s2">&quot;after&quot;</span><span class="p">)</span>
<span class="k">def</span><span class="w"> </span><span class="nf">setup_embedding_parallel_mode</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">embedding_parallel_mode</span> <span class="o">==</span> <span class="s1">&#39;NONE&#39;</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_convert_checkpoint_options</span><span class="p">[</span><span class="s1">&#39;use_parallel_embedding&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="kc">False</span>
<span class="k">elif</span> <span class="bp">self</span><span class="o">.</span><span class="n">embedding_parallel_mode</span> <span class="o">==</span> <span class="s1">&#39;SHARDING_ALONG_VOCAB&#39;</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_convert_checkpoint_options</span><span class="p">[</span><span class="s1">&#39;use_parallel_embedding&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="kc">True</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_convert_checkpoint_options</span><span class="p">[</span><span class="s1">&#39;embedding_sharding_dim&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="mi">0</span>
<span class="k">elif</span> <span class="bp">self</span><span class="o">.</span><span class="n">embedding_parallel_mode</span> <span class="o">==</span> <span class="s1">&#39;SHARDING_ALONG_HIDDEN&#39;</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_convert_checkpoint_options</span><span class="p">[</span><span class="s1">&#39;use_parallel_embedding&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="kc">True</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_convert_checkpoint_options</span><span class="p">[</span><span class="s1">&#39;embedding_sharding_dim&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="mi">1</span>
<span class="c1"># No else clause needed since validation already happened</span>
<span class="k">return</span> <span class="bp">self</span></div>
<div class="viewcode-block" id="TrtLlmArgs.validate_auto_parallel">
<a class="viewcode-back" href="../../../llm-api/reference.html#tensorrt_llm.llmapi.TrtLlmArgs.validate_auto_parallel">[docs]</a>
<span class="nd">@model_validator</span><span class="p">(</span><span class="n">mode</span><span class="o">=</span><span class="s2">&quot;after&quot;</span><span class="p">)</span>
<span class="k">def</span><span class="w"> </span><span class="nf">validate_auto_parallel</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_auto_parallel_config</span> <span class="o">=</span> <span class="n">AutoParallelConfig</span><span class="p">(</span>
<span class="n">sharded_io_allowlist</span><span class="o">=</span><span class="p">[</span>
<span class="s2">&quot;past_key_value_</span><span class="se">\\</span><span class="s2">d+&quot;</span><span class="p">,</span>
@ -2029,7 +2139,23 @@
<span class="bp">self</span><span class="o">.</span><span class="n">parallel_config</span><span class="o">.</span><span class="n">auto_parallel</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">auto_parallel</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">parallel_config</span><span class="o">.</span><span class="n">auto_parallel</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">parallel_config</span><span class="o">.</span><span class="n">world_size</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">auto_parallel_world_size</span></div>
<span class="bp">self</span><span class="o">.</span><span class="n">parallel_config</span><span class="o">.</span><span class="n">world_size</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">auto_parallel_world_size</span>
<span class="k">return</span> <span class="bp">self</span></div>
<div class="viewcode-block" id="TrtLlmArgs.validate_enable_build_cache">
<a class="viewcode-back" href="../../../llm-api/reference.html#tensorrt_llm.llmapi.TrtLlmArgs.validate_enable_build_cache">[docs]</a>
<span class="nd">@model_validator</span><span class="p">(</span><span class="n">mode</span><span class="o">=</span><span class="s2">&quot;after&quot;</span><span class="p">)</span>
<span class="k">def</span><span class="w"> </span><span class="nf">validate_enable_build_cache</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="k">if</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">enable_build_cache</span><span class="p">:</span>
<span class="k">return</span> <span class="bp">self</span>
<span class="bp">self</span><span class="o">.</span><span class="n">enable_build_cache</span> <span class="o">=</span> <span class="n">BuildCacheConfig</span><span class="p">()</span> <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">enable_build_cache</span><span class="p">,</span> <span class="nb">bool</span><span class="p">)</span> <span class="k">else</span> <span class="bp">self</span><span class="o">.</span><span class="n">enable_build_cache</span>
<span class="k">if</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">enable_build_cache</span><span class="p">,</span> <span class="n">BuildCacheConfig</span><span class="p">):</span>
<span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span>
<span class="sa">f</span><span class="s2">&quot;Invalid build_cache_config: </span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">enable_build_cache</span><span class="si">}</span><span class="s2">&quot;</span><span class="p">)</span>
<span class="k">return</span> <span class="bp">self</span></div>
</div>
@ -2046,6 +2172,30 @@
<span class="n">DUMMY</span> <span class="o">=</span> <span class="mi">1</span>
<div class="viewcode-block" id="TorchCompileConfig">
<a class="viewcode-back" href="../../../llm-api/reference.html#tensorrt_llm.llmapi.TorchCompileConfig">[docs]</a>
<span class="k">class</span><span class="w"> </span><span class="nc">TorchCompileConfig</span><span class="p">(</span><span class="n">BaseModel</span><span class="p">):</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Configuration for torch.compile.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="n">torch_compile_fullgraph</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span><span class="s2">&quot;Enable full graph compilation in torch.compile.&quot;</span><span class="p">)</span>
<span class="n">torch_compile_inductor_enabled</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">description</span><span class="o">=</span><span class="s2">&quot;Enable inductor backend in torch.compile.&quot;</span><span class="p">)</span>
<span class="n">torch_compile_piecewise_cuda_graph</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span><span class="s2">&quot;Enable piecewise CUDA graph in torch.compile.&quot;</span><span class="p">)</span>
<span class="n">torch_compile_enable_userbuffers</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span>
<span class="s2">&quot;When torch compile is enabled, userbuffers is enabled by default.&quot;</span><span class="p">)</span></div>
<div class="viewcode-block" id="TorchLlmArgs">
<a class="viewcode-back" href="../../../llm-api/reference.html#tensorrt_llm.llmapi.TorchLlmArgs">[docs]</a>
<span class="k">class</span><span class="w"> </span><span class="nc">TorchLlmArgs</span><span class="p">(</span><span class="n">BaseLlmArgs</span><span class="p">):</span>
@ -2113,9 +2263,6 @@
<span class="n">kv_cache_dtype</span><span class="p">:</span> <span class="nb">str</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span><span class="n">default</span><span class="o">=</span><span class="s2">&quot;auto&quot;</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span><span class="s2">&quot;Data type for KV cache.&quot;</span><span class="p">)</span>
<span class="n">use_kv_cache</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span><span class="n">default</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span><span class="s2">&quot;Whether to use KV cache.&quot;</span><span class="p">)</span>
<span class="n">enable_iter_perf_stats</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">description</span><span class="o">=</span><span class="s2">&quot;Enable iteration performance statistics.&quot;</span><span class="p">)</span>
@ -2128,24 +2275,8 @@
<span class="n">print_iter_log</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span><span class="n">default</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span><span class="s2">&quot;Print iteration logs.&quot;</span><span class="p">)</span>
<span class="n">torch_compile_enabled</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">description</span><span class="o">=</span><span class="s2">&quot;Enable torch.compile optimization.&quot;</span><span class="p">)</span>
<span class="n">torch_compile_fullgraph</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span><span class="s2">&quot;Enable full graph compilation in torch.compile.&quot;</span><span class="p">)</span>
<span class="n">torch_compile_inductor_enabled</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">description</span><span class="o">=</span><span class="s2">&quot;Enable inductor backend in torch.compile.&quot;</span><span class="p">)</span>
<span class="n">torch_compile_piecewise_cuda_graph</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span><span class="s2">&quot;Enable piecewise CUDA graph in torch.compile.&quot;</span><span class="p">)</span>
<span class="n">torch_compile_enable_userbuffers</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span>
<span class="s2">&quot;When torch compile is enabled, userbuffers is enabled by default.&quot;</span><span class="p">)</span>
<span class="n">torch_compile_config</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">TorchCompileConfig</span><span class="p">]</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">description</span><span class="o">=</span><span class="s2">&quot;Torch compile config.&quot;</span><span class="p">)</span>
<span class="n">autotuner_enabled</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span>
@ -2154,12 +2285,6 @@
<span class="n">enable_layerwise_nvtx_marker</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">description</span><span class="o">=</span><span class="s2">&quot;If true, enable layerwise nvtx marker.&quot;</span><span class="p">)</span>
<span class="n">auto_deploy_config</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">object</span><span class="p">]</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span><span class="s2">&quot;Auto deploy config.&quot;</span><span class="p">,</span>
<span class="n">exclude_from_json</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span>
<span class="n">json_schema_extra</span><span class="o">=</span><span class="p">{</span><span class="s2">&quot;type&quot;</span><span class="p">:</span> <span class="sa">f</span><span class="s2">&quot;Optional[AutoDeployConfig]&quot;</span><span class="p">})</span>
<span class="n">load_format</span><span class="p">:</span> <span class="n">Union</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">LoadFormat</span><span class="p">]</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="n">LoadFormat</span><span class="o">.</span><span class="n">AUTO</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span>
@ -2172,6 +2297,16 @@
<span class="s2">&quot;If true, enable min-latency mode. Currently only used for Llama4.&quot;</span><span class="p">,</span>
<span class="p">)</span>
<span class="c1"># TODO: remove backend later</span>
<div class="viewcode-block" id="TorchLlmArgs.init_backend">
<a class="viewcode-back" href="../../../llm-api/reference.html#tensorrt_llm.llmapi.TorchLlmArgs.init_backend">[docs]</a>
<span class="nd">@field_validator</span><span class="p">(</span><span class="s1">&#39;backend&#39;</span><span class="p">,</span> <span class="n">mode</span><span class="o">=</span><span class="s1">&#39;before&#39;</span><span class="p">)</span>
<span class="k">def</span><span class="w"> </span><span class="nf">init_backend</span><span class="p">(</span><span class="bp">cls</span><span class="p">,</span> <span class="n">v</span><span class="p">):</span>
<span class="k">if</span> <span class="n">v</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
<span class="k">return</span> <span class="s1">&#39;pytorch&#39;</span>
<span class="k">return</span> <span class="n">v</span></div>
<div class="viewcode-block" id="TorchLlmArgs.convert_load_format">
<a class="viewcode-back" href="../../../llm-api/reference.html#tensorrt_llm.llmapi.TorchLlmArgs.convert_load_format">[docs]</a>
<span class="nd">@field_validator</span><span class="p">(</span><span class="s1">&#39;load_format&#39;</span><span class="p">,</span> <span class="n">mode</span><span class="o">=</span><span class="s1">&#39;before&#39;</span><span class="p">)</span>
@ -2201,15 +2336,11 @@
<span class="k">def</span><span class="w"> </span><span class="nf">extra_resource_managers</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">value</span><span class="p">:</span> <span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="nb">object</span><span class="p">])</span> <span class="o">-&gt;</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_extra_resource_managers</span> <span class="o">=</span> <span class="n">value</span>
<div class="viewcode-block" id="TorchLlmArgs.model_post_init">
<a class="viewcode-back" href="../../../llm-api/reference.html#tensorrt_llm.llmapi.TorchLlmArgs.model_post_init">[docs]</a>
<span class="nd">@print_traceback_on_error</span>
<span class="k">def</span><span class="w"> </span><span class="nf">model_post_init</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">__context</span><span class="p">):</span>
<div class="viewcode-block" id="TorchLlmArgs.validate_moe_load_balancer">
<a class="viewcode-back" href="../../../llm-api/reference.html#tensorrt_llm.llmapi.TorchLlmArgs.validate_moe_load_balancer">[docs]</a>
<span class="nd">@model_validator</span><span class="p">(</span><span class="n">mode</span><span class="o">=</span><span class="s2">&quot;after&quot;</span><span class="p">)</span>
<span class="k">def</span><span class="w"> </span><span class="nf">validate_moe_load_balancer</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">.._torch.model_config</span><span class="w"> </span><span class="kn">import</span> <span class="n">MoeLoadBalancerConfig</span>
<span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="n">model_post_init</span><span class="p">(</span><span class="n">__context</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">model_format</span> <span class="o">=</span> <span class="n">_ModelFormatKind</span><span class="o">.</span><span class="n">HF</span>
<span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">moe_load_balancer</span><span class="p">,</span> <span class="nb">str</span><span class="p">):</span>
<span class="k">if</span> <span class="ow">not</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">exists</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">moe_load_balancer</span><span class="p">):</span>
<span class="k">raise</span> <span class="ne">FileNotFoundError</span><span class="p">(</span>
@ -2223,7 +2354,8 @@
<span class="k">except</span> <span class="ne">Exception</span> <span class="k">as</span> <span class="n">e</span><span class="p">:</span>
<span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span>
<span class="sa">f</span><span class="s2">&quot;Failed to load MoE load balancer config file: </span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">moe_load_balancer</span><span class="si">}</span><span class="s2">&quot;</span>
<span class="p">)</span> <span class="kn">from</span><span class="w"> </span><span class="nn">e</span></div>
<span class="p">)</span> <span class="kn">from</span><span class="w"> </span><span class="nn">e</span>
<span class="k">return</span> <span class="bp">self</span></div>
<span class="c1"># TODO: Remove this after the PyTorch backend is fully migrated to TorchLlmArgs from ExecutorConfig</span>
@ -2246,17 +2378,22 @@
<span class="n">mixed_sampler</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">mixed_sampler</span><span class="p">,</span>
<span class="n">enable_trtllm_sampler</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">enable_trtllm_sampler</span><span class="p">,</span>
<span class="n">kv_cache_dtype</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">kv_cache_dtype</span><span class="p">,</span>
<span class="n">use_kv_cache</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">use_kv_cache</span><span class="p">,</span>
<span class="n">enable_iter_perf_stats</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">enable_iter_perf_stats</span><span class="p">,</span>
<span class="n">enable_iter_req_stats</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">enable_iter_req_stats</span><span class="p">,</span>
<span class="n">print_iter_log</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">print_iter_log</span><span class="p">,</span>
<span class="n">torch_compile_enabled</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">torch_compile_enabled</span><span class="p">,</span>
<span class="n">torch_compile_fullgraph</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">torch_compile_fullgraph</span><span class="p">,</span>
<span class="n">torch_compile_inductor_enabled</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">torch_compile_inductor_enabled</span><span class="p">,</span>
<span class="n">torch_compile_piecewise_cuda_graph</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span>
<span class="n">torch_compile_piecewise_cuda_graph</span><span class="p">,</span>
<span class="n">torch_compile_enable_userbuffers</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span>
<span class="n">torch_compile_enable_userbuffers</span><span class="p">,</span>
<span class="n">torch_compile_enabled</span><span class="o">=</span><span class="nb">bool</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">torch_compile_config</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">),</span>
<span class="n">torch_compile_fullgraph</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">torch_compile_config</span><span class="o">.</span>
<span class="n">torch_compile_fullgraph</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">torch_compile_config</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span> <span class="k">else</span> <span class="kc">True</span><span class="p">,</span>
<span class="n">torch_compile_inductor_enabled</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">torch_compile_config</span><span class="o">.</span>
<span class="n">torch_compile_inductor_enabled</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">torch_compile_config</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span> <span class="k">else</span> <span class="kc">False</span><span class="p">,</span>
<span class="n">torch_compile_piecewise_cuda_graph</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">torch_compile_config</span><span class="o">.</span>
<span class="n">torch_compile_piecewise_cuda_graph</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">torch_compile_config</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span> <span class="k">else</span> <span class="kc">False</span><span class="p">,</span>
<span class="n">torch_compile_enable_userbuffers</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">torch_compile_config</span><span class="o">.</span>
<span class="n">torch_compile_enable_userbuffers</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">torch_compile_config</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span> <span class="k">else</span> <span class="kc">True</span><span class="p">,</span>
<span class="n">autotuner_enabled</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">autotuner_enabled</span><span class="p">,</span>
<span class="n">enable_layerwise_nvtx_marker</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">enable_layerwise_nvtx_marker</span><span class="p">,</span>
<span class="n">load_format</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">load_format</span><span class="p">,</span>
@ -2274,38 +2411,6 @@
<span class="k">return</span> <span class="n">v</span></div>
<span class="nd">@staticmethod</span>
<span class="k">def</span><span class="w"> </span><span class="nf">_generate_cuda_graph_batch_sizes</span><span class="p">(</span><span class="n">max_batch_size</span><span class="p">:</span> <span class="nb">int</span><span class="p">,</span>
<span class="n">padding_enabled</span><span class="p">:</span> <span class="nb">bool</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">List</span><span class="p">[</span><span class="nb">int</span><span class="p">]:</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;Generate a list of batch sizes for CUDA graphs.</span>
<span class="sd"> Args:</span>
<span class="sd"> max_batch_size: Maximum batch size to generate up to</span>
<span class="sd"> padding_enabled: Whether padding is enabled, which affects the batch size distribution</span>
<span class="sd"> Returns:</span>
<span class="sd"> List of batch sizes to create CUDA graphs for</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">if</span> <span class="n">padding_enabled</span><span class="p">:</span>
<span class="n">batch_sizes</span> <span class="o">=</span> <span class="p">[</span><span class="mi">1</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">4</span><span class="p">]</span> <span class="o">+</span> <span class="p">[</span><span class="n">i</span> <span class="o">*</span> <span class="mi">8</span> <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">17</span><span class="p">)]</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">batch_sizes</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="nb">range</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">32</span><span class="p">))</span> <span class="o">+</span> <span class="p">[</span><span class="mi">32</span><span class="p">,</span> <span class="mi">64</span><span class="p">,</span> <span class="mi">128</span><span class="p">]</span>
<span class="c1"># Add powers of 2 up to max_batch_size</span>
<span class="n">batch_sizes</span> <span class="o">+=</span> <span class="p">[</span>
<span class="mi">2</span><span class="o">**</span><span class="n">i</span> <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="mi">8</span><span class="p">,</span> <span class="n">math</span><span class="o">.</span><span class="n">floor</span><span class="p">(</span><span class="n">math</span><span class="o">.</span><span class="n">log</span><span class="p">(</span><span class="n">max_batch_size</span><span class="p">,</span> <span class="mi">2</span><span class="p">)))</span>
<span class="p">]</span>
<span class="c1"># Filter and sort batch sizes</span>
<span class="n">batch_sizes</span> <span class="o">=</span> <span class="nb">sorted</span><span class="p">(</span>
<span class="p">[</span><span class="n">size</span> <span class="k">for</span> <span class="n">size</span> <span class="ow">in</span> <span class="n">batch_sizes</span> <span class="k">if</span> <span class="n">size</span> <span class="o">&lt;=</span> <span class="n">max_batch_size</span><span class="p">])</span>
<span class="c1"># Add max_batch_size if not already included</span>
<span class="k">if</span> <span class="n">max_batch_size</span> <span class="o">!=</span> <span class="n">batch_sizes</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]:</span>
<span class="n">batch_sizes</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">max_batch_size</span><span class="p">)</span>
<span class="k">return</span> <span class="n">batch_sizes</span>
<div class="viewcode-block" id="TorchLlmArgs.validate_cuda_graph_config">
<a class="viewcode-back" href="../../../llm-api/reference.html#tensorrt_llm.llmapi.TorchLlmArgs.validate_cuda_graph_config">[docs]</a>
<span class="nd">@model_validator</span><span class="p">(</span><span class="n">mode</span><span class="o">=</span><span class="s1">&#39;after&#39;</span><span class="p">)</span>
@ -2340,7 +2445,39 @@
<span class="bp">self</span><span class="o">.</span><span class="n">cuda_graph_max_batch_size</span> <span class="o">=</span> <span class="n">max_batch_size</span>
<span class="k">return</span> <span class="bp">self</span></div>
</div>
<span class="nd">@staticmethod</span>
<span class="k">def</span><span class="w"> </span><span class="nf">_generate_cuda_graph_batch_sizes</span><span class="p">(</span><span class="n">max_batch_size</span><span class="p">:</span> <span class="nb">int</span><span class="p">,</span>
<span class="n">padding_enabled</span><span class="p">:</span> <span class="nb">bool</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">List</span><span class="p">[</span><span class="nb">int</span><span class="p">]:</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;Generate a list of batch sizes for CUDA graphs.</span>
<span class="sd"> Args:</span>
<span class="sd"> max_batch_size: Maximum batch size to generate up to</span>
<span class="sd"> padding_enabled: Whether padding is enabled, which affects the batch size distribution</span>
<span class="sd"> Returns:</span>
<span class="sd"> List of batch sizes to create CUDA graphs for</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">if</span> <span class="n">padding_enabled</span><span class="p">:</span>
<span class="n">batch_sizes</span> <span class="o">=</span> <span class="p">[</span><span class="mi">1</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">4</span><span class="p">]</span> <span class="o">+</span> <span class="p">[</span><span class="n">i</span> <span class="o">*</span> <span class="mi">8</span> <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">17</span><span class="p">)]</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">batch_sizes</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="nb">range</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">32</span><span class="p">))</span> <span class="o">+</span> <span class="p">[</span><span class="mi">32</span><span class="p">,</span> <span class="mi">64</span><span class="p">,</span> <span class="mi">128</span><span class="p">]</span>
<span class="c1"># Add powers of 2 up to max_batch_size</span>
<span class="n">batch_sizes</span> <span class="o">+=</span> <span class="p">[</span>
<span class="mi">2</span><span class="o">**</span><span class="n">i</span> <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="mi">8</span><span class="p">,</span> <span class="n">math</span><span class="o">.</span><span class="n">floor</span><span class="p">(</span><span class="n">math</span><span class="o">.</span><span class="n">log</span><span class="p">(</span><span class="n">max_batch_size</span><span class="p">,</span> <span class="mi">2</span><span class="p">)))</span>
<span class="p">]</span>
<span class="c1"># Filter and sort batch sizes</span>
<span class="n">batch_sizes</span> <span class="o">=</span> <span class="nb">sorted</span><span class="p">(</span>
<span class="p">[</span><span class="n">size</span> <span class="k">for</span> <span class="n">size</span> <span class="ow">in</span> <span class="n">batch_sizes</span> <span class="k">if</span> <span class="n">size</span> <span class="o">&lt;=</span> <span class="n">max_batch_size</span><span class="p">])</span>
<span class="c1"># Add max_batch_size if not already included</span>
<span class="k">if</span> <span class="n">max_batch_size</span> <span class="o">!=</span> <span class="n">batch_sizes</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]:</span>
<span class="n">batch_sizes</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">max_batch_size</span><span class="p">)</span>
<span class="k">return</span> <span class="n">batch_sizes</span></div>
@ -2402,6 +2539,12 @@
<span class="s2">&quot;properly passed through.&quot;</span><span class="p">,</span>
<span class="p">)</span>
<span class="n">checkpoint_device</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span><span class="s2">&quot;Device on which to load the model checkpoint. &quot;</span>
<span class="s2">&quot;Defaults to the same device as the rest of the pipeline.&quot;</span><span class="p">,</span>
<span class="p">)</span>
<span class="nd">@field_validator</span><span class="p">(</span><span class="s2">&quot;free_mem_ratio&quot;</span><span class="p">)</span>
<span class="nd">@classmethod</span>
<span class="k">def</span><span class="w"> </span><span class="nf">validate_free_mem_ratio</span><span class="p">(</span><span class="bp">cls</span><span class="p">,</span> <span class="n">v</span><span class="p">):</span>
@ -2452,7 +2595,6 @@
<span class="n">llm_args_dict</span><span class="p">:</span> <span class="n">Dict</span><span class="p">,</span>
<span class="n">extra_llm_api_options</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">Dict</span><span class="p">:</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">.._torch.pyexecutor.config</span><span class="w"> </span><span class="kn">import</span> <span class="n">PyTorchConfig</span>
<span class="n">field_mapping</span> <span class="o">=</span> <span class="p">{</span>
<span class="s2">&quot;quant_config&quot;</span><span class="p">:</span> <span class="n">QuantConfig</span><span class="p">,</span>
<span class="s2">&quot;calib_config&quot;</span><span class="p">:</span> <span class="n">CalibConfig</span><span class="p">,</span>
@ -2465,18 +2607,19 @@
<span class="s2">&quot;speculative_config&quot;</span><span class="p">:</span> <span class="n">DecodingBaseConfig</span><span class="p">,</span>
<span class="s2">&quot;batching_type&quot;</span><span class="p">:</span> <span class="n">BatchingType</span><span class="p">,</span>
<span class="s2">&quot;extended_runtime_perf_knob_config&quot;</span><span class="p">:</span> <span class="n">ExtendedRuntimePerfKnobConfig</span><span class="p">,</span>
<span class="s2">&quot;pytorch_backend_config&quot;</span><span class="p">:</span> <span class="n">PyTorchConfig</span><span class="p">,</span>
<span class="s2">&quot;cache_transceiver_config&quot;</span><span class="p">:</span> <span class="n">CacheTransceiverConfig</span><span class="p">,</span>
<span class="s2">&quot;lora_config&quot;</span><span class="p">:</span> <span class="n">LoraConfig</span><span class="p">,</span>
<span class="p">}</span>
<span class="k">for</span> <span class="n">field</span><span class="p">,</span> <span class="n">field_type</span> <span class="ow">in</span> <span class="n">field_mapping</span><span class="o">.</span><span class="n">items</span><span class="p">():</span>
<span class="k">if</span> <span class="n">field</span> <span class="ow">in</span> <span class="n">llm_args_dict</span><span class="p">:</span>
<span class="k">if</span> <span class="n">field</span> <span class="o">==</span> <span class="s2">&quot;speculative_config&quot;</span><span class="p">:</span>
<span class="n">llm_args_dict</span><span class="p">[</span><span class="n">field</span><span class="p">]</span> <span class="o">=</span> <span class="n">field_type</span><span class="o">.</span><span class="n">from_dict</span><span class="p">(</span>
<span class="n">llm_args_dict</span><span class="p">[</span><span class="n">field</span><span class="p">])</span>
<span class="k">for</span> <span class="n">field_name</span><span class="p">,</span> <span class="n">field_type</span> <span class="ow">in</span> <span class="n">field_mapping</span><span class="o">.</span><span class="n">items</span><span class="p">():</span>
<span class="k">if</span> <span class="n">field_name</span> <span class="ow">in</span> <span class="n">llm_args_dict</span><span class="p">:</span>
<span class="k">if</span> <span class="n">field_name</span> <span class="o">==</span> <span class="s2">&quot;speculative_config&quot;</span><span class="p">:</span>
<span class="n">llm_args_dict</span><span class="p">[</span><span class="n">field_name</span><span class="p">]</span> <span class="o">=</span> <span class="n">field_type</span><span class="o">.</span><span class="n">from_dict</span><span class="p">(</span>
<span class="n">llm_args_dict</span><span class="p">[</span><span class="n">field_name</span><span class="p">])</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">llm_args_dict</span><span class="p">[</span><span class="n">field</span><span class="p">]</span> <span class="o">=</span> <span class="n">field_type</span><span class="p">(</span><span class="o">**</span><span class="n">llm_args_dict</span><span class="p">[</span><span class="n">field</span><span class="p">])</span>
<span class="n">llm_args_dict</span><span class="p">[</span><span class="n">field_name</span><span class="p">]</span> <span class="o">=</span> <span class="n">field_type</span><span class="p">(</span>
<span class="o">**</span><span class="n">llm_args_dict</span><span class="p">[</span><span class="n">field_name</span><span class="p">])</span>
<span class="n">extra_llm_str</span> <span class="o">=</span> <span class="sa">f</span><span class="s2">&quot;because it&#39;s specified in </span><span class="si">{</span><span class="n">extra_llm_api_options</span><span class="si">}</span><span class="s2">&quot;</span> <span class="k">if</span> <span class="n">extra_llm_api_options</span> <span class="k">else</span> <span class="s2">&quot;&quot;</span>
<span class="n">logger</span><span class="o">.</span><span class="n">warning</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;Overriding </span><span class="si">{</span><span class="n">field</span><span class="si">}</span><span class="s2"> </span><span class="si">{</span><span class="n">extra_llm_str</span><span class="si">}</span><span class="s2">&quot;</span><span class="p">)</span>
<span class="n">logger</span><span class="o">.</span><span class="n">warning</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;Overriding </span><span class="si">{</span><span class="n">field_name</span><span class="si">}</span><span class="s2"> </span><span class="si">{</span><span class="n">extra_llm_str</span><span class="si">}</span><span class="s2">&quot;</span><span class="p">)</span>
<span class="n">llm_args</span> <span class="o">=</span> <span class="n">llm_args</span> <span class="o">|</span> <span class="n">llm_args_dict</span>
<span class="k">return</span> <span class="n">llm_args</span>
@ -2629,9 +2772,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>
View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -1151,9 +1152,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -880,9 +881,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -1184,9 +1185,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -792,9 +793,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -809,9 +810,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -1008,9 +1009,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -837,9 +838,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -668,9 +669,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -921,9 +922,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -819,9 +820,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -683,9 +684,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -809,9 +810,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -903,9 +904,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -985,9 +986,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -1021,9 +1022,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -1957,9 +1958,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -2862,9 +2863,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -744,9 +745,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -906,9 +907,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -675,8 +676,8 @@
<span class="n">model_type</span> <span class="o">=</span> <span class="n">loads</span><span class="p">(</span>
<span class="p">(</span><span class="n">Path</span><span class="p">(</span><span class="n">config_dir</span><span class="p">)</span> <span class="o">/</span> <span class="s2">&quot;config.json&quot;</span><span class="p">)</span><span class="o">.</span><span class="n">read_text</span><span class="p">())[</span><span class="s2">&quot;model_type&quot;</span><span class="p">]</span>
<span class="n">HFConfigClass</span> <span class="o">=</span> <span class="p">{</span>
<span class="s2">&quot;gemma2&quot;</span><span class="p">:</span> <span class="n">transformers</span><span class="o">.</span><span class="n">GemmaConfig</span><span class="p">,</span>
<span class="s2">&quot;gemma&quot;</span><span class="p">:</span> <span class="n">transformers</span><span class="o">.</span><span class="n">Gemma2Config</span><span class="p">,</span>
<span class="s2">&quot;gemma&quot;</span><span class="p">:</span> <span class="n">transformers</span><span class="o">.</span><span class="n">GemmaConfig</span><span class="p">,</span>
<span class="s2">&quot;gemma2&quot;</span><span class="p">:</span> <span class="n">transformers</span><span class="o">.</span><span class="n">Gemma2Config</span><span class="p">,</span>
<span class="s2">&quot;gemma3_text&quot;</span><span class="p">:</span> <span class="n">transformers</span><span class="o">.</span><span class="n">Gemma3TextConfig</span><span class="p">,</span>
<span class="p">}[</span><span class="n">model_type</span><span class="p">]</span>
<span class="n">hf_config</span> <span class="o">=</span> <span class="n">HFConfigClass</span><span class="o">.</span><span class="n">from_pretrained</span><span class="p">(</span><span class="n">config_dir</span><span class="p">)</span>
@ -834,9 +835,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -1026,9 +1027,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -953,9 +954,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -534,8 +535,6 @@
<span class="kn">from</span><span class="w"> </span><span class="nn">...mapping</span><span class="w"> </span><span class="kn">import</span> <span class="n">Mapping</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">...module</span><span class="w"> </span><span class="kn">import</span> <span class="n">Module</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">...quantization</span><span class="w"> </span><span class="kn">import</span> <span class="n">QuantMode</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">...quantization.functional</span><span class="w"> </span><span class="kn">import</span> <span class="n">quantize_fp8_per_token</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">...quantization.layers</span><span class="w"> </span><span class="kn">import</span> <span class="n">Fp8RowwiseMLP</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">..model_weights_loader</span><span class="w"> </span><span class="kn">import</span> <span class="n">ModelWeightsLoader</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">..modeling_utils</span><span class="w"> </span><span class="kn">import</span> <span class="p">(</span><span class="n">DecoderLayerList</span><span class="p">,</span> <span class="n">DecoderModelForCausalLM</span><span class="p">,</span>
<span class="n">QuantConfig</span><span class="p">)</span>
@ -683,10 +682,6 @@
<span class="n">residual</span> <span class="o">=</span> <span class="n">hidden_states</span>
<span class="n">hidden_states</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">post_layernorm</span><span class="p">(</span><span class="n">hidden_states</span><span class="p">)</span>
<span class="c1"># Quantize per-token for fp8</span>
<span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">mlp</span><span class="p">,</span> <span class="n">Fp8RowwiseMLP</span><span class="p">):</span>
<span class="n">hidden_states</span> <span class="o">=</span> <span class="n">quantize_fp8_per_token</span><span class="p">(</span><span class="n">hidden_states</span><span class="p">)</span>
<span class="n">hidden_states</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">mlp</span><span class="p">(</span><span class="n">hidden_states</span><span class="p">,</span>
<span class="n">lora_layer_params</span><span class="o">=</span><span class="n">lora_layer_params</span><span class="p">)</span>
@ -1062,9 +1057,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -682,9 +683,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -834,9 +835,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -774,9 +775,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -908,9 +909,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -1256,9 +1257,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -1101,9 +1102,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -741,9 +742,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -891,9 +892,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -2202,9 +2203,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -1268,9 +1269,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -2663,9 +2664,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -806,9 +807,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -740,9 +741,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -808,9 +809,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -811,9 +812,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -855,9 +856,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -951,9 +952,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -1254,9 +1255,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -926,9 +927,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -1068,7 +1069,7 @@
<span class="k">def</span><span class="w"> </span><span class="nf">set_fp8_rowwise_quant_plugins</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">dtype</span><span class="p">:</span> <span class="nb">str</span> <span class="o">=</span> <span class="s2">&quot;auto&quot;</span><span class="p">):</span>
<span class="bp">self</span><span class="o">.</span><span class="n">fp8_rowwise_gemm_plugin</span> <span class="o">=</span> <span class="n">dtype</span>
<span class="bp">self</span><span class="o">.</span><span class="n">rmsnorm_quantization_plugin</span> <span class="o">=</span> <span class="n">dtype</span>
<span class="c1"># self.layernorm_quantization_plugin = dtype</span>
<span class="bp">self</span><span class="o">.</span><span class="n">layernorm_quantization_plugin</span> <span class="o">=</span> <span class="n">dtype</span>
<span class="bp">self</span><span class="o">.</span><span class="n">quantize_per_token_plugin</span> <span class="o">=</span> <span class="kc">True</span>
<span class="bp">self</span><span class="o">.</span><span class="n">quantize_tensor_plugin</span> <span class="o">=</span> <span class="kc">True</span>
<span class="k">return</span> <span class="bp">self</span>
@ -1482,9 +1483,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -551,6 +552,7 @@
<span class="n">INT8</span> <span class="o">=</span> <span class="n">auto</span><span class="p">()</span>
<span class="n">MIXED_PRECISION</span> <span class="o">=</span> <span class="n">auto</span><span class="p">()</span>
<span class="n">NVFP4</span> <span class="o">=</span> <span class="n">auto</span><span class="p">()</span>
<span class="n">W4A8_MXFP4_FP8</span> <span class="o">=</span> <span class="n">auto</span><span class="p">()</span>
<span class="n">NO_QUANT</span> <span class="o">=</span> <span class="n">auto</span><span class="p">()</span></div>
@ -601,6 +603,8 @@
<span class="c1"># FP4</span>
<span class="n">NVFP4</span> <span class="o">=</span> <span class="n">auto</span><span class="p">()</span>
<span class="n">NVFP4_KV_CACHE</span> <span class="o">=</span> <span class="n">auto</span><span class="p">()</span>
<span class="c1"># W4A8 MXFP4</span>
<span class="n">W4A8_MXFP4_FP8</span> <span class="o">=</span> <span class="n">auto</span><span class="p">()</span>
<span class="c1"># The smallest power-of-two that is not used by a flag. Do not call auto() after that line.</span>
<span class="n">COUNT</span> <span class="o">=</span> <span class="n">auto</span><span class="p">()</span>
@ -686,6 +690,9 @@
<span class="k">def</span><span class="w"> </span><span class="nf">has_nvfp4</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_any</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">NVFP4</span><span class="p">)</span>
<span class="k">def</span><span class="w"> </span><span class="nf">has_w4a8_mxfp4_fp8</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_any</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">W4A8_MXFP4_FP8</span><span class="p">)</span>
<span class="k">def</span><span class="w"> </span><span class="nf">has_weight_quant</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_any</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">INT4_WEIGHTS</span> <span class="o">|</span> <span class="bp">self</span><span class="o">.</span><span class="n">INT8_WEIGHTS</span><span class="p">)</span>
@ -696,7 +703,8 @@
<span class="o">|</span> <span class="bp">self</span><span class="o">.</span><span class="n">FP8_QDQ</span> <span class="o">|</span> <span class="bp">self</span><span class="o">.</span><span class="n">FP8_ROWWISE</span>
<span class="o">|</span> <span class="bp">self</span><span class="o">.</span><span class="n">W4A8_QSERVE</span>
<span class="o">|</span> <span class="bp">self</span><span class="o">.</span><span class="n">FP8_1x128_128x128</span>
<span class="o">|</span> <span class="bp">self</span><span class="o">.</span><span class="n">NVFP4</span><span class="p">)</span>
<span class="o">|</span> <span class="bp">self</span><span class="o">.</span><span class="n">NVFP4</span>
<span class="o">|</span> <span class="bp">self</span><span class="o">.</span><span class="n">W4A8_MXFP4_FP8</span><span class="p">)</span>
<span class="k">if</span> <span class="n">exclude_kv_cache</span><span class="p">:</span>
<span class="k">return</span> <span class="n">has_quant</span>
@ -731,7 +739,8 @@
<span class="n">use_fp8_block_scales</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
<span class="n">use_fp8_rowwise</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
<span class="n">use_nvfp4</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
<span class="n">use_w4a8_qserve</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
<span class="n">use_w4a8_qserve</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
<span class="n">use_w4a8_mxfp4_fp8</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
<span class="k">def</span><span class="w"> </span><span class="nf">raise_error</span><span class="p">():</span>
<span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;Unsupported combination of QuantMode args: &quot;</span>
@ -747,7 +756,8 @@
<span class="sa">f</span><span class="s2">&quot;</span><span class="si">{</span><span class="n">use_fp8_block_scales</span><span class="si">=}</span><span class="s2">, &quot;</span>
<span class="sa">f</span><span class="s2">&quot;</span><span class="si">{</span><span class="n">use_fp8_rowwise</span><span class="si">=}</span><span class="s2">, &quot;</span>
<span class="sa">f</span><span class="s2">&quot;</span><span class="si">{</span><span class="n">use_nvfp4</span><span class="si">=}</span><span class="s2">, &quot;</span>
<span class="sa">f</span><span class="s2">&quot;</span><span class="si">{</span><span class="n">use_w4a8_qserve</span><span class="si">=}</span><span class="s2">&quot;</span><span class="p">)</span>
<span class="sa">f</span><span class="s2">&quot;</span><span class="si">{</span><span class="n">use_w4a8_qserve</span><span class="si">=}</span><span class="s2">, &quot;</span>
<span class="sa">f</span><span class="s2">&quot;</span><span class="si">{</span><span class="n">use_w4a8_mxfp4_fp8</span><span class="si">=}</span><span class="s2">&quot;</span><span class="p">)</span>
<span class="c1"># We must quantize weights when we quantize activations.</span>
<span class="k">if</span> <span class="n">quantize_activations</span> <span class="ow">and</span> <span class="ow">not</span> <span class="n">quantize_weights</span><span class="p">:</span>
@ -802,6 +812,9 @@
<span class="k">if</span> <span class="n">use_w4a8_qserve</span><span class="p">:</span>
<span class="n">mode</span> <span class="o">=</span> <span class="n">mode</span> <span class="o">|</span> <span class="n">QuantMode</span><span class="o">.</span><span class="n">W4A8_QSERVE</span>
<span class="k">if</span> <span class="n">use_w4a8_mxfp4_fp8</span><span class="p">:</span>
<span class="n">mode</span> <span class="o">=</span> <span class="n">mode</span> <span class="o">|</span> <span class="n">QuantMode</span><span class="o">.</span><span class="n">W4A8_MXFP4_FP8</span>
<span class="k">return</span> <span class="n">mode</span>
<span class="nd">@staticmethod</span>
@ -875,6 +888,8 @@
<span class="n">quant_mode</span> <span class="o">=</span> <span class="n">QuantMode</span><span class="o">.</span><span class="n">from_description</span><span class="p">(</span><span class="n">use_fp8_block_scales</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="k">elif</span> <span class="n">quant_algo</span> <span class="o">==</span> <span class="n">QuantAlgo</span><span class="o">.</span><span class="n">NVFP4</span><span class="p">:</span>
<span class="n">quant_mode</span> <span class="o">=</span> <span class="n">QuantMode</span><span class="o">.</span><span class="n">from_description</span><span class="p">(</span><span class="n">use_nvfp4</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="k">elif</span> <span class="n">quant_algo</span> <span class="o">==</span> <span class="n">QuantAlgo</span><span class="o">.</span><span class="n">W4A8_MXFP4_FP8</span><span class="p">:</span>
<span class="n">quant_mode</span> <span class="o">=</span> <span class="n">QuantMode</span><span class="o">.</span><span class="n">from_description</span><span class="p">(</span><span class="n">use_w4a8_mxfp4_fp8</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">quant_mode</span> <span class="o">=</span> <span class="n">QuantMode</span><span class="p">(</span><span class="mi">0</span><span class="p">)</span>
@ -907,6 +922,8 @@
<span class="bp">self</span><span class="o">.</span><span class="n">has_fp8_block_scales</span><span class="p">(),</span>
<span class="s1">&#39;enable_nvfp4&#39;</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">has_nvfp4</span><span class="p">(),</span>
<span class="s1">&#39;enable_w4a8_mxfp4_fp8&#39;</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">has_w4a8_mxfp4_fp8</span><span class="p">(),</span>
<span class="s1">&#39;fp8_kv_cache&#39;</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">has_fp8_kv_cache</span><span class="p">(),</span>
<span class="s1">&#39;use_weight_only&#39;</span><span class="p">:</span>
@ -1034,9 +1051,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -624,6 +625,7 @@
<span class="n">QUANT_CFG_CHOICES</span> <span class="o">=</span> <span class="p">{</span>
<span class="s2">&quot;int8_sq&quot;</span><span class="p">:</span> <span class="n">mtq</span><span class="o">.</span><span class="n">INT8_SMOOTHQUANT_CFG</span><span class="p">,</span>
<span class="s2">&quot;fp8&quot;</span><span class="p">:</span> <span class="n">mtq</span><span class="o">.</span><span class="n">FP8_DEFAULT_CFG</span><span class="p">,</span>
<span class="s2">&quot;fp8_pc_pt&quot;</span><span class="p">:</span> <span class="n">mtq</span><span class="o">.</span><span class="n">FP8_PER_CHANNEL_PER_TOKEN_CFG</span><span class="p">,</span>
<span class="s2">&quot;int4_awq&quot;</span><span class="p">:</span> <span class="n">mtq</span><span class="o">.</span><span class="n">INT4_AWQ_CFG</span><span class="p">,</span>
<span class="s2">&quot;w4a8_awq&quot;</span><span class="p">:</span> <span class="n">mtq</span><span class="o">.</span><span class="n">W4A8_AWQ_BETA_CFG</span><span class="p">,</span>
<span class="s2">&quot;int8_wo&quot;</span><span class="p">:</span> <span class="n">EMPTY_CFG</span><span class="p">,</span>
@ -1021,6 +1023,7 @@
<span class="s2">&quot;int8&quot;</span><span class="p">:</span> <span class="s2">&quot;INT8_DEFAULT_CFG&quot;</span><span class="p">,</span>
<span class="s2">&quot;int8_sq&quot;</span><span class="p">:</span> <span class="s2">&quot;INT8_SMOOTHQUANT_CFG&quot;</span><span class="p">,</span>
<span class="s2">&quot;fp8&quot;</span><span class="p">:</span> <span class="s2">&quot;FP8_DEFAULT_CFG&quot;</span><span class="p">,</span>
<span class="s2">&quot;fp8_pc_pt&quot;</span><span class="p">:</span> <span class="s2">&quot;FP8_PER_CHANNEL_PER_TOKEN_CFG&quot;</span><span class="p">,</span>
<span class="s2">&quot;int4_awq&quot;</span><span class="p">:</span> <span class="s2">&quot;INT4_AWQ_CFG&quot;</span><span class="p">,</span>
<span class="s2">&quot;w4a8_awq&quot;</span><span class="p">:</span> <span class="s2">&quot;W4A8_AWQ_BETA_CFG&quot;</span><span class="p">,</span>
<span class="p">}</span>
@ -1895,9 +1898,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -1165,9 +1166,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -5453,9 +5454,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -1112,9 +1113,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -1624,9 +1625,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -1387,6 +1388,8 @@
<span class="p">]</span>
<span class="k">return</span> <span class="n">prompt_tuning_configs</span>
<span class="c1"># TODO: add multimodal input for TRT engine backend</span>
<span class="k">def</span><span class="w"> </span><span class="nf">_prepare_mrope_executor</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">batch_input_ids_list</span><span class="p">,</span> <span class="n">mrope</span><span class="p">:</span> <span class="n">MropeParams</span><span class="p">):</span>
<span class="n">mrope_configs</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">batch_input_ids_list</span><span class="p">)</span> <span class="o">*</span> <span class="p">[</span><span class="kc">None</span><span class="p">]</span>
<span class="k">if</span> <span class="n">mrope</span> <span class="o">!=</span> <span class="kc">None</span><span class="p">:</span>
@ -1828,9 +1831,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -3407,9 +3408,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -972,9 +973,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -442,6 +442,7 @@
<li class="toctree-l1"><a class="reference internal" href="../../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -517,15 +518,13 @@
<span class="kn">from</span><span class="w"> </span><span class="nn">pydantic</span><span class="w"> </span><span class="kn">import</span> <span class="n">BaseModel</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">tensorrt_llm.bindings</span><span class="w"> </span><span class="kn">import</span> <span class="n">executor</span> <span class="k">as</span> <span class="n">tllme</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">tensorrt_llm.executor.serialization</span><span class="w"> </span><span class="kn">import</span> <span class="n">register_approved_ipc_class</span>
<div class="viewcode-block" id="GuidedDecodingParams">
<a class="viewcode-back" href="../../llm-api/reference.html#tensorrt_llm.llmapi.GuidedDecodingParams">[docs]</a>
<span class="nd">@dataclass</span><span class="p">(</span><span class="n">slots</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">kw_only</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="k">class</span><span class="w"> </span><span class="nc">GuidedDecodingParams</span><span class="p">:</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Guided decoding parameters for text generation. Only one of the fields could be effective.</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;Guided decoding parameters for text generation. Only one of the fields could be effective.</span>
<span class="sd"> Args:</span>
<span class="sd"> json (str, pydantic.main.BaseModel, dict, optional): The generated text is amenable to json format with additional user-specified restrictions, namely schema. Defaults to None.</span>
@ -533,7 +532,8 @@
<span class="sd"> grammar (str, optional): The generated text is amenable to the user-specified extended Backus-Naur form (EBNF) grammar. Defaults to None.</span>
<span class="sd"> json_object (bool): If True, the generated text is amenable to json format. Defaults to False.</span>
<span class="sd"> structural_tag (str, optional): The generated text is amenable to the user-specified structural tag. Defaults to None.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="sd"> &quot;&quot;&quot;</span> <span class="c1"># noqa: E501</span>
<span class="n">json</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Union</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">BaseModel</span><span class="p">,</span> <span class="nb">dict</span><span class="p">]]</span> <span class="o">=</span> <span class="kc">None</span>
<span class="n">regex</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span>
<span class="n">grammar</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span>
@ -542,12 +542,10 @@
<span class="k">def</span><span class="w"> </span><span class="nf">_validate</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="n">num_guides</span> <span class="o">=</span> <span class="mi">0</span>
<span class="k">for</span> <span class="n">field</span> <span class="ow">in</span> <span class="n">fields</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="n">num_guides</span> <span class="o">+=</span> <span class="nb">bool</span><span class="p">(</span><span class="nb">getattr</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">field</span><span class="o">.</span><span class="n">name</span><span class="p">))</span>
<span class="k">for</span> <span class="n">_field</span> <span class="ow">in</span> <span class="n">fields</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="n">num_guides</span> <span class="o">+=</span> <span class="nb">bool</span><span class="p">(</span><span class="nb">getattr</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">_field</span><span class="o">.</span><span class="n">name</span><span class="p">))</span>
<span class="k">if</span> <span class="n">num_guides</span> <span class="o">&gt;</span> <span class="mi">1</span><span class="p">:</span>
<span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span>
<span class="sa">f</span><span class="s2">&quot;Only one guide can be used for a request, but got </span><span class="si">{</span><span class="n">num_guides</span><span class="si">}</span><span class="s2">.&quot;</span>
<span class="p">)</span></div>
<span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;Only one guide can be used for a request, but got </span><span class="si">{</span><span class="n">num_guides</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span></div>
@ -570,28 +568,27 @@
<span class="sd"> &quot;&quot;&quot;</span>
<span class="nd">@abstractmethod</span>
<span class="k">def</span><span class="w"> </span><span class="fm">__call__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">req_id</span><span class="p">:</span> <span class="nb">int</span><span class="p">,</span> <span class="n">logits</span><span class="p">:</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">,</span>
<span class="n">token_ids</span><span class="p">:</span> <span class="n">List</span><span class="p">[</span><span class="n">List</span><span class="p">[</span><span class="nb">int</span><span class="p">]],</span> <span class="n">stream_ptr</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">int</span><span class="p">],</span>
<span class="n">client_id</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">int</span><span class="p">])</span> <span class="o">-&gt;</span> <span class="kc">None</span><span class="p">:</span>
<span class="k">def</span><span class="w"> </span><span class="fm">__call__</span><span class="p">(</span>
<span class="bp">self</span><span class="p">,</span>
<span class="n">req_id</span><span class="p">:</span> <span class="nb">int</span><span class="p">,</span>
<span class="n">logits</span><span class="p">:</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">,</span>
<span class="n">token_ids</span><span class="p">:</span> <span class="n">List</span><span class="p">[</span><span class="n">List</span><span class="p">[</span><span class="nb">int</span><span class="p">]],</span>
<span class="n">stream_ptr</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">int</span><span class="p">],</span>
<span class="n">client_id</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">int</span><span class="p">],</span>
<span class="p">)</span> <span class="o">-&gt;</span> <span class="kc">None</span><span class="p">:</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;Logits processing callback. The callback is expected to inplace modify the logits.</span>
<span class="sd"> Args:</span>
<span class="sd"> req_id (int): Request id.</span>
<span class="sd"> logits (torch.Tensor): Logits tensor to be modified.</span>
<span class="sd"> token_ids (List[List[int]]): Token ids produced by the request so far. The shape is beam_width * sequence_length.</span>
<span class="sd"> stream_ptr (int, optional): The operation stream used by the logits tensor. Not required for PyTorch backend.</span>
<span class="sd"> token_ids (List[List[int]]): Token ids produced by the request so far.</span>
<span class="sd"> The shape is beam_width * sequence_length.</span>
<span class="sd"> stream_ptr (int, optional): The operation stream used by the logits tensor.</span>
<span class="sd"> Not required for PyTorch backend.</span>
<span class="sd"> client_id (int, optional): An optional client id.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">pass</span> <span class="c1"># noqa</span>
<span class="k">def</span><span class="w"> </span><span class="nf">__init_subclass__</span><span class="p">(</span><span class="bp">cls</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> This method is called when a class inherits from LogitsProcessor.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="c1"># Register subclass as an approved class for deserialization across IPC boundaries.</span>
<span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="n">__init_subclass__</span><span class="p">(</span><span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="n">register_approved_ipc_class</span><span class="p">(</span><span class="bp">cls</span><span class="p">)</span>
<span class="k">class</span><span class="w"> </span><span class="nc">BatchedLogitsProcessor</span><span class="p">(</span><span class="n">ABC</span><span class="p">):</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;Base class for batched logits processor.</span>
@ -603,15 +600,21 @@
<span class="sd"> &quot;&quot;&quot;</span>
<span class="nd">@abstractmethod</span>
<span class="k">def</span><span class="w"> </span><span class="fm">__call__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">req_ids</span><span class="p">:</span> <span class="n">List</span><span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="n">logits</span><span class="p">:</span> <span class="n">List</span><span class="p">[</span><span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">],</span>
<span class="n">token_ids</span><span class="p">:</span> <span class="n">List</span><span class="p">[</span><span class="n">List</span><span class="p">[</span><span class="n">List</span><span class="p">[</span><span class="nb">int</span><span class="p">]]],</span> <span class="n">stream_ptr</span><span class="p">:</span> <span class="nb">int</span><span class="p">,</span>
<span class="n">client_ids</span><span class="p">:</span> <span class="n">List</span><span class="p">[</span><span class="n">Optional</span><span class="p">[</span><span class="nb">int</span><span class="p">]])</span> <span class="o">-&gt;</span> <span class="kc">None</span><span class="p">:</span>
<span class="k">def</span><span class="w"> </span><span class="fm">__call__</span><span class="p">(</span>
<span class="bp">self</span><span class="p">,</span>
<span class="n">req_ids</span><span class="p">:</span> <span class="n">List</span><span class="p">[</span><span class="nb">int</span><span class="p">],</span>
<span class="n">logits</span><span class="p">:</span> <span class="n">List</span><span class="p">[</span><span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">],</span>
<span class="n">token_ids</span><span class="p">:</span> <span class="n">List</span><span class="p">[</span><span class="n">List</span><span class="p">[</span><span class="n">List</span><span class="p">[</span><span class="nb">int</span><span class="p">]]],</span>
<span class="n">stream_ptr</span><span class="p">:</span> <span class="nb">int</span><span class="p">,</span>
<span class="n">client_ids</span><span class="p">:</span> <span class="n">List</span><span class="p">[</span><span class="n">Optional</span><span class="p">[</span><span class="nb">int</span><span class="p">]],</span>
<span class="p">)</span> <span class="o">-&gt;</span> <span class="kc">None</span><span class="p">:</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;Batched logits processing callback. The callback is expected to inplace modify the logits.</span>
<span class="sd"> Args:</span>
<span class="sd"> req_ids (List[int]): A batch of request ids.</span>
<span class="sd"> logits (List[torch.Tensor]): A batch of the logits tensors.</span>
<span class="sd"> token_ids (List[List[List[int]]]): A batch of the token ids produced by the requests so far. The shape is batch * beam_width * sequence_length.</span>
<span class="sd"> token_ids (List[List[List[int]]]): A batch of the token ids produced by the requests so far.</span>
<span class="sd"> The shape is batch * beam_width * sequence_length.</span>
<span class="sd"> stream_ptr (int): The operation stream used by the logits tensors.</span>
<span class="sd"> client_ids (List[Optional[int]]): A batch of optional client ids.</span>
<span class="sd"> &quot;&quot;&quot;</span>
@ -620,13 +623,13 @@
<span class="nd">@dataclass</span><span class="p">(</span><span class="n">slots</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">kw_only</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="k">class</span><span class="w"> </span><span class="nc">AdditionalModelOutput</span><span class="p">:</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> An additional output to gather from the model.</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;An additional output to gather from the model.</span>
<span class="sd"> Args:</span>
<span class="sd"> name (str): The name of the additional output to gather from the model.</span>
<span class="sd"> gather_context (bool): A value indicating whether or not to gather the additional output from the context too. Defaults to False.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="sd"> &quot;&quot;&quot;</span> <span class="c1"># noqa: E501</span>
<span class="n">name</span><span class="p">:</span> <span class="nb">str</span>
<span class="n">gather_context</span><span class="p">:</span> <span class="nb">bool</span>
@ -635,8 +638,7 @@
<a class="viewcode-back" href="../../llm-api/reference.html#tensorrt_llm.llmapi.SamplingParams">[docs]</a>
<span class="nd">@dataclass</span><span class="p">(</span><span class="n">slots</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">kw_only</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="k">class</span><span class="w"> </span><span class="nc">SamplingParams</span><span class="p">:</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Sampling parameters for text generation.</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;Sampling parameters for text generation.</span>
<span class="sd"> Usage Examples:</span>
@ -702,7 +704,8 @@
<span class="sd"> truncate_prompt_tokens (int, optional): If set to an integer k, will use only the last k tokens from the prompt (i.e., left truncation). Defaults to None.</span>
<span class="sd"> skip_special_tokens (bool): Whether to skip special tokens in the output. Defaults to True.</span>
<span class="sd"> spaces_between_special_tokens (bool): Whether to add spaces between special tokens in the output. Defaults to True.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="sd"> &quot;&quot;&quot;</span> <span class="c1"># noqa: E501</span>
<span class="c1"># [TO DEVELOPER] This class provides an interface to LLMAPI users.</span>
<span class="c1"># Internally, it manages and dispatches fields to Python bindings of C++ objects, currently including:</span>
<span class="c1"># (1) all fields of tllme.SamplingConfig;</span>
@ -717,19 +720,14 @@
<span class="n">max_tokens</span><span class="p">:</span> <span class="nb">int</span> <span class="o">=</span> <span class="mi">32</span>
<span class="n">bad</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Union</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">List</span><span class="p">[</span><span class="nb">str</span><span class="p">]]]</span> <span class="o">=</span> <span class="kc">None</span>
<span class="n">bad_token_ids</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">List</span><span class="p">[</span><span class="nb">int</span><span class="p">]]</span> <span class="o">=</span> <span class="kc">None</span>
<span class="n">_bad_word_ids</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">List</span><span class="p">[</span><span class="n">List</span><span class="p">[</span><span class="nb">int</span><span class="p">]]]</span> <span class="o">=</span> <span class="n">field</span><span class="p">(</span><span class="n">default</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
<span class="n">init</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
<span class="nb">repr</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>
<span class="n">_bad_word_ids</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">List</span><span class="p">[</span><span class="n">List</span><span class="p">[</span><span class="nb">int</span><span class="p">]]]</span> <span class="o">=</span> <span class="n">field</span><span class="p">(</span><span class="n">default</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">init</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="nb">repr</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>
<span class="n">stop</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Union</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">List</span><span class="p">[</span><span class="nb">str</span><span class="p">]]]</span> <span class="o">=</span> <span class="kc">None</span>
<span class="n">stop_token_ids</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">List</span><span class="p">[</span><span class="nb">int</span><span class="p">]]</span> <span class="o">=</span> <span class="kc">None</span>
<span class="n">include_stop_str_in_output</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="kc">False</span>
<span class="n">_stop_word_ids</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">List</span><span class="p">[</span><span class="n">List</span><span class="p">[</span><span class="nb">int</span><span class="p">]]]</span> <span class="o">=</span> <span class="n">field</span><span class="p">(</span><span class="n">default</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
<span class="n">init</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
<span class="nb">repr</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>
<span class="n">_stop_word_ids</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">List</span><span class="p">[</span><span class="n">List</span><span class="p">[</span><span class="nb">int</span><span class="p">]]]</span> <span class="o">=</span> <span class="n">field</span><span class="p">(</span><span class="n">default</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">init</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="nb">repr</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>
<span class="n">embedding_bias</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span>
<span class="n">logits_processor</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Union</span><span class="p">[</span><span class="n">LogitsProcessor</span><span class="p">,</span>
<span class="n">List</span><span class="p">[</span><span class="n">LogitsProcessor</span><span class="p">]]]</span> <span class="o">=</span> <span class="kc">None</span>
<span class="n">logits_processor</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Union</span><span class="p">[</span><span class="n">LogitsProcessor</span><span class="p">,</span> <span class="n">List</span><span class="p">[</span><span class="n">LogitsProcessor</span><span class="p">]]]</span> <span class="o">=</span> <span class="kc">None</span>
<span class="n">apply_batched_logits_processor</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="kc">False</span>
<span class="n">n</span><span class="p">:</span> <span class="nb">int</span> <span class="o">=</span> <span class="mi">1</span>
@ -796,25 +794,28 @@
<span class="bp">self</span><span class="o">.</span><span class="n">_validate</span><span class="p">()</span>
<span class="k">def</span><span class="w"> </span><span class="nf">_validate</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="w"> </span><span class="sd">&#39;&#39;&#39; Verify the sampling parameters.</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;Verify the sampling parameters.</span>
<span class="sd"> This function verifies the sampling parameters in the LLM API, which</span>
<span class="sd"> may have stricter requirements than the Executor class of C++ runtime.</span>
<span class="sd"> For instance, while the greedy decoding with n &gt; 1 is capable in the</span>
<span class="sd"> Executor class of C++ runtime, the LLM API disallows such combination.</span>
<span class="sd"> &#39;&#39;&#39;</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">best_of</span> <span class="o">&lt;</span> <span class="bp">self</span><span class="o">.</span><span class="n">n</span><span class="p">:</span>
<span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span>
<span class="sa">f</span><span class="s2">&quot;best_of (</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">best_of</span><span class="si">}</span><span class="s2">) cannot be less than n (</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">n</span><span class="si">}</span><span class="s2">)&quot;</span><span class="p">)</span>
<span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;best_of (</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">best_of</span><span class="si">}</span><span class="s2">) cannot be less than n (</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">n</span><span class="si">}</span><span class="s2">)&quot;</span><span class="p">)</span>
<span class="k">if</span> <span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">best_of</span> <span class="o">&gt;</span> <span class="mi">1</span> <span class="ow">and</span> <span class="bp">self</span><span class="o">.</span><span class="n">_greedy_decoding</span>
<span class="ow">and</span> <span class="ow">not</span> <span class="n">os</span><span class="o">.</span><span class="n">environ</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="s1">&#39;TLLM_ALLOW_N_GREEDY_DECODING&#39;</span><span class="p">,</span> <span class="kc">None</span><span class="p">)):</span>
<span class="k">if</span> <span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">best_of</span> <span class="o">&gt;</span> <span class="mi">1</span>
<span class="ow">and</span> <span class="bp">self</span><span class="o">.</span><span class="n">_greedy_decoding</span>
<span class="ow">and</span> <span class="ow">not</span> <span class="n">os</span><span class="o">.</span><span class="n">environ</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="s2">&quot;TLLM_ALLOW_N_GREEDY_DECODING&quot;</span><span class="p">,</span> <span class="kc">None</span><span class="p">)</span>
<span class="p">):</span>
<span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span>
<span class="sa">f</span><span class="s1">&#39;Greedy decoding in the LLM API does not allow multiple &#39;</span>
<span class="sa">f</span><span class="s1">&#39;returns. Please set to best_of=1, got best_of=</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">best_of</span><span class="si">}</span><span class="s1">. &#39;</span>
<span class="sa">f</span><span class="s1">&#39;Please set to best_of=1 or set an environment variable &#39;</span>
<span class="sa">f</span><span class="s1">&#39;TLLM_ALLOW_N_GREEDY_DECODING=1 to allow best_of &gt; 1 &#39;</span>
<span class="sa">f</span><span class="s1">&#39;under the greedy decoding.&#39;</span><span class="p">)</span>
<span class="sa">f</span><span class="s2">&quot;Greedy decoding in the LLM API does not allow multiple &quot;</span>
<span class="sa">f</span><span class="s2">&quot;returns. Please set to best_of=1, got best_of=</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">best_of</span><span class="si">}</span><span class="s2">. &quot;</span>
<span class="sa">f</span><span class="s2">&quot;Please set to best_of=1 or set an environment variable &quot;</span>
<span class="sa">f</span><span class="s2">&quot;TLLM_ALLOW_N_GREEDY_DECODING=1 to allow best_of &gt; 1 &quot;</span>
<span class="sa">f</span><span class="s2">&quot;under the greedy decoding.&quot;</span>
<span class="p">)</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">truncate_prompt_tokens</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span> <span class="ow">and</span> <span class="bp">self</span><span class="o">.</span><span class="n">truncate_prompt_tokens</span> <span class="o">&lt;</span> <span class="mi">1</span><span class="p">:</span>
<span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span>
@ -826,14 +827,15 @@
<span class="c1"># correct types as users might pass in logprob=True for Top-1 logprobs</span>
<span class="bp">self</span><span class="o">.</span><span class="n">logprobs</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">logprobs</span> <span class="ow">and</span> <span class="nb">int</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">logprobs</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">prompt_logprobs</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">prompt_logprobs</span> <span class="ow">and</span> <span class="nb">int</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">prompt_logprobs</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">prompt_logprobs</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">prompt_logprobs</span> <span class="ow">and</span> <span class="nb">int</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">prompt_logprobs</span><span class="p">)</span>
<span class="nd">@property</span>
<span class="k">def</span><span class="w"> </span><span class="nf">_greedy_decoding</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="nb">bool</span><span class="p">:</span>
<span class="k">return</span> <span class="p">(</span><span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">use_beam_search</span>
<span class="ow">and</span> <span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">top_k</span> <span class="ow">is</span> <span class="kc">None</span> <span class="ow">or</span> <span class="bp">self</span><span class="o">.</span><span class="n">top_k</span> <span class="o">==</span> <span class="mi">1</span><span class="p">)</span>
<span class="ow">and</span> <span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">top_p</span> <span class="ow">is</span> <span class="kc">None</span> <span class="ow">or</span> <span class="bp">self</span><span class="o">.</span><span class="n">top_p</span> <span class="o">==</span> <span class="mf">0.0</span><span class="p">))</span>
<span class="k">return</span> <span class="p">(</span>
<span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">use_beam_search</span>
<span class="ow">and</span> <span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">top_k</span> <span class="ow">is</span> <span class="kc">None</span> <span class="ow">or</span> <span class="bp">self</span><span class="o">.</span><span class="n">top_k</span> <span class="o">==</span> <span class="mi">1</span><span class="p">)</span>
<span class="ow">and</span> <span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">top_p</span> <span class="ow">is</span> <span class="kc">None</span> <span class="ow">or</span> <span class="bp">self</span><span class="o">.</span><span class="n">top_p</span> <span class="o">==</span> <span class="mf">0.0</span><span class="p">)</span>
<span class="p">)</span>
<span class="nd">@property</span>
<span class="k">def</span><span class="w"> </span><span class="nf">_need_return_context_logits</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="nb">bool</span><span class="p">:</span>
@ -843,9 +845,7 @@
<span class="k">def</span><span class="w"> </span><span class="nf">_need_return_generation_logits</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="nb">bool</span><span class="p">:</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">return_generation_logits</span> <span class="ow">and</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">_generation_logits_auto_enabled</span>
<span class="k">def</span><span class="w"> </span><span class="nf">_setup</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span>
<span class="n">tokenizer</span><span class="p">,</span>
<span class="n">add_special_tokens</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="kc">False</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="s1">&#39;SamplingParams&#39;</span><span class="p">:</span>
<span class="k">def</span><span class="w"> </span><span class="nf">_setup</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">tokenizer</span><span class="p">,</span> <span class="n">add_special_tokens</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="kc">False</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="s2">&quot;SamplingParams&quot;</span><span class="p">:</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">end_id</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">end_id</span> <span class="o">=</span> <span class="n">tokenizer</span><span class="o">.</span><span class="n">eos_token_id</span>
<span class="bp">self</span><span class="o">.</span><span class="n">pad_id</span> <span class="o">=</span> <span class="n">tokenizer</span><span class="o">.</span><span class="n">pad_token_id</span>
@ -855,15 +855,13 @@
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">bad</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">strs</span> <span class="o">=</span> <span class="p">[</span><span class="bp">self</span><span class="o">.</span><span class="n">bad</span><span class="p">]</span> <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">bad</span><span class="p">,</span> <span class="nb">str</span><span class="p">)</span> <span class="k">else</span> <span class="bp">self</span><span class="o">.</span><span class="n">bad</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_bad_word_ids</span> <span class="o">=</span> <span class="p">[</span>
<span class="n">tokenizer</span><span class="o">.</span><span class="n">encode</span><span class="p">(</span><span class="n">s</span><span class="p">,</span> <span class="n">add_special_tokens</span><span class="o">=</span><span class="n">add_special_tokens</span><span class="p">)</span>
<span class="k">for</span> <span class="n">s</span> <span class="ow">in</span> <span class="n">strs</span>
<span class="n">tokenizer</span><span class="o">.</span><span class="n">encode</span><span class="p">(</span><span class="n">s</span><span class="p">,</span> <span class="n">add_special_tokens</span><span class="o">=</span><span class="n">add_special_tokens</span><span class="p">)</span> <span class="k">for</span> <span class="n">s</span> <span class="ow">in</span> <span class="n">strs</span>
<span class="p">]</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">stop</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">strs</span> <span class="o">=</span> <span class="p">[</span><span class="bp">self</span><span class="o">.</span><span class="n">stop</span><span class="p">]</span> <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">stop</span><span class="p">,</span> <span class="nb">str</span><span class="p">)</span> <span class="k">else</span> <span class="bp">self</span><span class="o">.</span><span class="n">stop</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_stop_word_ids</span> <span class="o">=</span> <span class="p">[</span>
<span class="n">tokenizer</span><span class="o">.</span><span class="n">encode</span><span class="p">(</span><span class="n">s</span><span class="p">,</span> <span class="n">add_special_tokens</span><span class="o">=</span><span class="n">add_special_tokens</span><span class="p">)</span>
<span class="k">for</span> <span class="n">s</span> <span class="ow">in</span> <span class="n">strs</span>
<span class="n">tokenizer</span><span class="o">.</span><span class="n">encode</span><span class="p">(</span><span class="n">s</span><span class="p">,</span> <span class="n">add_special_tokens</span><span class="o">=</span><span class="n">add_special_tokens</span><span class="p">)</span> <span class="k">for</span> <span class="n">s</span> <span class="ow">in</span> <span class="n">strs</span>
<span class="p">]</span>
<span class="k">return</span> <span class="bp">self</span>
@ -879,7 +877,8 @@
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">_bad_word_ids</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
<span class="k">raise</span> <span class="ne">RuntimeError</span><span class="p">(</span>
<span class="sa">f</span><span class="s2">&quot;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="vm">__class__</span><span class="o">.</span><span class="vm">__name__</span><span class="si">}</span><span class="s2">.bad (</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">bad</span><span class="si">}</span><span class="s2">) is not processed by tokenizer, &quot;</span>
<span class="s2">&quot;please call the setup method.&quot;</span><span class="p">)</span>
<span class="s2">&quot;please call the setup method.&quot;</span>
<span class="p">)</span>
<span class="k">return</span> <span class="n">words</span> <span class="o">+</span> <span class="bp">self</span><span class="o">.</span><span class="n">_bad_word_ids</span>
<span class="k">def</span><span class="w"> </span><span class="nf">_get_stop_words</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">List</span><span class="p">[</span><span class="n">List</span><span class="p">[</span><span class="nb">int</span><span class="p">]]:</span>
@ -893,11 +892,11 @@
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">_stop_word_ids</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
<span class="k">raise</span> <span class="ne">RuntimeError</span><span class="p">(</span>
<span class="sa">f</span><span class="s2">&quot;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="vm">__class__</span><span class="o">.</span><span class="vm">__name__</span><span class="si">}</span><span class="s2">.stop (</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">stop</span><span class="si">}</span><span class="s2">) is not processed by tokenizer, &quot;</span>
<span class="s2">&quot;please call the setup method.&quot;</span><span class="p">)</span>
<span class="s2">&quot;please call the setup method.&quot;</span>
<span class="p">)</span>
<span class="k">return</span> <span class="n">words</span> <span class="o">+</span> <span class="bp">self</span><span class="o">.</span><span class="n">_stop_word_ids</span>
<span class="k">def</span><span class="w"> </span><span class="nf">_get_stop_reasons_and_words</span><span class="p">(</span>
<span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">List</span><span class="p">[</span><span class="n">Tuple</span><span class="p">[</span><span class="n">Union</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="nb">int</span><span class="p">],</span> <span class="n">List</span><span class="p">[</span><span class="n">List</span><span class="p">[</span><span class="nb">int</span><span class="p">]]]]:</span>
<span class="k">def</span><span class="w"> </span><span class="nf">_get_stop_reasons_and_words</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">List</span><span class="p">[</span><span class="n">Tuple</span><span class="p">[</span><span class="n">Union</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="nb">int</span><span class="p">],</span> <span class="n">List</span><span class="p">[</span><span class="n">List</span><span class="p">[</span><span class="nb">int</span><span class="p">]]]]:</span>
<span class="n">stop_reasons</span> <span class="o">=</span> <span class="p">[]</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">stop_token_ids</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">stop_reasons</span><span class="o">.</span><span class="n">extend</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">stop_token_ids</span><span class="p">)</span>
@ -923,37 +922,30 @@
<span class="c1"># | Sampling | use_beam_search | beam_width == 1 |</span>
<span class="c1"># | Sampling | n | num_return_sequences |</span>
<span class="c1"># | Sampling | best_of | no corresponding param |</span>
<span class="n">fields</span> <span class="o">=</span> <span class="p">{</span>
<span class="n">f</span>
<span class="k">for</span> <span class="n">f</span> <span class="ow">in</span> <span class="nb">dir</span><span class="p">(</span><span class="n">tllme</span><span class="o">.</span><span class="n">SamplingConfig</span><span class="p">)</span> <span class="k">if</span> <span class="ow">not</span> <span class="n">f</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="s1">&#39;__&#39;</span><span class="p">)</span>
<span class="p">}</span>
<span class="n">fields</span> <span class="o">=</span> <span class="p">{</span><span class="n">f</span> <span class="k">for</span> <span class="n">f</span> <span class="ow">in</span> <span class="nb">dir</span><span class="p">(</span><span class="n">tllme</span><span class="o">.</span><span class="n">SamplingConfig</span><span class="p">)</span> <span class="k">if</span> <span class="ow">not</span> <span class="n">f</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="s2">&quot;__&quot;</span><span class="p">)}</span>
<span class="n">unmatched_params</span> <span class="o">=</span> <span class="p">[</span>
<span class="s1">&#39;num_return_sequences&#39;</span><span class="p">,</span>
<span class="s1">&#39;beam_width&#39;</span><span class="p">,</span>
<span class="s1">&#39;n&#39;</span><span class="p">,</span>
<span class="s1">&#39;best_of&#39;</span><span class="p">,</span>
<span class="s1">&#39;use_beam_search&#39;</span><span class="p">,</span>
<span class="s2">&quot;num_return_sequences&quot;</span><span class="p">,</span>
<span class="s2">&quot;beam_width&quot;</span><span class="p">,</span>
<span class="s2">&quot;n&quot;</span><span class="p">,</span>
<span class="s2">&quot;best_of&quot;</span><span class="p">,</span>
<span class="s2">&quot;use_beam_search&quot;</span><span class="p">,</span>
<span class="p">]</span>
<span class="n">llmapi_to_rt_param_map</span> <span class="o">=</span> <span class="p">{</span>
<span class="n">f</span><span class="p">:</span> <span class="nb">getattr</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">f</span><span class="p">)</span>
<span class="k">for</span> <span class="n">f</span> <span class="ow">in</span> <span class="n">fields</span> <span class="k">if</span> <span class="n">f</span> <span class="ow">not</span> <span class="ow">in</span> <span class="n">unmatched_params</span>
<span class="p">}</span>
<span class="n">llmapi_to_rt_param_map</span> <span class="o">=</span> <span class="p">{</span><span class="n">f</span><span class="p">:</span> <span class="nb">getattr</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">f</span><span class="p">)</span> <span class="k">for</span> <span class="n">f</span> <span class="ow">in</span> <span class="n">fields</span> <span class="k">if</span> <span class="n">f</span> <span class="ow">not</span> <span class="ow">in</span> <span class="n">unmatched_params</span><span class="p">}</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">use_beam_search</span><span class="p">:</span>
<span class="n">llmapi_to_rt_param_map</span><span class="p">[</span><span class="s1">&#39;num_return_sequences&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">n</span>
<span class="n">llmapi_to_rt_param_map</span><span class="p">[</span><span class="s1">&#39;beam_width&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">best_of</span>
<span class="n">llmapi_to_rt_param_map</span><span class="p">[</span><span class="s2">&quot;num_return_sequences&quot;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">n</span>
<span class="n">llmapi_to_rt_param_map</span><span class="p">[</span><span class="s2">&quot;beam_width&quot;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">best_of</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">llmapi_to_rt_param_map</span><span class="p">[</span><span class="s1">&#39;num_return_sequences&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">best_of</span>
<span class="n">llmapi_to_rt_param_map</span><span class="p">[</span><span class="s1">&#39;beam_width&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="mi">1</span>
<span class="n">llmapi_to_rt_param_map</span><span class="p">[</span><span class="s2">&quot;num_return_sequences&quot;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">best_of</span>
<span class="n">llmapi_to_rt_param_map</span><span class="p">[</span><span class="s2">&quot;beam_width&quot;</span><span class="p">]</span> <span class="o">=</span> <span class="mi">1</span>
<span class="k">return</span> <span class="n">tllme</span><span class="o">.</span><span class="n">SamplingConfig</span><span class="p">(</span><span class="o">**</span><span class="n">llmapi_to_rt_param_map</span><span class="p">)</span>
<span class="k">def</span><span class="w"> </span><span class="nf">_get_output_config</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span>
<span class="n">is_pytorch_backend</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="kc">False</span>
<span class="p">)</span> <span class="o">-&gt;</span> <span class="n">tllme</span><span class="o">.</span><span class="n">OutputConfig</span><span class="p">:</span>
<span class="k">def</span><span class="w"> </span><span class="nf">_get_output_config</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">is_pytorch_backend</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="kc">False</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">tllme</span><span class="o">.</span><span class="n">OutputConfig</span><span class="p">:</span>
<span class="n">sampling_param_fields</span> <span class="o">=</span> <span class="nb">set</span><span class="p">(</span><span class="nb">dir</span><span class="p">(</span><span class="n">SamplingParams</span><span class="p">))</span>
<span class="n">fields</span> <span class="o">=</span> <span class="p">[</span>
<span class="n">f</span> <span class="k">for</span> <span class="n">f</span> <span class="ow">in</span> <span class="nb">dir</span><span class="p">(</span><span class="n">tllme</span><span class="o">.</span><span class="n">OutputConfig</span><span class="p">)</span>
<span class="k">if</span> <span class="ow">not</span> <span class="n">f</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="s1">&#39;__&#39;</span><span class="p">)</span> <span class="ow">and</span> <span class="n">f</span> <span class="ow">in</span> <span class="n">sampling_param_fields</span>
<span class="n">f</span>
<span class="k">for</span> <span class="n">f</span> <span class="ow">in</span> <span class="nb">dir</span><span class="p">(</span><span class="n">tllme</span><span class="o">.</span><span class="n">OutputConfig</span><span class="p">)</span>
<span class="k">if</span> <span class="ow">not</span> <span class="n">f</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="s2">&quot;__&quot;</span><span class="p">)</span> <span class="ow">and</span> <span class="n">f</span> <span class="ow">in</span> <span class="n">sampling_param_fields</span>
<span class="p">]</span>
<span class="n">config_kwargs</span> <span class="o">=</span> <span class="p">{</span><span class="n">f</span><span class="p">:</span> <span class="nb">getattr</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">f</span><span class="p">)</span> <span class="k">for</span> <span class="n">f</span> <span class="ow">in</span> <span class="n">fields</span><span class="p">}</span>
@ -970,8 +962,7 @@
<span class="k">return</span> <span class="kc">None</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">guided_decoding</span><span class="o">.</span><span class="n">json_object</span><span class="p">:</span>
<span class="k">return</span> <span class="n">tllme</span><span class="o">.</span><span class="n">GuidedDecodingParams</span><span class="p">(</span>
<span class="n">tllme</span><span class="o">.</span><span class="n">GuidedDecodingParams</span><span class="o">.</span><span class="n">GuideType</span><span class="o">.</span><span class="n">JSON</span><span class="p">)</span>
<span class="k">return</span> <span class="n">tllme</span><span class="o">.</span><span class="n">GuidedDecodingParams</span><span class="p">(</span><span class="n">tllme</span><span class="o">.</span><span class="n">GuidedDecodingParams</span><span class="o">.</span><span class="n">GuideType</span><span class="o">.</span><span class="n">JSON</span><span class="p">)</span>
<span class="k">elif</span> <span class="bp">self</span><span class="o">.</span><span class="n">guided_decoding</span><span class="o">.</span><span class="n">json</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">json_schema</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">guided_decoding</span><span class="o">.</span><span class="n">json</span>
<span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">json_schema</span><span class="p">,</span> <span class="n">BaseModel</span><span class="p">):</span>
@ -979,19 +970,21 @@
<span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">json_schema</span><span class="p">,</span> <span class="nb">dict</span><span class="p">):</span>
<span class="n">json_schema</span> <span class="o">=</span> <span class="n">json</span><span class="o">.</span><span class="n">dumps</span><span class="p">(</span><span class="n">json_schema</span><span class="p">)</span>
<span class="k">return</span> <span class="n">tllme</span><span class="o">.</span><span class="n">GuidedDecodingParams</span><span class="p">(</span>
<span class="n">tllme</span><span class="o">.</span><span class="n">GuidedDecodingParams</span><span class="o">.</span><span class="n">GuideType</span><span class="o">.</span><span class="n">JSON_SCHEMA</span><span class="p">,</span> <span class="n">json_schema</span><span class="p">)</span>
<span class="n">tllme</span><span class="o">.</span><span class="n">GuidedDecodingParams</span><span class="o">.</span><span class="n">GuideType</span><span class="o">.</span><span class="n">JSON_SCHEMA</span><span class="p">,</span> <span class="n">json_schema</span>
<span class="p">)</span>
<span class="k">elif</span> <span class="bp">self</span><span class="o">.</span><span class="n">guided_decoding</span><span class="o">.</span><span class="n">regex</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="k">return</span> <span class="n">tllme</span><span class="o">.</span><span class="n">GuidedDecodingParams</span><span class="p">(</span>
<span class="n">tllme</span><span class="o">.</span><span class="n">GuidedDecodingParams</span><span class="o">.</span><span class="n">GuideType</span><span class="o">.</span><span class="n">REGEX</span><span class="p">,</span>
<span class="bp">self</span><span class="o">.</span><span class="n">guided_decoding</span><span class="o">.</span><span class="n">regex</span><span class="p">)</span>
<span class="n">tllme</span><span class="o">.</span><span class="n">GuidedDecodingParams</span><span class="o">.</span><span class="n">GuideType</span><span class="o">.</span><span class="n">REGEX</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">guided_decoding</span><span class="o">.</span><span class="n">regex</span>
<span class="p">)</span>
<span class="k">elif</span> <span class="bp">self</span><span class="o">.</span><span class="n">guided_decoding</span><span class="o">.</span><span class="n">grammar</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="k">return</span> <span class="n">tllme</span><span class="o">.</span><span class="n">GuidedDecodingParams</span><span class="p">(</span>
<span class="n">tllme</span><span class="o">.</span><span class="n">GuidedDecodingParams</span><span class="o">.</span><span class="n">GuideType</span><span class="o">.</span><span class="n">EBNF_GRAMMAR</span><span class="p">,</span>
<span class="bp">self</span><span class="o">.</span><span class="n">guided_decoding</span><span class="o">.</span><span class="n">grammar</span><span class="p">)</span>
<span class="n">tllme</span><span class="o">.</span><span class="n">GuidedDecodingParams</span><span class="o">.</span><span class="n">GuideType</span><span class="o">.</span><span class="n">EBNF_GRAMMAR</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">guided_decoding</span><span class="o">.</span><span class="n">grammar</span>
<span class="p">)</span>
<span class="k">elif</span> <span class="bp">self</span><span class="o">.</span><span class="n">guided_decoding</span><span class="o">.</span><span class="n">structural_tag</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="k">return</span> <span class="n">tllme</span><span class="o">.</span><span class="n">GuidedDecodingParams</span><span class="p">(</span>
<span class="n">tllme</span><span class="o">.</span><span class="n">GuidedDecodingParams</span><span class="o">.</span><span class="n">GuideType</span><span class="o">.</span><span class="n">STRUCTURAL_TAG</span><span class="p">,</span>
<span class="bp">self</span><span class="o">.</span><span class="n">guided_decoding</span><span class="o">.</span><span class="n">structural_tag</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">guided_decoding</span><span class="o">.</span><span class="n">structural_tag</span><span class="p">,</span>
<span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="k">return</span> <span class="kc">None</span></div>
@ -1106,9 +1099,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -171,6 +171,8 @@ value for a given parameter, the vector can be limited to a single element
* `minP` is explained in [_Turning Up the Heat: Min-p Sampling for Creative and Coherent LLM Outputs_](https://arxiv.org/abs/2407.01082).
* TensorRT-LLM does not generate all possible tokenizations of a word. Therefore, stop words may appear in the output if there are multiple ways to tokenize a stop word and the token sequence in the output differs from the one in `stopWords`.
***Beam-search***
| Name in TRT-LLM | Description | Data type | Range of value | Default value | Name in HF |

View File

@ -41,12 +41,12 @@ The Low-Precision-AllReduce algorithm can be enabled in two ways:
```
AllReduce allreduce(mapping=mapping, strategy=AllReduceStrategy.LOWPRECISION);
```
2. **Environment variable control** with AUTO strategy:
2. **Enable via LlmArgs**:
```
// In your code
AllReduce allreduce(mapping=mapping, strategy=AllReduceStrategy.AUTO);
// Set environment variable before running
export FORCE_LOW_PRECISION_ALL_REDUCE_STRATEGY=1
Set the allreduce_strategy field in LlmArgs.
Candidate strategies are "AUTO", "NCCL", "UB", "MINLATENCY", "ONESHOT", "TWOSHOT", "LOWPRECISION" and "MNNVL".
If no strategy is set, "AUTO" is used.
```
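For illustration, here is a minimal sketch of the `LlmArgs` route, assuming the PyTorch-backend `LLM` constructor forwards keyword arguments such as `allreduce_strategy` to `LlmArgs` (the model name and parallelism below are placeholders):
```python
from tensorrt_llm._torch import LLM

# Assumption: `allreduce_strategy` maps to the LlmArgs field described above;
# any of the candidate strings is accepted, and "AUTO" is the default.
llm = LLM(
    model="nvidia/Llama-3.1-8B-Instruct-FP8",  # placeholder checkpoint
    tensor_parallel_size=2,                    # allreduce only matters with TP > 1
    allreduce_strategy="LOWPRECISION",
)
```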
## Performance and Accuracy Considerations
@ -58,8 +58,4 @@ Low-Precision-AllReduce reduces communication volume by using FP8 data format fo
Users should evaluate the precision impact on their specific models and workloads.
## Environment Variables
- `FORCE_LOW_PRECISION_ALL_REDUCE_STRATEGY`: When set to `1`, forces the use of low-precision algorithm with AUTO strategy. If the algorithm determines it cannot provide performance benefits, it will automatically fall back to other strategies.
**Note**: If TensorRT-LLM is compiled without the `ENABLE_FP8` option, enabling low-precision allreduce has no effect.

View File

@ -0,0 +1,26 @@
We have recently open-sourced a set of Cutlass kernels that were previously known as "internal_cutlass_kernels". Due to internal dependencies, these kernels were previously only available to users as static libraries. We have now decoupled these internal dependencies, making the kernels available as source code.
The open-sourced Cutlass kernels are on the path `cpp/tensorrt_llm/kernels/cutlass_kernels`, including:
- `low_latency_gemm`
- `moe_gemm`
- `fp4_gemm`
- `allreduce_gemm`
To ensure stability and provide an optimized performance experience, we have maintained the previous method of calling these kernels via static libraries as an alternative option. You can switch between open-sourced Cutlass kernels and static library Cutlass kernels through the `USING_OSS_CUTLASS_*` macro (where * represents the specific kernel name), enabling kernel-level control. By default, the open-source Cutlass kernels are used.
Note that support for these static libraries will be gradually deprioritized and may eventually be deprecated.
**Default Configuration (Using open-sourced Cutlass Kernels)**
To build using the open-source Cutlass kernels (default setting), run:
```bash
python3 ./scripts/build_wheel.py --cuda_architectures "90-real;100-real"
```
**Using Static Library Cutlass Kernels**
If you prefer to use the Cutlass kernels from the static library, you can control this during compilation by setting the `USING_OSS_CUTLASS_*` macro to `OFF`. For example, to use the static library implementation specifically for `low_latency_gemm` and `moe_gemm` while keeping other kernels as OSS, use the following compilation command:
```bash
python3 ./scripts/build_wheel.py --cuda_architectures "90-real;100-real" -D "USING_OSS_CUTLASS_MOE_GEMM=OFF;USING_OSS_CUTLASS_LOW_LATENCY_GEMM=OFF"
```

View File

@ -100,7 +100,7 @@ You can query Completions API with any http clients, a typical example is OpenAI
Another example uses ``curl``:
.. literalinclude:: ../../../examples/serve/curl_completion_client_for_multimodal.sh
.. literalinclude:: ../../../examples/serve/curl_chat_client_for_multimodal.sh
:language: bash
:linenos:

View File

@ -132,6 +132,7 @@ Welcome to TensorRT-LLM's Documentation!
reference/precision.md
reference/memory.md
reference/ci-overview.md
.. toctree::

View File

@ -136,6 +136,11 @@ API Reference
:undoc-members:
:special-members: __init__
:show-inheritance:
.. autoclass:: tensorrt_llm.llmapi.TorchCompileConfig
:members:
:undoc-members:
:special-members: __init__
:show-inheritance:
.. autoclass:: tensorrt_llm.llmapi.LlmArgs
:members:
:undoc-members:

View File

@ -475,6 +475,84 @@ Total Latency (ms): 18563.6825
```
#### Benchmarking with LoRA Adapters in PyTorch workflow
The PyTorch workflow supports benchmarking with LoRA (Low-Rank Adaptation) adapters. This requires preparing a dataset with LoRA metadata and configuring the LoRA settings.
**Preparing LoRA Dataset**
Use `prepare_dataset.py` with LoRA-specific options to generate requests with LoRA metadata:
```shell
python3 benchmarks/cpp/prepare_dataset.py \
--stdout \
--rand-task-id 0 1 \
--tokenizer /path/to/tokenizer \
--lora-dir /path/to/loras \
token-norm-dist \
--num-requests 100 \
--input-mean 128 \
--output-mean 128 \
--input-stdev 16 \
--output-stdev 24 \
> synthetic_lora_data.json
```
Key LoRA options:
- `--lora-dir`: Parent directory containing LoRA adapter subdirectories named by their task IDs (e.g., `0/`, `1/`, etc.)
- `--rand-task-id`: Range of LoRA task IDs to randomly assign to requests
- `--task-id`: Fixed LoRA task ID for all requests (alternative to `--rand-task-id`)
The generated dataset will include LoRA request metadata. Below is an example of a single such request data entry:
```json
{
"task_id": 0,
"input_ids": [3452, 88226, 102415, ...],
"output_tokens": 152,
"lora_request": {
"lora_name": "lora_0",
"lora_int_id": 0,
"lora_path": "/path/to/loras/0"
}
}
```
**LoRA Configuration**
Create an `extra-llm-api-options.yaml` file with LoRA configuration:
```yaml
lora_config:
lora_dir:
- /path/to/loras/0
- /path/to/loras/1
max_lora_rank: 64
lora_target_modules:
- attn_q
- attn_k
- attn_v
trtllm_modules_to_hf_modules:
attn_q: q_proj
attn_k: k_proj
attn_v: v_proj
```
**Running LoRA Benchmark**
```shell
trtllm-bench --model /path/to/base/model \
throughput \
--dataset synthetic_lora_data.json \
--backend pytorch \
--extra_llm_api_options extra-llm-api-options.yaml
```
```{note}
The LoRA directory structure should have task-specific subdirectories named by their task IDs (e.g., `loras/0/`, `loras/1/`).
Each subdirectory should contain the LoRA adapter files for that specific task.
```
#### Running multi-modal models in the PyTorch Workflow
To benchmark multi-modal models with the PyTorch workflow, you can follow a similar approach to the one described above.
@ -609,6 +687,7 @@ above:
"quant_algo": "FP8",
"kv_cache_quant_algo": null
}
}
```
The checkpoints above are quantized to run with a compute precision of `FP8` and default to no KV cache quantization (full

View File

@ -0,0 +1,112 @@
# Continuous Integration Overview
This page explains how TensorRT-LLM's CI is organized and how individual tests map to Jenkins stages. Most stages execute integration tests defined in YAML files, while unit tests run as part of a merge-request pipeline. The sections below describe how to locate a test and trigger the stage that runs it.
## Table of Contents
1. [CI pipelines](#ci-pipelines)
2. [Test definitions](#test-definitions)
3. [Unit tests](#unit-tests)
4. [Jenkins stage names](#jenkins-stage-names)
5. [Finding the stage for a test](#finding-the-stage-for-a-test)
6. [Waiving tests](#waiving-tests)
7. [Triggering CI Best Practices](#triggering-ci-best-practices)
## CI pipelines
Pull requests do not start testing by themselves. Developers trigger the CI by commenting `/bot run` (optionally with arguments) on the pull request (see [Pull Request Template](../../../.github/pull_request_template.md) for more details). That kicks off the **merge-request pipeline** (defined in `jenkins/L0_MergeRequest.groovy`), which runs unit tests and integration tests whose YAML entries specify `stage: pre_merge`. Once a pull request is merged, a separate **post-merge pipeline** (defined in `jenkins/L0_Test.groovy`) runs every test marked `post_merge` across all supported GPU configurations.
`stage` tags live in the YAML files under `tests/integration/test_lists/test-db/`. Searching those files for `stage: pre_merge` shows exactly which tests the merge-request pipeline covers.
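As a quick way to run that search, a small sketch using only the standard library (paths are relative to the repository root):
```python
# Print the test-db YAML files that contain `stage: pre_merge` entries.
import glob

for path in glob.glob("tests/integration/test_lists/test-db/*.yml"):
    with open(path) as f:
        if "stage: pre_merge" in f.read():
            print(path)
```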
## Test definitions
Integration tests are listed under `tests/integration/test_lists/test-db/`. Most YAML files are named after the GPU or configuration they run on (for example `l0_a100.yml`). Some files, like `l0_sanity_check.yml`, use wildcards and can run on multiple hardware types. Entries contain conditions and a list of tests. Two important terms in each entry are:
- `stage`: either `pre_merge` or `post_merge`.
- `backend`: `pytorch`, `tensorrt` or `triton`.
Example from `l0_a100.yml`:
```yaml
terms:
stage: post_merge
backend: triton
tests:
- triton_server/test_triton.py::test_gpt_ib_ptuning[gpt-ib-ptuning]
```
## Unit tests
Unit tests live under `tests/unittest/` and run during the merge-request pipeline. They are invoked from `jenkins/L0_MergeRequest.groovy` and do not require mapping to specific hardware stages.
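For local iteration, a sketch of invoking a subset of these tests through pytest's Python entry point (this assumes pytest and a TensorRT-LLM build are already installed, and is run from the repository root):
```python
import pytest

# Equivalent to running `pytest tests/unittest -q`; exits nonzero on failure.
raise SystemExit(pytest.main(["tests/unittest", "-q"]))
```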
## Jenkins stage names
`jenkins/L0_Test.groovy` maps stage names to these YAML files. For A100 the mapping includes:
```groovy
"A100X-Triton-Python-[Post-Merge]-1": ["a100x", "l0_a100", 1, 2],
"A100X-Triton-Python-[Post-Merge]-2": ["a100x", "l0_a100", 2, 2],
```
The array elements are: GPU type, YAML file (without extension), shard index, and total number of shards. Only tests with `stage: post_merge` from that YAML file are selected when a `Post-Merge` stage runs.
## Finding the stage for a test
1. Locate the test in the appropriate YAML file under `tests/integration/test_lists/test-db/` and note its `stage` and `backend` values.
2. Search `jenkins/L0_Test.groovy` for a stage whose YAML file matches (for example `l0_a100`) and whose name contains `[Post-Merge]` if the YAML entry uses `stage: post_merge`.
3. The resulting stage name(s) are what you pass to Jenkins via the `stage_list` parameter when triggering a job.
### Example
`triton_server/test_triton.py::test_gpt_ib_ptuning[gpt-ib-ptuning]` appears in `l0_a100.yml` under `stage: post_merge` and `backend: triton`. The corresponding Jenkins stages are `A100X-Triton-Python-[Post-Merge]-1` and `A100X-Triton-Python-[Post-Merge]-2` (two shards).
To run the same tests on your pull request, comment:
```bash
/bot run --stage-list "A100X-Triton-Python-[Post-Merge]-1,A100X-Triton-Python-[Post-Merge]-2"
```
This executes the same tests that run post-merge for this hardware/backend.
## Waiving tests
Sometimes a test is known to fail due to a bug or unsupported feature. Instead
of removing it from the YAML test lists, add the test name to
`tests/integration/test_lists/waives.txt`. Every CI run passes this file to
pytest via `--waives-file`, so the listed tests are skipped automatically.
Each line contains the fully qualified test name followed by an optional
`SKIP (reason)` marker. A `full:GPU_TYPE/` prefix restricts the waive to a
specific hardware family. Example:
```text
examples/test_openai.py::test_llm_openai_triton_1gpu SKIP (https://nvbugspro.nvidia.com/bug/4963654)
full:GH200/examples/test_qwen2audio.py::test_llm_qwen2audio_single_gpu[qwen2_audio_7b_instruct] SKIP (arm is not supported)
```
Changes to `waives.txt` should include a bug link or brief explanation so other
developers understand why the test is disabled.
## Triggering CI Best Practices
### Triggering Post-merge tests
When you only need to verify a handful of post-merge tests, avoid the heavy
`/bot run --post-merge` command. Instead, specify exactly which stages to run:
```bash
/bot run --stage-list "stage-A,stage-B"
```
This runs **only** the stages listed. You can also add stages on top of the
default pre-merge set:
```bash
/bot run --extra-stage "stage-A,stage-B"
```
Both options accept any stage name defined in `jenkins/L0_Test.groovy`. Being
selective keeps CI turnaround fast and conserves hardware resources.
### Avoiding unnecessary `--disable-fail-fast` usage
Avoid habitually using `--disable-fail-fast` as it wastes scarce hardware resources. The CI system automatically reuses successful test stages when commits remain unchanged, and subsequent `/bot run` commands only retry failed stages. Overusing `--disable-fail-fast` keeps failed pipelines consuming resources (like DGX-H100s), increasing queue backlogs and reducing team efficiency.

View File

@ -37,6 +37,27 @@ cd TensorRT-Model-Optimizer/examples/llm_ptq
scripts/huggingface_example.sh --model <huggingface_model_card> --quant fp8 --export_fmt hf
```
## Sampling
The PyTorch backend supports most of the sampling features available in the C++ backend, such as temperature, top-k and top-p sampling, stop words, bad words, penalties, context and generation logits, and log probs.
To use these features, enable the `enable_trtllm_sampler` option in the `LLM` class and pass a `SamplingParams` object with the desired options. The following example submits two identical prompts that can produce different outputs because of the sampling parameters chosen:
```python
from tensorrt_llm._torch import LLM
from tensorrt_llm import SamplingParams

llm = LLM(model='nvidia/Llama-3.1-8B-Instruct-FP8',
          enable_trtllm_sampler=True)
sampling_params = SamplingParams(
    temperature=1.0,
    top_k=8,
    top_p=0.5,
)
llm.generate(["Hello, my name is",
              "Hello, my name is"], sampling_params)
```
When using speculative decoders such as MTP or Eagle-3, the `enable_trtllm_sampler` option is not yet supported, so the set of available sampling options is more restricted.
## Developer Guide
- [Architecture Overview](./torch/arch_overview.md)

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -449,6 +449,7 @@
<li class="toctree-l1"><a class="reference internal" href="../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -808,9 +809,9 @@ export UCX_RNDV_PIPELINE_ERROR_HANDLING=y
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -449,6 +449,7 @@
<li class="toctree-l1"><a class="reference internal" href="../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -813,9 +814,9 @@ the TensorRT-LLM C++ Executor API.</p>
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -449,6 +449,7 @@
<li class="toctree-l1"><a class="reference internal" href="../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -680,9 +681,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -449,6 +449,7 @@
<li class="toctree-l1"><a class="reference internal" href="../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -998,9 +999,9 @@ is computed as:</p>
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -449,6 +449,7 @@
<li class="toctree-l1"><a class="reference internal" href="../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -814,6 +815,7 @@ value for a given parameter, the vector can be limited to a single element
<a class="reference external" href="https://arxiv.org/abs/2206.04624"><em>Factuality Enhanced Language Models for Open-Ended Text Generation</em></a>.
<code class="docutils literal notranslate"><span class="pre">topPDecay</span></code> is the decay, <code class="docutils literal notranslate"><span class="pre">topPMin</span></code> is the lower-bound and <code class="docutils literal notranslate"><span class="pre">topPResetIds</span></code> indicates where to reset the decay.</p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">minP</span></code> is explained in <a class="reference external" href="https://arxiv.org/abs/2407.01082"><em>Turning Up the Heat: Min-p Sampling for Creative and Coherent LLM Outputs</em></a>.</p></li>
<li><p>TensorRT-LLM does not generate all possible tokenizations of a word. Therefore, stop words may appear in the output if there are multiple ways to tokenize a stop word and the token sequence in the output differs from the one in <code class="docutils literal notranslate"><span class="pre">stopWords</span></code>.</p></li>
</ul>
<p><em><strong>Beam-search</strong></em></p>
<div class="pst-scrollable-table-container"><table class="table">
@ -1038,9 +1040,9 @@ The <code class="docutils literal notranslate"><span class="pre">GptDecoder</spa
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -449,6 +449,7 @@
<li class="toctree-l1"><a class="reference internal" href="../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -859,9 +860,9 @@ techniques to optimize the underlying graph. It provides a wrapper similar to P
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -449,6 +449,7 @@
<li class="toctree-l1"><a class="reference internal" href="../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -764,9 +765,9 @@ An “event” is any significant change in the lifecycle or state of a KV cache
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -449,6 +449,7 @@
<li class="toctree-l1"><a class="reference internal" href="../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -737,9 +738,9 @@ Assume vocabulary size is 100, which means normal text token ids are in range [0
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -449,6 +449,7 @@
<li class="toctree-l1"><a class="reference internal" href="../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -851,9 +852,9 @@ The shape of <code class="docutils literal notranslate"><span class="pre">LoraWe
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -61,7 +61,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -447,6 +447,7 @@
<li class="toctree-l1"><a class="reference internal" href="../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -550,12 +551,11 @@ This feature is optimized for PCIe-based GPU topologies and may affect model acc
</pre></div>
</div>
<ol class="arabic simple" start="2">
<li><p><strong>Environment variable control</strong> with AUTO strategy:</p></li>
<li><p>Enable via LlmArgs:</p></li>
</ol>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="o">//</span> <span class="n">In</span> <span class="n">your</span> <span class="n">code</span>
<span class="n">AllReduce</span> <span class="n">allreduce</span><span class="p">(</span><span class="n">mapping</span><span class="o">=</span><span class="n">mapping</span><span class="p">,</span> <span class="n">strategy</span><span class="o">=</span><span class="n">AllReduceStrategy</span><span class="o">.</span><span class="n">AUTO</span><span class="p">);</span>
<span class="o">//</span> <span class="n">Set</span> <span class="n">environment</span> <span class="n">variable</span> <span class="n">before</span> <span class="n">running</span>
<span class="n">export</span> <span class="n">FORCE_LOW_PRECISION_ALL_REDUCE_STRATEGY</span><span class="o">=</span><span class="mi">1</span>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">Set</span> <span class="n">allreduce_strategy</span> <span class="n">field</span> <span class="ow">in</span> <span class="n">LlmArgs</span><span class="o">.</span>
<span class="n">Candidates</span> <span class="n">of</span> <span class="n">strategies</span> <span class="n">are</span> <span class="s2">&quot;AUTO&quot;</span><span class="p">,</span> <span class="s2">&quot;NCCL&quot;</span><span class="p">,</span> <span class="s2">&quot;UB&quot;</span><span class="p">,</span> <span class="s2">&quot;MINLATENCY&quot;</span><span class="p">,</span> <span class="s2">&quot;ONESHOT&quot;</span><span class="p">,</span> <span class="s2">&quot;TWOSHOT&quot;</span><span class="p">,</span> <span class="s2">&quot;LOWPRECISION&quot;</span> <span class="ow">and</span> <span class="s2">&quot;MNNVL&quot;</span><span class="o">.</span>
<span class="n">If</span> <span class="n">no</span> <span class="n">strategy</span> <span class="ow">is</span> <span class="nb">set</span><span class="p">,</span> <span class="n">AUTO</span> <span class="n">will</span> <span class="n">be</span> <span class="nb">set</span><span class="o">.</span>
</pre></div>
</div>
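<p>As an illustration, here is a minimal sketch of selecting a strategy through the LLM API. Treat it as a sketch under the assumption that the <code class="docutils literal notranslate"><span class="pre">LLM</span></code> constructor forwards <code class="docutils literal notranslate"><span class="pre">allreduce_strategy</span></code> into LlmArgs as described above.</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span>from tensorrt_llm import LLM

# Sketch: request the low-precision allreduce strategy. Per the notes below,
# it falls back automatically when no performance benefit is expected.
llm = LLM(model="/path/to/model",           # placeholder path
          tensor_parallel_size=2,           # allreduce only matters with TP &gt; 1
          allreduce_strategy="LOWPRECISION")
</pre></div>
</div>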
</section>
@ -568,12 +568,6 @@ This feature is optimized for PCIe-based GPU topologies and may affect model acc
<li><p>Automatically falls back to other strategies when no performance benefit is expected (e.g., with NVLink or small messages)</p></li>
</ul>
<p>Users should evaluate the precision impact on their specific models and workloads.</p>
</section>
<section id="environment-variables">
<h2>Environment Variables<a class="headerlink" href="#environment-variables" title="Link to this heading">#</a></h2>
<ul class="simple">
<li><p><code class="docutils literal notranslate"><span class="pre">FORCE_LOW_PRECISION_ALL_REDUCE_STRATEGY</span></code>: When set to <code class="docutils literal notranslate"><span class="pre">1</span></code>, forces the use of low-precision algorithm with AUTO strategy. If the algorithm determines it cannot provide performance benefits, it will automatically fall back to other strategies.</p></li>
</ul>
<p><strong>Note</strong>: When compiling TensorRT-LLM without enabling the <code class="docutils literal notranslate"><span class="pre">ENABLE_FP8</span></code> option, setting Low Precision allreduce will not take effect.</p>
</section>
</section>
@ -613,7 +607,6 @@ This feature is optimized for PCIe-based GPU topologies and may affect model acc
<li class="toc-h2 nav-item toc-entry"><a class="reference internal nav-link" href="#topology-requirements">Topology Requirements</a></li>
<li class="toc-h2 nav-item toc-entry"><a class="reference internal nav-link" href="#usage">Usage</a></li>
<li class="toc-h2 nav-item toc-entry"><a class="reference internal nav-link" href="#performance-and-accuracy-considerations">Performance and Accuracy Considerations</a></li>
<li class="toc-h2 nav-item toc-entry"><a class="reference internal nav-link" href="#environment-variables">Environment Variables</a></li>
</ul>
</nav></div>
@ -708,9 +701,9 @@ This feature is optimized for PCIe-based GPU topologies and may affect model acc
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -0,0 +1,673 @@
<!DOCTYPE html>
<html lang="en" data-content_root="../" >
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" /><meta name="viewport" content="width=device-width, initial-scale=1" />
<title>&lt;no title&gt; &#8212; TensorRT-LLM</title>
<script data-cfasync="false">
document.documentElement.dataset.mode = localStorage.getItem("mode") || "";
document.documentElement.dataset.theme = localStorage.getItem("theme") || "";
</script>
<!--
this give us a css class that will be invisible only if js is disabled
-->
<noscript>
<style>
.pst-js-only { display: none !important; }
</style>
</noscript>
<!-- Loaded before other Sphinx assets -->
<link href="../_static/styles/theme.css?digest=8878045cc6db502f8baf" rel="stylesheet" />
<link href="../_static/styles/pydata-sphinx-theme.css?digest=8878045cc6db502f8baf" rel="stylesheet" />
<link rel="stylesheet" type="text/css" href="../_static/pygments.css?v=8f2a1f02" />
<link rel="stylesheet" type="text/css" href="../_static/styles/nvidia-sphinx-theme.css?v=df3ac72c" />
<link rel="stylesheet" type="text/css" href="../_static/copybutton.css?v=76b2166b" />
<link rel="stylesheet" type="text/css" href="../_static/autodoc_pydantic.css" />
<!-- So that users can add custom icons -->
<script src="../_static/scripts/fontawesome.js?digest=8878045cc6db502f8baf"></script>
<!-- Pre-loaded scripts that we'll load fully later -->
<link rel="preload" as="script" href="../_static/scripts/bootstrap.js?digest=8878045cc6db502f8baf" />
<link rel="preload" as="script" href="../_static/scripts/pydata-sphinx-theme.js?digest=8878045cc6db502f8baf" />
<script src="../_static/documentation_options.js?v=5929fcd5"></script>
<script src="../_static/doctools.js?v=9a2dae69"></script>
<script src="../_static/sphinx_highlight.js?v=dc90522c"></script>
<script src="../_static/clipboard.min.js?v=a7894cd8"></script>
<script src="../_static/copybutton.js?v=65e89d2a"></script>
<script>DOCUMENTATION_OPTIONS.pagename = 'advanced/open-sourced-cutlass-kernels';</script>
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
<link rel="icon" href="../_static/favicon.png"/>
<link rel="index" title="Index" href="../genindex.html" />
<link rel="search" title="Search" href="../search.html" />
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
<body data-bs-spy="scroll" data-bs-target=".bd-toc-nav" data-offset="180" data-bs-root-margin="0px 0px -60%" data-default-mode="">
<div id="pst-skip-link" class="skip-link d-print-none"><a href="#main-content">Skip to main content</a></div>
<div id="pst-scroll-pixel-helper"></div>
<button type="button" class="btn rounded-pill" id="pst-back-to-top">
<i class="fa-solid fa-arrow-up"></i>Back to top</button>
<dialog id="pst-search-dialog">
<form class="bd-search d-flex align-items-center"
action="../search.html"
method="get">
<i class="fa-solid fa-magnifying-glass"></i>
<input type="search"
class="form-control"
name="q"
placeholder="Search the docs ..."
aria-label="Search the docs ..."
autocomplete="off"
autocorrect="off"
autocapitalize="off"
spellcheck="false"/>
<span class="search-button__kbd-shortcut"><kbd class="kbd-shortcut__modifier">Ctrl</kbd>+<kbd>K</kbd></span>
</form>
</dialog>
<div class="pst-async-banner-revealer d-none">
<aside id="bd-header-version-warning" class="d-none d-print-none" aria-label="Version warning"></aside>
</div>
<header class="bd-header navbar navbar-expand-lg bd-navbar d-print-none">
<div class="bd-header__inner bd-page-width">
<button class="pst-navbar-icon sidebar-toggle primary-toggle" aria-label="Site navigation">
<span class="fa-solid fa-bars"></span>
</button>
<div class="col-lg-3 navbar-header-items__start">
<div class="navbar-item">
<a class="navbar-brand logo" href="../index.html">
<img src="../_static/nvidia-logo-horiz-rgb-blk-for-screen.svg" class="logo__image only-light" alt="TensorRT-LLM - Home"/>
<img src="../_static/nvidia-logo-horiz-rgb-wht-for-screen.svg" class="logo__image only-dark pst-js-only" alt="TensorRT-LLM - Home"/>
<p class="title logo__title">TensorRT-LLM</p>
</a></div>
</div>
<div class="col-lg-9 navbar-header-items">
<div class="me-auto navbar-header-items__center">
<div class="navbar-item">
<div class="version-switcher__container dropdown pst-js-only">
<button id="pst-version-switcher-button-2"
type="button"
class="version-switcher__button btn btn-sm dropdown-toggle"
data-bs-toggle="dropdown"
aria-haspopup="listbox"
aria-controls="pst-version-switcher-list-2"
aria-label="Version switcher list"
>
Choose version <!-- this text may get changed later by javascript -->
<span class="caret"></span>
</button>
<div id="pst-version-switcher-list-2"
class="version-switcher__menu dropdown-menu list-group-flush py-0"
role="listbox" aria-labelledby="pst-version-switcher-button-2">
<!-- dropdown will be populated by javascript on page load -->
</div>
</div></div>
</div>
<div class="navbar-header-items__end">
<div class="navbar-item navbar-persistent--container">
<button class="btn search-button-field search-button__button pst-js-only" title="Search" aria-label="Search" data-bs-placement="bottom" data-bs-toggle="tooltip">
<i class="fa-solid fa-magnifying-glass"></i>
<span class="search-button__default-text">Search</span>
<span class="search-button__kbd-shortcut"><kbd class="kbd-shortcut__modifier">Ctrl</kbd>+<kbd class="kbd-shortcut__modifier">K</kbd></span>
</button>
</div>
<div class="navbar-item">
<button class="btn btn-sm nav-link pst-navbar-icon theme-switch-button pst-js-only" aria-label="Color mode" data-bs-title="Color mode" data-bs-placement="bottom" data-bs-toggle="tooltip">
<i class="theme-switch fa-solid fa-sun fa-lg" data-mode="light" title="Light"></i>
<i class="theme-switch fa-solid fa-moon fa-lg" data-mode="dark" title="Dark"></i>
<i class="theme-switch fa-solid fa-circle-half-stroke fa-lg" data-mode="auto" title="System Settings"></i>
</button></div>
</div>
</div>
<div class="navbar-persistent--mobile">
<button class="btn search-button-field search-button__button pst-js-only" title="Search" aria-label="Search" data-bs-placement="bottom" data-bs-toggle="tooltip">
<i class="fa-solid fa-magnifying-glass"></i>
<span class="search-button__default-text">Search</span>
<span class="search-button__kbd-shortcut"><kbd class="kbd-shortcut__modifier">Ctrl</kbd>+<kbd class="kbd-shortcut__modifier">K</kbd></span>
</button>
</div>
<button class="pst-navbar-icon sidebar-toggle secondary-toggle" aria-label="On this page">
<span class="fa-solid fa-outdent"></span>
</button>
</div>
</header>
<div class="bd-container">
<div class="bd-container__inner bd-page-width">
<dialog id="pst-primary-sidebar-modal"></dialog>
<div id="pst-primary-sidebar" class="bd-sidebar-primary bd-sidebar">
<a class="navbar-brand logo" href="../index.html">
<img src="../_static/nvidia-logo-horiz-rgb-blk-for-screen.svg" class="logo__image only-light" alt="TensorRT-LLM - Home"/>
<img src="../_static/nvidia-logo-horiz-rgb-wht-for-screen.svg" class="logo__image only-dark pst-js-only" alt="TensorRT-LLM - Home"/>
<p class="title logo__title">TensorRT-LLM</p>
</a>
<div class="sidebar-header-items sidebar-primary__section">
<div class="sidebar-header-items__center">
<div class="navbar-item">
<div class="version-switcher__container dropdown pst-js-only">
<button id="pst-version-switcher-button-3"
type="button"
class="version-switcher__button btn btn-sm dropdown-toggle"
data-bs-toggle="dropdown"
aria-haspopup="listbox"
aria-controls="pst-version-switcher-list-3"
aria-label="Version switcher list"
>
Choose version <!-- this text may get changed later by javascript -->
<span class="caret"></span>
</button>
<div id="pst-version-switcher-list-3"
class="version-switcher__menu dropdown-menu list-group-flush py-0"
role="listbox" aria-labelledby="pst-version-switcher-button-3">
<!-- dropdown will be populated by javascript on page load -->
</div>
</div></div>
</div>
<div class="sidebar-header-items__end">
<div class="navbar-item">
<button class="btn btn-sm nav-link pst-navbar-icon theme-switch-button pst-js-only" aria-label="Color mode" data-bs-title="Color mode" data-bs-placement="bottom" data-bs-toggle="tooltip">
<i class="theme-switch fa-solid fa-sun fa-lg" data-mode="light" title="Light"></i>
<i class="theme-switch fa-solid fa-moon fa-lg" data-mode="dark" title="Dark"></i>
<i class="theme-switch fa-solid fa-circle-half-stroke fa-lg" data-mode="auto" title="System Settings"></i>
</button></div>
</div>
</div>
<div class="sidebar-primary-items__start sidebar-primary__section">
<div class="sidebar-primary-item">
<nav class="bd-docs-nav bd-links"
aria-label="Table of Contents">
<p class="bd-links__title" role="heading" aria-level="1">Table of Contents</p>
<div class="bd-toc-item navbar-nav"><p aria-level="2" class="caption" role="heading"><span class="caption-text">Getting Started</span></p>
<ul class="nav bd-sidenav">
<li class="toctree-l1"><a class="reference internal" href="../overview.html">Overview</a></li>
<li class="toctree-l1"><a class="reference internal" href="../quick-start-guide.html">Quick Start Guide</a></li>
<li class="toctree-l1"><a class="reference internal" href="../key-features.html">Key Features</a></li>
<li class="toctree-l1"><a class="reference internal" href="../torch.html">PyTorch Backend</a></li>
<li class="toctree-l1"><a class="reference internal" href="../release-notes.html">Release Notes</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Installation</span></p>
<ul class="nav bd-sidenav">
<li class="toctree-l1"><a class="reference internal" href="../installation/linux.html">Installing on Linux</a></li>
<li class="toctree-l1"><a class="reference internal" href="../installation/build-from-source-linux.html">Building from Source Code on Linux</a></li>
<li class="toctree-l1"><a class="reference internal" href="../installation/grace-hopper.html">Installing on Grace Hopper</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">LLM API</span></p>
<ul class="nav bd-sidenav">
<li class="toctree-l1"><a class="reference internal" href="../llm-api/index.html">API Introduction</a></li>
<li class="toctree-l1"><a class="reference internal" href="../llm-api/reference.html">API Reference</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Examples</span></p>
<ul class="nav bd-sidenav">
<li class="toctree-l1 has-children"><a class="reference internal" href="../examples/index.html">LLM Examples Introduction</a><details><summary><span class="toctree-toggle" role="presentation"><i class="fa-solid fa-chevron-down"></i></span></summary><ul>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_medusa_decoding.html">Generate Text Using Medusa Decoding</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_multilora.html">Generate text with multiple LoRA adapters</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_eagle_decoding.html">Generate Text Using Eagle Decoding</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_inference_async.html">Generate Text Asynchronously</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_inference_distributed.html">Distributed LLM Generation</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_logits_processor.html">Control generated text using logits processor</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_eagle2_decoding.html">Generate Text Using Eagle2 Decoding</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_inference_kv_events.html">Get KV Cache Events</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_lookahead_decoding.html">Generate Text Using Lookahead Decoding</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_quantization.html">Generation with Quantization</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_inference_async_streaming.html">Generate Text in Streaming</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_guided_decoding.html">Generate text with guided decoding</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_inference.html">Generate text</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_inference_customize.html">Generate text with customization</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_auto_parallel.html">Automatic Parallelism with LLM</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_mgmn_llm_distributed.html">Llm Mgmn Llm Distributed</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_mgmn_trtllm_bench.html">Llm Mgmn Trtllm Bench</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_mgmn_trtllm_serve.html">Llm Mgmn Trtllm Serve</a></li>
</ul>
</details></li>
<li class="toctree-l1"><a class="reference internal" href="../examples/customization.html">LLM Common Customizations</a></li>
<li class="toctree-l1 has-children"><a class="reference internal" href="../examples/llm_api_examples.html">LLM Examples</a><details><summary><span class="toctree-toggle" role="presentation"><i class="fa-solid fa-chevron-down"></i></span></summary><ul>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_medusa_decoding.html">Generate Text Using Medusa Decoding</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_multilora.html">Generate text with multiple LoRA adapters</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_eagle_decoding.html">Generate Text Using Eagle Decoding</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_inference_async.html">Generate Text Asynchronously</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_inference_distributed.html">Distributed LLM Generation</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_logits_processor.html">Control generated text using logits processor</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_eagle2_decoding.html">Generate Text Using Eagle2 Decoding</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_inference_kv_events.html">Get KV Cache Events</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_lookahead_decoding.html">Generate Text Using Lookahead Decoding</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_quantization.html">Generation with Quantization</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_inference_async_streaming.html">Generate Text in Streaming</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_guided_decoding.html">Generate text with guided decoding</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_inference.html">Generate text</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_inference_customize.html">Generate text with customization</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_auto_parallel.html">Automatic Parallelism with LLM</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_mgmn_llm_distributed.html">Llm Mgmn Llm Distributed</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_mgmn_trtllm_bench.html">Llm Mgmn Trtllm Bench</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/llm_mgmn_trtllm_serve.html">Llm Mgmn Trtllm Serve</a></li>
</ul>
</details></li>
<li class="toctree-l1 has-children"><a class="reference internal" href="../examples/trtllm_serve_examples.html">Online Serving Examples</a><details><summary><span class="toctree-toggle" role="presentation"><i class="fa-solid fa-chevron-down"></i></span></summary><ul>
<li class="toctree-l2"><a class="reference internal" href="../examples/curl_chat_client.html">Curl Chat Client</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/curl_chat_client_for_multimodal.html">Curl Chat Client For Multimodal</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/curl_completion_client.html">Curl Completion Client</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/deepseek_r1_reasoning_parser.html">Deepseek R1 Reasoning Parser</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/genai_perf_client.html">Genai Perf Client</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/genai_perf_client_for_multimodal.html">Genai Perf Client For Multimodal</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/openai_chat_client.html">OpenAI Chat Client</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/openai_chat_client_for_multimodal.html">OpenAI Chat Client</a></li>
<li class="toctree-l2"><a class="reference internal" href="../examples/openai_completion_client.html">OpenAI Completion Client</a></li>
</ul>
</details></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Model Definition API</span></p>
<ul class="nav bd-sidenav">
<li class="toctree-l1"><a class="reference internal" href="../python-api/tensorrt_llm.layers.html">Layers</a></li>
<li class="toctree-l1"><a class="reference internal" href="../python-api/tensorrt_llm.functional.html">Functionals</a></li>
<li class="toctree-l1"><a class="reference internal" href="../python-api/tensorrt_llm.models.html">Models</a></li>
<li class="toctree-l1"><a class="reference internal" href="../python-api/tensorrt_llm.plugin.html">Plugin</a></li>
<li class="toctree-l1"><a class="reference internal" href="../python-api/tensorrt_llm.quantization.html">Quantization</a></li>
<li class="toctree-l1"><a class="reference internal" href="../python-api/tensorrt_llm.runtime.html">Runtime</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">C++ API</span></p>
<ul class="nav bd-sidenav">
<li class="toctree-l1"><a class="reference internal" href="../_cpp_gen/executor.html">Executor</a></li>
<li class="toctree-l1"><a class="reference internal" href="../_cpp_gen/runtime.html">Runtime</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Command-Line Reference</span></p>
<ul class="nav bd-sidenav">
<li class="toctree-l1"><a class="reference internal" href="../commands/trtllm-build.html">trtllm-build</a></li>
<li class="toctree-l1"><a class="reference internal" href="../commands/trtllm-serve.html">trtllm-serve</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Architecture</span></p>
<ul class="nav bd-sidenav">
<li class="toctree-l1"><a class="reference internal" href="../architecture/overview.html">TensorRT-LLM Architecture</a></li>
<li class="toctree-l1"><a class="reference internal" href="../architecture/core-concepts.html">Model Definition</a></li>
<li class="toctree-l1"><a class="reference internal" href="../architecture/checkpoint.html">TensorRT-LLM Checkpoint</a></li>
<li class="toctree-l1"><a class="reference internal" href="../architecture/workflow.html">TensorRT-LLM Build Workflow</a></li>
<li class="toctree-l1"><a class="reference internal" href="../architecture/add-model.html">Adding a Model</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Advanced</span></p>
<ul class="nav bd-sidenav">
<li class="toctree-l1"><a class="reference internal" href="gpt-attention.html">Multi-Head, Multi-Query, and Group-Query Attention</a></li>
<li class="toctree-l1"><a class="reference internal" href="gpt-runtime.html">C++ GPT Runtime</a></li>
<li class="toctree-l1"><a class="reference internal" href="executor.html">Executor API</a></li>
<li class="toctree-l1"><a class="reference internal" href="graph-rewriting.html">Graph Rewriting Module</a></li>
<li class="toctree-l1"><a class="reference internal" href="lora.html">Run gpt-2b + LoRA using Executor / cpp runtime</a></li>
<li class="toctree-l1"><a class="reference internal" href="expert-parallelism.html">Expert Parallelism in TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="kv-cache-management.html">KV Cache Management: Pools, Blocks, and Events</a></li>
<li class="toctree-l1"><a class="reference internal" href="kv-cache-reuse.html">KV cache reuse</a></li>
<li class="toctree-l1"><a class="reference internal" href="speculative-decoding.html">Speculative Sampling</a></li>
<li class="toctree-l1"><a class="reference internal" href="disaggregated-service.html">Disaggregated-Service (experimental)</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Performance</span></p>
<ul class="nav bd-sidenav">
<li class="toctree-l1"><a class="reference internal" href="../performance/perf-overview.html">Overview</a></li>
<li class="toctree-l1"><a class="reference internal" href="../performance/perf-benchmarking.html">Benchmarking</a></li>
<li class="toctree-l1 has-children"><a class="reference internal" href="../performance/performance-tuning-guide/index.html">Performance Tuning Guide</a><details><summary><span class="toctree-toggle" role="presentation"><i class="fa-solid fa-chevron-down"></i></span></summary><ul>
<li class="toctree-l2"><a class="reference internal" href="../performance/performance-tuning-guide/benchmarking-default-performance.html">Benchmarking Default Performance</a></li>
<li class="toctree-l2"><a class="reference internal" href="../performance/performance-tuning-guide/useful-build-time-flags.html">Useful Build-Time Flags</a></li>
<li class="toctree-l2"><a class="reference internal" href="../performance/performance-tuning-guide/tuning-max-batch-size-and-max-num-tokens.html">Tuning Max Batch Size and Max Num Tokens</a></li>
<li class="toctree-l2"><a class="reference internal" href="../performance/performance-tuning-guide/deciding-model-sharding-strategy.html">Deciding Model Sharding Strategy</a></li>
<li class="toctree-l2"><a class="reference internal" href="../performance/performance-tuning-guide/fp8-quantization.html">FP8 Quantization</a></li>
<li class="toctree-l2"><a class="reference internal" href="../performance/performance-tuning-guide/useful-runtime-flags.html">Useful Runtime Options</a></li>
</ul>
</details></li>
<li class="toctree-l1"><a class="reference internal" href="../performance/perf-analysis.html">Performance Analysis</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Reference</span></p>
<ul class="nav bd-sidenav">
<li class="toctree-l1"><a class="reference internal" href="../reference/troubleshooting.html">Troubleshooting</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
<li class="toctree-l1"><a class="reference internal" href="../blogs/H100vsA100.html">H100 has 4.6x A100 Performance in TensorRT-LLM, achieving 10,000 tok/s at 100ms to first token</a></li>
<li class="toctree-l1"><a class="reference internal" href="../blogs/H200launch.html">H200 achieves nearly 12,000 tokens/sec on Llama2-13B with TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../blogs/Falcon180B-H200.html">Falcon-180B on a single H200 GPU with INT4 AWQ, and 6.7x faster Llama-70B over A100</a></li>
<li class="toctree-l1"><a class="reference internal" href="../blogs/quantization-in-TRT-LLM.html">Speed up inference with SOTA quantization techniques in TRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../blogs/XQA-kernel.html">New XQA-kernel provides 2.4x more Llama-70B throughput within the same latency budget</a></li>
<li class="toctree-l1"><a class="reference internal" href="../blogs/tech_blog/blog1_Pushing_Latency_Boundaries_Optimizing_DeepSeek-R1_Performance_on_NVIDIA_B200_GPUs.html">Pushing Latency Boundaries: Optimizing DeepSeek-R1 Performance on NVIDIA B200 GPUs</a></li>
<li class="toctree-l1"><a class="reference internal" href="../blogs/tech_blog/blog2_DeepSeek_R1_MTP_Implementation_and_Optimization.html">DeepSeek R1 MTP Implementation and Optimization</a></li>
</ul>
</div>
</nav></div>
</div>
<div class="sidebar-primary-items__end sidebar-primary__section">
</div>
</div>
<main id="main-content" class="bd-main" role="main">
<div class="bd-content">
<div class="bd-article-container">
<div class="bd-header-article d-print-none">
<div class="header-article-items header-article__inner">
<div class="header-article-items__start">
<div class="header-article-item">
<nav aria-label="Breadcrumb" class="d-print-none">
<ul class="bd-breadcrumbs">
<li class="breadcrumb-item breadcrumb-home">
<a href="../index.html" class="nav-link" aria-label="Home">
<i class="fa-solid fa-home"></i>
</a>
</li>
<li class="breadcrumb-item active" aria-current="page"><span class="ellipsis">&lt;no title&gt;</span></li>
</ul>
</nav>
</div>
</div>
</div>
</div>
<div id="searchbox"></div>
<article class="bd-article">
<p>We have recently open-sourced a set of Cutlass kernels previously known as “internal_cutlass_kernels”. Because of internal dependencies, these kernels had been available to users only as static libraries. We have now decoupled those dependencies and made the kernels available as source code.</p>
<p>The open-sourced Cutlass kernels are on the path <code class="docutils literal notranslate"><span class="pre">cpp/tensorrt_llm/kernels/cutlass_kernels</span></code>, including:</p>
<ul class="simple">
<li><p><code class="docutils literal notranslate"><span class="pre">low_latency_gemm</span></code></p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">moe_gemm</span></code></p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">fp4_gemm</span></code></p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">allreduce_gemm</span></code></p></li>
</ul>
<p>To ensure stability and preserve optimized performance, the previous method of calling these kernels through static libraries remains available as an alternative. You can switch between the open-source and static-library Cutlass kernels through the <code class="docutils literal notranslate"><span class="pre">USING_OSS_CUTLASS_*</span></code> macros (where * is the specific kernel name), enabling kernel-level control. By default, the open-source Cutlass kernels are used.
Note that support for the static libraries will be gradually deprioritized and may eventually be deprecated.</p>
<p><strong>Default Configuration (Using open-sourced Cutlass Kernels)</strong></p>
<p>To build using the open-source Cutlass kernels (default setting), run:</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>python3<span class="w"> </span>./scripts/build_wheel.py<span class="w"> </span>--cuda_architectures<span class="w"> </span><span class="s2">&quot;90-real;100-real&quot;</span>
</pre></div>
</div>
<p><strong>Using Static Library Cutlass Kernels</strong></p>
<p>If you prefer to use the Cutlass kernels from the static library, you can control this during compilation by setting the <code class="docutils literal notranslate"><span class="pre">USING_OSS_CUTLASS_*</span></code> macro to <code class="docutils literal notranslate"><span class="pre">OFF</span></code>. For example, to use the static library implementation specifically for <code class="docutils literal notranslate"><span class="pre">low_latency_gemm</span></code> and <code class="docutils literal notranslate"><span class="pre">moe_gemm</span></code> while keeping other kernels as OSS, use the following compilation command:</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>python3<span class="w"> </span>./scripts/build_wheel.py<span class="w"> </span>--cuda_architectures<span class="w"> </span><span class="s2">&quot;90-real;100-real&quot;</span><span class="w"> </span>-D<span class="w"> </span><span class="s2">&quot;USING_OSS_CUTLASS_MOE_GEMM=OFF;USING_OSS_CUTLASS_LOW_LATENCY_GEMM=OFF&quot;</span>
</pre></div>
</div>
</article>
<footer class="prev-next-footer d-print-none">
<div class="prev-next-area">
</div>
</footer>
</div>
<dialog id="pst-secondary-sidebar-modal"></dialog>
<div id="pst-secondary-sidebar" class="bd-sidebar-secondary bd-toc"><div class="sidebar-secondary-items sidebar-secondary__inner">
<div class="sidebar-secondary-item">
<div
id="pst-page-navigation-heading-2"
class="page-toc tocsection onthispage">
<i class="fa-solid fa-list"></i> On this page
</div>
<nav class="bd-toc-nav page-toc" aria-labelledby="pst-page-navigation-heading-2">
<ul class="simple visible nav section-nav flex-column">
</ul>
</nav></div>
</div></div>
</div>
<footer class="bd-footer-content">
</footer>
</main>
</div>
</div>
<!-- Scripts loaded after <body> so the DOM is not blocked -->
<script defer src="../_static/scripts/bootstrap.js?digest=8878045cc6db502f8baf"></script>
<script defer src="../_static/scripts/pydata-sphinx-theme.js?digest=8878045cc6db502f8baf"></script>
<footer class="bd-footer">
<div class="bd-footer__inner bd-page-width">
<div class="footer-items__start">
<div class="footer-item">
<a class="footer-brand logo" href="https://www.nvidia.com">
<img src="../_static/nvidia-logo-horiz-rgb-1c-blk-for-screen.svg" class="logo__image only-light" alt="NVIDIA"/>
<img src="../_static/nvidia-logo-horiz-rgb-1c-wht-for-screen.svg" class="logo__image only-dark" alt="NVIDIA"/>
</a></div>
<div class="footer-item">
<div class="footer-links">
<a class="external" href="https://www.nvidia.com/en-us/about-nvidia/privacy-policy/">Privacy Policy</a>
|
<a class="external" href="https://www.nvidia.com/en-us/about-nvidia/privacy-center/">Manage My Privacy</a>
|
<a class="external" href="https://www.nvidia.com/en-us/preferences/start/">Do Not Sell or Share My Data</a>
|
<a class="external" href="https://www.nvidia.com/en-us/about-nvidia/terms-of-service/">Terms of Service</a>
|
<a class="external" href="https://www.nvidia.com/en-us/about-nvidia/accessibility/">Accessibility</a>
|
<a class="external" href="https://www.nvidia.com/en-us/about-nvidia/company-policies/">Corporate Policies</a>
|
<a class="external" href="https://www.nvidia.com/en-us/product-security/">Product Security</a>
|
<a class="external" href="https://www.nvidia.com/en-us/contact/">Contact</a>
</div>
</div>
<div class="footer-item">
<p class="copyright">
Copyright © 2025, NVIDIA.
<br/>
</p>
</div>
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>
</div>
</div>
</footer>
</body>
</html>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -449,6 +449,7 @@
<li class="toctree-l1"><a class="reference internal" href="../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -839,9 +840,9 @@ However, similar to any new model, you can follow the same approach to define yo
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -61,7 +61,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -447,6 +447,7 @@
<li class="toctree-l1"><a class="reference internal" href="../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -688,9 +689,9 @@ python3<span class="w"> </span>examples/summarize.py<span class="w"> </span><spa
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc1';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '0.21.0rc2';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="0.21.0rc1" />
<meta name="docsearch:version" content="0.21.0rc2" />
</head>
@ -449,6 +449,7 @@
<li class="toctree-l1"><a class="reference internal" href="../reference/support-matrix.html">Support Matrix</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/precision.html">Numerical Precision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/memory.html">Memory Usage of TensorRT-LLM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/ci-overview.html">Continuous Integration Overview</a></li>
</ul>
<p aria-level="2" class="caption" role="heading"><span class="caption-text">Blogs</span></p>
<ul class="nav bd-sidenav">
@ -750,9 +751,9 @@ python<span class="w"> </span>../summarize.py<span class="w"> </span>--engine_di
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 09, 2025.</p>
<p>Last updated on June 16, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/9c012d5">9c012d5</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/8445416">8445416</a>.</p>
</div></div>

Some files were not shown because too many files have changed in this diff.