Update latest GitHub pages to v1.0.0rc1

This commit is contained in:
Kaiyu Xie 2025-07-01 09:49:04 +00:00
parent c3843d7ed6
commit 522f912bf7
184 changed files with 1447 additions and 1303 deletions

View File

@@ -1,4 +1,4 @@
# Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
config: 5dd2b8f29ac03c9c53f8ad8ba1fb6dcc
config: 812228e223c943ca4d4a375a1c33a00f
tags: 645f666f9bcd5a90fca523b33c5a78b7

View File

@@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@@ -3007,10 +3007,10 @@
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm8executor13Serialization9serializeERK17SpecDecodingStatsRNSt7ostreamE">
<span id="_CPPv3N12tensorrt_llm8executor13Serialization9serializeERK17SpecDecodingStatsRNSt7ostreamE"></span><span id="_CPPv2N12tensorrt_llm8executor13Serialization9serializeERK17SpecDecodingStatsRNSt7ostreamE"></span><span id="tensorrt_llm::executor::Serialization::serialize__SpecDecodingStatsCR.osR"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1Serialization_1a6f15b088fba6d48faa1bff296326bb8e"></span><span class="k"><span class="pre">static</span></span><span class="w"> </span><span class="kt"><span class="pre">void</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">serialize</span></span></span><span class="sig-paren">(</span>
<span id="_CPPv3N12tensorrt_llm8executor13Serialization9serializeERK17SpecDecodingStatsRNSt7ostreamE"></span><span id="_CPPv2N12tensorrt_llm8executor13Serialization9serializeERK17SpecDecodingStatsRNSt7ostreamE"></span><span id="tensorrt_llm::executor::Serialization::serialize__SpecDecodingStatsCR.osR"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1Serialization_1aec84ae79883f50f60dfd65e16031cb39"></span><span class="k"><span class="pre">static</span></span><span class="w"> </span><span class="kt"><span class="pre">void</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">serialize</span></span></span><span class="sig-paren">(</span>
<dl>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor17SpecDecodingStatsE" title="tensorrt_llm::executor::SpecDecodingStats"><span class="n"><span class="pre">SpecDecodingStats</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">specDecStats</span></span></em>,</dd>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor17SpecDecodingStatsE" title="tensorrt_llm::executor::SpecDecodingStats"><span class="n"><span class="pre">SpecDecodingStats</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">specDecodingStats</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">ostream</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">os</span></span></em>,</dd>
</dl>
@@ -3019,7 +3019,13 @@
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm8executor13Serialization14serializedSizeERK17SpecDecodingStats">
<span id="_CPPv3N12tensorrt_llm8executor13Serialization14serializedSizeERK17SpecDecodingStats"></span><span id="_CPPv2N12tensorrt_llm8executor13Serialization14serializedSizeERK17SpecDecodingStats"></span><span id="tensorrt_llm::executor::Serialization::serializedSize__SpecDecodingStatsCR"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1Serialization_1a1d3e364fe8e5cabe5371766da8e5dbf0"></span><span class="k"><span class="pre">static</span></span><span class="w"> </span><span class="n"><span class="pre">size_t</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">serializedSize</span></span></span><span class="sig-paren">(</span><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor17SpecDecodingStatsE" title="tensorrt_llm::executor::SpecDecodingStats"><span class="n"><span class="pre">SpecDecodingStats</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">specDecStats</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv4N12tensorrt_llm8executor13Serialization14serializedSizeERK17SpecDecodingStats" title="Link to this definition">#</a><br /></dt>
<span id="_CPPv3N12tensorrt_llm8executor13Serialization14serializedSizeERK17SpecDecodingStats"></span><span id="_CPPv2N12tensorrt_llm8executor13Serialization14serializedSizeERK17SpecDecodingStats"></span><span id="tensorrt_llm::executor::Serialization::serializedSize__SpecDecodingStatsCR"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1Serialization_1afc687fe2efc5c0d3dca4b056c3d2f240"></span><span class="k"><span class="pre">static</span></span><span class="w"> </span><span class="n"><span class="pre">size_t</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">serializedSize</span></span></span><span class="sig-paren">(</span>
<dl>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor17SpecDecodingStatsE" title="tensorrt_llm::executor::SpecDecodingStats"><span class="n"><span class="pre">SpecDecodingStats</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">specDecodingStats</span></span></em>,</dd>
</dl>
<span class="sig-paren">)</span><a class="headerlink" href="#_CPPv4N12tensorrt_llm8executor13Serialization14serializedSizeERK17SpecDecodingStats" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
<dl class="cpp function">
@@ -4442,8 +4448,8 @@
</dd></dl>
<dl class="cpp var">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm8executor14IterationStats12specDecStatsE">
<span id="_CPPv3N12tensorrt_llm8executor14IterationStats12specDecStatsE"></span><span id="_CPPv2N12tensorrt_llm8executor14IterationStats12specDecStatsE"></span><span id="tensorrt_llm::executor::IterationStats::specDecStats__std::optional:SpecDecodingStats:"></span><span class="target" id="structtensorrt__llm_1_1executor_1_1IterationStats_1a053228e657564091c3de901f262523b6"></span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">optional</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor17SpecDecodingStatsE" title="tensorrt_llm::executor::SpecDecodingStats"><span class="n"><span class="pre">SpecDecodingStats</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">specDecStats</span></span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm8executor14IterationStats12specDecStatsE" title="Link to this definition">#</a><br /></dt>
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm8executor14IterationStats17specDecodingStatsE">
<span id="_CPPv3N12tensorrt_llm8executor14IterationStats17specDecodingStatsE"></span><span id="_CPPv2N12tensorrt_llm8executor14IterationStats17specDecodingStatsE"></span><span id="tensorrt_llm::executor::IterationStats::specDecodingStats__std::optional:SpecDecodingStats:"></span><span class="target" id="structtensorrt__llm_1_1executor_1_1IterationStats_1a5dfb69ab8a08d55901d6011307f5adb0"></span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">optional</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor17SpecDecodingStatsE" title="tensorrt_llm::executor::SpecDecodingStats"><span class="n"><span class="pre">SpecDecodingStats</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">specDecodingStats</span></span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm8executor14IterationStats17specDecodingStatsE" title="Link to this definition">#</a><br /></dt>
<dd><p>Stats specific to speculative decoding. </p>
</dd></dl>
@@ -7226,8 +7232,8 @@
<div class="breathe-sectiondef docutils container">
<p class="breathe-sectiondef-title rubric" id="breathe-section-title-public-functions">Public Functions</p>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm8executor13KvCacheConfig13KvCacheConfigEbRKNSt8optionalI10SizeType32EERKNSt8optionalINSt6vectorI10SizeType32EEEERKNSt8optionalI10SizeType32EERKNSt8optionalI9FloatTypeEERKNSt8optionalI6size_tEEbRKNSt8optionalI9FloatTypeEENSt8optionalI17RetentionPriorityEE6size_tRKNSt8optionalIN12tensorrt_llm7runtime15RuntimeDefaultsEEEbb">
<span id="_CPPv3N12tensorrt_llm8executor13KvCacheConfig13KvCacheConfigEbRKNSt8optionalI10SizeType32EERKNSt8optionalINSt6vectorI10SizeType32EEEERKNSt8optionalI10SizeType32EERKNSt8optionalI9FloatTypeEERKNSt8optionalI6size_tEEbRKNSt8optionalI9FloatTypeEENSt8optionalI17RetentionPriorityEE6size_tRKNSt8optionalIN12tensorrt_llm7runtime15RuntimeDefaultsEEEbb"></span><span id="_CPPv2N12tensorrt_llm8executor13KvCacheConfig13KvCacheConfigEbRKNSt8optionalI10SizeType32EERKNSt8optionalINSt6vectorI10SizeType32EEEERKNSt8optionalI10SizeType32EERKNSt8optionalI9FloatTypeEERKNSt8optionalI6size_tEEbRKNSt8optionalI9FloatTypeEENSt8optionalI17RetentionPriorityEE6size_tRKNSt8optionalIN12tensorrt_llm7runtime15RuntimeDefaultsEEEbb"></span><span id="tensorrt_llm::executor::KvCacheConfig::KvCacheConfig__b.std::optional:SizeType32:CR.std::optional:std::vector:SizeType32::CR.std::optional:SizeType32:CR.std::optional:FloatType:CR.std::optional:s:CR.b.std::optional:FloatType:CR.std::optional:RetentionPriority:.s.std::optional:tensorrt_llm::runtime::RuntimeDefaults:CR.b.b"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1KvCacheConfig_1a5ed3a509d5024545a006ee3a07e529b0"></span><span class="k"><span class="pre">explicit</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">KvCacheConfig</span></span></span><span class="sig-paren">(</span>
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm8executor13KvCacheConfig13KvCacheConfigEbRKNSt8optionalI10SizeType32EERKNSt8optionalINSt6vectorI10SizeType32EEEERKNSt8optionalI10SizeType32EERKNSt8optionalI9FloatTypeEERKNSt8optionalI6size_tEEbRKNSt8optionalI9FloatTypeEENSt8optionalI17RetentionPriorityEE6size_tbbbRKNSt8optionalIN12tensorrt_llm7runtime15RuntimeDefaultsEEE">
<span id="_CPPv3N12tensorrt_llm8executor13KvCacheConfig13KvCacheConfigEbRKNSt8optionalI10SizeType32EERKNSt8optionalINSt6vectorI10SizeType32EEEERKNSt8optionalI10SizeType32EERKNSt8optionalI9FloatTypeEERKNSt8optionalI6size_tEEbRKNSt8optionalI9FloatTypeEENSt8optionalI17RetentionPriorityEE6size_tbbbRKNSt8optionalIN12tensorrt_llm7runtime15RuntimeDefaultsEEE"></span><span id="_CPPv2N12tensorrt_llm8executor13KvCacheConfig13KvCacheConfigEbRKNSt8optionalI10SizeType32EERKNSt8optionalINSt6vectorI10SizeType32EEEERKNSt8optionalI10SizeType32EERKNSt8optionalI9FloatTypeEERKNSt8optionalI6size_tEEbRKNSt8optionalI9FloatTypeEENSt8optionalI17RetentionPriorityEE6size_tbbbRKNSt8optionalIN12tensorrt_llm7runtime15RuntimeDefaultsEEE"></span><span id="tensorrt_llm::executor::KvCacheConfig::KvCacheConfig__b.std::optional:SizeType32:CR.std::optional:std::vector:SizeType32::CR.std::optional:SizeType32:CR.std::optional:FloatType:CR.std::optional:s:CR.b.std::optional:FloatType:CR.std::optional:RetentionPriority:.s.b.b.b.std::optional:tensorrt_llm::runtime::RuntimeDefaults:CR"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1KvCacheConfig_1abae58a20e9b9827a44d4b33fb63512c3"></span><span class="k"><span class="pre">explicit</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">KvCacheConfig</span></span></span><span class="sig-paren">(</span>
<dl>
<dd><em class="sig-param"><span class="kt"><span class="pre">bool</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">enableBlockReuse</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="k"><span class="pre">true</span></span></em>,</dd>
@@ -7240,12 +7246,13 @@
<dd><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">optional</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor9FloatTypeE" title="tensorrt_llm::executor::FloatType"><span class="n"><span class="pre">FloatType</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">crossKvCacheFraction</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">nullopt</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">optional</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm8executor17RetentionPriorityE" title="tensorrt_llm::executor::RetentionPriority"><span class="n"><span class="pre">RetentionPriority</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">secondaryOffloadMinPriority</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">nullopt</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">size_t</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">eventBufferMaxSize</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="m"><span class="pre">0</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">optional</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv412tensorrt_llm" title="tensorrt_llm"><span class="n"><span class="pre">tensorrt_llm</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtimeE" title="tensorrt_llm::runtime"><span class="n"><span class="pre">runtime</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="runtime.html#_CPPv4N12tensorrt_llm7runtime15RuntimeDefaultsE" title="tensorrt_llm::runtime::RuntimeDefaults"><span class="n"><span class="pre">RuntimeDefaults</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">runtimeDefaults</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">nullopt</span></span></em>,</dd>
<dd><em class="sig-param"><span class="kt"><span class="pre">bool</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">enablePartialReuse</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="k"><span class="pre">true</span></span></em>,</dd>
<dd><em class="sig-param"><span class="kt"><span class="pre">bool</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">copyOnPartialReuse</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="k"><span class="pre">true</span></span></em>,</dd>
<dd><em class="sig-param"><span class="kt"><span class="pre">bool</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">useUvm</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="k"><span class="pre">false</span></span></em>,</dd>
<dd><em class="sig-param"><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">optional</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv412tensorrt_llm" title="tensorrt_llm"><span class="n"><span class="pre">tensorrt_llm</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtimeE" title="tensorrt_llm::runtime"><span class="n"><span class="pre">runtime</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="runtime.html#_CPPv4N12tensorrt_llm7runtime15RuntimeDefaultsE" title="tensorrt_llm::runtime::RuntimeDefaults"><span class="n"><span class="pre">RuntimeDefaults</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">runtimeDefaults</span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">nullopt</span></span></em>,</dd>
</dl>
<span class="sig-paren">)</span><a class="headerlink" href="#_CPPv4N12tensorrt_llm8executor13KvCacheConfig13KvCacheConfigEbRKNSt8optionalI10SizeType32EERKNSt8optionalINSt6vectorI10SizeType32EEEERKNSt8optionalI10SizeType32EERKNSt8optionalI9FloatTypeEERKNSt8optionalI6size_tEEbRKNSt8optionalI9FloatTypeEENSt8optionalI17RetentionPriorityEE6size_tRKNSt8optionalIN12tensorrt_llm7runtime15RuntimeDefaultsEEEbb" title="Link to this definition">#</a><br /></dt>
<span class="sig-paren">)</span><a class="headerlink" href="#_CPPv4N12tensorrt_llm8executor13KvCacheConfig13KvCacheConfigEbRKNSt8optionalI10SizeType32EERKNSt8optionalINSt6vectorI10SizeType32EEEERKNSt8optionalI10SizeType32EERKNSt8optionalI9FloatTypeEERKNSt8optionalI6size_tEEbRKNSt8optionalI9FloatTypeEENSt8optionalI17RetentionPriorityEE6size_tbbbRKNSt8optionalIN12tensorrt_llm7runtime15RuntimeDefaultsEEE" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
<dl class="cpp function">
@@ -7318,6 +7325,11 @@
<span id="_CPPv3NK12tensorrt_llm8executor13KvCacheConfig21getEventBufferMaxSizeEv"></span><span id="_CPPv2NK12tensorrt_llm8executor13KvCacheConfig21getEventBufferMaxSizeEv"></span><span id="tensorrt_llm::executor::KvCacheConfig::getEventBufferMaxSizeC"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1KvCacheConfig_1afff662f63bfe3292d951951884aee2f2"></span><span class="n"><span class="pre">size_t</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">getEventBufferMaxSize</span></span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><span class="w"> </span><span class="k"><span class="pre">const</span></span><a class="headerlink" href="#_CPPv4NK12tensorrt_llm8executor13KvCacheConfig21getEventBufferMaxSizeEv" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4NK12tensorrt_llm8executor13KvCacheConfig9getUseUvmEv">
<span id="_CPPv3NK12tensorrt_llm8executor13KvCacheConfig9getUseUvmEv"></span><span id="_CPPv2NK12tensorrt_llm8executor13KvCacheConfig9getUseUvmEv"></span><span id="tensorrt_llm::executor::KvCacheConfig::getUseUvmC"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1KvCacheConfig_1a9b7d62d000c4beda7c474a4068c99362"></span><span class="kt"><span class="pre">bool</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">getUseUvm</span></span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><span class="w"> </span><span class="k"><span class="pre">const</span></span><a class="headerlink" href="#_CPPv4NK12tensorrt_llm8executor13KvCacheConfig9getUseUvmEv" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm8executor13KvCacheConfig19setEnableBlockReuseEb">
<span id="_CPPv3N12tensorrt_llm8executor13KvCacheConfig19setEnableBlockReuseEb"></span><span id="_CPPv2N12tensorrt_llm8executor13KvCacheConfig19setEnableBlockReuseEb"></span><span id="tensorrt_llm::executor::KvCacheConfig::setEnableBlockReuse__b"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1KvCacheConfig_1a934f7362c2d931489c7df5c86a5c19c1"></span><span class="kt"><span class="pre">void</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">setEnableBlockReuse</span></span></span><span class="sig-paren">(</span><em class="sig-param"><span class="kt"><span class="pre">bool</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">enableBlockReuse</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv4N12tensorrt_llm8executor13KvCacheConfig19setEnableBlockReuseEb" title="Link to this definition">#</a><br /></dt>
@@ -7391,14 +7403,27 @@
<dd></dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm8executor13KvCacheConfig34fillEmptyFieldsFromRuntimeDefaultsEN12tensorrt_llm7runtime15RuntimeDefaultsE">
<span id="_CPPv3N12tensorrt_llm8executor13KvCacheConfig34fillEmptyFieldsFromRuntimeDefaultsEN12tensorrt_llm7runtime15RuntimeDefaultsE"></span><span id="_CPPv2N12tensorrt_llm8executor13KvCacheConfig34fillEmptyFieldsFromRuntimeDefaultsEN12tensorrt_llm7runtime15RuntimeDefaultsE"></span><span id="tensorrt_llm::executor::KvCacheConfig::fillEmptyFieldsFromRuntimeDefaults__tensorrt_llm::runtime::RuntimeDefaults"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1KvCacheConfig_1afbaa7fe2589b066f9e95d85fbae894c9"></span><span class="kt"><span class="pre">void</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">fillEmptyFieldsFromRuntimeDefaults</span></span></span><span class="sig-paren">(</span>
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm8executor13KvCacheConfig9setUseUvmEb">
<span id="_CPPv3N12tensorrt_llm8executor13KvCacheConfig9setUseUvmEb"></span><span id="_CPPv2N12tensorrt_llm8executor13KvCacheConfig9setUseUvmEb"></span><span id="tensorrt_llm::executor::KvCacheConfig::setUseUvm__b"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1KvCacheConfig_1a87475c188ab07417058dd7166334662b"></span><span class="kt"><span class="pre">void</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">setUseUvm</span></span></span><span class="sig-paren">(</span><em class="sig-param"><span class="kt"><span class="pre">bool</span></span><span class="w"> </span><span class="n sig-param"><span class="pre">useUvm</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv4N12tensorrt_llm8executor13KvCacheConfig9setUseUvmEb" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm8executor13KvCacheConfig34fillEmptyFieldsFromRuntimeDefaultsERKN12tensorrt_llm7runtime15RuntimeDefaultsE">
<span id="_CPPv3N12tensorrt_llm8executor13KvCacheConfig34fillEmptyFieldsFromRuntimeDefaultsERKN12tensorrt_llm7runtime15RuntimeDefaultsE"></span><span id="_CPPv2N12tensorrt_llm8executor13KvCacheConfig34fillEmptyFieldsFromRuntimeDefaultsERKN12tensorrt_llm7runtime15RuntimeDefaultsE"></span><span id="tensorrt_llm::executor::KvCacheConfig::fillEmptyFieldsFromRuntimeDefaults__tensorrt_llm::runtime::RuntimeDefaultsCR"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1KvCacheConfig_1ab0e5cb93a75e85e03bd662fa122701a0"></span><span class="kt"><span class="pre">void</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">fillEmptyFieldsFromRuntimeDefaults</span></span></span><span class="sig-paren">(</span>
<dl>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv412tensorrt_llm" title="tensorrt_llm"><span class="n"><span class="pre">tensorrt_llm</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtimeE" title="tensorrt_llm::runtime"><span class="n"><span class="pre">runtime</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="runtime.html#_CPPv4N12tensorrt_llm7runtime15RuntimeDefaultsE" title="tensorrt_llm::runtime::RuntimeDefaults"><span class="n"><span class="pre">RuntimeDefaults</span></span></a><span class="w"> </span><span class="n sig-param"><span class="pre">runtimeDefaults</span></span></em>,</dd>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv412tensorrt_llm" title="tensorrt_llm"><span class="n"><span class="pre">tensorrt_llm</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtimeE" title="tensorrt_llm::runtime"><span class="n"><span class="pre">runtime</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="runtime.html#_CPPv4N12tensorrt_llm7runtime15RuntimeDefaultsE" title="tensorrt_llm::runtime::RuntimeDefaults"><span class="n"><span class="pre">RuntimeDefaults</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">runtimeDefaults</span></span></em>,</dd>
</dl>
<span class="sig-paren">)</span><a class="headerlink" href="#_CPPv4N12tensorrt_llm8executor13KvCacheConfig34fillEmptyFieldsFromRuntimeDefaultsEN12tensorrt_llm7runtime15RuntimeDefaultsE" title="Link to this definition">#</a><br /></dt>
<span class="sig-paren">)</span><a class="headerlink" href="#_CPPv4N12tensorrt_llm8executor13KvCacheConfig34fillEmptyFieldsFromRuntimeDefaultsERKN12tensorrt_llm7runtime15RuntimeDefaultsE" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
</div>
<div class="breathe-sectiondef docutils container">
<p class="breathe-sectiondef-title rubric" id="breathe-section-title-public-static-attributes">Public Static Attributes</p>
<dl class="cpp var">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm8executor13KvCacheConfig22kDefaultGpuMemFractionE">
<span id="_CPPv3N12tensorrt_llm8executor13KvCacheConfig22kDefaultGpuMemFractionE"></span><span id="_CPPv2N12tensorrt_llm8executor13KvCacheConfig22kDefaultGpuMemFractionE"></span><span id="tensorrt_llm::executor::KvCacheConfig::kDefaultGpuMemFraction__auto"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1KvCacheConfig_1ab1e9b2fa9ab05d17ac4b7a4686d6f4eb"></span><span class="k"><span class="pre">static</span></span><span class="w"> </span><span class="k"><span class="pre">constexpr</span></span><span class="w"> </span><span class="kt"><span class="pre">auto</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">kDefaultGpuMemFraction</span></span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="m"><span class="pre">0.9F</span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm8executor13KvCacheConfig22kDefaultGpuMemFractionE" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
</div>
@@ -7476,6 +7501,12 @@
<dd><p>Whether partially matched blocks that are in use can be reused after copying them. </p>
</dd></dl>
<dl class="cpp var">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm8executor13KvCacheConfig7mUseUvmE">
<span id="_CPPv3N12tensorrt_llm8executor13KvCacheConfig7mUseUvmE"></span><span id="_CPPv2N12tensorrt_llm8executor13KvCacheConfig7mUseUvmE"></span><span id="tensorrt_llm::executor::KvCacheConfig::mUseUvm__b"></span><span class="target" id="classtensorrt__llm_1_1executor_1_1KvCacheConfig_1a2b410a4337a0ad1a149fc1240ee78732"></span><span class="kt"><span class="pre">bool</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">mUseUvm</span></span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm8executor13KvCacheConfig7mUseUvmE" title="Link to this definition">#</a><br /></dt>
<dd><p>Whether to use UVM for the KV cache. </p>
</dd></dl>
</div>
<div class="breathe-sectiondef docutils container">
<p class="breathe-sectiondef-title rubric" id="breathe-section-title-friends">Friends</p>
@@ -11887,7 +11918,7 @@
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor14IterationStats17crossKvCacheStatsE"><code class="docutils literal notranslate"><span class="pre">crossKvCacheStats</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor14IterationStats19staticBatchingStatsE"><code class="docutils literal notranslate"><span class="pre">staticBatchingStats</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor14IterationStats21inflightBatchingStatsE"><code class="docutils literal notranslate"><span class="pre">inflightBatchingStats</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor14IterationStats12specDecStatsE"><code class="docutils literal notranslate"><span class="pre">specDecStats</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor14IterationStats17specDecodingStatsE"><code class="docutils literal notranslate"><span class="pre">specDecodingStats</span></code></a></li>
</ul>
</li>
<li class="toc-h3 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor12KvCacheStatsE"><code class="docutils literal notranslate"><span class="pre">tensorrt_llm::executor::KvCacheStats</span></code></a><ul class="nav section-nav flex-column">
@ -12336,7 +12367,7 @@
</ul>
</li>
<li class="toc-h3 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor13KvCacheConfigE"><code class="docutils literal notranslate"><span class="pre">tensorrt_llm::executor::KvCacheConfig</span></code></a><ul class="nav section-nav flex-column">
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor13KvCacheConfig13KvCacheConfigEbRKNSt8optionalI10SizeType32EERKNSt8optionalINSt6vectorI10SizeType32EEEERKNSt8optionalI10SizeType32EERKNSt8optionalI9FloatTypeEERKNSt8optionalI6size_tEEbRKNSt8optionalI9FloatTypeEENSt8optionalI17RetentionPriorityEE6size_tRKNSt8optionalIN12tensorrt_llm7runtime15RuntimeDefaultsEEEbb"><code class="docutils literal notranslate"><span class="pre">KvCacheConfig()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor13KvCacheConfig13KvCacheConfigEbRKNSt8optionalI10SizeType32EERKNSt8optionalINSt6vectorI10SizeType32EEEERKNSt8optionalI10SizeType32EERKNSt8optionalI9FloatTypeEERKNSt8optionalI6size_tEEbRKNSt8optionalI9FloatTypeEENSt8optionalI17RetentionPriorityEE6size_tbbbRKNSt8optionalIN12tensorrt_llm7runtime15RuntimeDefaultsEEE"><code class="docutils literal notranslate"><span class="pre">KvCacheConfig()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm8executor13KvCacheConfig19getEnableBlockReuseEv"><code class="docutils literal notranslate"><span class="pre">getEnableBlockReuse()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm8executor13KvCacheConfig21getEnablePartialReuseEv"><code class="docutils literal notranslate"><span class="pre">getEnablePartialReuse()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm8executor13KvCacheConfig21getCopyOnPartialReuseEv"><code class="docutils literal notranslate"><span class="pre">getCopyOnPartialReuse()</span></code></a></li>
@ -12349,6 +12380,7 @@
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm8executor13KvCacheConfig16getOnboardBlocksEv"><code class="docutils literal notranslate"><span class="pre">getOnboardBlocks()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm8executor13KvCacheConfig30getSecondaryOffloadMinPriorityEv"><code class="docutils literal notranslate"><span class="pre">getSecondaryOffloadMinPriority()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm8executor13KvCacheConfig21getEventBufferMaxSizeEv"><code class="docutils literal notranslate"><span class="pre">getEventBufferMaxSize()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm8executor13KvCacheConfig9getUseUvmEv"><code class="docutils literal notranslate"><span class="pre">getUseUvm()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor13KvCacheConfig19setEnableBlockReuseEb"><code class="docutils literal notranslate"><span class="pre">setEnableBlockReuse()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor13KvCacheConfig21setEnablePartialReuseEb"><code class="docutils literal notranslate"><span class="pre">setEnablePartialReuse()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor13KvCacheConfig21setCopyOnPartialReuseEb"><code class="docutils literal notranslate"><span class="pre">setCopyOnPartialReuse()</span></code></a></li>
@ -12361,7 +12393,9 @@
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor13KvCacheConfig16setOnboardBlocksEb"><code class="docutils literal notranslate"><span class="pre">setOnboardBlocks()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor13KvCacheConfig30setSecondaryOffloadMinPriorityENSt8optionalI17RetentionPriorityEE"><code class="docutils literal notranslate"><span class="pre">setSecondaryOffloadMinPriority()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor13KvCacheConfig21setEventBufferMaxSizeE6size_t"><code class="docutils literal notranslate"><span class="pre">setEventBufferMaxSize()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor13KvCacheConfig34fillEmptyFieldsFromRuntimeDefaultsEN12tensorrt_llm7runtime15RuntimeDefaultsE"><code class="docutils literal notranslate"><span class="pre">fillEmptyFieldsFromRuntimeDefaults()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor13KvCacheConfig9setUseUvmEb"><code class="docutils literal notranslate"><span class="pre">setUseUvm()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor13KvCacheConfig34fillEmptyFieldsFromRuntimeDefaultsERKN12tensorrt_llm7runtime15RuntimeDefaultsE"><code class="docutils literal notranslate"><span class="pre">fillEmptyFieldsFromRuntimeDefaults()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor13KvCacheConfig22kDefaultGpuMemFractionE"><code class="docutils literal notranslate"><span class="pre">kDefaultGpuMemFraction</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor13KvCacheConfig17mEnableBlockReuseE"><code class="docutils literal notranslate"><span class="pre">mEnableBlockReuse</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor13KvCacheConfig10mMaxTokensE"><code class="docutils literal notranslate"><span class="pre">mMaxTokens</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor13KvCacheConfig22mMaxAttentionWindowVecE"><code class="docutils literal notranslate"><span class="pre">mMaxAttentionWindowVec</span></code></a></li>
@ -12374,6 +12408,7 @@
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor13KvCacheConfig19mEventBufferMaxSizeE"><code class="docutils literal notranslate"><span class="pre">mEventBufferMaxSize</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor13KvCacheConfig19mEnablePartialReuseE"><code class="docutils literal notranslate"><span class="pre">mEnablePartialReuse</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor13KvCacheConfig19mCopyOnPartialReuseE"><code class="docutils literal notranslate"><span class="pre">mCopyOnPartialReuse</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor13KvCacheConfig7mUseUvmE"><code class="docutils literal notranslate"><span class="pre">mUseUvm</span></code></a></li>
</ul>
</li>
<li class="toc-h3 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm8executor18KVCacheCreatedDataE"><code class="docutils literal notranslate"><span class="pre">tensorrt_llm::executor::KVCacheCreatedData</span></code></a><ul class="nav section-nav flex-column">
@ -13050,9 +13085,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -9614,7 +9614,7 @@ one more than decoding draft tokens for prediction from primary head </p>
</dl>
<span class="sig-paren">)</span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="m"><span class="pre">0</span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime18IGptDecoderBatched5setupERKN8executor12DecodingModeE10SizeType3210SizeType32N8nvinfer18DataTypeERK11ModelConfigRK11WorldConfig" title="Link to this definition">#</a><br /></dt>
<dd><p>Setup the decoder before calling <code class="docutils literal notranslate"><a class="reference internal" href="#classtensorrt__llm_1_1runtime_1_1IGptDecoderBatched_1ab71a988f92d801a763c8b7b960fd0769"><span class="std std-ref"><span class="pre">forward()</span></span></a></code></p>
<dd><p>Setup the decoder before calling <code class="docutils literal notranslate"><a class="reference internal" href="#classtensorrt__llm_1_1runtime_1_1IGptDecoderBatched_1a4da8938fdafb368284faacaa41a488ba"><span class="std std-ref"><span class="pre">forward()</span></span></a></code></p>
</dd></dl>
<dl class="cpp function">
@ -9631,30 +9631,28 @@ one more than decoding draft tokens for prediction from primary head </p>
</dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime18IGptDecoderBatched12forwardAsyncERKN7decoder12DecoderStateERN13decoder_batch6OutputERKN13decoder_batch5InputE">
<span id="_CPPv3N12tensorrt_llm7runtime18IGptDecoderBatched12forwardAsyncERKN7decoder12DecoderStateERN13decoder_batch6OutputERKN13decoder_batch5InputE"></span><span id="_CPPv2N12tensorrt_llm7runtime18IGptDecoderBatched12forwardAsyncERKN7decoder12DecoderStateERN13decoder_batch6OutputERKN13decoder_batch5InputE"></span><span id="tensorrt_llm::runtime::IGptDecoderBatched::forwardAsync__decoder::DecoderStateCR.decoder_batch::OutputR.decoder_batch::InputCR"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1IGptDecoderBatched_1a654fbc257f26b53dadb65937899938c0"></span><span class="k"><span class="pre">virtual</span></span><span class="w"> </span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime9CudaEventE" title="tensorrt_llm::runtime::CudaEvent"><span class="n"><span class="pre">CudaEvent</span></span></a><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">forwardAsync</span></span></span><span class="sig-paren">(</span>
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime18IGptDecoderBatched12forwardAsyncERKN7decoder12DecoderStateERKN13decoder_batch5InputE">
<span id="_CPPv3N12tensorrt_llm7runtime18IGptDecoderBatched12forwardAsyncERKN7decoder12DecoderStateERKN13decoder_batch5InputE"></span><span id="_CPPv2N12tensorrt_llm7runtime18IGptDecoderBatched12forwardAsyncERKN7decoder12DecoderStateERKN13decoder_batch5InputE"></span><span id="tensorrt_llm::runtime::IGptDecoderBatched::forwardAsync__decoder::DecoderStateCR.decoder_batch::InputCR"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1IGptDecoderBatched_1a607be6a62cc79a01e7cdc638a2e0eb72"></span><span class="k"><span class="pre">virtual</span></span><span class="w"> </span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime9CudaEventE" title="tensorrt_llm::runtime::CudaEvent"><span class="n"><span class="pre">CudaEvent</span></span></a><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">forwardAsync</span></span></span><span class="sig-paren">(</span>
<dl>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime7decoderE" title="tensorrt_llm::runtime::decoder"><span class="n"><span class="pre">decoder</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime7decoder12DecoderStateE" title="tensorrt_llm::runtime::decoder::DecoderState"><span class="n"><span class="pre">DecoderState</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">decoderState</span></span></em>,</dd>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batchE" title="tensorrt_llm::runtime::decoder_batch"><span class="n"><span class="pre">decoder_batch</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch6OutputE" title="tensorrt_llm::runtime::decoder_batch::Output"><span class="n"><span class="pre">Output</span></span></a><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">output</span></span></em>,</dd>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batchE" title="tensorrt_llm::runtime::decoder_batch"><span class="n"><span class="pre">decoder_batch</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch5InputE" title="tensorrt_llm::runtime::decoder_batch::Input"><span class="n"><span class="pre">Input</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">input</span></span></em>,</dd>
</dl>
<span class="sig-paren">)</span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="m"><span class="pre">0</span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime18IGptDecoderBatched12forwardAsyncERKN7decoder12DecoderStateERN13decoder_batch6OutputERKN13decoder_batch5InputE" title="Link to this definition">#</a><br /></dt>
<span class="sig-paren">)</span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="m"><span class="pre">0</span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime18IGptDecoderBatched12forwardAsyncERKN7decoder12DecoderStateERKN13decoder_batch5InputE" title="Link to this definition">#</a><br /></dt>
<dd><p>Run one step for all requests without blocking the host process and return the token for synchronization. </p>
</dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime18IGptDecoderBatched7forwardERKN7decoder12DecoderStateERN13decoder_batch6OutputERKN13decoder_batch5InputE">
<span id="_CPPv3N12tensorrt_llm7runtime18IGptDecoderBatched7forwardERKN7decoder12DecoderStateERN13decoder_batch6OutputERKN13decoder_batch5InputE"></span><span id="_CPPv2N12tensorrt_llm7runtime18IGptDecoderBatched7forwardERKN7decoder12DecoderStateERN13decoder_batch6OutputERKN13decoder_batch5InputE"></span><span id="tensorrt_llm::runtime::IGptDecoderBatched::forward__decoder::DecoderStateCR.decoder_batch::OutputR.decoder_batch::InputCR"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1IGptDecoderBatched_1ab71a988f92d801a763c8b7b960fd0769"></span><span class="k"><span class="pre">virtual</span></span><span class="w"> </span><span class="kt"><span class="pre">void</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">forward</span></span></span><span class="sig-paren">(</span>
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime18IGptDecoderBatched7forwardERKN7decoder12DecoderStateERKN13decoder_batch5InputE">
<span id="_CPPv3N12tensorrt_llm7runtime18IGptDecoderBatched7forwardERKN7decoder12DecoderStateERKN13decoder_batch5InputE"></span><span id="_CPPv2N12tensorrt_llm7runtime18IGptDecoderBatched7forwardERKN7decoder12DecoderStateERKN13decoder_batch5InputE"></span><span id="tensorrt_llm::runtime::IGptDecoderBatched::forward__decoder::DecoderStateCR.decoder_batch::InputCR"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1IGptDecoderBatched_1a4da8938fdafb368284faacaa41a488ba"></span><span class="k"><span class="pre">virtual</span></span><span class="w"> </span><span class="kt"><span class="pre">void</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">forward</span></span></span><span class="sig-paren">(</span>
<dl>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime7decoderE" title="tensorrt_llm::runtime::decoder"><span class="n"><span class="pre">decoder</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime7decoder12DecoderStateE" title="tensorrt_llm::runtime::decoder::DecoderState"><span class="n"><span class="pre">DecoderState</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">decoderState</span></span></em>,</dd>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batchE" title="tensorrt_llm::runtime::decoder_batch"><span class="n"><span class="pre">decoder_batch</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch6OutputE" title="tensorrt_llm::runtime::decoder_batch::Output"><span class="n"><span class="pre">Output</span></span></a><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">output</span></span></em>,</dd>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batchE" title="tensorrt_llm::runtime::decoder_batch"><span class="n"><span class="pre">decoder_batch</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch5InputE" title="tensorrt_llm::runtime::decoder_batch::Input"><span class="n"><span class="pre">Input</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">input</span></span></em>,</dd>
</dl>
<span class="sig-paren">)</span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="m"><span class="pre">0</span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime18IGptDecoderBatched7forwardERKN7decoder12DecoderStateERN13decoder_batch6OutputERKN13decoder_batch5InputE" title="Link to this definition">#</a><br /></dt>
<span class="sig-paren">)</span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="m"><span class="pre">0</span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime18IGptDecoderBatched7forwardERKN7decoder12DecoderStateERKN13decoder_batch5InputE" title="Link to this definition">#</a><br /></dt>
<dd><p>Run one step for all requests and wait for completion on the host. </p>
</dd></dl>
@ -9760,16 +9758,10 @@ one more than decoding draft tokens for prediction from primary head </p>
<dd><p>Filled with slots in request order, [batchSize]. </p>
</dd></dl>
<dl class="cpp var">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime13decoder_batch5Input16cacheIndirectionE">
<span id="_CPPv3N12tensorrt_llm7runtime13decoder_batch5Input16cacheIndirectionE"></span><span id="_CPPv2N12tensorrt_llm7runtime13decoder_batch5Input16cacheIndirectionE"></span><span id="tensorrt_llm::runtime::decoder_batch::Input::cacheIndirection__TensorPtr"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1decoder__batch_1_1Input_1ad1b93f5e2145ae272a72811679f7673a"></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch5Input9TensorPtrE" title="tensorrt_llm::runtime::decoder_batch::Input::TensorPtr"><span class="n"><span class="pre">TensorPtr</span></span></a><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">cacheIndirection</span></span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch5Input16cacheIndirectionE" title="Link to this definition">#</a><br /></dt>
<dd><p>For Beam Search Indices into KV cache of different rays within one beam, [maxBatchSize, maxBeamWidth, maxSeqLen], on gpu </p>
</dd></dl>
<dl class="cpp var">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime13decoder_batch5Input15generationStepsE">
<span id="_CPPv3N12tensorrt_llm7runtime13decoder_batch5Input15generationStepsE"></span><span id="_CPPv2N12tensorrt_llm7runtime13decoder_batch5Input15generationStepsE"></span><span id="tensorrt_llm::runtime::decoder_batch::Input::generationSteps__std::vector:SizeType32:"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1decoder__batch_1_1Input_1ab585fbd53121a6094355bdc7cc5c195d"></span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">vector</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime10SizeType32E" title="tensorrt_llm::runtime::SizeType32"><span class="n"><span class="pre">SizeType32</span></span></a><span class="p"><span class="pre">&gt;</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">generationSteps</span></span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch5Input15generationStepsE" title="Link to this definition">#</a><br /></dt>
<dd><p>The generation step of each request (for Variable-Beam-Width-Search), [batchSize]. </p>
<dd><p>For Beam Search The generation step of each request (for Variable-Beam-Width-Search), [batchSize] </p>
</dd></dl>
<dl class="cpp var">
@ -9803,36 +9795,6 @@ one more than decoding draft tokens for prediction from primary head </p>
</div>
</dd></dl>
<dl class="cpp class">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime13decoder_batch6OutputE">
<span id="_CPPv3N12tensorrt_llm7runtime13decoder_batch6OutputE"></span><span id="_CPPv2N12tensorrt_llm7runtime13decoder_batch6OutputE"></span><span id="tensorrt_llm::runtime::decoder_batch::Output"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1decoder__batch_1_1Output"></span><span class="k"><span class="pre">class</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">Output</span></span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch6OutputE" title="Link to this definition">#</a><br /></dt>
<dd><div class="breathe-sectiondef docutils container">
<p class="breathe-sectiondef-title rubric" id="breathe-section-title-public-types">Public Types</p>
<dl class="cpp type">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime13decoder_batch6Output9TensorPtrE">
<span id="_CPPv3N12tensorrt_llm7runtime13decoder_batch6Output9TensorPtrE"></span><span id="_CPPv2N12tensorrt_llm7runtime13decoder_batch6Output9TensorPtrE"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1decoder__batch_1_1Output_1ab1a4d43f1e25e7ffc107edf438b75ff8"></span><span class="k"><span class="pre">using</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">TensorPtr</span></span></span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="n"><span class="pre">std</span></span><span class="p"><span class="pre">::</span></span><span class="n"><span class="pre">shared_ptr</span></span><span class="p"><span class="pre">&lt;</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime7ITensorE" title="tensorrt_llm::runtime::ITensor"><span class="n"><span class="pre">ITensor</span></span></a><span class="p"><span class="pre">&gt;</span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch6Output9TensorPtrE" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
</div>
<div class="breathe-sectiondef docutils container">
<p class="breathe-sectiondef-title rubric" id="breathe-section-title-public-functions">Public Functions</p>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime13decoder_batch6Output6OutputEv">
<span id="_CPPv3N12tensorrt_llm7runtime13decoder_batch6Output6OutputEv"></span><span id="_CPPv2N12tensorrt_llm7runtime13decoder_batch6Output6OutputEv"></span><span id="tensorrt_llm::runtime::decoder_batch::Output::Output"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1decoder__batch_1_1Output_1a11aaaeb3c20e5f93d815b2b6c86f345f"></span><span class="sig-name descname"><span class="n"><span class="pre">Output</span></span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><span class="w"> </span><span class="p"><span class="pre">=</span></span><span class="w"> </span><span class="k"><span class="pre">default</span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch6Output6OutputEv" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
</div>
<div class="breathe-sectiondef docutils container">
<p class="breathe-sectiondef-title rubric" id="breathe-section-title-public-members">Public Members</p>
<dl class="cpp var">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime13decoder_batch6Output16cacheIndirectionE">
<span id="_CPPv3N12tensorrt_llm7runtime13decoder_batch6Output16cacheIndirectionE"></span><span id="_CPPv2N12tensorrt_llm7runtime13decoder_batch6Output16cacheIndirectionE"></span><span id="tensorrt_llm::runtime::decoder_batch::Output::cacheIndirection__TensorPtr"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1decoder__batch_1_1Output_1ab579f71d5d5639305bb67ecf1a58ba55"></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch6Output9TensorPtrE" title="tensorrt_llm::runtime::decoder_batch::Output::TensorPtr"><span class="n"><span class="pre">TensorPtr</span></span></a><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">cacheIndirection</span></span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch6Output16cacheIndirectionE" title="Link to this definition">#</a><br /></dt>
<dd><p>parameters for beam search, [batchSize, maxBeamWidth, maxSeqLen], on gpu </p>
</dd></dl>
</div>
</dd></dl>
</dd></dl>
</dd></dl>
@ -10024,7 +9986,7 @@ one more than decoding draft tokens for prediction from primary head </p>
</dl>
<span class="sig-paren">)</span><span class="w"> </span><span class="k"><span class="pre">override</span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched5setupERKN8executor12DecodingModeE10SizeType3210SizeType32N8nvinfer18DataTypeERK11ModelConfigRK11WorldConfig" title="Link to this definition">#</a><br /></dt>
<dd><p>Setup the decoder before calling <code class="docutils literal notranslate"><a class="reference internal" href="#classtensorrt__llm_1_1runtime_1_1GptDecoderBatched_1a41740e026890310d78a3ac98c22e3132"><span class="std std-ref"><span class="pre">forward()</span></span></a></code></p>
<dd><p>Setup the decoder before calling <code class="docutils literal notranslate"><a class="reference internal" href="#classtensorrt__llm_1_1runtime_1_1GptDecoderBatched_1a1644b6eedbe77c15d3486b1ab688dd85"><span class="std std-ref"><span class="pre">forward()</span></span></a></code></p>
</dd></dl>
<dl class="cpp function">
@ -10041,30 +10003,28 @@ one more than decoding draft tokens for prediction from primary head </p>
</dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched12forwardAsyncERKN7decoder12DecoderStateERN13decoder_batch6OutputERKN13decoder_batch5InputE">
<span id="_CPPv3N12tensorrt_llm7runtime17GptDecoderBatched12forwardAsyncERKN7decoder12DecoderStateERN13decoder_batch6OutputERKN13decoder_batch5InputE"></span><span id="_CPPv2N12tensorrt_llm7runtime17GptDecoderBatched12forwardAsyncERKN7decoder12DecoderStateERN13decoder_batch6OutputERKN13decoder_batch5InputE"></span><span id="tensorrt_llm::runtime::GptDecoderBatched::forwardAsync__decoder::DecoderStateCR.decoder_batch::OutputR.decoder_batch::InputCR"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1GptDecoderBatched_1a6f28f352026b3d0beb36947fb5706392"></span><span class="k"><span class="pre">virtual</span></span><span class="w"> </span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime9CudaEventE" title="tensorrt_llm::runtime::CudaEvent"><span class="n"><span class="pre">CudaEvent</span></span></a><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">forwardAsync</span></span></span><span class="sig-paren">(</span>
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched12forwardAsyncERKN7decoder12DecoderStateERKN13decoder_batch5InputE">
<span id="_CPPv3N12tensorrt_llm7runtime17GptDecoderBatched12forwardAsyncERKN7decoder12DecoderStateERKN13decoder_batch5InputE"></span><span id="_CPPv2N12tensorrt_llm7runtime17GptDecoderBatched12forwardAsyncERKN7decoder12DecoderStateERKN13decoder_batch5InputE"></span><span id="tensorrt_llm::runtime::GptDecoderBatched::forwardAsync__decoder::DecoderStateCR.decoder_batch::InputCR"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1GptDecoderBatched_1ad077bf32d0116473d4e537750c7d90c4"></span><span class="k"><span class="pre">virtual</span></span><span class="w"> </span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime9CudaEventE" title="tensorrt_llm::runtime::CudaEvent"><span class="n"><span class="pre">CudaEvent</span></span></a><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">forwardAsync</span></span></span><span class="sig-paren">(</span>
<dl>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime7decoderE" title="tensorrt_llm::runtime::decoder"><span class="n"><span class="pre">decoder</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime7decoder12DecoderStateE" title="tensorrt_llm::runtime::decoder::DecoderState"><span class="n"><span class="pre">DecoderState</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">decoderState</span></span></em>,</dd>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batchE" title="tensorrt_llm::runtime::decoder_batch"><span class="n"><span class="pre">decoder_batch</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch6OutputE" title="tensorrt_llm::runtime::decoder_batch::Output"><span class="n"><span class="pre">Output</span></span></a><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">output</span></span></em>,</dd>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batchE" title="tensorrt_llm::runtime::decoder_batch"><span class="n"><span class="pre">decoder_batch</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch5InputE" title="tensorrt_llm::runtime::decoder_batch::Input"><span class="n"><span class="pre">Input</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">input</span></span></em>,</dd>
</dl>
<span class="sig-paren">)</span><span class="w"> </span><span class="k"><span class="pre">override</span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched12forwardAsyncERKN7decoder12DecoderStateERN13decoder_batch6OutputERKN13decoder_batch5InputE" title="Link to this definition">#</a><br /></dt>
<span class="sig-paren">)</span><span class="w"> </span><span class="k"><span class="pre">override</span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched12forwardAsyncERKN7decoder12DecoderStateERKN13decoder_batch5InputE" title="Link to this definition">#</a><br /></dt>
<dd><p>Run one step for all requests without blocking the host process and return the token for synchronization. </p>
</dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched7forwardERKN7decoder12DecoderStateERN13decoder_batch6OutputERKN13decoder_batch5InputE">
<span id="_CPPv3N12tensorrt_llm7runtime17GptDecoderBatched7forwardERKN7decoder12DecoderStateERN13decoder_batch6OutputERKN13decoder_batch5InputE"></span><span id="_CPPv2N12tensorrt_llm7runtime17GptDecoderBatched7forwardERKN7decoder12DecoderStateERN13decoder_batch6OutputERKN13decoder_batch5InputE"></span><span id="tensorrt_llm::runtime::GptDecoderBatched::forward__decoder::DecoderStateCR.decoder_batch::OutputR.decoder_batch::InputCR"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1GptDecoderBatched_1a41740e026890310d78a3ac98c22e3132"></span><span class="k"><span class="pre">virtual</span></span><span class="w"> </span><span class="kt"><span class="pre">void</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">forward</span></span></span><span class="sig-paren">(</span>
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched7forwardERKN7decoder12DecoderStateERKN13decoder_batch5InputE">
<span id="_CPPv3N12tensorrt_llm7runtime17GptDecoderBatched7forwardERKN7decoder12DecoderStateERKN13decoder_batch5InputE"></span><span id="_CPPv2N12tensorrt_llm7runtime17GptDecoderBatched7forwardERKN7decoder12DecoderStateERKN13decoder_batch5InputE"></span><span id="tensorrt_llm::runtime::GptDecoderBatched::forward__decoder::DecoderStateCR.decoder_batch::InputCR"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1GptDecoderBatched_1a1644b6eedbe77c15d3486b1ab688dd85"></span><span class="k"><span class="pre">virtual</span></span><span class="w"> </span><span class="kt"><span class="pre">void</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">forward</span></span></span><span class="sig-paren">(</span>
<dl>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime7decoderE" title="tensorrt_llm::runtime::decoder"><span class="n"><span class="pre">decoder</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime7decoder12DecoderStateE" title="tensorrt_llm::runtime::decoder::DecoderState"><span class="n"><span class="pre">DecoderState</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">decoderState</span></span></em>,</dd>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batchE" title="tensorrt_llm::runtime::decoder_batch"><span class="n"><span class="pre">decoder_batch</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch6OutputE" title="tensorrt_llm::runtime::decoder_batch::Output"><span class="n"><span class="pre">Output</span></span></a><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">output</span></span></em>,</dd>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batchE" title="tensorrt_llm::runtime::decoder_batch"><span class="n"><span class="pre">decoder_batch</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch5InputE" title="tensorrt_llm::runtime::decoder_batch::Input"><span class="n"><span class="pre">Input</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">input</span></span></em>,</dd>
</dl>
<span class="sig-paren">)</span><span class="w"> </span><span class="k"><span class="pre">override</span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched7forwardERKN7decoder12DecoderStateERN13decoder_batch6OutputERKN13decoder_batch5InputE" title="Link to this definition">#</a><br /></dt>
<span class="sig-paren">)</span><span class="w"> </span><span class="k"><span class="pre">override</span></span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched7forwardERKN7decoder12DecoderStateERKN13decoder_batch5InputE" title="Link to this definition">#</a><br /></dt>
<dd><p>Run one step for all requests and wait for completion on the host. </p>
</dd></dl>
@ -10110,16 +10070,15 @@ one more than decoding draft tokens for prediction from primary head </p>
<div class="breathe-sectiondef docutils container">
<p class="breathe-sectiondef-title rubric" id="breathe-section-title-private-functions">Private Functions</p>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched15forwardDispatchERKN7decoder12DecoderStateERN13decoder_batch6OutputERKN13decoder_batch5InputE">
<span id="_CPPv3N12tensorrt_llm7runtime17GptDecoderBatched15forwardDispatchERKN7decoder12DecoderStateERN13decoder_batch6OutputERKN13decoder_batch5InputE"></span><span id="_CPPv2N12tensorrt_llm7runtime17GptDecoderBatched15forwardDispatchERKN7decoder12DecoderStateERN13decoder_batch6OutputERKN13decoder_batch5InputE"></span><span id="tensorrt_llm::runtime::GptDecoderBatched::forwardDispatch__decoder::DecoderStateCR.decoder_batch::OutputR.decoder_batch::InputCR"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1GptDecoderBatched_1a6afa6ebdff09dba1bd53d47aa74e2967"></span><span class="kt"><span class="pre">void</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">forwardDispatch</span></span></span><span class="sig-paren">(</span>
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched15forwardDispatchERKN7decoder12DecoderStateERKN13decoder_batch5InputE">
<span id="_CPPv3N12tensorrt_llm7runtime17GptDecoderBatched15forwardDispatchERKN7decoder12DecoderStateERKN13decoder_batch5InputE"></span><span id="_CPPv2N12tensorrt_llm7runtime17GptDecoderBatched15forwardDispatchERKN7decoder12DecoderStateERKN13decoder_batch5InputE"></span><span id="tensorrt_llm::runtime::GptDecoderBatched::forwardDispatch__decoder::DecoderStateCR.decoder_batch::InputCR"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1GptDecoderBatched_1a310e2010c97a4654e70f3eba7d5df5be"></span><span class="kt"><span class="pre">void</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">forwardDispatch</span></span></span><span class="sig-paren">(</span>
<dl>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime7decoderE" title="tensorrt_llm::runtime::decoder"><span class="n"><span class="pre">decoder</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime7decoder12DecoderStateE" title="tensorrt_llm::runtime::decoder::DecoderState"><span class="n"><span class="pre">DecoderState</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">decoderState</span></span></em>,</dd>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batchE" title="tensorrt_llm::runtime::decoder_batch"><span class="n"><span class="pre">decoder_batch</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch6OutputE" title="tensorrt_llm::runtime::decoder_batch::Output"><span class="n"><span class="pre">Output</span></span></a><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">output</span></span></em>,</dd>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batchE" title="tensorrt_llm::runtime::decoder_batch"><span class="n"><span class="pre">decoder_batch</span></span></a><span class="p"><span class="pre">::</span></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch5InputE" title="tensorrt_llm::runtime::decoder_batch::Input"><span class="n"><span class="pre">Input</span></span></a><span class="w"> </span><span class="k"><span class="pre">const</span></span><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="n sig-param"><span class="pre">input</span></span></em>,</dd>
</dl>
<span class="sig-paren">)</span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched15forwardDispatchERKN7decoder12DecoderStateERN13decoder_batch6OutputERKN13decoder_batch5InputE" title="Link to this definition">#</a><br /></dt>
<span class="sig-paren">)</span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched15forwardDispatchERKN7decoder12DecoderStateERKN13decoder_batch5InputE" title="Link to this definition">#</a><br /></dt>
<dd><p>Calls decoders for tokens per engine step. </p>
</dd></dl>
@ -10992,7 +10951,23 @@ one more than decoding draft tokens for prediction from primary head </p>
</dl>
<span class="sig-paren">)</span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime7decoder12DecoderState5setupE10SizeType3210SizeType3210SizeType3210SizeType3210SizeType32RK11ModelConfigRK11WorldConfigRK13BufferManager" title="Link to this definition">#</a><br /></dt>
<dd></dd></dl>
<dd><p>Setup buffers for the decoder excluding speculative decoding. </p>
</dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime7decoder12DecoderState21setupCacheIndirectionE10SizeType3210SizeType3210SizeType32">
<span id="_CPPv3N12tensorrt_llm7runtime7decoder12DecoderState21setupCacheIndirectionE10SizeType3210SizeType3210SizeType32"></span><span id="_CPPv2N12tensorrt_llm7runtime7decoder12DecoderState21setupCacheIndirectionE10SizeType3210SizeType3210SizeType32"></span><span id="tensorrt_llm::runtime::decoder::DecoderState::setupCacheIndirection__SizeType32.SizeType32.SizeType32"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1decoder_1_1DecoderState_1aac08ec4e2c6f8a9e8b10dec6c20a337c"></span><span class="kt"><span class="pre">void</span></span><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">setupCacheIndirection</span></span></span><span class="sig-paren">(</span>
<dl>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime10SizeType32E" title="tensorrt_llm::runtime::SizeType32"><span class="n"><span class="pre">SizeType32</span></span></a><span class="w"> </span><span class="n sig-param"><span class="pre">maxBatchSize</span></span></em>,</dd>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime10SizeType32E" title="tensorrt_llm::runtime::SizeType32"><span class="n"><span class="pre">SizeType32</span></span></a><span class="w"> </span><span class="n sig-param"><span class="pre">maxBeamWidth</span></span></em>,</dd>
<dd><em class="sig-param"><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime10SizeType32E" title="tensorrt_llm::runtime::SizeType32"><span class="n"><span class="pre">SizeType32</span></span></a><span class="w"> </span><span class="n sig-param"><span class="pre">maxAttentionWindow</span></span></em>,</dd>
</dl>
<span class="sig-paren">)</span><a class="headerlink" href="#_CPPv4N12tensorrt_llm7runtime7decoder12DecoderState21setupCacheIndirectionE10SizeType3210SizeType3210SizeType32" title="Link to this definition">#</a><br /></dt>
<dd><p>Setup buffers for the cache indirection. </p>
<p>This is used for beam search on pipeline parallel ranks without a decoder. </p>
</dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4N12tensorrt_llm7runtime7decoder12DecoderState24setupSpeculativeDecodingERK23SpeculativeDecodingMode10SizeType32RK11ModelConfigRK11WorldConfigRK13BufferManager">
@ -11332,6 +11307,18 @@ one more than decoding draft tokens for prediction from primary head </p>
<dd><p>Workspace for beam search in streaming mode. </p>
</dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState24getCacheIndirectionInputEv">
<span id="_CPPv3NK12tensorrt_llm7runtime7decoder12DecoderState24getCacheIndirectionInputEv"></span><span id="_CPPv2NK12tensorrt_llm7runtime7decoder12DecoderState24getCacheIndirectionInputEv"></span><span id="tensorrt_llm::runtime::decoder::DecoderState::getCacheIndirectionInputC"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1decoder_1_1DecoderState_1adc8e71751b62a60ce0d77e846c96f9fc"></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime7decoder12DecoderState9TensorPtrE" title="tensorrt_llm::runtime::decoder::DecoderState::TensorPtr"><span class="n"><span class="pre">TensorPtr</span></span></a><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">getCacheIndirectionInput</span></span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><span class="w"> </span><span class="k"><span class="pre">const</span></span><a class="headerlink" href="#_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState24getCacheIndirectionInputEv" title="Link to this definition">#</a><br /></dt>
<dd><p>Cache indirection input for beam search. </p>
</dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState25getCacheIndirectionOutputEv">
<span id="_CPPv3NK12tensorrt_llm7runtime7decoder12DecoderState25getCacheIndirectionOutputEv"></span><span id="_CPPv2NK12tensorrt_llm7runtime7decoder12DecoderState25getCacheIndirectionOutputEv"></span><span id="tensorrt_llm::runtime::decoder::DecoderState::getCacheIndirectionOutputC"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1decoder_1_1DecoderState_1a891c5a9630f5035fb7391ed2b90ac75f"></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime7decoder12DecoderState9TensorPtrE" title="tensorrt_llm::runtime::decoder::DecoderState::TensorPtr"><span class="n"><span class="pre">TensorPtr</span></span></a><span class="w"> </span><span class="sig-name descname"><span class="n"><span class="pre">getCacheIndirectionOutput</span></span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><span class="w"> </span><span class="k"><span class="pre">const</span></span><a class="headerlink" href="#_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState25getCacheIndirectionOutputEv" title="Link to this definition">#</a><br /></dt>
<dd><p>Cache indirection output for beam search. </p>
</dd></dl>
<dl class="cpp function">
<dt class="sig sig-object cpp" id="_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState21getJointDecodingInputEv">
<span id="_CPPv3NK12tensorrt_llm7runtime7decoder12DecoderState21getJointDecodingInputEv"></span><span id="_CPPv2NK12tensorrt_llm7runtime7decoder12DecoderState21getJointDecodingInputEv"></span><span id="tensorrt_llm::runtime::decoder::DecoderState::getJointDecodingInputC"></span><span class="target" id="classtensorrt__llm_1_1runtime_1_1decoder_1_1DecoderState_1a4dfefcff30e619815aea4fbe5bd9eaca"></span><a class="reference internal" href="#_CPPv4N12tensorrt_llm7runtime13DecodingInputE" title="tensorrt_llm::runtime::DecodingInput"><span class="n"><span class="pre">DecodingInput</span></span></a><span class="w"> </span><span class="p"><span class="pre">&amp;</span></span><span class="sig-name descname"><span class="n"><span class="pre">getJointDecodingInput</span></span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><span class="w"> </span><span class="k"><span class="pre">const</span></span><a class="headerlink" href="#_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState21getJointDecodingInputEv" title="Link to this definition">#</a><br /></dt>
@ -13262,8 +13249,8 @@ one more than decoding draft tokens for prediction from primary head </p>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime18IGptDecoderBatched9TensorPtrE"><code class="docutils literal notranslate"><span class="pre">TensorPtr</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime18IGptDecoderBatched5setupERKN8executor12DecodingModeE10SizeType3210SizeType32N8nvinfer18DataTypeERK11ModelConfigRK11WorldConfig"><code class="docutils literal notranslate"><span class="pre">setup()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime18IGptDecoderBatched16disableLookaheadERK13RequestVectorRK9TensorPtr"><code class="docutils literal notranslate"><span class="pre">disableLookahead()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime18IGptDecoderBatched12forwardAsyncERKN7decoder12DecoderStateERN13decoder_batch6OutputERKN13decoder_batch5InputE"><code class="docutils literal notranslate"><span class="pre">forwardAsync()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime18IGptDecoderBatched7forwardERKN7decoder12DecoderStateERN13decoder_batch6OutputERKN13decoder_batch5InputE"><code class="docutils literal notranslate"><span class="pre">forward()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime18IGptDecoderBatched12forwardAsyncERKN7decoder12DecoderStateERKN13decoder_batch5InputE"><code class="docutils literal notranslate"><span class="pre">forwardAsync()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime18IGptDecoderBatched7forwardERKN7decoder12DecoderStateERKN13decoder_batch5InputE"><code class="docutils literal notranslate"><span class="pre">forward()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime18IGptDecoderBatched8finalizeERKN7decoder12DecoderStateE10SizeType32RK14SamplingConfigb"><code class="docutils literal notranslate"><span class="pre">finalize()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime18IGptDecoderBatched18IGptDecoderBatchedEv"><code class="docutils literal notranslate"><span class="pre">IGptDecoderBatched()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime18IGptDecoderBatchedD0Ev"><code class="docutils literal notranslate"><span class="pre">~IGptDecoderBatched()</span></code></a></li>
@ -13280,7 +13267,6 @@ one more than decoding draft tokens for prediction from primary head </p>
<li class="toc-h5 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch5Input15maxDecoderStepsE"><code class="docutils literal notranslate"><span class="pre">maxDecoderSteps</span></code></a></li>
<li class="toc-h5 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch5Input10batchSlotsE"><code class="docutils literal notranslate"><span class="pre">batchSlots</span></code></a></li>
<li class="toc-h5 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch5Input22batchSlotsRequestOrderE"><code class="docutils literal notranslate"><span class="pre">batchSlotsRequestOrder</span></code></a></li>
<li class="toc-h5 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch5Input16cacheIndirectionE"><code class="docutils literal notranslate"><span class="pre">cacheIndirection</span></code></a></li>
<li class="toc-h5 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch5Input15generationStepsE"><code class="docutils literal notranslate"><span class="pre">generationSteps</span></code></a></li>
<li class="toc-h5 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch5Input20predictedDraftLogitsE"><code class="docutils literal notranslate"><span class="pre">predictedDraftLogits</span></code></a></li>
<li class="toc-h5 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch5Input25explicitDraftTokensInputsE"><code class="docutils literal notranslate"><span class="pre">explicitDraftTokensInputs</span></code></a></li>
@ -13289,12 +13275,6 @@ one more than decoding draft tokens for prediction from primary head </p>
<li class="toc-h5 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch5Input15eagleLastInputsE"><code class="docutils literal notranslate"><span class="pre">eagleLastInputs</span></code></a></li>
</ul>
</li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch6OutputE"><code class="docutils literal notranslate"><span class="pre">tensorrt_llm::runtime::decoder_batch::Output</span></code></a><ul class="nav section-nav flex-column">
<li class="toc-h5 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch6Output9TensorPtrE"><code class="docutils literal notranslate"><span class="pre">TensorPtr</span></code></a></li>
<li class="toc-h5 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch6Output6OutputEv"><code class="docutils literal notranslate"><span class="pre">Output()</span></code></a></li>
<li class="toc-h5 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime13decoder_batch6Output16cacheIndirectionE"><code class="docutils literal notranslate"><span class="pre">cacheIndirection</span></code></a></li>
</ul>
</li>
</ul>
</li>
</ul>
@ -13331,14 +13311,14 @@ one more than decoding draft tokens for prediction from primary head </p>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched17GptDecoderBatchedE13CudaStreamPtr"><code class="docutils literal notranslate"><span class="pre">GptDecoderBatched()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched5setupERKN8executor12DecodingModeE10SizeType3210SizeType32N8nvinfer18DataTypeERK11ModelConfigRK11WorldConfig"><code class="docutils literal notranslate"><span class="pre">setup()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched16disableLookaheadERK13RequestVectorRK9TensorPtr"><code class="docutils literal notranslate"><span class="pre">disableLookahead()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched12forwardAsyncERKN7decoder12DecoderStateERN13decoder_batch6OutputERKN13decoder_batch5InputE"><code class="docutils literal notranslate"><span class="pre">forwardAsync()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched7forwardERKN7decoder12DecoderStateERN13decoder_batch6OutputERKN13decoder_batch5InputE"><code class="docutils literal notranslate"><span class="pre">forward()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched12forwardAsyncERKN7decoder12DecoderStateERKN13decoder_batch5InputE"><code class="docutils literal notranslate"><span class="pre">forwardAsync()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched7forwardERKN7decoder12DecoderStateERKN13decoder_batch5InputE"><code class="docutils literal notranslate"><span class="pre">forward()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime17GptDecoderBatched8finalizeERKN7decoder12DecoderStateE10SizeType32RK14SamplingConfigb"><code class="docutils literal notranslate"><span class="pre">finalize()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime17GptDecoderBatched16getDecoderStreamEv"><code class="docutils literal notranslate"><span class="pre">getDecoderStream()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime17GptDecoderBatched20getUnderlyingDecoderEv"><code class="docutils literal notranslate"><span class="pre">getUnderlyingDecoder()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime17GptDecoderBatched16getBufferManagerEv"><code class="docutils literal notranslate"><span class="pre">getBufferManager()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched13GptDecoderPtrE"><code class="docutils literal notranslate"><span class="pre">GptDecoderPtr</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched15forwardDispatchERKN7decoder12DecoderStateERN13decoder_batch6OutputERKN13decoder_batch5InputE"><code class="docutils literal notranslate"><span class="pre">forwardDispatch()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched15forwardDispatchERKN7decoder12DecoderStateERKN13decoder_batch5InputE"><code class="docutils literal notranslate"><span class="pre">forwardDispatch()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched14mRuntimeStreamE"><code class="docutils literal notranslate"><span class="pre">mRuntimeStream</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched14mDecoderStreamE"><code class="docutils literal notranslate"><span class="pre">mDecoderStream</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime17GptDecoderBatched14mBufferManagerE"><code class="docutils literal notranslate"><span class="pre">mBufferManager</span></code></a></li>
@ -13492,6 +13472,7 @@ one more than decoding draft tokens for prediction from primary head </p>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime7decoder12DecoderState12DecoderStateEN8nvinfer18DataTypeERK13BufferManager"><code class="docutils literal notranslate"><span class="pre">DecoderState()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime7decoder12DecoderState34allocateSpeculativeDecodingBuffersE23SpeculativeDecodingModeN8nvinfer18DataTypeERK13BufferManager"><code class="docutils literal notranslate"><span class="pre">allocateSpeculativeDecodingBuffers()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime7decoder12DecoderState5setupE10SizeType3210SizeType3210SizeType3210SizeType3210SizeType32RK11ModelConfigRK11WorldConfigRK13BufferManager"><code class="docutils literal notranslate"><span class="pre">setup()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime7decoder12DecoderState21setupCacheIndirectionE10SizeType3210SizeType3210SizeType32"><code class="docutils literal notranslate"><span class="pre">setupCacheIndirection()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime7decoder12DecoderState24setupSpeculativeDecodingERK23SpeculativeDecodingMode10SizeType32RK11ModelConfigRK11WorldConfigRK13BufferManager"><code class="docutils literal notranslate"><span class="pre">setupSpeculativeDecoding()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime7decoder12DecoderState16disableLookaheadERK13RequestVector"><code class="docutils literal notranslate"><span class="pre">disableLookahead()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState14getFinishedSumEv"><code class="docutils literal notranslate"><span class="pre">getFinishedSum()</span></code></a></li>
@ -13527,6 +13508,8 @@ one more than decoding draft tokens for prediction from primary head </p>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState15getEagleBuffersEv"><code class="docutils literal notranslate"><span class="pre">getEagleBuffers()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState19getLookaheadBuffersEv"><code class="docutils literal notranslate"><span class="pre">getLookaheadBuffers()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState20getBeamSearchBuffersEv"><code class="docutils literal notranslate"><span class="pre">getBeamSearchBuffers()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState24getCacheIndirectionInputEv"><code class="docutils literal notranslate"><span class="pre">getCacheIndirectionInput()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState25getCacheIndirectionOutputEv"><code class="docutils literal notranslate"><span class="pre">getCacheIndirectionOutput()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState21getJointDecodingInputEv"><code class="docutils literal notranslate"><span class="pre">getJointDecodingInput()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4NK12tensorrt_llm7runtime7decoder12DecoderState22getJointDecodingOutputEv"><code class="docutils literal notranslate"><span class="pre">getJointDecodingOutput()</span></code></a></li>
<li class="toc-h4 nav-item toc-entry"><a class="reference internal nav-link" href="#_CPPv4N12tensorrt_llm7runtime7decoder12DecoderState13mMaxBatchSizeE"><code class="docutils literal notranslate"><span class="pre">mMaxBatchSize</span></code></a></li>
@ -13706,9 +13689,9 @@ one more than decoding draft tokens for prediction from primary head </p>
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -5,6 +5,7 @@ from typing import Optional, Union, cast
import torch
from torch import nn
from tensorrt_llm._utils import get_sm_version
from tensorrt_llm.logger import logger
from tensorrt_llm.mapping import Mapping
@ -346,6 +347,47 @@ def mla_custom_op_inplace(
mla_layer.forward_impl(position_ids, hidden_states, metadata, output=output)
def fp8_block_scaling_bmm_out(
mat1: torch.Tensor,
mat2_fp8: torch.Tensor,
mat2_scale: torch.Tensor,
out: torch.Tensor,
) -> torch.Tensor:
sm_version = get_sm_version()
if sm_version == 90:
mat1_fp8, mat1_scale = torch.ops.trtllm.fp8_batched_quantize_1x128_permute102(
mat1)
torch.ops.trtllm.fp8_block_scaling_bmm_out(mat1_fp8, mat2_fp8,
mat1_scale, mat2_scale, out)
elif sm_version == 100:
low_latency = True
use_deep_seek_fp8 = True
tile_size = 8
epilogue_tile_m = 64 if use_deep_seek_fp8 else 128
m_size = mat1.shape[0]
if m_size % tile_size != 0:
tiled_shape = ((m_size + tile_size - 1) // tile_size) * tile_size
mat1 = torch.nn.functional.pad(
mat1, (0, 0, 0, 0, 0, tiled_shape - m_size), "constant", 0)
mat1_fp8, mat1_scale = torch.ops.trtllm.fp8_batched_quantize_1x128_permute102(
mat1)
output, output_sf = torch.ops.trtllm.fp8_batched_gemm_trtllmgen(
mat1_fp8,
mat2_fp8,
tile_size=tile_size,
epilogue_tile_m=epilogue_tile_m,
use_deep_seek_fp8=use_deep_seek_fp8,
low_latency=low_latency,
dq_sfs_a=mat1_scale.reshape(mat1.shape[-1] // 128, -1),
dq_sfs_b=mat2_scale,
out_dtype=out.dtype,
)
out.copy_(output[:, :m_size])
else:
raise NotImplementedError(f"SM{sm_version} is not supported")
class MLA(nn.Module):
def __init__(
@ -922,6 +964,166 @@ class MLA(nn.Module):
return attn_output
def forward_context_with_chunked_prefill(
self,
q: torch.Tensor,
compressed_kv: torch.Tensor,
latent_cache: torch.
Tensor, # compressed_kv + k_pe [context_tokens, 1, lora_size + rope_size]
attn_metadata: TrtllmAttentionMetadata,
output: Optional[torch.Tensor] = None,
) -> torch.Tensor:
trtllm_attention = cast(TrtllmAttention, self.mha)
# apply RoPE, append compressed_kv + k_pe to paged kv cache and assign q_pe to q
trtllm_attention.mla_rope_append_paged_kv_assign_q(
q, latent_cache, attn_metadata)
# determine the number of loop
# currently we assume that the chunk size is the same as the max_num_tokens
chunk_size = attn_metadata.runtime_features.chunk_size
chunked_loop_num = attn_metadata.chunked_loop_num
# [toal_token_q, num_heads, 2] -> [toal_token_q, num_heads] float2
self.softmax_stats_tensor = torch.empty(
(attn_metadata.num_ctx_tokens, self.num_heads, 2),
dtype=torch.float,
device='cuda',
)
self.temp_softmax_stats_tensor = torch.empty(
(attn_metadata.num_ctx_tokens, self.num_heads, 2),
dtype=torch.float,
device='cuda',
)
if output is None:
attn_output = q.new_empty(
(q.size(0), self.num_heads * self.v_head_dim), dtype=q.dtype)
else:
attn_output = output
temp_attn_output = q.new_empty(
(q.size(0), self.num_heads * self.v_head_dim), dtype=q.dtype)
# use fake cached_cu_seq_len for chunked loop
origin_kv_lens_cuda_runtime = attn_metadata.kv_lens_cuda_runtime
origin_kv_lens_runtime = attn_metadata.kv_lens_runtime
for loop_idx in range(chunked_loop_num):
# {b, chunked_unit_size, h, kv_lora_rank + qk_rope_head_dim} zero padded
# fetch `loop_idx` chunk from kv cache
temp_cu_chunked_seq_len = attn_metadata.cu_chunked_seq_len[loop_idx]
total_ctx_chunked_tokens = attn_metadata.host_cu_chunked_seq_len[
loop_idx, attn_metadata.num_contexts]
chunked_compressed_kv, chunked_k_pe = trtllm_attention.load_chunked_kv_cache_for_mla(
metadata=attn_metadata,
chunked_idx=loop_idx,
num_ctx_cached_tokens=total_ctx_chunked_tokens,
cu_chunked_seq_len=temp_cu_chunked_seq_len,
out_dtype=q.dtype)
# up proj to uncompressed kv
# [tokens, 2, h, kv_dim], without rope_dim
chunked_kv = self.kv_b_proj(chunked_compressed_kv)
# build full_kv
# full_kv {B, 2, chunk_size / tokens_per_block, h, tokens_per_block, kv_dim + rope_dim}
tokens_per_block = attn_metadata.kv_cache_manager.tokens_per_block
full_kv = torch.zeros([
attn_metadata.num_contexts, 2,
(chunk_size + tokens_per_block - 1) // tokens_per_block,
self.num_heads, tokens_per_block,
max(self.qk_nope_head_dim + self.qk_rope_head_dim,
self.v_head_dim)
],
dtype=q.dtype,
device=q.device)
mla_kv_cache_block_offsets = trtllm_attention.set_chunked_kv_cache_for_mla(
full_kv,
chunked_kv,
chunked_k_pe,
cu_chunked_seq_len=temp_cu_chunked_seq_len,
cached=True,
metadata=attn_metadata)
# copy chunked_seq_len to replace kv_lens_runtime
attn_metadata.kv_lens_runtime = attn_metadata.host_chunked_seq_len[
loop_idx]
attn_metadata.kv_lens_cuda_runtime = attn_metadata.chunked_seq_len[
loop_idx]
out_scale = None
# do not apply mask for attention within loop
temp_attn_output = self.mha.forward(
q,
None,
None,
attn_metadata,
attention_input_type=AttentionInputType.context_only,
latent_cache=None,
out_scale=out_scale,
attention_mask=PredefinedAttentionMask.FULL,
mla_context_paged_kv=full_kv,
mla_context_kv_cache_block_offsets=mla_kv_cache_block_offsets,
softmax_stats_tensor=self.temp_softmax_stats_tensor,
output=temp_attn_output,
)
# merge attn result
temp_merge_op = attn_metadata.merge_op_tensor[loop_idx]
trtllm_attention.merge_attention_for_mla(
attn_output, temp_attn_output, self.softmax_stats_tensor,
self.temp_softmax_stats_tensor, temp_merge_op, attn_metadata)
# deal with the uncached kv
kv = self.kv_b_proj(compressed_kv)
_, k_pe = latent_cache.view([
-1, self.kv_lora_rank + self.qk_rope_head_dim
]).split([self.kv_lora_rank, self.qk_rope_head_dim], -1)
k_pe = k_pe.contiguous()
# final round of attention
# out_scale = getattr(self.o_proj, "inv_input_scale", None)
out_scale = None # Currently we use BF16 MHA for context phase
tokens_per_block = attn_metadata.kv_cache_manager.tokens_per_block
full_kv = torch.zeros([
attn_metadata.num_contexts, 2,
(attn_metadata.max_ctx_seq_len + tokens_per_block - 1) //
tokens_per_block, self.num_heads, tokens_per_block,
max(self.qk_nope_head_dim + self.qk_rope_head_dim, self.v_head_dim)
],
dtype=q.dtype,
device=q.device)
mla_kv_cache_block_offsets = trtllm_attention.set_chunked_kv_cache_for_mla(
full_kv,
kv,
k_pe,
cu_chunked_seq_len=None,
cached=False,
metadata=attn_metadata)
# copy q_lens to replace kv_lens_runtime
attn_metadata.kv_lens_runtime = attn_metadata.prompt_lens_cpu_runtime
attn_metadata.kv_lens_cuda_runtime = attn_metadata.prompt_lens_cuda_runtime
temp_attn_output = self.mha.forward(
q,
None,
None,
attn_metadata,
attention_input_type=AttentionInputType.context_only,
latent_cache=None,
out_scale=out_scale,
mla_context_paged_kv=full_kv,
mla_context_kv_cache_block_offsets=mla_kv_cache_block_offsets,
softmax_stats_tensor=self.temp_softmax_stats_tensor,
output=temp_attn_output,
)
temp_merge_op = attn_metadata.merge_op_tensor[chunked_loop_num]
trtllm_attention.merge_attention_for_mla(attn_output, temp_attn_output,
self.softmax_stats_tensor,
self.temp_softmax_stats_tensor,
temp_merge_op, attn_metadata)
# copy back kv_lens_runtime and kv_lens_cuda_runtime
attn_metadata.kv_lens_runtime = origin_kv_lens_runtime
attn_metadata.kv_lens_cuda_runtime = origin_kv_lens_cuda_runtime
return attn_output
def forward_context(
self,
q: torch.Tensor,
@ -934,7 +1136,11 @@ class MLA(nn.Module):
if isinstance(self.mha, TrtllmAttention):
assert isinstance(attn_metadata, TrtllmAttentionMetadata)
trtllm_attention = cast(TrtllmAttention, self.mha)
if trtllm_attention.has_cached_kv_for_mla_context(attn_metadata):
if trtllm_attention.is_chunked_prefill_for_mla_context(
attn_metadata):
return self.forward_context_with_chunked_prefill(
q, compressed_kv, latent_cache, attn_metadata, output)
elif trtllm_attention.has_cached_kv_for_mla_context(attn_metadata):
return self.forward_context_with_cached_kv(
q, latent_cache, attn_metadata, output)
return self.forward_context_default(q, compressed_kv, k_pe,
@ -976,15 +1182,11 @@ class MLA(nn.Module):
self.k_b_proj_trans.transpose(1, 2),
q_nope_out)
elif self.k_b_proj_trans.dtype == torch.float8_e4m3fn:
q_nope_fp8, q_nope_scales = torch.ops.trtllm.fp8_batched_quantize_1x128_permute102(
q_nope)
# [num_heads, num_tokens, self.kv_lora_rank]
q_nope_out = fused_q[..., :self.kv_lora_rank].transpose(0, 1)
torch.ops.trtllm.fp8_block_scaling_bmm_out(
q_nope_fp8, self.k_b_proj_trans, q_nope_scales,
self.k_b_proj_trans_scale, q_nope_out)
q_nope_scales = None
fp8_block_scaling_bmm_out(q_nope, self.k_b_proj_trans,
self.k_b_proj_trans_scale, q_nope_out)
else:
raise NotImplementedError(
f"Missing bmm impl for dtype: {self.k_b_proj_trans.dtype}.")
@ -1033,13 +1235,9 @@ class MLA(nn.Module):
self.v_b_proj.transpose(1, 2),
attn_output.transpose(0, 1))
elif self.v_b_proj.dtype == torch.float8_e4m3fn:
attn_out_latent, attn_out_latent_scales = torch.ops.trtllm.fp8_batched_quantize_1x128_permute102(
attn_out_latent)
torch.ops.trtllm.fp8_block_scaling_bmm_out(
attn_out_latent, self.v_b_proj, attn_out_latent_scales,
self.v_b_proj_scale, attn_output.transpose(0, 1))
attn_out_latent_scales = None
fp8_block_scaling_bmm_out(attn_out_latent, self.v_b_proj,
self.v_b_proj_scale,
attn_output.transpose(0, 1))
else:
raise NotImplementedError(
f"Missing bmm impl for dtype: {self.v_b_proj.dtype}.")

View File

@ -7,7 +7,7 @@ from dataclasses import dataclass, field
from enum import Enum, EnumMeta
from pathlib import Path
from typing import (TYPE_CHECKING, Any, ClassVar, Dict, List, Literal, Optional,
Union)
TypeAlias, Union)
import torch
import yaml
@ -54,8 +54,7 @@ from ..models.modeling_utils import (PretrainedConfig, QuantAlgo, QuantConfig,
from ..sampling_params import BatchedLogitsProcessor
from .build_cache import BuildCacheConfig
from .tokenizer import TokenizerBase, tokenizer_factory
from .utils import (generate_api_docs_as_docstring, get_type_repr,
print_traceback_on_error)
from .utils import generate_api_docs_as_docstring, get_type_repr
# TODO[chunweiy]: move the following symbols back to utils scope, and remove the following import
@ -599,6 +598,16 @@ class LookaheadDecodingConfig(DecodingBaseConfig, PybindMirror):
decoding_type: ClassVar[str] = "Lookahead"
SpeculativeConfig: TypeAlias = Optional[Union[
DraftTargetDecodingConfig,
EagleDecodingConfig,
LookaheadDecodingConfig,
MedusaDecodingConfig,
MTPDecodingConfig,
NGramDecodingConfig,
]]
@PybindMirror.mirror_pybind_fields(_KvCacheConfig)
class KvCacheConfig(BaseModel, PybindMirror):
"""
@ -658,6 +667,8 @@ class KvCacheConfig(BaseModel, PybindMirror):
description=
"Whether partially matched blocks that are in use can be reused after copying them."
)
use_uvm: bool = Field(default=False,
description="Whether to use UVM for the KV cache.")
def _to_pybind(self):
return _KvCacheConfig(
@ -672,7 +683,8 @@ class KvCacheConfig(BaseModel, PybindMirror):
secondary_offload_min_priority=self.secondary_offload_min_priority,
event_buffer_max_size=self.event_buffer_max_size,
enable_partial_reuse=self.enable_partial_reuse,
copy_on_partial_reuse=self.copy_on_partial_reuse)
copy_on_partial_reuse=self.copy_on_partial_reuse,
use_uvm=self.use_uvm)
@PybindMirror.mirror_pybind_fields(_ExtendedRuntimePerfKnobConfig)
@ -879,8 +891,11 @@ class BaseLlmArgs(BaseModel):
enable_chunked_prefill: bool = Field(default=False,
description="Enable chunked prefill.")
guided_decoding_backend: Optional[str] = Field(
default=None, description="Guided decoding backend.")
guided_decoding_backend: Optional[Literal["xgrammar", "llguidance"]] = Field(
default=None,
description=
"Guided decoding backend. llguidance is supported in PyTorch backend only."
)
batched_logits_processor: Optional[object] = Field(
default=None,
@ -908,11 +923,8 @@ class BaseLlmArgs(BaseModel):
default=None, description="Cache transceiver config.")
# Speculative decoding parameters
speculative_config: Optional[
Union[LookaheadDecodingConfig, MedusaDecodingConfig,
EagleDecodingConfig, MTPDecodingConfig, NGramDecodingConfig,
DraftTargetDecodingConfig]] = Field(
default=None, description="Speculative decoding config.")
speculative_config: SpeculativeConfig = Field(
default=None, description="Speculative decoding config.")
batching_type: Optional[BatchingType] = Field(default=None,
description="Batching type.")
@ -954,12 +966,6 @@ class BaseLlmArgs(BaseModel):
default=None,
description="The parser to separate reasoning content from output.")
garbage_collection_gen0_threshold: int = Field(
default=20000,
description=
"Threshold for Python garbage collection of generation 0 objects."
"Lower values trigger more frequent garbage collection.")
# TODO[Superjomn]: To deprecate this config.
decoding_config: Optional[object] = Field(
default=None,
@ -1621,7 +1627,6 @@ class TorchCompileConfig(BaseModel):
class TorchLlmArgs(BaseLlmArgs):
# Just a dummy BuildConfig to allow code reuse with the TrtLlmArgs
build_config: Optional[object] = Field(
default=None,
@ -1631,6 +1636,12 @@ class TorchLlmArgs(BaseLlmArgs):
# PyTorch backend specific configurations
garbage_collection_gen0_threshold: int = Field(
default=20000,
description=
"Threshold for Python garbage collection of generation 0 objects."
"Lower values trigger more frequent garbage collection.")
use_cuda_graph: bool = Field(
default=False,
description=
@ -1901,115 +1912,6 @@ class TorchLlmArgs(BaseLlmArgs):
return batch_sizes
class _AutoDeployLlmArgs(TorchLlmArgs):
"""LLM arguments specifically for AutoDeploy backend.
This class extends TorchLlmArgs with AutoDeploy-specific configuration options.
AutoDeploy provides automatic deployment and optimization of language models
with various attention backends and optimization strategies.
"""
model_factory: Literal[
"AutoModelForCausalLM", "AutoModelForImageTextToText"] = Field(
default="AutoModelForCausalLM",
description="The model factory to use for loading the model.",
)
model_kwargs: Dict[str, Any] = Field(
default_factory=dict,
description=
"Extra kwargs for the model config class to customize the model config. "
"These arguments take precedence over default values or config values in the model config "
"file. Arguments are resolved in order: 1) Default values in model config class, 2) Values "
"in model config file, 3) Values in model_kwargs. Note: if a kwarg doesn't exist in the "
"model config class, it will be ignored.",
)
mla_backend: Literal["MultiHeadLatentAttention"] = Field(
default="MultiHeadLatentAttention",
description="The Multi-Head Latent Attention backend to use.",
)
skip_loading_weights: bool = Field(
default=False,
description=
"Whether to skip loading model weights during initialization. "
"If True, only the model architecture is loaded.",
)
free_mem_ratio: float = Field(
default=0.8,
description="The fraction of available memory to allocate for cache. "
"Must be between 0.0 and 1.0.",
)
simple_shard_only: bool = Field(
default=False,
description=
"If True, force simple sharding (all_gather) in tensor parallelism. "
"If False, auto-detect and use column+row (all_reduce) sharding when possible.",
)
# TODO: Remove this field once tokens_per_block is properly passed through
attn_page_size: int = Field(
default=64,
description=
"Page size for attention (tokens_per_block). For TritonWithFlattenedInputs "
"backend, this should equal max_seq_len. Temporary field until tokens_per_block gets "
"properly passed through.",
)
checkpoint_device: Optional[str] = Field(
default=None,
description="Device on which to load the model checkpoint. "
"Defaults to the same device as the rest of the pipeline.",
)
@field_validator("free_mem_ratio")
@classmethod
def validate_free_mem_ratio(cls, v):
"""Validate that free_mem_ratio is between 0.0 and 1.0."""
if not 0.0 <= v <= 1.0:
raise ValueError(
f"free_mem_ratio must be between 0.0 and 1.0, got {v}")
return v
@print_traceback_on_error
def model_post_init(self, __context):
# Modify default values that differ from TorchLlmArgs
new_defaults = {
"max_batch_size": 8,
"max_seq_len": 512,
"attn_backend": "FlashInfer",
# TODO: Remove this when overlap scheduler is supported (https://github.com/NVIDIA/TensorRT-LLM/issues/4364)
"disable_overlap_scheduler": True,
}
for k, v_default in new_defaults.items():
if k not in self.__pydantic_fields_set__:
setattr(self, k, v_default)
# NOTE: Only call super() after setting the default values since default values should be
# set first.
super().model_post_init(__context)
# Handle attn_page_size for TritonWithFlattenedInputs backend
if self.attn_backend == "TritonWithFlattenedInputs":
self.attn_page_size = self.max_seq_len
# Add max_position_embeddings to model_kwargs
# TODO (lucaslie): this is more HF specific than a generic model_kwargs. Ideally, we can
# move this to the HF model factory but we don't have access to max_seq_len there right now.
self.model_kwargs["max_position_embeddings"] = min(
self.max_seq_len,
self.model_kwargs.get("max_position_embeddings", self.max_seq_len),
)
# TODO: Remove this after the PyTorch backend is fully migrated to TorchLlmArgs from ExecutorConfig
def get_pytorch_backend_config(self) -> "_AutoDeployLlmArgs":
"""Return the _AutoDeployLlmArgs (self) object."""
return self
def update_llm_args_with_extra_dict(
llm_args: Dict,
llm_args_dict: Dict,

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -688,9 +688,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -1997,9 +1997,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -668,9 +668,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -596,6 +596,7 @@
<span class="sd"> stop_reason (int, str, optional): The stop string or token id that caused the completion to stop, None if the completion finished for some other reason. Defaults to None.</span>
<span class="sd"> generation_logits (torch.Tensor, optional): The logits on the generated output token ids. Defaults to None.</span>
<span class="sd"> disaggregated_params (tensorrt_llm.disaggregated_params.DisaggregatedParams, optional): Parameters needed for disaggregated serving. Includes the type of request, the first generated tokens, the context request id and the any additional state needing to be transferred from context and generation instances. Defaults to None.</span>
<span class="sd"> request_perf_metrics (tensorrt_llm.bindings.executor.RequestPerfMetrics, optional): Performance metrics for the request. Defaults to None.</span>
<span class="sd"> Attributes:</span>
<span class="sd"> length (int): The number of generated tokens.</span>
@ -614,6 +615,7 @@
<span class="n">stop_reason</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Union</span><span class="p">[</span><span class="nb">int</span><span class="p">,</span> <span class="nb">str</span><span class="p">]]</span> <span class="o">=</span> <span class="kc">None</span>
<span class="n">generation_logits</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span>
<span class="n">disaggregated_params</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">DisaggregatedParams</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span>
<span class="n">request_perf_metrics</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">tllm</span><span class="o">.</span><span class="n">RequestPerfMetrics</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span>
<span class="c1"># hidden fields for tracking the diffs</span>
<span class="n">_last_text_len</span><span class="p">:</span> <span class="nb">int</span> <span class="o">=</span> <span class="n">field</span><span class="p">(</span><span class="n">default</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span> <span class="n">init</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="nb">repr</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>
@ -750,6 +752,9 @@
<span class="n">src_idx</span><span class="p">]</span> <span class="o">==</span> <span class="n">tllm</span><span class="o">.</span><span class="n">FinishReason</span><span class="o">.</span><span class="n">CANCELLED</span><span class="p">:</span>
<span class="n">output</span><span class="o">.</span><span class="n">finish_reason</span> <span class="o">=</span> <span class="s1">&#39;cancelled&#39;</span>
<span class="k">if</span> <span class="n">response_tensors</span><span class="o">.</span><span class="n">request_perf_metrics</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">output</span><span class="o">.</span><span class="n">request_perf_metrics</span> <span class="o">=</span> <span class="n">response_tensors</span><span class="o">.</span><span class="n">request_perf_metrics</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">_done</span><span class="p">:</span>
<span class="k">if</span> <span class="n">finish_reasons</span><span class="p">[</span><span class="n">src_idx</span><span class="p">]</span> <span class="o">==</span> <span class="n">tllm</span><span class="o">.</span><span class="n">FinishReason</span><span class="o">.</span><span class="n">END_ID</span><span class="p">:</span>
<span class="n">output</span><span class="o">.</span><span class="n">finish_reason</span> <span class="o">=</span> <span class="s1">&#39;stop&#39;</span>
@ -1283,9 +1288,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -791,9 +791,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -8722,9 +8722,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -647,9 +647,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -3512,9 +3512,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -654,9 +654,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -903,9 +903,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -1370,9 +1370,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -1218,9 +1218,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -1244,9 +1244,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -1008,9 +1008,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -663,9 +663,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -947,9 +947,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -522,11 +522,9 @@
<span class="kn">from</span><span class="w"> </span><span class="nn">tqdm</span><span class="w"> </span><span class="kn">import</span> <span class="n">tqdm</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">transformers</span><span class="w"> </span><span class="kn">import</span> <span class="n">PreTrainedTokenizerBase</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">tensorrt_llm.builder</span><span class="w"> </span><span class="kn">import</span> <span class="n">BuildConfig</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">tensorrt_llm.inputs.data</span><span class="w"> </span><span class="kn">import</span> <span class="n">TextPrompt</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">tensorrt_llm.inputs.registry</span><span class="w"> </span><span class="kn">import</span> <span class="n">DefaultInputProcessor</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">..</span><span class="w"> </span><span class="kn">import</span> <span class="n">bindings</span> <span class="k">as</span> <span class="n">tllm</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">.._utils</span><span class="w"> </span><span class="kn">import</span> <span class="n">nvtx_range_debug</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">..bindings</span><span class="w"> </span><span class="kn">import</span> <span class="n">executor</span> <span class="k">as</span> <span class="n">tllm</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">..builder</span><span class="w"> </span><span class="kn">import</span> <span class="n">EngineConfig</span>
@ -543,7 +541,7 @@
<span class="kn">from</span><span class="w"> </span><span class="nn">..sampling_params</span><span class="w"> </span><span class="kn">import</span> <span class="n">SamplingParams</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">.llm_args</span><span class="w"> </span><span class="kn">import</span> <span class="p">(</span><span class="n">TORCH_LLMARGS_EXPLICIT_DOCSTRING</span><span class="p">,</span>
<span class="n">TRT_LLMARGS_EXPLICIT_DOCSTRING</span><span class="p">,</span> <span class="n">PybindMirror</span><span class="p">,</span>
<span class="n">TorchLlmArgs</span><span class="p">,</span> <span class="n">TrtLlmArgs</span><span class="p">,</span> <span class="n">_AutoDeployLlmArgs</span><span class="p">)</span>
<span class="n">TorchLlmArgs</span><span class="p">,</span> <span class="n">TrtLlmArgs</span><span class="p">)</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">.llm_utils</span><span class="w"> </span><span class="kn">import</span> <span class="p">(</span><span class="n">CachedModelLoader</span><span class="p">,</span> <span class="n">KvCacheRetentionConfig</span><span class="p">,</span>
<span class="n">LlmBuildStats</span><span class="p">,</span> <span class="n">ModelLoader</span><span class="p">,</span> <span class="n">_ModelRuntimeContext</span><span class="p">)</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">.mpi_session</span><span class="w"> </span><span class="kn">import</span> <span class="n">MpiPoolSession</span><span class="p">,</span> <span class="n">external_mpi_comm_available</span>
@ -643,7 +641,9 @@
<span class="k">if</span> <span class="n">backend</span> <span class="o">==</span> <span class="s1">&#39;pytorch&#39;</span><span class="p">:</span>
<span class="n">llm_args_cls</span> <span class="o">=</span> <span class="n">TorchLlmArgs</span>
<span class="k">elif</span> <span class="n">backend</span> <span class="o">==</span> <span class="s1">&#39;_autodeploy&#39;</span><span class="p">:</span>
<span class="n">llm_args_cls</span> <span class="o">=</span> <span class="n">_AutoDeployLlmArgs</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">.._torch.auto_deploy.llm_args</span><span class="w"> </span><span class="kn">import</span> \
<span class="n">LlmArgs</span> <span class="k">as</span> <span class="n">AutoDeployLlmArgs</span>
<span class="n">llm_args_cls</span> <span class="o">=</span> <span class="n">AutoDeployLlmArgs</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">llm_args_cls</span> <span class="o">=</span> <span class="n">TrtLlmArgs</span>
@ -1047,8 +1047,6 @@
<span class="sa">f</span><span class="s2">&quot;PyTorch backend currently only supports `logprobs=1`. Received `logprobs=</span><span class="si">{</span><span class="n">sampling_params</span><span class="o">.</span><span class="n">logprobs</span><span class="si">}</span><span class="s2">` (Top</span><span class="si">{</span><span class="n">sampling_params</span><span class="o">.</span><span class="n">logprobs</span><span class="si">}</span><span class="s2"> logprobs). Please set `logprobs=1` in `sampling_params` instead.&quot;</span>
<span class="p">)</span>
<span class="k">return</span>
<span class="k">elif</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">backend</span> <span class="o">==</span> <span class="s2">&quot;_autodeploy&quot;</span><span class="p">:</span>
<span class="k">return</span>
<span class="n">build_config</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">build_config</span>
@ -1108,134 +1106,6 @@
<span class="n">llm_build_stats</span><span class="o">=</span><span class="n">weakref</span><span class="o">.</span><span class="n">proxy</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">llm_build_stats</span><span class="p">))</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_engine_dir</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">_hf_model_dir</span> <span class="o">=</span> <span class="n">model_loader</span><span class="p">()</span>
<span class="c1"># update the model_dir to a local dir for the runtime, such as tokenizer loading.</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">_engine_dir</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">model</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_engine_dir</span>
<span class="c1"># Tokenizer loading should be after calling model_loader(), since model_loader() may download the model from HF hub.</span>
<span class="c1"># It should also be before bindings ExecutorConfig, which may depend on tokenizer info.</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_tokenizer</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_try_load_tokenizer</span><span class="p">()</span>
<span class="c1"># Multimodal special handling:</span>
<span class="c1"># 1. Default load_tokenizer may fail because MM has different tokenizer configuration. Hence we initialize it inside input processor</span>
<span class="c1"># 2. May need to modify model weights for MM (e.g., resize vocab embedding). We must do such operation via input processor&#39;s __init__</span>
<span class="bp">self</span><span class="o">.</span><span class="n">input_processor</span> <span class="o">=</span> <span class="n">create_input_processor</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_hf_model_dir</span><span class="p">,</span>
<span class="bp">self</span><span class="o">.</span><span class="n">tokenizer</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">tokenizer</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">input_processor</span><span class="o">.</span><span class="n">tokenizer</span>
<span class="n">max_batch_size</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">max_batch_size</span>
<span class="n">max_num_tokens</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">max_num_tokens</span>
<span class="n">max_seq_len</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">max_seq_len</span>
<span class="n">build_config</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">build_config</span> <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">_on_trt_backend</span> <span class="k">else</span> <span class="n">BuildConfig</span><span class="p">(</span>
<span class="p">)</span>
<span class="n">max_batch_size</span> <span class="o">=</span> <span class="n">max_batch_size</span> <span class="ow">or</span> <span class="n">build_config</span><span class="o">.</span><span class="n">max_batch_size</span>
<span class="n">max_num_tokens</span> <span class="o">=</span> <span class="n">max_num_tokens</span> <span class="ow">or</span> <span class="n">build_config</span><span class="o">.</span><span class="n">max_num_tokens</span>
<span class="n">max_seq_len</span> <span class="o">=</span> <span class="n">max_seq_len</span> <span class="ow">or</span> <span class="n">build_config</span><span class="o">.</span><span class="n">max_seq_len</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span> <span class="o">=</span> <span class="n">tllm</span><span class="o">.</span><span class="n">ExecutorConfig</span><span class="p">(</span>
<span class="n">max_beam_width</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">max_beam_width</span><span class="p">,</span>
<span class="n">scheduler_config</span><span class="o">=</span><span class="n">PybindMirror</span><span class="o">.</span><span class="n">maybe_to_pybind</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">scheduler_config</span><span class="p">),</span>
<span class="n">batching_type</span><span class="o">=</span><span class="n">PybindMirror</span><span class="o">.</span><span class="n">maybe_to_pybind</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">batching_type</span><span class="p">)</span>
<span class="ow">or</span> <span class="n">tllm</span><span class="o">.</span><span class="n">BatchingType</span><span class="o">.</span><span class="n">INFLIGHT</span><span class="p">,</span>
<span class="n">max_batch_size</span><span class="o">=</span><span class="n">max_batch_size</span><span class="p">,</span>
<span class="n">max_num_tokens</span><span class="o">=</span><span class="n">max_num_tokens</span><span class="p">,</span>
<span class="n">gather_generation_logits</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">gather_generation_logits</span><span class="p">)</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">backend</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
<span class="c1"># also set executor_config.max_seq_len in TRT workflow, to deduce default max_tokens</span>
<span class="k">if</span> <span class="n">max_seq_len</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">max_seq_len</span> <span class="o">=</span> <span class="n">max_seq_len</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">engine_config</span> <span class="o">=</span> <span class="n">EngineConfig</span><span class="o">.</span><span class="n">from_json_file</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_engine_dir</span> <span class="o">/</span>
<span class="s2">&quot;config.json&quot;</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">max_seq_len</span> <span class="o">=</span> <span class="n">engine_config</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">max_seq_len</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">kv_cache_config</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">kv_cache_config</span> <span class="o">=</span> <span class="n">PybindMirror</span><span class="o">.</span><span class="n">maybe_to_pybind</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">kv_cache_config</span><span class="p">)</span>
<span class="k">if</span> <span class="n">os</span><span class="o">.</span><span class="n">getenv</span><span class="p">(</span><span class="s2">&quot;FORCE_DETERMINISTIC&quot;</span><span class="p">,</span> <span class="s2">&quot;0&quot;</span><span class="p">)</span> <span class="o">==</span> <span class="s2">&quot;1&quot;</span><span class="p">:</span>
<span class="c1"># Disable KV cache reuse for deterministic mode</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">kv_cache_config</span><span class="o">.</span><span class="n">enable_block_reuse</span> <span class="o">=</span> <span class="kc">False</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">kv_cache_config</span><span class="o">.</span><span class="n">enable_partial_reuse</span> <span class="o">=</span> <span class="kc">False</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">peft_cache_config</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">peft_cache_config</span> <span class="o">=</span> <span class="n">PybindMirror</span><span class="o">.</span><span class="n">maybe_to_pybind</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">peft_cache_config</span><span class="p">)</span>
<span class="k">elif</span> <span class="bp">self</span><span class="o">.</span><span class="n">_on_trt_backend</span> <span class="ow">and</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">plugin_config</span><span class="o">.</span><span class="n">lora_plugin</span><span class="p">:</span>
<span class="n">engine_config</span> <span class="o">=</span> <span class="n">EngineConfig</span><span class="o">.</span><span class="n">from_json_file</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_engine_dir</span> <span class="o">/</span>
<span class="s2">&quot;config.json&quot;</span><span class="p">)</span>
<span class="n">lora_config</span> <span class="o">=</span> <span class="n">engine_config</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">lora_config</span>
<span class="n">max_lora_rank</span> <span class="o">=</span> <span class="n">lora_config</span><span class="o">.</span><span class="n">max_lora_rank</span>
<span class="n">num_lora_modules</span> <span class="o">=</span> <span class="n">engine_config</span><span class="o">.</span><span class="n">pretrained_config</span><span class="o">.</span><span class="n">num_hidden_layers</span> <span class="o">*</span> \
<span class="nb">len</span><span class="p">(</span><span class="n">lora_config</span><span class="o">.</span><span class="n">lora_target_modules</span> <span class="o">+</span> <span class="n">lora_config</span><span class="o">.</span><span class="n">missing_qkv_modules</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">peft_cache_config</span> <span class="o">=</span> <span class="n">tllm</span><span class="o">.</span><span class="n">PeftCacheConfig</span><span class="p">(</span>
<span class="n">num_device_module_layer</span><span class="o">=</span><span class="n">max_lora_rank</span> <span class="o">*</span> <span class="n">num_lora_modules</span> <span class="o">*</span>
<span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">max_loras</span><span class="p">,</span>
<span class="n">num_host_module_layer</span><span class="o">=</span><span class="n">max_lora_rank</span> <span class="o">*</span> <span class="n">num_lora_modules</span> <span class="o">*</span>
<span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">max_cpu_loras</span><span class="p">,</span>
<span class="p">)</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">decoding_config</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">decoding_config</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">decoding_config</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">guided_decoding_backend</span> <span class="o">==</span> <span class="s1">&#39;xgrammar&#39;</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">guided_decoding_config</span> <span class="o">=</span> <span class="n">tllm</span><span class="o">.</span><span class="n">GuidedDecodingConfig</span><span class="p">(</span>
<span class="n">backend</span><span class="o">=</span><span class="n">tllm</span><span class="o">.</span><span class="n">GuidedDecodingConfig</span><span class="o">.</span><span class="n">GuidedDecodingBackend</span><span class="o">.</span>
<span class="n">XGRAMMAR</span><span class="p">,</span>
<span class="o">**</span><span class="n">_xgrammar_tokenizer_info</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">tokenizer</span><span class="p">))</span>
<span class="k">elif</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">guided_decoding_backend</span> <span class="o">==</span> <span class="s1">&#39;llguidance&#39;</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">guided_decoding_config</span> <span class="o">=</span> <span class="n">tllm</span><span class="o">.</span><span class="n">GuidedDecodingConfig</span><span class="p">(</span>
<span class="n">backend</span><span class="o">=</span><span class="n">tllm</span><span class="o">.</span><span class="n">GuidedDecodingConfig</span><span class="o">.</span><span class="n">GuidedDecodingBackend</span><span class="o">.</span>
<span class="n">LLGUIDANCE</span><span class="p">,</span>
<span class="o">**</span><span class="n">_llguidance_tokenizer_info</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">tokenizer</span><span class="p">))</span>
<span class="k">elif</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">guided_decoding_backend</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span>
<span class="sa">f</span><span class="s2">&quot;Unrecognized guided decoding backend </span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">guided_decoding_backend</span><span class="si">}</span><span class="s2">&quot;</span>
<span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">normalize_log_probs</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">normalize_log_probs</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">enable_chunked_context</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">enable_chunked_prefill</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">max_beam_width</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">max_beam_width</span> <span class="ow">or</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">max_beam_width</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">_on_trt_backend</span> <span class="ow">and</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">extended_runtime_perf_knob_config</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">extended_runtime_perf_knob_config</span> <span class="o">=</span> <span class="n">PybindMirror</span><span class="o">.</span><span class="n">maybe_to_pybind</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">extended_runtime_perf_knob_config</span><span class="p">)</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">cache_transceiver_config</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">cache_transceiver_config</span> <span class="o">=</span> <span class="n">PybindMirror</span><span class="o">.</span><span class="n">maybe_to_pybind</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">cache_transceiver_config</span><span class="p">)</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">tensorrt_llm._torch.pyexecutor.config</span><span class="w"> </span><span class="kn">import</span> <span class="n">update_executor_config</span>
<span class="n">update_executor_config</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="p">,</span>
<span class="n">backend</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">backend</span><span class="p">,</span>
<span class="n">pytorch_backend_config</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">get_pytorch_backend_config</span><span class="p">()</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">backend</span> <span class="ow">in</span> <span class="p">[</span><span class="s2">&quot;pytorch&quot;</span><span class="p">,</span> <span class="s2">&quot;_autodeploy&quot;</span><span class="p">]</span> <span class="k">else</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">mapping</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">parallel_config</span><span class="o">.</span><span class="n">to_mapping</span><span class="p">(),</span>
<span class="n">build_config</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">build_config</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">_on_trt_backend</span> <span class="k">else</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">speculative_config</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">speculative_config</span><span class="p">,</span>
<span class="n">hf_model_dir</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">_hf_model_dir</span><span class="p">,</span>
<span class="n">trt_engine_dir</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">_engine_dir</span><span class="p">,</span>
<span class="n">max_input_len</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">max_input_len</span><span class="p">,</span>
<span class="n">max_seq_len</span><span class="o">=</span><span class="n">max_seq_len</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">llm_parallel_config</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">parallel_config</span>
<span class="n">return_logits</span> <span class="o">=</span> <span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">gather_generation_logits</span>
<span class="ow">or</span> <span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">build_config</span>
<span class="ow">and</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">gather_context_logits</span><span class="p">))</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_executor_cls</span><span class="o">.</span><span class="n">create</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_engine_dir</span><span class="p">,</span>
<span class="n">executor_config</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="p">,</span>
<span class="n">batched_logits_processor</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">batched_logits_processor</span><span class="p">,</span>
<span class="n">model_world_size</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">parallel_config</span><span class="o">.</span><span class="n">world_size</span><span class="p">,</span>
<span class="n">mpi_session</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">mpi_session</span><span class="p">,</span>
<span class="n">reuse_mpi_comm</span><span class="o">=</span><span class="n">external_mpi_comm_available</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">parallel_config</span><span class="o">.</span><span class="n">world_size</span><span class="p">),</span>
<span class="n">return_logits</span><span class="o">=</span><span class="n">return_logits</span><span class="p">,</span>
<span class="n">postproc_worker_config</span><span class="o">=</span><span class="n">PostprocWorkerConfig</span><span class="p">(</span>
<span class="n">num_postprocess_workers</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">num_postprocess_workers</span><span class="p">,</span>
<span class="n">postprocess_tokenizer_dir</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">postprocess_tokenizer_dir</span><span class="p">,</span>
<span class="p">),</span>
<span class="n">is_llm_executor</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span>
<span class="n">lora_config</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">lora_config</span><span class="p">,</span>
<span class="n">garbage_collection_gen0_threshold</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span>
<span class="n">garbage_collection_gen0_threshold</span><span class="p">)</span>
<span class="nd">@property</span>
<span class="k">def</span><span class="w"> </span><span class="nf">_on_trt_backend</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="nb">bool</span><span class="p">:</span>
@ -1376,6 +1246,116 @@
<span class="sa">f</span><span class="s2">&quot;Copying </span><span class="si">{</span><span class="n">file</span><span class="si">}</span><span class="s2"> to </span><span class="si">{</span><span class="n">target_engine_dir</span><span class="w"> </span><span class="o">/</span><span class="w"> </span><span class="n">file</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="se">\n</span><span class="s2">&quot;</span><span class="p">)</span>
<span class="n">shutil</span><span class="o">.</span><span class="n">copy</span><span class="p">(</span><span class="n">file</span><span class="p">,</span> <span class="n">target_engine_dir</span> <span class="o">/</span> <span class="n">file</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
<span class="k">def</span><span class="w"> </span><span class="nf">_build_model</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="n">_build_model</span><span class="p">()</span>
<span class="c1"># update the model_dir to a local dir for the runtime, such as tokenizer loading.</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">_engine_dir</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">model</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_engine_dir</span>
<span class="c1"># Tokenizer loading should be after calling model_loader(), since model_loader() may download the model from HF hub.</span>
<span class="c1"># It should also be before bindings ExecutorConfig, which may depend on tokenizer info.</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_tokenizer</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_try_load_tokenizer</span><span class="p">()</span>
<span class="c1"># Multimodal special handling:</span>
<span class="c1"># 1. Default load_tokenizer may fail because MM has different tokenizer configuration. Hence we initialize it inside input processor</span>
<span class="c1"># 2. May need to modify model weights for MM (e.g., resize vocab embedding). We must do such operation via input processor&#39;s __init__</span>
<span class="bp">self</span><span class="o">.</span><span class="n">input_processor</span> <span class="o">=</span> <span class="n">create_input_processor</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_hf_model_dir</span><span class="p">,</span>
<span class="bp">self</span><span class="o">.</span><span class="n">tokenizer</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">tokenizer</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">input_processor</span><span class="o">.</span><span class="n">tokenizer</span>
<span class="n">max_batch_size</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">max_batch_size</span>
<span class="n">max_num_tokens</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">max_num_tokens</span>
<span class="n">max_seq_len</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">max_seq_len</span>
<span class="n">build_config</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">build_config</span>
<span class="n">max_batch_size</span> <span class="o">=</span> <span class="n">max_batch_size</span> <span class="ow">or</span> <span class="n">build_config</span><span class="o">.</span><span class="n">max_batch_size</span>
<span class="n">max_num_tokens</span> <span class="o">=</span> <span class="n">max_num_tokens</span> <span class="ow">or</span> <span class="n">build_config</span><span class="o">.</span><span class="n">max_num_tokens</span>
<span class="n">max_seq_len</span> <span class="o">=</span> <span class="n">max_seq_len</span> <span class="ow">or</span> <span class="n">build_config</span><span class="o">.</span><span class="n">max_seq_len</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span> <span class="o">=</span> <span class="n">tllm</span><span class="o">.</span><span class="n">ExecutorConfig</span><span class="p">(</span>
<span class="n">max_beam_width</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">max_beam_width</span><span class="p">,</span>
<span class="n">scheduler_config</span><span class="o">=</span><span class="n">PybindMirror</span><span class="o">.</span><span class="n">maybe_to_pybind</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">scheduler_config</span><span class="p">),</span>
<span class="n">batching_type</span><span class="o">=</span><span class="n">PybindMirror</span><span class="o">.</span><span class="n">maybe_to_pybind</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">batching_type</span><span class="p">)</span>
<span class="ow">or</span> <span class="n">tllm</span><span class="o">.</span><span class="n">BatchingType</span><span class="o">.</span><span class="n">INFLIGHT</span><span class="p">,</span>
<span class="n">max_batch_size</span><span class="o">=</span><span class="n">max_batch_size</span><span class="p">,</span>
<span class="n">max_num_tokens</span><span class="o">=</span><span class="n">max_num_tokens</span><span class="p">,</span>
<span class="n">gather_generation_logits</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">gather_generation_logits</span><span class="p">)</span>
<span class="c1"># also set executor_config.max_seq_len in TRT workflow, to deduce default max_tokens</span>
<span class="k">if</span> <span class="n">max_seq_len</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">max_seq_len</span> <span class="o">=</span> <span class="n">max_seq_len</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">engine_config</span> <span class="o">=</span> <span class="n">EngineConfig</span><span class="o">.</span><span class="n">from_json_file</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_engine_dir</span> <span class="o">/</span>
<span class="s2">&quot;config.json&quot;</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">max_seq_len</span> <span class="o">=</span> <span class="n">engine_config</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">max_seq_len</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">kv_cache_config</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">kv_cache_config</span> <span class="o">=</span> <span class="n">PybindMirror</span><span class="o">.</span><span class="n">maybe_to_pybind</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">kv_cache_config</span><span class="p">)</span>
<span class="k">if</span> <span class="n">os</span><span class="o">.</span><span class="n">getenv</span><span class="p">(</span><span class="s2">&quot;FORCE_DETERMINISTIC&quot;</span><span class="p">,</span> <span class="s2">&quot;0&quot;</span><span class="p">)</span> <span class="o">==</span> <span class="s2">&quot;1&quot;</span><span class="p">:</span>
<span class="c1"># Disable KV cache reuse for deterministic mode</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">kv_cache_config</span><span class="o">.</span><span class="n">enable_block_reuse</span> <span class="o">=</span> <span class="kc">False</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">kv_cache_config</span><span class="o">.</span><span class="n">enable_partial_reuse</span> <span class="o">=</span> <span class="kc">False</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">peft_cache_config</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">peft_cache_config</span> <span class="o">=</span> <span class="n">PybindMirror</span><span class="o">.</span><span class="n">maybe_to_pybind</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">peft_cache_config</span><span class="p">)</span>
<span class="k">elif</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">plugin_config</span><span class="o">.</span><span class="n">lora_plugin</span><span class="p">:</span>
<span class="n">engine_config</span> <span class="o">=</span> <span class="n">EngineConfig</span><span class="o">.</span><span class="n">from_json_file</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_engine_dir</span> <span class="o">/</span>
<span class="s2">&quot;config.json&quot;</span><span class="p">)</span>
<span class="n">lora_config</span> <span class="o">=</span> <span class="n">engine_config</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">lora_config</span>
<span class="n">max_lora_rank</span> <span class="o">=</span> <span class="n">lora_config</span><span class="o">.</span><span class="n">max_lora_rank</span>
<span class="n">num_lora_modules</span> <span class="o">=</span> <span class="n">engine_config</span><span class="o">.</span><span class="n">pretrained_config</span><span class="o">.</span><span class="n">num_hidden_layers</span> <span class="o">*</span> \
<span class="nb">len</span><span class="p">(</span><span class="n">lora_config</span><span class="o">.</span><span class="n">lora_target_modules</span> <span class="o">+</span> <span class="n">lora_config</span><span class="o">.</span><span class="n">missing_qkv_modules</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">peft_cache_config</span> <span class="o">=</span> <span class="n">tllm</span><span class="o">.</span><span class="n">PeftCacheConfig</span><span class="p">(</span>
<span class="n">num_device_module_layer</span><span class="o">=</span><span class="n">max_lora_rank</span> <span class="o">*</span> <span class="n">num_lora_modules</span> <span class="o">*</span>
<span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">max_loras</span><span class="p">,</span>
<span class="n">num_host_module_layer</span><span class="o">=</span><span class="n">max_lora_rank</span> <span class="o">*</span> <span class="n">num_lora_modules</span> <span class="o">*</span>
<span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">max_cpu_loras</span><span class="p">,</span>
<span class="p">)</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">decoding_config</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">decoding_config</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">decoding_config</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">guided_decoding_backend</span> <span class="o">==</span> <span class="s1">&#39;xgrammar&#39;</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">guided_decoding_config</span> <span class="o">=</span> <span class="n">tllm</span><span class="o">.</span><span class="n">GuidedDecodingConfig</span><span class="p">(</span>
<span class="n">backend</span><span class="o">=</span><span class="n">tllm</span><span class="o">.</span><span class="n">GuidedDecodingConfig</span><span class="o">.</span><span class="n">GuidedDecodingBackend</span><span class="o">.</span>
<span class="n">XGRAMMAR</span><span class="p">,</span>
<span class="o">**</span><span class="n">_xgrammar_tokenizer_info</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">tokenizer</span><span class="p">))</span>
<span class="k">elif</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">guided_decoding_backend</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span>
<span class="sa">f</span><span class="s2">&quot;Unsupported guided decoding backend </span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">guided_decoding_backend</span><span class="si">}</span><span class="s2">&quot;</span>
<span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">normalize_log_probs</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">normalize_log_probs</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">enable_chunked_context</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">enable_chunked_prefill</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">max_beam_width</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">max_beam_width</span> <span class="ow">or</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">max_beam_width</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">extended_runtime_perf_knob_config</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">extended_runtime_perf_knob_config</span> <span class="o">=</span> <span class="n">PybindMirror</span><span class="o">.</span><span class="n">maybe_to_pybind</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">extended_runtime_perf_knob_config</span><span class="p">)</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">cache_transceiver_config</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">cache_transceiver_config</span> <span class="o">=</span> <span class="n">PybindMirror</span><span class="o">.</span><span class="n">maybe_to_pybind</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">cache_transceiver_config</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">llm_parallel_config</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">parallel_config</span>
<span class="n">return_logits</span> <span class="o">=</span> <span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">gather_generation_logits</span>
<span class="ow">or</span> <span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">build_config</span>
<span class="ow">and</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">build_config</span><span class="o">.</span><span class="n">gather_context_logits</span><span class="p">))</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_executor_cls</span><span class="o">.</span><span class="n">create</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_engine_dir</span><span class="p">,</span>
<span class="n">executor_config</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="p">,</span>
<span class="n">batched_logits_processor</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">batched_logits_processor</span><span class="p">,</span>
<span class="n">model_world_size</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">parallel_config</span><span class="o">.</span><span class="n">world_size</span><span class="p">,</span>
<span class="n">mpi_session</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">mpi_session</span><span class="p">,</span>
<span class="n">reuse_mpi_comm</span><span class="o">=</span><span class="n">external_mpi_comm_available</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">parallel_config</span><span class="o">.</span><span class="n">world_size</span><span class="p">),</span>
<span class="n">return_logits</span><span class="o">=</span><span class="n">return_logits</span><span class="p">,</span>
<span class="n">postproc_worker_config</span><span class="o">=</span><span class="n">PostprocWorkerConfig</span><span class="p">(</span>
<span class="n">num_postprocess_workers</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">num_postprocess_workers</span><span class="p">,</span>
<span class="n">postprocess_tokenizer_dir</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">postprocess_tokenizer_dir</span><span class="p">,</span>
<span class="p">),</span>
<span class="n">is_llm_executor</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span>
<span class="n">lora_config</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">lora_config</span><span class="p">)</span>
<span class="nd">@append_docstring</span><span class="p">(</span><span class="n">TORCH_LLM_DOCSTRING</span><span class="p">)</span>
<span class="k">class</span><span class="w"> </span><span class="nc">_TorchLLM</span><span class="p">(</span><span class="n">BaseLLM</span><span class="p">):</span>
@ -1398,7 +1378,7 @@
<span class="o">**</span><span class="n">kwargs</span><span class="p">:</span> <span class="n">Any</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="kc">None</span><span class="p">:</span>
<span class="c1"># TODO: deprecate backend in LLM kwargs</span>
<span class="n">kwargs</span><span class="o">.</span><span class="n">pop</span><span class="p">(</span><span class="s2">&quot;backend&quot;</span><span class="p">,</span> <span class="kc">None</span><span class="p">)</span>
<span class="n">backend</span> <span class="o">=</span> <span class="n">kwargs</span><span class="o">.</span><span class="n">pop</span><span class="p">(</span><span class="s2">&quot;backend&quot;</span><span class="p">,</span> <span class="s2">&quot;pytorch&quot;</span><span class="p">)</span>
<span class="c1"># Validate that users don&#39;t pass TrtLlmArgs-specific arguments</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_validate_args_for_torch_backend</span><span class="p">(</span><span class="n">kwargs</span><span class="p">)</span>
@ -1412,9 +1392,104 @@
<span class="n">dtype</span><span class="p">,</span>
<span class="n">revision</span><span class="p">,</span>
<span class="n">tokenizer_revision</span><span class="p">,</span>
<span class="n">backend</span><span class="o">=</span><span class="s1">&#39;pytorch&#39;</span><span class="p">,</span>
<span class="n">backend</span><span class="o">=</span><span class="n">backend</span><span class="p">,</span>
<span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="k">def</span><span class="w"> </span><span class="nf">_build_model</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="n">_build_model</span><span class="p">()</span>
<span class="k">assert</span> <span class="bp">self</span><span class="o">.</span><span class="n">_engine_dir</span> <span class="ow">is</span> <span class="kc">None</span>
<span class="c1"># Tokenizer loading should be after calling model_loader(), since model_loader() may download the model from HF hub.</span>
<span class="c1"># It should also be before bindings ExecutorConfig, which may depend on tokenizer info.</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_tokenizer</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_try_load_tokenizer</span><span class="p">()</span>
<span class="c1"># Multimodal special handling:</span>
<span class="c1"># 1. Default load_tokenizer may fail because MM has different tokenizer configuration. Hence we initialize it inside input processor</span>
<span class="c1"># 2. May need to modify model weights for MM (e.g., resize vocab embedding). We must do such operation via input processor&#39;s __init__</span>
<span class="bp">self</span><span class="o">.</span><span class="n">input_processor</span> <span class="o">=</span> <span class="n">create_input_processor</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_hf_model_dir</span><span class="p">,</span>
<span class="bp">self</span><span class="o">.</span><span class="n">tokenizer</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">tokenizer</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">input_processor</span><span class="o">.</span><span class="n">tokenizer</span>
<span class="n">max_batch_size</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">max_batch_size</span>
<span class="n">max_num_tokens</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">max_num_tokens</span>
<span class="n">max_seq_len</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">max_seq_len</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span> <span class="o">=</span> <span class="n">tllm</span><span class="o">.</span><span class="n">ExecutorConfig</span><span class="p">(</span>
<span class="n">max_beam_width</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">max_beam_width</span><span class="p">,</span>
<span class="n">scheduler_config</span><span class="o">=</span><span class="n">PybindMirror</span><span class="o">.</span><span class="n">maybe_to_pybind</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">scheduler_config</span><span class="p">),</span>
<span class="n">batching_type</span><span class="o">=</span><span class="n">PybindMirror</span><span class="o">.</span><span class="n">maybe_to_pybind</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">batching_type</span><span class="p">)</span>
<span class="ow">or</span> <span class="n">tllm</span><span class="o">.</span><span class="n">BatchingType</span><span class="o">.</span><span class="n">INFLIGHT</span><span class="p">,</span>
<span class="n">max_batch_size</span><span class="o">=</span><span class="n">max_batch_size</span><span class="p">,</span>
<span class="n">max_num_tokens</span><span class="o">=</span><span class="n">max_num_tokens</span><span class="p">,</span>
<span class="n">gather_generation_logits</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">gather_generation_logits</span><span class="p">)</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">kv_cache_config</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">kv_cache_config</span> <span class="o">=</span> <span class="n">PybindMirror</span><span class="o">.</span><span class="n">maybe_to_pybind</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">kv_cache_config</span><span class="p">)</span>
<span class="k">if</span> <span class="n">os</span><span class="o">.</span><span class="n">getenv</span><span class="p">(</span><span class="s2">&quot;FORCE_DETERMINISTIC&quot;</span><span class="p">,</span> <span class="s2">&quot;0&quot;</span><span class="p">)</span> <span class="o">==</span> <span class="s2">&quot;1&quot;</span><span class="p">:</span>
<span class="c1"># Disable KV cache reuse for deterministic mode</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">kv_cache_config</span><span class="o">.</span><span class="n">enable_block_reuse</span> <span class="o">=</span> <span class="kc">False</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">kv_cache_config</span><span class="o">.</span><span class="n">enable_partial_reuse</span> <span class="o">=</span> <span class="kc">False</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">peft_cache_config</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">peft_cache_config</span> <span class="o">=</span> <span class="n">PybindMirror</span><span class="o">.</span><span class="n">maybe_to_pybind</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">peft_cache_config</span><span class="p">)</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">decoding_config</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">decoding_config</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">decoding_config</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">guided_decoding_backend</span> <span class="o">==</span> <span class="s1">&#39;xgrammar&#39;</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">guided_decoding_config</span> <span class="o">=</span> <span class="n">tllm</span><span class="o">.</span><span class="n">GuidedDecodingConfig</span><span class="p">(</span>
<span class="n">backend</span><span class="o">=</span><span class="n">tllm</span><span class="o">.</span><span class="n">GuidedDecodingConfig</span><span class="o">.</span><span class="n">GuidedDecodingBackend</span><span class="o">.</span>
<span class="n">XGRAMMAR</span><span class="p">,</span>
<span class="o">**</span><span class="n">_xgrammar_tokenizer_info</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">tokenizer</span><span class="p">))</span>
<span class="k">elif</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">guided_decoding_backend</span> <span class="o">==</span> <span class="s1">&#39;llguidance&#39;</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">guided_decoding_config</span> <span class="o">=</span> <span class="n">tllm</span><span class="o">.</span><span class="n">GuidedDecodingConfig</span><span class="p">(</span>
<span class="n">backend</span><span class="o">=</span><span class="n">tllm</span><span class="o">.</span><span class="n">GuidedDecodingConfig</span><span class="o">.</span><span class="n">GuidedDecodingBackend</span><span class="o">.</span>
<span class="n">LLGUIDANCE</span><span class="p">,</span>
<span class="o">**</span><span class="n">_llguidance_tokenizer_info</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">tokenizer</span><span class="p">))</span>
<span class="k">elif</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">guided_decoding_backend</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span>
<span class="sa">f</span><span class="s2">&quot;Unsupported guided decoding backend </span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">guided_decoding_backend</span><span class="si">}</span><span class="s2">&quot;</span>
<span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">normalize_log_probs</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">normalize_log_probs</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">enable_chunked_context</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">enable_chunked_prefill</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">max_beam_width</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">max_beam_width</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">cache_transceiver_config</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="o">.</span><span class="n">cache_transceiver_config</span> <span class="o">=</span> <span class="n">PybindMirror</span><span class="o">.</span><span class="n">maybe_to_pybind</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">cache_transceiver_config</span><span class="p">)</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">tensorrt_llm._torch.pyexecutor.config</span><span class="w"> </span><span class="kn">import</span> <span class="n">update_executor_config</span>
<span class="n">update_executor_config</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="p">,</span>
<span class="n">backend</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">backend</span><span class="p">,</span>
<span class="n">pytorch_backend_config</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">get_pytorch_backend_config</span><span class="p">()</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">backend</span> <span class="ow">in</span> <span class="p">[</span><span class="s2">&quot;pytorch&quot;</span><span class="p">,</span> <span class="s2">&quot;_autodeploy&quot;</span><span class="p">]</span> <span class="k">else</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">mapping</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">parallel_config</span><span class="o">.</span><span class="n">to_mapping</span><span class="p">(),</span>
<span class="n">speculative_config</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">speculative_config</span><span class="p">,</span>
<span class="n">hf_model_dir</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">_hf_model_dir</span><span class="p">,</span>
<span class="n">max_input_len</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">max_input_len</span><span class="p">,</span>
<span class="n">max_seq_len</span><span class="o">=</span><span class="n">max_seq_len</span><span class="p">)</span>
<span class="c1"># TODO: revisit gather_context_logits</span>
<span class="n">return_logits</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">gather_generation_logits</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_executor</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_executor_cls</span><span class="o">.</span><span class="n">create</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_engine_dir</span><span class="p">,</span>
<span class="n">executor_config</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">_executor_config</span><span class="p">,</span>
<span class="n">batched_logits_processor</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">batched_logits_processor</span><span class="p">,</span>
<span class="n">model_world_size</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">parallel_config</span><span class="o">.</span><span class="n">world_size</span><span class="p">,</span>
<span class="n">mpi_session</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">mpi_session</span><span class="p">,</span>
<span class="n">reuse_mpi_comm</span><span class="o">=</span><span class="n">external_mpi_comm_available</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">parallel_config</span><span class="o">.</span><span class="n">world_size</span><span class="p">),</span>
<span class="n">return_logits</span><span class="o">=</span><span class="n">return_logits</span><span class="p">,</span>
<span class="n">postproc_worker_config</span><span class="o">=</span><span class="n">PostprocWorkerConfig</span><span class="p">(</span>
<span class="n">num_postprocess_workers</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">num_postprocess_workers</span><span class="p">,</span>
<span class="n">postprocess_tokenizer_dir</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">postprocess_tokenizer_dir</span><span class="p">,</span>
<span class="p">),</span>
<span class="n">is_llm_executor</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span>
<span class="n">lora_config</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span><span class="n">lora_config</span><span class="p">,</span>
<span class="n">garbage_collection_gen0_threshold</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">args</span><span class="o">.</span>
<span class="n">garbage_collection_gen0_threshold</span><span class="p">)</span>
<span class="k">def</span><span class="w"> </span><span class="nf">_validate_args_for_torch_backend</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">kwargs</span><span class="p">:</span> <span class="nb">dict</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="kc">None</span><span class="p">:</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;Validate that users don&#39;t pass TrtLlmArgs-specific arguments when using PyTorch backend.</span>
<span class="sd"> &quot;&quot;&quot;</span>
@ -1579,9 +1654,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -517,7 +517,7 @@
<span class="kn">from</span><span class="w"> </span><span class="nn">enum</span><span class="w"> </span><span class="kn">import</span> <span class="n">Enum</span><span class="p">,</span> <span class="n">EnumMeta</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">pathlib</span><span class="w"> </span><span class="kn">import</span> <span class="n">Path</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">typing</span><span class="w"> </span><span class="kn">import</span> <span class="p">(</span><span class="n">TYPE_CHECKING</span><span class="p">,</span> <span class="n">Any</span><span class="p">,</span> <span class="n">ClassVar</span><span class="p">,</span> <span class="n">Dict</span><span class="p">,</span> <span class="n">List</span><span class="p">,</span> <span class="n">Literal</span><span class="p">,</span> <span class="n">Optional</span><span class="p">,</span>
<span class="n">Union</span><span class="p">)</span>
<span class="n">TypeAlias</span><span class="p">,</span> <span class="n">Union</span><span class="p">)</span>
<span class="kn">import</span><span class="w"> </span><span class="nn">torch</span>
<span class="kn">import</span><span class="w"> </span><span class="nn">yaml</span>
@ -564,8 +564,7 @@
<span class="kn">from</span><span class="w"> </span><span class="nn">..sampling_params</span><span class="w"> </span><span class="kn">import</span> <span class="n">BatchedLogitsProcessor</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">.build_cache</span><span class="w"> </span><span class="kn">import</span> <span class="n">BuildCacheConfig</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">.tokenizer</span><span class="w"> </span><span class="kn">import</span> <span class="n">TokenizerBase</span><span class="p">,</span> <span class="n">tokenizer_factory</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">.utils</span><span class="w"> </span><span class="kn">import</span> <span class="p">(</span><span class="n">generate_api_docs_as_docstring</span><span class="p">,</span> <span class="n">get_type_repr</span><span class="p">,</span>
<span class="n">print_traceback_on_error</span><span class="p">)</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">.utils</span><span class="w"> </span><span class="kn">import</span> <span class="n">generate_api_docs_as_docstring</span><span class="p">,</span> <span class="n">get_type_repr</span>
<span class="c1"># TODO[chunweiy]: move the following symbols back to utils scope, and remove the following import</span>
@ -1178,6 +1177,16 @@
<span class="n">SpeculativeConfig</span><span class="p">:</span> <span class="n">TypeAlias</span> <span class="o">=</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Union</span><span class="p">[</span>
<span class="n">DraftTargetDecodingConfig</span><span class="p">,</span>
<span class="n">EagleDecodingConfig</span><span class="p">,</span>
<span class="n">LookaheadDecodingConfig</span><span class="p">,</span>
<span class="n">MedusaDecodingConfig</span><span class="p">,</span>
<span class="n">MTPDecodingConfig</span><span class="p">,</span>
<span class="n">NGramDecodingConfig</span><span class="p">,</span>
<span class="p">]]</span>
<div class="viewcode-block" id="KvCacheConfig">
<a class="viewcode-back" href="../../../llm-api/reference.html#tensorrt_llm.llmapi.KvCacheConfig">[docs]</a>
<span class="nd">@PybindMirror</span><span class="o">.</span><span class="n">mirror_pybind_fields</span><span class="p">(</span><span class="n">_KvCacheConfig</span><span class="p">)</span>
@ -1239,6 +1248,8 @@
<span class="n">description</span><span class="o">=</span>
<span class="s2">&quot;Whether partially matched blocks that are in use can be reused after copying them.&quot;</span>
<span class="p">)</span>
<span class="n">use_uvm</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span><span class="n">default</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span><span class="s2">&quot;Whether to use UVM for the KV cache.&quot;</span><span class="p">)</span>
<span class="k">def</span><span class="w"> </span><span class="nf">_to_pybind</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="k">return</span> <span class="n">_KvCacheConfig</span><span class="p">(</span>
@ -1253,7 +1264,8 @@
<span class="n">secondary_offload_min_priority</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">secondary_offload_min_priority</span><span class="p">,</span>
<span class="n">event_buffer_max_size</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">event_buffer_max_size</span><span class="p">,</span>
<span class="n">enable_partial_reuse</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">enable_partial_reuse</span><span class="p">,</span>
<span class="n">copy_on_partial_reuse</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">copy_on_partial_reuse</span><span class="p">)</span></div>
<span class="n">copy_on_partial_reuse</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">copy_on_partial_reuse</span><span class="p">,</span>
<span class="n">use_uvm</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">use_uvm</span><span class="p">)</span></div>
@ -1467,8 +1479,11 @@
<span class="n">enable_chunked_prefill</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span><span class="n">default</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span><span class="s2">&quot;Enable chunked prefill.&quot;</span><span class="p">)</span>
<span class="n">guided_decoding_backend</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">description</span><span class="o">=</span><span class="s2">&quot;Guided decoding backend.&quot;</span><span class="p">)</span>
<span class="n">guided_decoding_backend</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Literal</span><span class="p">[</span><span class="s2">&quot;xgrammar&quot;</span><span class="p">,</span> <span class="s2">&quot;llguidance&quot;</span><span class="p">]]</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span>
<span class="s2">&quot;Guided decoding backend. llguidance is supported in PyTorch backend only.&quot;</span>
<span class="p">)</span>
<span class="n">batched_logits_processor</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">object</span><span class="p">]</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
@ -1496,11 +1511,8 @@
<span class="n">default</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">description</span><span class="o">=</span><span class="s2">&quot;Cache transceiver config.&quot;</span><span class="p">)</span>
<span class="c1"># Speculative decoding parameters</span>
<span class="n">speculative_config</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span>
<span class="n">Union</span><span class="p">[</span><span class="n">LookaheadDecodingConfig</span><span class="p">,</span> <span class="n">MedusaDecodingConfig</span><span class="p">,</span>
<span class="n">EagleDecodingConfig</span><span class="p">,</span> <span class="n">MTPDecodingConfig</span><span class="p">,</span> <span class="n">NGramDecodingConfig</span><span class="p">,</span>
<span class="n">DraftTargetDecodingConfig</span><span class="p">]]</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">description</span><span class="o">=</span><span class="s2">&quot;Speculative decoding config.&quot;</span><span class="p">)</span>
<span class="n">speculative_config</span><span class="p">:</span> <span class="n">SpeculativeConfig</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">description</span><span class="o">=</span><span class="s2">&quot;Speculative decoding config.&quot;</span><span class="p">)</span>
<span class="n">batching_type</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">BatchingType</span><span class="p">]</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span><span class="n">default</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span><span class="s2">&quot;Batching type.&quot;</span><span class="p">)</span>
@ -1542,12 +1554,6 @@
<span class="n">default</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span><span class="s2">&quot;The parser to separate reasoning content from output.&quot;</span><span class="p">)</span>
<span class="n">garbage_collection_gen0_threshold</span><span class="p">:</span> <span class="nb">int</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="mi">20000</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span>
<span class="s2">&quot;Threshold for Python garbage collection of generation 0 objects.&quot;</span>
<span class="s2">&quot;Lower values trigger more frequent garbage collection.&quot;</span><span class="p">)</span>
<span class="c1"># TODO[Superjomn]: To deprecate this config.</span>
<span class="n">decoding_config</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">object</span><span class="p">]</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
@ -2229,7 +2235,6 @@
<div class="viewcode-block" id="TorchLlmArgs">
<a class="viewcode-back" href="../../../llm-api/reference.html#tensorrt_llm.llmapi.TorchLlmArgs">[docs]</a>
<span class="k">class</span><span class="w"> </span><span class="nc">TorchLlmArgs</span><span class="p">(</span><span class="n">BaseLlmArgs</span><span class="p">):</span>
<span class="c1"># Just a dummy BuildConfig to allow code reuse with the TrtLlmArgs</span>
<span class="n">build_config</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">object</span><span class="p">]</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
@ -2239,6 +2244,12 @@
<span class="c1"># PyTorch backend specific configurations</span>
<span class="n">garbage_collection_gen0_threshold</span><span class="p">:</span> <span class="nb">int</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="mi">20000</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span>
<span class="s2">&quot;Threshold for Python garbage collection of generation 0 objects.&quot;</span>
<span class="s2">&quot;Lower values trigger more frequent garbage collection.&quot;</span><span class="p">)</span>
<span class="n">use_cuda_graph</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span>
@ -2531,115 +2542,6 @@
<span class="k">class</span><span class="w"> </span><span class="nc">_AutoDeployLlmArgs</span><span class="p">(</span><span class="n">TorchLlmArgs</span><span class="p">):</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;LLM arguments specifically for AutoDeploy backend.</span>
<span class="sd"> This class extends TorchLlmArgs with AutoDeploy-specific configuration options.</span>
<span class="sd"> AutoDeploy provides automatic deployment and optimization of language models</span>
<span class="sd"> with various attention backends and optimization strategies.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="n">model_factory</span><span class="p">:</span> <span class="n">Literal</span><span class="p">[</span>
<span class="s2">&quot;AutoModelForCausalLM&quot;</span><span class="p">,</span> <span class="s2">&quot;AutoModelForImageTextToText&quot;</span><span class="p">]</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="s2">&quot;AutoModelForCausalLM&quot;</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span><span class="s2">&quot;The model factory to use for loading the model.&quot;</span><span class="p">,</span>
<span class="p">)</span>
<span class="n">model_kwargs</span><span class="p">:</span> <span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">Any</span><span class="p">]</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default_factory</span><span class="o">=</span><span class="nb">dict</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span>
<span class="s2">&quot;Extra kwargs for the model config class to customize the model config. &quot;</span>
<span class="s2">&quot;These arguments take precedence over default values or config values in the model config &quot;</span>
<span class="s2">&quot;file. Arguments are resolved in order: 1) Default values in model config class, 2) Values &quot;</span>
<span class="s2">&quot;in model config file, 3) Values in model_kwargs. Note: if a kwarg doesn&#39;t exist in the &quot;</span>
<span class="s2">&quot;model config class, it will be ignored.&quot;</span><span class="p">,</span>
<span class="p">)</span>
<span class="n">mla_backend</span><span class="p">:</span> <span class="n">Literal</span><span class="p">[</span><span class="s2">&quot;MultiHeadLatentAttention&quot;</span><span class="p">]</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="s2">&quot;MultiHeadLatentAttention&quot;</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span><span class="s2">&quot;The Multi-Head Latent Attention backend to use.&quot;</span><span class="p">,</span>
<span class="p">)</span>
<span class="n">skip_loading_weights</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span>
<span class="s2">&quot;Whether to skip loading model weights during initialization. &quot;</span>
<span class="s2">&quot;If True, only the model architecture is loaded.&quot;</span><span class="p">,</span>
<span class="p">)</span>
<span class="n">free_mem_ratio</span><span class="p">:</span> <span class="nb">float</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="mf">0.8</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span><span class="s2">&quot;The fraction of available memory to allocate for cache. &quot;</span>
<span class="s2">&quot;Must be between 0.0 and 1.0.&quot;</span><span class="p">,</span>
<span class="p">)</span>
<span class="n">simple_shard_only</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span>
<span class="s2">&quot;If True, force simple sharding (all_gather) in tensor parallelism. &quot;</span>
<span class="s2">&quot;If False, auto-detect and use column+row (all_reduce) sharding when possible.&quot;</span><span class="p">,</span>
<span class="p">)</span>
<span class="c1"># TODO: Remove this field once tokens_per_block is properly passed through</span>
<span class="n">attn_page_size</span><span class="p">:</span> <span class="nb">int</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="mi">64</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span>
<span class="s2">&quot;Page size for attention (tokens_per_block). For TritonWithFlattenedInputs &quot;</span>
<span class="s2">&quot;backend, this should equal max_seq_len. Temporary field until tokens_per_block gets &quot;</span>
<span class="s2">&quot;properly passed through.&quot;</span><span class="p">,</span>
<span class="p">)</span>
<span class="n">checkpoint_device</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="n">Field</span><span class="p">(</span>
<span class="n">default</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
<span class="n">description</span><span class="o">=</span><span class="s2">&quot;Device on which to load the model checkpoint. &quot;</span>
<span class="s2">&quot;Defaults to the same device as the rest of the pipeline.&quot;</span><span class="p">,</span>
<span class="p">)</span>
<span class="nd">@field_validator</span><span class="p">(</span><span class="s2">&quot;free_mem_ratio&quot;</span><span class="p">)</span>
<span class="nd">@classmethod</span>
<span class="k">def</span><span class="w"> </span><span class="nf">validate_free_mem_ratio</span><span class="p">(</span><span class="bp">cls</span><span class="p">,</span> <span class="n">v</span><span class="p">):</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;Validate that free_mem_ratio is between 0.0 and 1.0.&quot;&quot;&quot;</span>
<span class="k">if</span> <span class="ow">not</span> <span class="mf">0.0</span> <span class="o">&lt;=</span> <span class="n">v</span> <span class="o">&lt;=</span> <span class="mf">1.0</span><span class="p">:</span>
<span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span>
<span class="sa">f</span><span class="s2">&quot;free_mem_ratio must be between 0.0 and 1.0, got </span><span class="si">{</span><span class="n">v</span><span class="si">}</span><span class="s2">&quot;</span><span class="p">)</span>
<span class="k">return</span> <span class="n">v</span>
<span class="nd">@print_traceback_on_error</span>
<span class="k">def</span><span class="w"> </span><span class="nf">model_post_init</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">__context</span><span class="p">):</span>
<span class="c1"># Modify default values that differ from TorchLlmArgs</span>
<span class="n">new_defaults</span> <span class="o">=</span> <span class="p">{</span>
<span class="s2">&quot;max_batch_size&quot;</span><span class="p">:</span> <span class="mi">8</span><span class="p">,</span>
<span class="s2">&quot;max_seq_len&quot;</span><span class="p">:</span> <span class="mi">512</span><span class="p">,</span>
<span class="s2">&quot;attn_backend&quot;</span><span class="p">:</span> <span class="s2">&quot;FlashInfer&quot;</span><span class="p">,</span>
<span class="c1"># TODO: Remove this when overlap scheduler is supported (https://github.com/NVIDIA/TensorRT-LLM/issues/4364)</span>
<span class="s2">&quot;disable_overlap_scheduler&quot;</span><span class="p">:</span> <span class="kc">True</span><span class="p">,</span>
<span class="p">}</span>
<span class="k">for</span> <span class="n">k</span><span class="p">,</span> <span class="n">v_default</span> <span class="ow">in</span> <span class="n">new_defaults</span><span class="o">.</span><span class="n">items</span><span class="p">():</span>
<span class="k">if</span> <span class="n">k</span> <span class="ow">not</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">__pydantic_fields_set__</span><span class="p">:</span>
<span class="nb">setattr</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">k</span><span class="p">,</span> <span class="n">v_default</span><span class="p">)</span>
<span class="c1"># NOTE: Only call super() after setting the default values since default values should be</span>
<span class="c1"># set first.</span>
<span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="n">model_post_init</span><span class="p">(</span><span class="n">__context</span><span class="p">)</span>
<span class="c1"># Handle attn_page_size for TritonWithFlattenedInputs backend</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">attn_backend</span> <span class="o">==</span> <span class="s2">&quot;TritonWithFlattenedInputs&quot;</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">attn_page_size</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_seq_len</span>
<span class="c1"># Add max_position_embeddings to model_kwargs</span>
<span class="c1"># TODO (lucaslie): this is more HF specific than a generic model_kwargs. Ideally, we can</span>
<span class="c1"># move this to the HF model factory but we don&#39;t have access to max_seq_len there right now.</span>
<span class="bp">self</span><span class="o">.</span><span class="n">model_kwargs</span><span class="p">[</span><span class="s2">&quot;max_position_embeddings&quot;</span><span class="p">]</span> <span class="o">=</span> <span class="nb">min</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">max_seq_len</span><span class="p">,</span>
<span class="bp">self</span><span class="o">.</span><span class="n">model_kwargs</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="s2">&quot;max_position_embeddings&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_seq_len</span><span class="p">),</span>
<span class="p">)</span>
<span class="c1"># TODO: Remove this after the PyTorch backend is fully migrated to TorchLlmArgs from ExecutorConfig</span>
<span class="k">def</span><span class="w"> </span><span class="nf">get_pytorch_backend_config</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="s2">&quot;_AutoDeployLlmArgs&quot;</span><span class="p">:</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;Return the _AutoDeployLlmArgs (self) object.&quot;&quot;&quot;</span>
<span class="k">return</span> <span class="bp">self</span>
<span class="k">def</span><span class="w"> </span><span class="nf">update_llm_args_with_extra_dict</span><span class="p">(</span>
<span class="n">llm_args</span><span class="p">:</span> <span class="n">Dict</span><span class="p">,</span>
<span class="n">llm_args_dict</span><span class="p">:</span> <span class="n">Dict</span><span class="p">,</span>
@ -2831,9 +2733,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -1149,9 +1149,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -881,9 +881,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -1185,9 +1185,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -793,9 +793,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -810,9 +810,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -1009,9 +1009,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -838,9 +838,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -669,9 +669,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -922,9 +922,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -820,9 +820,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -684,9 +684,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -810,9 +810,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -904,9 +904,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -986,9 +986,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -1022,9 +1022,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -1958,9 +1958,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -2863,9 +2863,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -745,9 +745,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -907,9 +907,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -835,9 +835,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -1027,9 +1027,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -954,9 +954,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -1057,9 +1057,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -683,9 +683,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -835,9 +835,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -775,9 +775,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -909,9 +909,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -1257,9 +1257,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -1102,9 +1102,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -742,9 +742,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -892,9 +892,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -2203,9 +2203,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -1269,9 +1269,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -2664,9 +2664,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -807,9 +807,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -741,9 +741,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -809,9 +809,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -812,9 +812,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -856,9 +856,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -952,9 +952,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -1255,9 +1255,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -530,7 +530,7 @@
<span class="kn">from</span><span class="w"> </span><span class="nn">tensorrt_llm._common</span><span class="w"> </span><span class="kn">import</span> <span class="n">default_net</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">tensorrt_llm.bindings</span><span class="w"> </span><span class="kn">import</span> <span class="n">KVCacheType</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">tensorrt_llm.functional</span><span class="w"> </span><span class="kn">import</span> <span class="n">Tensor</span><span class="p">,</span> <span class="n">cast</span><span class="p">,</span> <span class="n">categorical_sample</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">tensorrt_llm.models</span><span class="w"> </span><span class="kn">import</span> <span class="n">LLaMAForCausalLM</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">tensorrt_llm.models</span><span class="w"> </span><span class="kn">import</span> <span class="n">LLaMAForCausalLM</span><span class="p">,</span> <span class="n">QWenForCausalLM</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">tensorrt_llm.models.generation_mixin</span><span class="w"> </span><span class="kn">import</span> <span class="n">GenerationMixin</span>
<span class="kn">from</span><span class="w"> </span><span class="nn">..._utils</span><span class="w"> </span><span class="kn">import</span> <span class="n">pad_vocab_size</span><span class="p">,</span> <span class="n">str_dtype_to_trt</span>
@ -539,9 +539,7 @@
<span class="n">_process_logits_and_hidden_states</span><span class="p">)</span>
<div class="viewcode-block" id="ReDrafterForCausalLM">
<a class="viewcode-back" href="../../../../python-api/tensorrt_llm.models.html#tensorrt_llm.models.ReDrafterForCausalLM">[docs]</a>
<span class="k">class</span><span class="w"> </span><span class="nc">ReDrafterForCausalLM</span><span class="p">(</span><span class="n">LLaMAForCausalLM</span><span class="p">):</span>
<span class="k">class</span><span class="w"> </span><span class="nc">ReDrafterMixin</span><span class="p">:</span>
<span class="k">def</span><span class="w"> </span><span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">config</span><span class="p">):</span>
@ -624,8 +622,6 @@
<span class="k">return</span> <span class="n">next_token</span><span class="p">,</span> <span class="n">probs</span><span class="p">,</span> <span class="n">draft_input</span>
<div class="viewcode-block" id="ReDrafterForCausalLM.forward">
<a class="viewcode-back" href="../../../../python-api/tensorrt_llm.models.html#tensorrt_llm.models.ReDrafterForCausalLM.forward">[docs]</a>
<span class="k">def</span><span class="w"> </span><span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="o">*</span><span class="n">args</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> 0. run base model, get logits, hidden_states</span>
@ -661,11 +657,8 @@
<span class="bp">self</span><span class="o">.</span><span class="n">drafter</span><span class="p">,</span>
<span class="n">kwargs</span><span class="o">=</span><span class="n">kwargs</span><span class="p">)</span>
<span class="k">return</span> <span class="n">new_draft_tokens</span><span class="p">,</span> <span class="n">new_draft_logits</span><span class="p">,</span> <span class="n">probs</span></div>
<span class="k">return</span> <span class="n">new_draft_tokens</span><span class="p">,</span> <span class="n">new_draft_logits</span><span class="p">,</span> <span class="n">probs</span>
<div class="viewcode-block" id="ReDrafterForCausalLM.prepare_inputs">
<a class="viewcode-back" href="../../../../python-api/tensorrt_llm.models.html#tensorrt_llm.models.ReDrafterForCausalLM.prepare_inputs">[docs]</a>
<span class="k">def</span><span class="w"> </span><span class="nf">prepare_inputs</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="o">*</span><span class="n">args</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Inputs needed:</span>
@ -813,8 +806,30 @@
<span class="n">inputs</span><span class="p">[</span><span class="s1">&#39;rand_data_validation&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="n">rand_data_validation</span>
<span class="n">inputs</span><span class="p">[</span><span class="s1">&#39;rand_data_sample&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="n">rand_data_sample</span>
<span class="n">inputs</span><span class="p">[</span><span class="s1">&#39;position_ids_base&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="n">position_ids_base</span>
<span class="k">return</span> <span class="n">inputs</span></div>
</div>
<span class="k">return</span> <span class="n">inputs</span>
<div class="viewcode-block" id="ReDrafterForQWenLM">
<a class="viewcode-back" href="../../../../python-api/tensorrt_llm.models.html#tensorrt_llm.models.ReDrafterForQWenLM">[docs]</a>
<span class="k">class</span><span class="w"> </span><span class="nc">ReDrafterForQWenLM</span><span class="p">(</span><span class="n">ReDrafterMixin</span><span class="p">,</span> <span class="n">QWenForCausalLM</span><span class="p">):</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;ReDrafter implementation for QWen models.</span>
<span class="sd"> Combines:</span>
<span class="sd"> - Base QWen model functionality from QWenForCausalLM</span>
<span class="sd"> - Drafting/speculative decoding logic from ReDrafterMixin</span>
<span class="sd"> &quot;&quot;&quot;</span></div>
<div class="viewcode-block" id="ReDrafterForLLaMALM">
<a class="viewcode-back" href="../../../../python-api/tensorrt_llm.models.html#tensorrt_llm.models.ReDrafterForLLaMALM">[docs]</a>
<span class="k">class</span><span class="w"> </span><span class="nc">ReDrafterForLLaMALM</span><span class="p">(</span><span class="n">ReDrafterMixin</span><span class="p">,</span> <span class="n">LLaMAForCausalLM</span><span class="p">):</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;ReDrafter implementation for LLaMA models.</span>
<span class="sd"> Combines:</span>
<span class="sd"> - Base LLaMA model functionality from LLaMAForCausalLM</span>
<span class="sd"> - Drafting/speculative decoding logic from ReDrafterMixin</span>
<span class="sd"> &quot;&quot;&quot;</span></div>
</pre></div>
@ -927,9 +942,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -1483,9 +1483,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -1051,9 +1051,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -1898,9 +1898,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -1166,9 +1166,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -5454,9 +5454,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -1113,9 +1113,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -1625,9 +1625,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -1835,9 +1835,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -3408,9 +3408,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -973,9 +973,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -50,7 +50,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -60,7 +60,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -531,7 +531,7 @@
<span class="sd"> regex (str, optional): The generated text is amenable to the user-specified regular expression. Defaults to None.</span>
<span class="sd"> grammar (str, optional): The generated text is amenable to the user-specified extended Backus-Naur form (EBNF) grammar. Defaults to None.</span>
<span class="sd"> json_object (bool): If True, the generated text is amenable to json format. Defaults to False.</span>
<span class="sd"> structural_tag (str, optional): The generated text is amenable to the user-specified structural tag. Defaults to None.</span>
<span class="sd"> structural_tag (str, optional): The generated text is amenable to the user-specified structural tag. Structural tag is supported by xgrammar in PyTorch backend only. Defaults to None.</span>
<span class="sd"> &quot;&quot;&quot;</span> <span class="c1"># noqa: E501</span>
<span class="n">json</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Union</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">BaseModel</span><span class="p">,</span> <span class="nb">dict</span><span class="p">]]</span> <span class="o">=</span> <span class="kc">None</span>
@ -1099,9 +1099,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -809,9 +809,9 @@ export UCX_RNDV_PIPELINE_ERROR_HANDLING=y
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -814,9 +814,9 @@ the TensorRT-LLM C++ Executor API.</p>
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -681,9 +681,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -999,9 +999,9 @@ is computed as:</p>
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -1040,9 +1040,9 @@ The <code class="docutils literal notranslate"><span class="pre">GptDecoder</spa
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -860,9 +860,9 @@ techniques to optimize the underlying graph. It provides a wrapper similar to P
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -765,9 +765,9 @@ An “event” is any significant change in the lifecycle or state of a KV cache
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -738,9 +738,9 @@ Assume vocabulary size is 100, which means normal text token ids are in range [0
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -852,9 +852,9 @@ The shape of <code class="docutils literal notranslate"><span class="pre">LoraWe
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -61,7 +61,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -701,9 +701,9 @@ This feature is optimized for PCIe-based GPU topologies and may affect model acc
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -61,7 +61,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -656,9 +656,9 @@ Note that support for these static libraries will be gradually deprioritized in
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -840,9 +840,9 @@ However, similar to any new model, you can follow the same approach to define yo
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -61,7 +61,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -689,9 +689,9 @@ python3<span class="w"> </span>examples/summarize.py<span class="w"> </span><spa
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -751,9 +751,9 @@ python<span class="w"> </span>../summarize.py<span class="w"> </span>--engine_di
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -1018,9 +1018,9 @@ trtllm-build<span class="w"> </span>--checkpoint_dir<span class="w"> </span>./op
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -1022,9 +1022,9 @@ srun<span class="w"> </span><span class="se">\</span>
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -61,7 +61,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -940,9 +940,9 @@ The support for Qwen-1 is in <code class="docutils literal notranslate"><span cl
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -669,9 +669,9 @@ Server</a> to easily create web-based services for LLMs. TensorRT-LLM supports m
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -848,9 +848,9 @@ The usage of this API looks like this:</p>
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -61,7 +61,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -1126,9 +1126,9 @@ For more details on <code class="docutils literal notranslate"><span class="pre"
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -807,9 +807,9 @@ ISL = Input Sequence Length
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -759,9 +759,9 @@
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -751,9 +751,9 @@ TensorRT-LLM v0.5.0, TensorRT v9.1.0.4 | H200, H100 FP8. </sub></p>
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

View File

@ -51,7 +51,7 @@
<script>
DOCUMENTATION_OPTIONS.theme_version = '0.16.1';
DOCUMENTATION_OPTIONS.theme_switcher_json_url = './_static/switcher.json';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc0';
DOCUMENTATION_OPTIONS.theme_switcher_version_match = '1.0.0rc1';
DOCUMENTATION_OPTIONS.show_version_warning_banner =
false;
</script>
@ -63,7 +63,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="docsearch:language" content="en"/>
<meta name="docsearch:version" content="1.0.0rc0" />
<meta name="docsearch:version" content="1.0.0rc1" />
</head>
@ -718,9 +718,9 @@ ISL = Input Sequence Length
<div class="footer-item">
<div class="extra_footer">
<p>Last updated on June 21, 2025.</p>
<p>Last updated on June 29, 2025.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/ebadc13">ebadc13</a>.</p>
<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/de97799">de97799</a>.</p>
</div></div>

Some files were not shown because too many files have changed in this diff Show More