Layers
======
<li class="toctree-l1 current"><a class="current reference internal" href="#">Layers</a><ul>
|
||
<li class="toctree-l2"><a class="reference internal" href="#module-tensorrt_llm.layers.activation">Activation</a><ul>
|
||
<li class="toctree-l3"><a class="reference internal" href="#tensorrt_llm.layers.activation.Mish"><code class="docutils literal notranslate"><span class="pre">Mish</span></code></a><ul>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.activation.Mish.forward"><code class="docutils literal notranslate"><span class="pre">Mish.forward()</span></code></a></li>
|
||
</ul>
|
||
</li>
|
||
</ul>
|
||
</li>
|
||
<li class="toctree-l2"><a class="reference internal" href="#module-tensorrt_llm.layers.attention">Attention</a><ul>
|
||
<li class="toctree-l3"><a class="reference internal" href="#tensorrt_llm.layers.attention.Attention"><code class="docutils literal notranslate"><span class="pre">Attention</span></code></a><ul>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.attention.Attention.create_attention_const_params"><code class="docutils literal notranslate"><span class="pre">Attention.create_attention_const_params()</span></code></a></li>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.attention.Attention.fill_attention_params"><code class="docutils literal notranslate"><span class="pre">Attention.fill_attention_params()</span></code></a></li>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.attention.Attention.forward"><code class="docutils literal notranslate"><span class="pre">Attention.forward()</span></code></a></li>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.attention.Attention.postprocess"><code class="docutils literal notranslate"><span class="pre">Attention.postprocess()</span></code></a></li>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.attention.Attention.set_rel_attn_table"><code class="docutils literal notranslate"><span class="pre">Attention.set_rel_attn_table()</span></code></a></li>
|
||
</ul>
|
||
</li>
|
||
<li class="toctree-l3"><a class="reference internal" href="#tensorrt_llm.layers.attention.AttentionMaskParams"><code class="docutils literal notranslate"><span class="pre">AttentionMaskParams</span></code></a></li>
|
||
<li class="toctree-l3"><a class="reference internal" href="#tensorrt_llm.layers.attention.AttentionParams"><code class="docutils literal notranslate"><span class="pre">AttentionParams</span></code></a><ul>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.attention.AttentionParams.fill_attention_const_params_for_long_rope"><code class="docutils literal notranslate"><span class="pre">AttentionParams.fill_attention_const_params_for_long_rope()</span></code></a></li>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.attention.AttentionParams.fill_attention_const_params_for_rope"><code class="docutils literal notranslate"><span class="pre">AttentionParams.fill_attention_const_params_for_rope()</span></code></a></li>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.attention.AttentionParams.is_valid"><code class="docutils literal notranslate"><span class="pre">AttentionParams.is_valid()</span></code></a></li>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.attention.AttentionParams.is_valid_cross_attn"><code class="docutils literal notranslate"><span class="pre">AttentionParams.is_valid_cross_attn()</span></code></a></li>
|
||
</ul>
|
||
</li>
|
||
<li class="toctree-l3"><a class="reference internal" href="#tensorrt_llm.layers.attention.BertAttention"><code class="docutils literal notranslate"><span class="pre">BertAttention</span></code></a><ul>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.attention.BertAttention.forward"><code class="docutils literal notranslate"><span class="pre">BertAttention.forward()</span></code></a></li>
|
||
</ul>
|
||
</li>
|
||
<li class="toctree-l3"><a class="reference internal" href="#tensorrt_llm.layers.attention.BlockSparseAttnParams"><code class="docutils literal notranslate"><span class="pre">BlockSparseAttnParams</span></code></a></li>
|
||
<li class="toctree-l3"><a class="reference internal" href="#tensorrt_llm.layers.attention.CogVLMAttention"><code class="docutils literal notranslate"><span class="pre">CogVLMAttention</span></code></a><ul>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.attention.CogVLMAttention.forward"><code class="docutils literal notranslate"><span class="pre">CogVLMAttention.forward()</span></code></a></li>
|
||
</ul>
|
||
</li>
|
||
<li class="toctree-l3"><a class="reference internal" href="#tensorrt_llm.layers.attention.DeepseekV2Attention"><code class="docutils literal notranslate"><span class="pre">DeepseekV2Attention</span></code></a><ul>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.attention.DeepseekV2Attention.forward"><code class="docutils literal notranslate"><span class="pre">DeepseekV2Attention.forward()</span></code></a></li>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.attention.DeepseekV2Attention.weight_loader"><code class="docutils literal notranslate"><span class="pre">DeepseekV2Attention.weight_loader()</span></code></a></li>
|
||
</ul>
|
||
</li>
|
||
<li class="toctree-l3"><a class="reference internal" href="#tensorrt_llm.layers.attention.KeyValueCacheParams"><code class="docutils literal notranslate"><span class="pre">KeyValueCacheParams</span></code></a><ul>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.attention.KeyValueCacheParams.fill_none_tensor_list"><code class="docutils literal notranslate"><span class="pre">KeyValueCacheParams.fill_none_tensor_list()</span></code></a></li>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.attention.KeyValueCacheParams.get_first_past_key_value"><code class="docutils literal notranslate"><span class="pre">KeyValueCacheParams.get_first_past_key_value()</span></code></a></li>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.attention.KeyValueCacheParams.is_valid"><code class="docutils literal notranslate"><span class="pre">KeyValueCacheParams.is_valid()</span></code></a></li>
|
||
</ul>
|
||
</li>
|
||
<li class="toctree-l3"><a class="reference internal" href="#tensorrt_llm.layers.attention.MropeParams"><code class="docutils literal notranslate"><span class="pre">MropeParams</span></code></a></li>
|
||
<li class="toctree-l3"><a class="reference internal" href="#tensorrt_llm.layers.attention.SpecDecodingParams"><code class="docutils literal notranslate"><span class="pre">SpecDecodingParams</span></code></a></li>
|
||
<li class="toctree-l3"><a class="reference internal" href="#tensorrt_llm.layers.attention.compute_relative_bias"><code class="docutils literal notranslate"><span class="pre">compute_relative_bias()</span></code></a></li>
|
||
<li class="toctree-l3"><a class="reference internal" href="#tensorrt_llm.layers.attention.make_causal_mask"><code class="docutils literal notranslate"><span class="pre">make_causal_mask()</span></code></a></li>
|
||
</ul>
|
||
</li>
|
||
<li class="toctree-l2"><a class="reference internal" href="#module-tensorrt_llm.layers.cast">Cast</a><ul>
|
||
<li class="toctree-l3"><a class="reference internal" href="#tensorrt_llm.layers.cast.Cast"><code class="docutils literal notranslate"><span class="pre">Cast</span></code></a><ul>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.cast.Cast.forward"><code class="docutils literal notranslate"><span class="pre">Cast.forward()</span></code></a></li>
|
||
</ul>
|
||
</li>
|
||
</ul>
|
||
</li>
|
||
<li class="toctree-l2"><a class="reference internal" href="#module-tensorrt_llm.layers.conv">Conv</a><ul>
|
||
<li class="toctree-l3"><a class="reference internal" href="#tensorrt_llm.layers.conv.Conv1d"><code class="docutils literal notranslate"><span class="pre">Conv1d</span></code></a><ul>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.conv.Conv1d.forward"><code class="docutils literal notranslate"><span class="pre">Conv1d.forward()</span></code></a></li>
|
||
</ul>
|
||
</li>
|
||
<li class="toctree-l3"><a class="reference internal" href="#tensorrt_llm.layers.conv.Conv2d"><code class="docutils literal notranslate"><span class="pre">Conv2d</span></code></a><ul>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.conv.Conv2d.forward"><code class="docutils literal notranslate"><span class="pre">Conv2d.forward()</span></code></a></li>
|
||
</ul>
|
||
</li>
|
||
<li class="toctree-l3"><a class="reference internal" href="#tensorrt_llm.layers.conv.ConvTranspose2d"><code class="docutils literal notranslate"><span class="pre">ConvTranspose2d</span></code></a><ul>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.conv.ConvTranspose2d.forward"><code class="docutils literal notranslate"><span class="pre">ConvTranspose2d.forward()</span></code></a></li>
|
||
</ul>
|
||
</li>
|
||
</ul>
|
||
</li>
|
||
<li class="toctree-l2"><a class="reference internal" href="#module-tensorrt_llm.layers.embedding">Embedding</a><ul>
|
||
<li class="toctree-l3"><a class="reference internal" href="#tensorrt_llm.layers.embedding.Embedding"><code class="docutils literal notranslate"><span class="pre">Embedding</span></code></a><ul>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.embedding.Embedding.forward"><code class="docutils literal notranslate"><span class="pre">Embedding.forward()</span></code></a></li>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.embedding.Embedding.postprocess"><code class="docutils literal notranslate"><span class="pre">Embedding.postprocess()</span></code></a></li>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.embedding.Embedding.weight_loader"><code class="docutils literal notranslate"><span class="pre">Embedding.weight_loader()</span></code></a></li>
|
||
</ul>
|
||
</li>
|
||
<li class="toctree-l3"><a class="reference internal" href="#tensorrt_llm.layers.embedding.PromptTuningEmbedding"><code class="docutils literal notranslate"><span class="pre">PromptTuningEmbedding</span></code></a><ul>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.embedding.PromptTuningEmbedding.forward"><code class="docutils literal notranslate"><span class="pre">PromptTuningEmbedding.forward()</span></code></a></li>
|
||
</ul>
|
||
</li>
|
||
</ul>
|
||
</li>
|
||
<li class="toctree-l2"><a class="reference internal" href="#module-tensorrt_llm.layers.linear">Linear</a><ul>
|
||
<li class="toctree-l3"><a class="reference internal" href="#tensorrt_llm.layers.linear.ColumnLinear"><code class="docutils literal notranslate"><span class="pre">ColumnLinear</span></code></a></li>
|
||
<li class="toctree-l3"><a class="reference internal" href="#tensorrt_llm.layers.linear.Linear"><code class="docutils literal notranslate"><span class="pre">Linear</span></code></a><ul>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.linear.Linear.collect_and_bias"><code class="docutils literal notranslate"><span class="pre">Linear.collect_and_bias()</span></code></a></li>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.linear.Linear.postprocess"><code class="docutils literal notranslate"><span class="pre">Linear.postprocess()</span></code></a></li>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.linear.Linear.tp_split_dim"><code class="docutils literal notranslate"><span class="pre">Linear.tp_split_dim()</span></code></a></li>
|
||
</ul>
|
||
</li>
|
||
<li class="toctree-l3"><a class="reference internal" href="#tensorrt_llm.layers.linear.LinearBase"><code class="docutils literal notranslate"><span class="pre">LinearBase</span></code></a><ul>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.linear.LinearBase.collect_and_bias"><code class="docutils literal notranslate"><span class="pre">LinearBase.collect_and_bias()</span></code></a></li>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.linear.LinearBase.forward"><code class="docutils literal notranslate"><span class="pre">LinearBase.forward()</span></code></a></li>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.linear.LinearBase.get_weight"><code class="docutils literal notranslate"><span class="pre">LinearBase.get_weight()</span></code></a></li>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.linear.LinearBase.multiply_and_lora"><code class="docutils literal notranslate"><span class="pre">LinearBase.multiply_and_lora()</span></code></a></li>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.linear.LinearBase.multiply_collect"><code class="docutils literal notranslate"><span class="pre">LinearBase.multiply_collect()</span></code></a></li>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.linear.LinearBase.tp_split_dim"><code class="docutils literal notranslate"><span class="pre">LinearBase.tp_split_dim()</span></code></a></li>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.linear.LinearBase.weight_is_kn"><code class="docutils literal notranslate"><span class="pre">LinearBase.weight_is_kn()</span></code></a></li>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.linear.LinearBase.weight_loader"><code class="docutils literal notranslate"><span class="pre">LinearBase.weight_loader()</span></code></a></li>
|
||
</ul>
|
||
</li>
|
||
<li class="toctree-l3"><a class="reference internal" href="#tensorrt_llm.layers.linear.RowLinear"><code class="docutils literal notranslate"><span class="pre">RowLinear</span></code></a><ul>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.linear.RowLinear.collect_and_bias"><code class="docutils literal notranslate"><span class="pre">RowLinear.collect_and_bias()</span></code></a></li>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.linear.RowLinear.tp_split_dim"><code class="docutils literal notranslate"><span class="pre">RowLinear.tp_split_dim()</span></code></a></li>
|
||
</ul>
|
||
</li>
|
||
</ul>
|
||
</li>
|
||
<li class="toctree-l2"><a class="reference internal" href="#module-tensorrt_llm.layers.mlp">MLP</a><ul>
|
||
<li class="toctree-l3"><a class="reference internal" href="#tensorrt_llm.layers.mlp.FusedGatedMLP"><code class="docutils literal notranslate"><span class="pre">FusedGatedMLP</span></code></a><ul>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.mlp.FusedGatedMLP.fc_gate"><code class="docutils literal notranslate"><span class="pre">FusedGatedMLP.fc_gate()</span></code></a></li>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.mlp.FusedGatedMLP.fc_gate_plugin"><code class="docutils literal notranslate"><span class="pre">FusedGatedMLP.fc_gate_plugin()</span></code></a></li>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.mlp.FusedGatedMLP.forward"><code class="docutils literal notranslate"><span class="pre">FusedGatedMLP.forward()</span></code></a></li>
|
||
</ul>
|
||
</li>
|
||
<li class="toctree-l3"><a class="reference internal" href="#tensorrt_llm.layers.mlp.GatedMLP"><code class="docutils literal notranslate"><span class="pre">GatedMLP</span></code></a><ul>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.mlp.GatedMLP.forward"><code class="docutils literal notranslate"><span class="pre">GatedMLP.forward()</span></code></a></li>
|
||
</ul>
|
||
</li>
|
||
<li class="toctree-l3"><a class="reference internal" href="#tensorrt_llm.layers.mlp.MLP"><code class="docutils literal notranslate"><span class="pre">MLP</span></code></a><ul>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.mlp.MLP.forward"><code class="docutils literal notranslate"><span class="pre">MLP.forward()</span></code></a></li>
|
||
</ul>
|
||
</li>
|
||
<li class="toctree-l3"><a class="reference internal" href="#tensorrt_llm.layers.mlp.fc_gate_lora"><code class="docutils literal notranslate"><span class="pre">fc_gate_lora()</span></code></a></li>
|
||
</ul>
|
||
</li>
|
||
<li class="toctree-l2"><a class="reference internal" href="#module-tensorrt_llm.layers.normalization">Normalization</a><ul>
|
||
<li class="toctree-l3"><a class="reference internal" href="#tensorrt_llm.layers.normalization.GroupNorm"><code class="docutils literal notranslate"><span class="pre">GroupNorm</span></code></a><ul>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.normalization.GroupNorm.forward"><code class="docutils literal notranslate"><span class="pre">GroupNorm.forward()</span></code></a></li>
|
||
</ul>
|
||
</li>
|
||
<li class="toctree-l3"><a class="reference internal" href="#tensorrt_llm.layers.normalization.LayerNorm"><code class="docutils literal notranslate"><span class="pre">LayerNorm</span></code></a><ul>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.normalization.LayerNorm.forward"><code class="docutils literal notranslate"><span class="pre">LayerNorm.forward()</span></code></a></li>
|
||
</ul>
|
||
</li>
|
||
<li class="toctree-l3"><a class="reference internal" href="#tensorrt_llm.layers.normalization.RmsNorm"><code class="docutils literal notranslate"><span class="pre">RmsNorm</span></code></a><ul>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.normalization.RmsNorm.forward"><code class="docutils literal notranslate"><span class="pre">RmsNorm.forward()</span></code></a></li>
|
||
</ul>
|
||
</li>
|
||
</ul>
|
||
</li>
|
||
<li class="toctree-l2"><a class="reference internal" href="#module-tensorrt_llm.layers.pooling">Pooling</a><ul>
|
||
<li class="toctree-l3"><a class="reference internal" href="#tensorrt_llm.layers.pooling.AvgPool2d"><code class="docutils literal notranslate"><span class="pre">AvgPool2d</span></code></a><ul>
|
||
<li class="toctree-l4"><a class="reference internal" href="#tensorrt_llm.layers.pooling.AvgPool2d.forward"><code class="docutils literal notranslate"><span class="pre">AvgPool2d.forward()</span></code></a></li>
|
||
</ul>
|
||
</li>
|
||
</ul>
|
||
</li>
|
||
</ul>
<section id="module-tensorrt_llm">
|
||
<span id="layers"></span><h1>Layers<a class="headerlink" href="#module-tensorrt_llm" title="Link to this heading"></a></h1>
|
||
<section id="module-tensorrt_llm.layers.activation">
|
||
<span id="activation"></span><h2>Activation<a class="headerlink" href="#module-tensorrt_llm.layers.activation" title="Link to this heading"></a></h2>
|
||
<dl class="py class">
|
||
<dt class="sig sig-object py" id="tensorrt_llm.layers.activation.Mish">
|
||
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.layers.activation.</span></span><span class="sig-name descname"><span class="pre">Mish</span></span><a class="reference internal" href="../_modules/tensorrt_llm/layers/activation.html#Mish"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.activation.Mish" title="Link to this definition"></a></dt>
|
||
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code></p>
|
||
<dl class="py method">
|
||
<dt class="sig sig-object py" id="tensorrt_llm.layers.activation.Mish.forward">
|
||
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">input</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/activation.html#Mish.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.activation.Mish.forward" title="Link to this definition"></a></dt>
|
||
<dd></dd></dl>
|
||
|
||
</dd></dl>
|
||
|
||
</section>
|
||
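``Mish`` has no parameters; ``forward`` applies the element-wise activation
Mish(x) = x * tanh(softplus(x)) to its input. A minimal NumPy sketch of the
formula, for intuition only (the layer itself emits TensorRT graph ops rather
than computing values eagerly)::

    import numpy as np

    def mish_reference(x: np.ndarray) -> np.ndarray:
        # Mish(x) = x * tanh(softplus(x)); logaddexp(0, x) is a
        # numerically stable softplus.
        return x * np.tanh(np.logaddexp(0.0, x))

    print(mish_reference(np.array([-2.0, 0.0, 2.0])))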
<section id="module-tensorrt_llm.layers.attention">
|
||
<span id="attention"></span><h2>Attention<a class="headerlink" href="#module-tensorrt_llm.layers.attention" title="Link to this heading"></a></h2>
|
||
<dl class="py class">
|
||
<dt class="sig sig-object py" id="tensorrt_llm.layers.attention.Attention">
|
||
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.layers.attention.</span></span><span class="sig-name descname"><span class="pre">Attention</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">*</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">local_layer_idx</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_attention_heads</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_kv_heads=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_position_embeddings=1024</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_layers=1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">apply_query_key_layer_scaling=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_head_size=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">qk_layernorm=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layernorm_type=LayerNormType.LayerNorm</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layernorm_share=True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">inner_layernorm=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">eps=1e-05</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_mask_type=AttentionMaskType.padding</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">bias=True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">position_embedding_type=PositionEmbeddingType.learned_absolute</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rotary_embedding_base=10000.0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rotary_embedding_scaling=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rotary_embedding_percentage=1.0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rope_scaling_short_factors=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rope_scaling_long_factors=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rope_scaling_short_mscale=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rope_scaling_long_mscale=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">original_max_position_embeddings=1024</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_group=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_size=1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_rank=0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">quant_mode:</span> <span class="pre">~tensorrt_llm.quantization.mode.QuantMode</span> <span class="pre">=</span> <span class="pre">QuantMode.None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">q_scaling=1.0</span></span></em>, <em class="sig-param"><span class="n"><span 
class="pre">cross_attention=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">relative_attention=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_distance=0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_buckets=0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dense_bias=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">clip_qkv=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">alibi_bias_max=8</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">skip_cross_kv=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_attn_value=0.0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">block_sparse_params=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_implicit_relative_attention=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">reorder=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layer_idx_in_cache_pool=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">enable_qkv=True</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/attention.html#Attention"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.attention.Attention" title="Link to this definition"></a></dt>
|
||
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code></p>
|
||
<dl class="py method">
|
||
<dt class="sig sig-object py" id="tensorrt_llm.layers.attention.Attention.create_attention_const_params">
|
||
<em class="property"><span class="pre">static</span><span class="w"> </span></em><span class="sig-name descname"><span class="pre">create_attention_const_params</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">model_cls</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">config</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/attention.html#Attention.create_attention_const_params"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.attention.Attention.create_attention_const_params" title="Link to this definition"></a></dt>
|
||
<dd></dd></dl>
|
||
|
||
<dl class="py method">
|
||
<dt class="sig sig-object py" id="tensorrt_llm.layers.attention.Attention.fill_attention_params">
|
||
<em class="property"><span class="pre">static</span><span class="w"> </span></em><span class="sig-name descname"><span class="pre">fill_attention_params</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">model_cls</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_params</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/attention.html#Attention.fill_attention_params"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.attention.Attention.fill_attention_params" title="Link to this definition"></a></dt>
|
||
<dd></dd></dl>
|
||
|
||
<dl class="py method">
|
||
<dt class="sig sig-object py" id="tensorrt_llm.layers.attention.Attention.forward">
|
||
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">hidden_states</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_mask</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_packed_mask</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_cache</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">spec_decoding_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">mrope_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kv_cache_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">encoder_output</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">position_embedding</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">norm_before_bmm1</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">lora_layer_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">cross_kv_cache_gen</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a><span class="w"> </span><span 
class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">cross_kv_reuse</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">reduce_fusion_params</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.AllReduceFusionParams" title="tensorrt_llm.functional.AllReduceFusionParams"><span class="pre">AllReduceFusionParams</span></a><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">skip_attn</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/attention.html#Attention.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.attention.Attention.forward" title="Link to this definition"></a></dt>
|
||
<dd></dd></dl>
|
||
|
||
<dl class="py method">
|
||
<dt class="sig sig-object py" id="tensorrt_llm.layers.attention.Attention.postprocess">
|
||
<span class="sig-name descname"><span class="pre">postprocess</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">tllm_key</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">weights</span></span></em>, <em class="sig-param"><span class="o"><span class="pre">**</span></span><span class="n"><span class="pre">kwargs</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/attention.html#Attention.postprocess"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.attention.Attention.postprocess" title="Link to this definition"></a></dt>
|
||
<dd></dd></dl>
|
||
|
||
<dl class="py method">
|
||
<dt class="sig sig-object py" id="tensorrt_llm.layers.attention.Attention.set_rel_attn_table">
|
||
<span class="sig-name descname"><span class="pre">set_rel_attn_table</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">max_seq_len</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">precomputed_relative_attention</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/attention.html#Attention.set_rel_attn_table"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.attention.Attention.set_rel_attn_table" title="Link to this definition"></a></dt>
|
||
<dd></dd></dl>
|
||
|
||
</dd></dl>
|
||
|
||
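A minimal construction sketch for a decoder-only model with grouped-query
attention; every size below is an illustrative placeholder, and the enum
imports assume ``AttentionMaskType`` and ``PositionEmbeddingType`` from
``tensorrt_llm.functional``::

    from tensorrt_llm.functional import (AttentionMaskType,
                                         PositionEmbeddingType)
    from tensorrt_llm.layers.attention import Attention

    attn = Attention(
        local_layer_idx=0,        # index of this layer within the rank's layers
        hidden_size=4096,
        num_attention_heads=32,
        num_kv_heads=8,           # grouped-query attention: 8 KV heads serve 32 query heads
        max_position_embeddings=4096,
        attention_mask_type=AttentionMaskType.causal,
        position_embedding_type=PositionEmbeddingType.rope_gpt_neox,
        bias=False,
        tp_size=1,                # no tensor parallelism in this sketch
    )

At build time, ``forward`` is then invoked with the runtime descriptors
(``attention_params``, ``kv_cache_params``) described by the parameter classes
below.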
<dl class="py class">
|
||
<dt class="sig sig-object py" id="tensorrt_llm.layers.attention.AttentionMaskParams">
|
||
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.layers.attention.</span></span><span class="sig-name descname"><span class="pre">AttentionMaskParams</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">self_attention_mask</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">self_attention_packed_mask</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">cross_attention_mask</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">cross_attention_packed_mask</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/attention.html#AttentionMaskParams"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.attention.AttentionMaskParams" title="Link to this definition"></a></dt>
|
||
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">object</span></code></p>
|
||
</dd></dl>
|
||
|
||
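A short sketch of bundling explicit mask tensors; ``self_mask`` stands in for
a hypothetical ``Tensor`` built elsewhere in the network, and fields left as
``None`` are simply unused::

    from tensorrt_llm.layers.attention import AttentionMaskParams

    mask_params = AttentionMaskParams(
        self_attention_mask=self_mask,  # hypothetical mask Tensor
        cross_attention_mask=None,      # no cross-attention in this sketch
    )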
<dl class="py class">
|
||
<dt class="sig sig-object py" id="tensorrt_llm.layers.attention.AttentionParams">
|
||
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.layers.attention.</span></span><span class="sig-name descname"><span class="pre">AttentionParams</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">sequence_length</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">context_lengths</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">host_context_lengths</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_context_length</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">host_request_types</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">encoder_input_lengths</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">encoder_max_input_length</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" 
href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">host_runtime_perf_knobs</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">host_context_progress</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/attention.html#AttentionParams"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.attention.AttentionParams" title="Link to this definition"></a></dt>
|
||
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">object</span></code></p>
|
||
<dl class="py method">
|
||
<dt class="sig sig-object py" id="tensorrt_llm.layers.attention.AttentionParams.fill_attention_const_params_for_long_rope">
|
||
<span class="sig-name descname"><span class="pre">fill_attention_const_params_for_long_rope</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">embed_positions_short_factors</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">embed_positions_long_factors</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">embed_positions_short_factors_for_attention_plugin</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">embed_positions_long_factors_for_attention_plugin</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">short_mscale</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">long_mscale</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">short_inv_freq</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">long_inv_freq</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/attention.html#AttentionParams.fill_attention_const_params_for_long_rope"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.attention.AttentionParams.fill_attention_const_params_for_long_rope" title="Link to this definition"></a></dt>
<dd></dd></dl>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.layers.attention.AttentionParams.fill_attention_const_params_for_rope">
<span class="sig-name descname"><span class="pre">fill_attention_const_params_for_rope</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">embed_positions</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rotary_inv_freq</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">embed_positions_for_gpt_attention</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/attention.html#AttentionParams.fill_attention_const_params_for_rope"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.attention.AttentionParams.fill_attention_const_params_for_rope" title="Link to this definition"></a></dt>
<dd></dd></dl>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.layers.attention.AttentionParams.is_valid">
<span class="sig-name descname"><span class="pre">is_valid</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">gpt_attention_plugin</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">remove_input_padding</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_kv_cache</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/attention.html#AttentionParams.is_valid"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.attention.AttentionParams.is_valid" title="Link to this definition"></a></dt>
<dd></dd></dl>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.layers.attention.AttentionParams.is_valid_cross_attn">
<span class="sig-name descname"><span class="pre">is_valid_cross_attn</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">do_cross_attention</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/attention.html#AttentionParams.is_valid_cross_attn"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.attention.AttentionParams.is_valid_cross_attn" title="Link to this definition"></a></dt>
<dd></dd></dl>
</dd></dl>
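<p>A minimal usage sketch: <code class="docutils literal notranslate"><span class="pre">AttentionParams</span></code> bundles the runtime tensors consumed by the attention plugin, and <code class="docutils literal notranslate"><span class="pre">is_valid()</span></code> checks that the fields required by a given plugin configuration were supplied. Variables ending in <code class="docutils literal notranslate"><span class="pre">_tensor</span></code> are placeholder inputs assumed to be built elsewhere in the network definition.</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span>from tensorrt_llm.layers.attention import AttentionParams

attention_params = AttentionParams(
    sequence_length=sequence_length_tensor,
    context_lengths=context_lengths_tensor,
    host_context_lengths=host_context_lengths_tensor,
    max_context_length=2048,
    host_request_types=host_request_types_tensor,
)
# Validate against the chosen plugin configuration before use.
assert attention_params.is_valid(gpt_attention_plugin=True,
                                 remove_input_padding=True,
                                 use_kv_cache=True)
</pre></div></div>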
<dl class="py class">
<dt class="sig sig-object py" id="tensorrt_llm.layers.attention.BertAttention">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.layers.attention.</span></span><span class="sig-name descname"><span class="pre">BertAttention</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">hidden_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_attention_heads</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_position_embeddings=1024</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_layers=1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_head_size=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_kv_heads=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">q_scaling=1.0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">apply_query_key_layer_scaling=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">bias=True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_group=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_size=1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_rank=0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">cp_group=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">cp_size=1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">relative_attention=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_distance=0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_buckets=0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">quant_mode=QuantMode.None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/attention.html#BertAttention"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.attention.BertAttention" title="Link to this definition"></a></dt>
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code></p>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.layers.attention.BertAttention.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">hidden_states</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_mask</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">input_lengths</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_input_length</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">lora_layer_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/attention.html#BertAttention.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.attention.BertAttention.forward" title="Link to this definition"></a></dt>
<dd></dd></dl>
</dd></dl>
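<p>A usage sketch, assuming <code class="docutils literal notranslate"><span class="pre">hidden_states</span></code>, <code class="docutils literal notranslate"><span class="pre">input_lengths</span></code>, and <code class="docutils literal notranslate"><span class="pre">max_input_length</span></code> are tensors produced earlier in the network:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span>from tensorrt_llm.layers.attention import BertAttention

# Bidirectional (BERT-style) self-attention block.
attn = BertAttention(hidden_size=768,
                     num_attention_heads=12,
                     max_position_embeddings=512)
output = attn(hidden_states,
              input_lengths=input_lengths,
              max_input_length=max_input_length)
</pre></div></div>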
<dl class="py class">
<dt class="sig sig-object py" id="tensorrt_llm.layers.attention.BlockSparseAttnParams">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.layers.attention.</span></span><span class="sig-name descname"><span class="pre">BlockSparseAttnParams</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">block_size</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">64</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">homo_head_pattern</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">bool</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_local_blocks</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">16</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">vertical_stride</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">8</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/attention.html#BlockSparseAttnParams"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.attention.BlockSparseAttnParams" title="Link to this definition"></a></dt>
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">object</span></code></p>
</dd></dl>
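<p>A sketch that restates the documented defaults explicitly; the field comments are an illustrative reading of the block-sparse pattern, not normative documentation:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span>from tensorrt_llm.layers.attention import BlockSparseAttnParams

block_sparse_params = BlockSparseAttnParams(
    block_size=64,            # tokens per sparse block
    homo_head_pattern=False,  # whether all heads share one pattern
    num_local_blocks=16,      # width of the local (sliding) band, in blocks
    vertical_stride=8,        # stride between vertical (strided) blocks
)
</pre></div></div>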
<dl class="py class">
<dt class="sig sig-object py" id="tensorrt_llm.layers.attention.CogVLMAttention">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.layers.attention.</span></span><span class="sig-name descname"><span class="pre">CogVLMAttention</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">*</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">local_layer_idx</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_attention_heads</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_kv_heads=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_position_embeddings=1024</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_mask_type=AttentionMaskType.causal</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">bias=True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">position_embedding_type=PositionEmbeddingType.learned_absolute</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rotary_embedding_base=10000.0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rotary_embedding_scaling=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_group=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_size=1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_rank=0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">quant_mode:</span> <span class="pre">~tensorrt_llm.quantization.mode.QuantMode</span> <span class="pre">=</span> <span class="pre">QuantMode.None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dense_bias=None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/attention.html#CogVLMAttention"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.attention.CogVLMAttention" title="Link to this definition"></a></dt>
<dd><p>Bases: <a class="reference internal" href="#tensorrt_llm.layers.attention.Attention" title="tensorrt_llm.layers.attention.Attention"><code class="xref py py-class docutils literal notranslate"><span class="pre">Attention</span></code></a></p>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.layers.attention.CogVLMAttention.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">hidden_states</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_cache</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kv_cache_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">vision_token_mask</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">position_embedding</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/attention.html#CogVLMAttention.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.attention.CogVLMAttention.forward" title="Link to this definition"></a></dt>
<dd></dd></dl>
</dd></dl>
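<p>A hedged sketch: the extra <code class="docutils literal notranslate"><span class="pre">vision_token_mask</span></code> forward argument is assumed to mark which positions hold image tokens, and the return shape shown follows the usual convention for attention layers with <code class="docutils literal notranslate"><span class="pre">use_cache=True</span></code>:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span>from tensorrt_llm.layers.attention import CogVLMAttention

attn = CogVLMAttention(local_layer_idx=0,
                       hidden_size=4096,
                       num_attention_heads=32)
# kv_cache_params / attention_params / vision_token_mask are placeholder
# inputs assumed to be assembled elsewhere.
output, present_kv = attn(hidden_states,
                          use_cache=True,
                          kv_cache_params=kv_cache_params,
                          attention_params=attention_params,
                          vision_token_mask=vision_token_mask)
</pre></div></div>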
<dl class="py class">
<dt class="sig sig-object py" id="tensorrt_llm.layers.attention.DeepseekV2Attention">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.layers.attention.</span></span><span class="sig-name descname"><span class="pre">DeepseekV2Attention</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">*</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">local_layer_idx</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_attention_heads</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">q_lora_rank</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kv_lora_rank</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">qk_nope_head_dim=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">qk_rope_head_dim=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">v_head_dim=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">eps=1e-06</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_mask_type=AttentionMaskType.causal</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">position_embedding_type=PositionEmbeddingType.learned_absolute</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_position_embeddings=1024</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rotary_embedding_base=10000.0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rotary_embedding_scaling=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rotary_embedding_beta_fast=32</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rotary_embedding_beta_slow=1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rotary_embedding_mscale=1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rotary_embedding_mscale_all_dim=0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rotary_embedding_origin_max_position=4096</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rotary_scaling=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_group=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_size=1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_rank=0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">quant_mode:</span> <span class="pre">~tensorrt_llm.quantization.mode.QuantMode</span> <span class="pre">=</span> <span class="pre">QuantMode.None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/attention.html#DeepseekV2Attention"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.attention.DeepseekV2Attention" title="Link to this definition"></a></dt>
<dd><p>Bases: <a class="reference internal" href="#tensorrt_llm.layers.attention.Attention" title="tensorrt_llm.layers.attention.Attention"><code class="xref py py-class docutils literal notranslate"><span class="pre">Attention</span></code></a></p>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.layers.attention.DeepseekV2Attention.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">hidden_states</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_cache</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">spec_decoding_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kv_cache_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/attention.html#DeepseekV2Attention.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.attention.DeepseekV2Attention.forward" title="Link to this definition"></a></dt>
<dd></dd></dl>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.layers.attention.DeepseekV2Attention.weight_loader">
<span class="sig-name descname"><span class="pre">weight_loader</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">mapping</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">Mapping</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">param</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">Parameter</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">loaded_weight</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">Tensor</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/attention.html#DeepseekV2Attention.weight_loader"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.attention.DeepseekV2Attention.weight_loader" title="Link to this definition"></a></dt>
<dd></dd></dl>
</dd></dl>
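<p>A construction sketch for multi-head latent attention (MLA): the query and the joint key/value paths are compressed through low-rank projections. The sizes below are illustrative, not prescriptive:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span>from tensorrt_llm.layers.attention import DeepseekV2Attention

attn = DeepseekV2Attention(
    local_layer_idx=0,
    hidden_size=5120,
    num_attention_heads=128,
    q_lora_rank=1536,       # rank of the query down-projection
    kv_lora_rank=512,       # rank of the shared key/value down-projection
    qk_nope_head_dim=128,   # per-head query/key dim without rotary embedding
    qk_rope_head_dim=64,    # per-head query/key dim that receives RoPE
    v_head_dim=128,
)
</pre></div></div>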
<dl class="py class">
<dt class="sig sig-object py" id="tensorrt_llm.layers.attention.KeyValueCacheParams">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.layers.attention.</span></span><span class="sig-name descname"><span class="pre">KeyValueCacheParams</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">past_key_value</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">List</span><span class="p"><span class="pre">[</span></span><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a><span class="p"><span class="pre">]</span></span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">host_past_key_value_lengths</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">host_max_attention_window_sizes</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">host_sink_token_length</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kv_cache_block_offsets</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">host_kv_cache_block_offsets</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span 
class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">host_kv_cache_pool_pointers</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">host_kv_cache_pool_mapping</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">cache_indirection</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">past_key_value_length</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">cross_kv_cache_block_offsets</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">host_cross_kv_cache_block_offsets</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">host_cross_kv_cache_pool_pointers</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" 
title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">host_cross_kv_cache_pool_mapping</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/attention.html#KeyValueCacheParams"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.attention.KeyValueCacheParams" title="Link to this definition"></a></dt>
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">object</span></code></p>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.layers.attention.KeyValueCacheParams.fill_none_tensor_list">
<span class="sig-name descname"><span class="pre">fill_none_tensor_list</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">list_size</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/attention.html#KeyValueCacheParams.fill_none_tensor_list"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.attention.KeyValueCacheParams.fill_none_tensor_list" title="Link to this definition"></a></dt>
<dd></dd></dl>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.layers.attention.KeyValueCacheParams.get_first_past_key_value">
<span class="sig-name descname"><span class="pre">get_first_past_key_value</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/attention.html#KeyValueCacheParams.get_first_past_key_value"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.attention.KeyValueCacheParams.get_first_past_key_value" title="Link to this definition"></a></dt>
<dd></dd></dl>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.layers.attention.KeyValueCacheParams.is_valid">
<span class="sig-name descname"><span class="pre">is_valid</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">gpt_attention_plugin</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/attention.html#KeyValueCacheParams.is_valid"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.attention.KeyValueCacheParams.is_valid" title="Link to this definition"></a></dt>
<dd></dd></dl>
</dd></dl>
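<p>A usage sketch, with paged-KV fields omitted for brevity; names ending in <code class="docutils literal notranslate"><span class="pre">_tensor</span></code> and <code class="docutils literal notranslate"><span class="pre">num_layers</span></code> are placeholders:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span>from tensorrt_llm.layers.attention import KeyValueCacheParams

kv_cache_params = KeyValueCacheParams(
    host_past_key_value_lengths=host_past_kv_lengths_tensor,
    host_max_attention_window_sizes=host_window_sizes_tensor,
    host_sink_token_length=host_sink_token_length_tensor,
    cache_indirection=cache_indirection_tensor,
)
# Size the per-layer past_key_value list so per-layer indexing and
# get_first_past_key_value() remain well defined.
kv_cache_params.fill_none_tensor_list(num_layers)
first_layer_kv = kv_cache_params.get_first_past_key_value()
</pre></div></div>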
<dl class="py class">
<dt class="sig sig-object py" id="tensorrt_llm.layers.attention.MropeParams">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.layers.attention.</span></span><span class="sig-name descname"><span class="pre">MropeParams</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">mrope_rotary_sin_cos</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">mrope_position_deltas</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/attention.html#MropeParams"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.attention.MropeParams" title="Link to this definition"></a></dt>
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">object</span></code></p>
</dd></dl>
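<p>A sketch of this container for multimodal rotary embedding (mRoPE) inputs, as used by Qwen2-VL-style models; both arguments are placeholder tensors:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span>from tensorrt_llm.layers.attention import MropeParams

mrope_params = MropeParams(
    mrope_rotary_sin_cos=rotary_sin_cos_tensor,
    mrope_position_deltas=position_deltas_tensor,
)
</pre></div></div>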
<dl class="py class">
<dt class="sig sig-object py" id="tensorrt_llm.layers.attention.SpecDecodingParams">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.layers.attention.</span></span><span class="sig-name descname"><span class="pre">SpecDecodingParams</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">spec_decoding_is_generation_length_variable</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">bool</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">spec_decoding_max_generation_length</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">spec_decoding_generation_lengths</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">spec_decoding_position_offsets</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">spec_decoding_packed_mask</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/attention.html#SpecDecodingParams"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.attention.SpecDecodingParams" title="Link to this definition"></a></dt>
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">object</span></code></p>
</dd></dl>
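<p>A sketch of the inputs used to verify speculative (draft) tokens in one attention pass; the tensor arguments are placeholders:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span>from tensorrt_llm.layers.attention import SpecDecodingParams

spec_params = SpecDecodingParams(
    spec_decoding_is_generation_length_variable=True,
    spec_decoding_max_generation_length=5,  # e.g. 4 draft tokens + 1 target
    spec_decoding_generation_lengths=generation_lengths_tensor,
    spec_decoding_position_offsets=position_offsets_tensor,
    spec_decoding_packed_mask=packed_mask_tensor,
)
</pre></div></div>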
<dl class="py function">
<dt class="sig sig-object py" id="tensorrt_llm.layers.attention.compute_relative_bias">
<span class="sig-prename descclassname"><span class="pre">tensorrt_llm.layers.attention.</span></span><span class="sig-name descname"><span class="pre">compute_relative_bias</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">query_length</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">key_length</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_buckets</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_distance</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">bidirectional</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rel_attn_table</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_group</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_rank</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/attention.html#compute_relative_bias"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.attention.compute_relative_bias" title="Link to this definition"></a></dt>
<dd></dd></dl>
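<p>A call sketch for T5-style relative position bias; <code class="docutils literal notranslate"><span class="pre">rel_attn_table</span></code> is assumed to be the layer's learned relative-attention embedding table, and the query/key lengths are placeholder tensors:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span>from tensorrt_llm.layers.attention import compute_relative_bias

bias = compute_relative_bias(
    query_length=query_length_tensor,
    key_length=key_length_tensor,
    num_buckets=32,
    max_distance=128,
    bidirectional=True,   # True for encoder self-attention
    rel_attn_table=rel_attn_table,
)
</pre></div></div>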
<dl class="py function">
<dt class="sig sig-object py" id="tensorrt_llm.layers.attention.make_causal_mask">
<span class="sig-prename descclassname"><span class="pre">tensorrt_llm.layers.attention.</span></span><span class="sig-name descname"><span class="pre">make_causal_mask</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">bsz</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tgt_len</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">past_key_values_length</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/attention.html#make_causal_mask"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.attention.make_causal_mask" title="Link to this definition"></a></dt>
<dd></dd></dl>
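<p>A call sketch, assuming the usual convention that the mask covers <code class="docutils literal notranslate"><span class="pre">tgt_len</span></code> new positions plus <code class="docutils literal notranslate"><span class="pre">past_key_values_length</span></code> cached ones; <code class="docutils literal notranslate"><span class="pre">batch_size</span></code> and <code class="docutils literal notranslate"><span class="pre">mask_dtype</span></code> are placeholders:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span>from tensorrt_llm.layers.attention import make_causal_mask

# Builds an additive lower-triangular (causal) attention mask.
mask = make_causal_mask(bsz=batch_size,
                        tgt_len=16,
                        past_key_values_length=0,
                        dtype=mask_dtype)
</pre></div></div>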
</section>
<section id="module-tensorrt_llm.layers.cast">
<span id="cast"></span><h2>Cast<a class="headerlink" href="#module-tensorrt_llm.layers.cast" title="Link to this heading"></a></h2>
<dl class="py class">
<dt class="sig sig-object py" id="tensorrt_llm.layers.cast.Cast">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.layers.cast.</span></span><span class="sig-name descname"><span class="pre">Cast</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">output_dtype</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">str</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">'float32'</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/cast.html#Cast"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.cast.Cast" title="Link to this definition"></a></dt>
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code></p>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.layers.cast.Cast.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/cast.html#Cast.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.cast.Cast.forward" title="Link to this definition"></a></dt>
<dd></dd></dl>
</dd></dl>
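<p>A usage sketch: a pass-through layer that only converts dtype, e.g. to reconcile two adjacent layers that disagree on precision. <code class="docutils literal notranslate"><span class="pre">hidden_states</span></code> is a placeholder tensor:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span>from tensorrt_llm.layers.cast import Cast

to_fp16 = Cast(output_dtype='float16')
hidden_states_fp16 = to_fp16(hidden_states)
</pre></div></div>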
</section>
<section id="module-tensorrt_llm.layers.conv">
<span id="conv"></span><h2>Conv<a class="headerlink" href="#module-tensorrt_llm.layers.conv" title="Link to this heading"></a></h2>
<dl class="py class">
<dt class="sig sig-object py" id="tensorrt_llm.layers.conv.Conv1d">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.layers.conv.</span></span><span class="sig-name descname"><span class="pre">Conv1d</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">in_channels</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_channels</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kernel_size</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">stride</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">padding</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dilation</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">groups</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">bias</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">bool</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">padding_mode</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">str</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">'zeros'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/conv.html#Conv1d"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" 
href="#tensorrt_llm.layers.conv.Conv1d" title="Link to this definition"></a></dt>
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code></p>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.layers.conv.Conv1d.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">input</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/conv.html#Conv1d.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.conv.Conv1d.forward" title="Link to this definition"></a></dt>
<dd></dd></dl>
</dd></dl>
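<p>A usage sketch with torch.nn.Conv1d-like arguments; <code class="docutils literal notranslate"><span class="pre">x</span></code> is a placeholder input tensor:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span>from tensorrt_llm.layers.conv import Conv1d

conv = Conv1d(in_channels=64, out_channels=128,
              kernel_size=3, stride=1, padding=1)
y = conv(x)  # x is assumed to have shape [N, C_in, L]
</pre></div></div>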
<dl class="py class">
<dt class="sig sig-object py" id="tensorrt_llm.layers.conv.Conv2d">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.layers.conv.</span></span><span class="sig-name descname"><span class="pre">Conv2d</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">in_channels</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_channels</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kernel_size</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">Tuple</span><span class="p"><span class="pre">[</span></span><span class="pre">int</span><span class="p"><span class="pre">,</span></span><span class="w"> </span><span class="pre">int</span><span class="p"><span class="pre">]</span></span></span></em>, <em class="sig-param"><span class="n"><span class="pre">stride</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">Tuple</span><span class="p"><span class="pre">[</span></span><span class="pre">int</span><span class="p"><span class="pre">,</span></span><span class="w"> </span><span class="pre">int</span><span class="p"><span class="pre">]</span></span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">padding</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">Tuple</span><span class="p"><span class="pre">[</span></span><span class="pre">int</span><span class="p"><span class="pre">,</span></span><span class="w"> </span><span class="pre">int</span><span class="p"><span class="pre">]</span></span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">(0,</span> <span class="pre">0)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dilation</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">Tuple</span><span class="p"><span class="pre">[</span></span><span class="pre">int</span><span class="p"><span class="pre">,</span></span><span class="w"> </span><span class="pre">int</span><span class="p"><span class="pre">]</span></span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">groups</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">bias</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span 
class="pre">bool</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">padding_mode</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">str</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">'zeros'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/conv.html#Conv2d"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.conv.Conv2d" title="Link to this definition"></a></dt>
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code></p>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.layers.conv.Conv2d.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">input</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/conv.html#Conv2d.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.conv.Conv2d.forward" title="Link to this definition"></a></dt>
<dd></dd></dl>
</dd></dl>
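<p>A usage sketch; per the annotated signature, kernel_size, stride, padding, and dilation are (height, width) tuples, and <code class="docutils literal notranslate"><span class="pre">x</span></code> is a placeholder input tensor:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span>from tensorrt_llm.layers.conv import Conv2d

conv = Conv2d(in_channels=3, out_channels=16,
              kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
y = conv(x)  # x is assumed to have shape [N, C_in, H, W]
</pre></div></div>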
<dl class="py class">
<dt class="sig sig-object py" id="tensorrt_llm.layers.conv.ConvTranspose2d">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.layers.conv.</span></span><span class="sig-name descname"><span class="pre">ConvTranspose2d</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">in_channels</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_channels</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kernel_size</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">Tuple</span><span class="p"><span class="pre">[</span></span><span class="pre">int</span><span class="p"><span class="pre">,</span></span><span class="w"> </span><span class="pre">int</span><span class="p"><span class="pre">]</span></span></span></em>, <em class="sig-param"><span class="n"><span class="pre">stride</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">Tuple</span><span class="p"><span class="pre">[</span></span><span class="pre">int</span><span class="p"><span class="pre">,</span></span><span class="w"> </span><span class="pre">int</span><span class="p"><span class="pre">]</span></span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">padding</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">Tuple</span><span class="p"><span class="pre">[</span></span><span class="pre">int</span><span class="p"><span class="pre">,</span></span><span class="w"> </span><span class="pre">int</span><span class="p"><span class="pre">]</span></span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">(0,</span> <span class="pre">0)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">output_padding</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">Tuple</span><span class="p"><span class="pre">[</span></span><span class="pre">int</span><span class="p"><span class="pre">,</span></span><span class="w"> </span><span class="pre">int</span><span class="p"><span class="pre">]</span></span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">(0,</span> <span class="pre">0)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dilation</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">Tuple</span><span class="p"><span class="pre">[</span></span><span class="pre">int</span><span class="p"><span class="pre">,</span></span><span class="w"> </span><span class="pre">int</span><span class="p"><span class="pre">]</span></span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span 
class="w"> </span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">groups</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">bias</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">bool</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">padding_mode</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">str</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">'zeros'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/conv.html#ConvTranspose2d"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.conv.ConvTranspose2d" title="Link to this definition"></a></dt>
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code></p>

<dl class="py method">

<dt class="sig sig-object py" id="tensorrt_llm.layers.conv.ConvTranspose2d.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">input</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">output_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/conv.html#ConvTranspose2d.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.conv.ConvTranspose2d.forward" title="Link to this definition"></a></dt>
<dd></dd></dl>

</dd></dl>

</section>

<section id="module-tensorrt_llm.layers.embedding">

<span id="embedding"></span><h2>Embedding<a class="headerlink" href="#module-tensorrt_llm.layers.embedding" title="Link to this heading"></a></h2>

<dl class="py class">

<dt class="sig sig-object py" id="tensorrt_llm.layers.embedding.Embedding">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.layers.embedding.</span></span><span class="sig-name descname"><span class="pre">Embedding</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">num_embeddings</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">embedding_dim</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">str</span><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_size</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_group</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">list</span><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">sharding_dim</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_rank</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/embedding.html#Embedding"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.embedding.Embedding" title="Link to this definition"></a></dt>
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code></p>

<p>The embedding layer takes input indices (x) and the embedding lookup table (weight) as input
and outputs the corresponding embeddings according to the input indices.
The size of weight is [num_embeddings, embedding_dim].</p>

<p>Four parameters (tp_size, tp_group, sharding_dim, tp_rank) are involved in tensor parallelism.
Tensor parallelism is enabled only when “tp_size > 1 and tp_group is not None”.</p>

<blockquote>

<div><dl class="simple">

<dt>When “sharding_dim == 0”, the weight is sharded along the vocabulary dimension.</dt><dd><p>tp_rank must be set when sharding_dim == 0.</p>

</dd>

</dl>

<p>When “sharding_dim == 1”, the weight is sharded along the hidden dimension.</p>

</div></blockquote>
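<p>As an illustration of the sharding scheme, the snippet below is a minimal NumPy sketch of the “sharding_dim == 0” (vocabulary-parallel) case, not TensorRT-LLM code; the helper <code class="docutils literal notranslate"><span class="pre">local_lookup</span></code> is hypothetical. Each rank owns a slice of the vocabulary, out-of-range ids contribute zeros, and summing the partial results (an all-reduce across tp_group in practice) reassembles the full embedding.</p>
<div class="highlight-python notranslate"><div class="highlight"><pre>import numpy as np

# Vocabulary-parallel lookup sketch: weight is split along dim 0 (vocabulary).
num_embeddings, embedding_dim, tp_size = 8, 4, 2
weight = np.random.rand(num_embeddings, embedding_dim)
shards = np.split(weight, tp_size, axis=0)      # one shard per rank

def local_lookup(ids, shard, tp_rank):          # hypothetical helper
    vocab_per_rank = shard.shape[0]
    local_ids = ids - tp_rank * vocab_per_rank  # this is why tp_rank is needed
    mask = (local_ids &gt;= 0) &amp; (local_ids &lt; vocab_per_rank)
    out = np.zeros((len(ids), shard.shape[1]))
    out[mask] = shard[local_ids[mask]]          # ids outside this shard stay zero
    return out

ids = np.array([0, 3, 5, 7])
full = sum(local_lookup(ids, shards[r], r) for r in range(tp_size))
assert np.allclose(full, weight[ids])           # matches the unsharded lookup
</pre></div></div>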
<dl class="py method">

<dt class="sig sig-object py" id="tensorrt_llm.layers.embedding.Embedding.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/embedding.html#Embedding.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.embedding.Embedding.forward" title="Link to this definition"></a></dt>
<dd></dd></dl>

<dl class="py method">

<dt class="sig sig-object py" id="tensorrt_llm.layers.embedding.Embedding.postprocess">
<span class="sig-name descname"><span class="pre">postprocess</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">tllm_key</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">weights</span></span></em>, <em class="sig-param"><span class="o"><span class="pre">**</span></span><span class="n"><span class="pre">kwargs</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/embedding.html#Embedding.postprocess"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.embedding.Embedding.postprocess" title="Link to this definition"></a></dt>
<dd></dd></dl>

<dl class="py method">

<dt class="sig sig-object py" id="tensorrt_llm.layers.embedding.Embedding.weight_loader">
<span class="sig-name descname"><span class="pre">weight_loader</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">mapping</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">Mapping</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">param</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">Parameter</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">loaded_weight</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">Tensor</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/embedding.html#Embedding.weight_loader"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.embedding.Embedding.weight_loader" title="Link to this definition"></a></dt>
<dd></dd></dl>

</dd></dl>

<dl class="py class">

<dt class="sig sig-object py" id="tensorrt_llm.layers.embedding.PromptTuningEmbedding">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.layers.embedding.</span></span><span class="sig-name descname"><span class="pre">PromptTuningEmbedding</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">num_embeddings</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">embedding_dim</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">vocab_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_group</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">sharding_dim</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_rank</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">0</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/embedding.html#PromptTuningEmbedding"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.embedding.PromptTuningEmbedding" title="Link to this definition"></a></dt>
<dd><p>Bases: <a class="reference internal" href="#tensorrt_llm.layers.embedding.Embedding" title="tensorrt_llm.layers.embedding.Embedding"><code class="xref py py-class docutils literal notranslate"><span class="pre">Embedding</span></code></a></p>

<p>PromptTuningEmbedding handles fine-tuned prompts with virtual tokens. At runtime,
a supplementary embedding dictionary is passed. Tokens whose ids are >= vocab_size are embedded
with that additional dictionary.
The prompt tuning dictionary holds multiple tasks, and each sequence is assigned a given task.
Prompt-tuned tokens from a given sequence use the appropriate task dictionary, as defined by the <cite>tasks</cite> input.</p>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.layers.embedding.PromptTuningEmbedding.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">tokens</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">prompt_embedding_table</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tasks</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">task_vocab_size</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/embedding.html#PromptTuningEmbedding.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.embedding.PromptTuningEmbedding.forward" title="Link to this definition"></a></dt>
<dd><blockquote>

<div><p>Passes all tokens through both the normal and the prompt embedding tables.
Tokens are masked so that the “normal” embedding only sees “normal” tokens; the same logic applies to the “prompt” embedding.
After the two lookups, the results are combined based on whether each token was “normal” or “prompt-tuned”.</p>

</div></blockquote>
<dl class="field-list simple">

<dt class="field-odd">Parameters<span class="colon">:</span></dt>

<dd class="field-odd"><ul class="simple">

<li><p><strong>tokens</strong> – Tensor
the ids to embed, size [batch_size, seq_len]</p></li>

<li><p><strong>prompt_embedding_table</strong> – Tensor
the additional embedding table for prompt-tuned tokens, size [num_tasks * num_tokens_per_task, hidden_size]</p></li>

<li><p><strong>tasks</strong> – Tensor
the task required by each token, size [batch_size, seq_len]</p></li>

<li><p><strong>task_vocab_size</strong> – Tensor
the number of tokens used for each task, should be equal to prompt_embedding_table’s num_tokens_per_task, size [1]</p></li>

</ul>

</dd>

<dt class="field-even">Returns<span class="colon">:</span></dt>

<dd class="field-even"><p>Tokens’ embedding</p>

</dd>

</dl>
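<p>The masking-and-combine step described above can be sketched as follows (plain NumPy with hypothetical shapes, not the actual implementation; the real layer performs the same selection with TensorRT ops):</p>
<div class="highlight-python notranslate"><div class="highlight"><pre>import numpy as np

# Ids below vocab_size use the word table; ids at or above vocab_size index
# into the per-task slice of the prompt table.
vocab_size, hidden, task_vocab_size, num_tasks = 10, 4, 3, 2
word_table = np.random.rand(vocab_size, hidden)
prompt_table = np.random.rand(num_tasks * task_vocab_size, hidden)

tokens = np.array([1, 9, 10, 12])   # 10 and 12 are prompt-tuned (virtual) ids
tasks = np.array([0, 0, 1, 1])      # task assigned to each token

is_prompt = tokens &gt;= vocab_size
normal_ids = np.where(is_prompt, 0, tokens)     # masked to a safe id
prompt_ids = np.where(is_prompt, tokens - vocab_size, 0) + tasks * task_vocab_size

embedded = np.where(is_prompt[:, None],
                    prompt_table[prompt_ids],   # "prompt" branch
                    word_table[normal_ids])     # "normal" branch
</pre></div></div>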
</dd></dl>

</dd></dl>

</section>

<section id="module-tensorrt_llm.layers.linear">

<span id="linear"></span><h2>Linear<a class="headerlink" href="#module-tensorrt_llm.layers.linear" title="Link to this heading"></a></h2>

<dl class="py attribute">

<dt class="sig sig-object py" id="tensorrt_llm.layers.linear.ColumnLinear">
<span class="sig-prename descclassname"><span class="pre">tensorrt_llm.layers.linear.</span></span><span class="sig-name descname"><span class="pre">ColumnLinear</span></span><a class="headerlink" href="#tensorrt_llm.layers.linear.ColumnLinear" title="Link to this definition"></a></dt>
<dd><p>alias of <a class="reference internal" href="#tensorrt_llm.layers.linear.Linear" title="tensorrt_llm.layers.linear.Linear"><code class="xref py py-class docutils literal notranslate"><span class="pre">Linear</span></code></a></p>

</dd></dl>

<dl class="py class">

<dt class="sig sig-object py" id="tensorrt_llm.layers.linear.Linear">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.layers.linear.</span></span><span class="sig-name descname"><span class="pre">Linear</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">in_features</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_features</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">bias</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_group</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">gather_output</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">share_weight</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">strict_dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">pad_lda</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">prefer_managed_weight</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">is_qkv</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/linear.html#Linear"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.linear.Linear" title="Link to this definition"></a></dt>
<dd><p>Bases: <a class="reference internal" href="#tensorrt_llm.layers.linear.LinearBase" title="tensorrt_llm.layers.linear.LinearBase"><code class="xref py py-class docutils literal notranslate"><span class="pre">LinearBase</span></code></a></p>

<dl class="py method">

<dt class="sig sig-object py" id="tensorrt_llm.layers.linear.Linear.collect_and_bias">
<span class="sig-name descname"><span class="pre">collect_and_bias</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em>, <em class="sig-param"><span class="o"><span class="pre">**</span></span><span class="n"><span class="pre">kwargs</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/linear.html#Linear.collect_and_bias"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.linear.Linear.collect_and_bias" title="Link to this definition"></a></dt>
<dd></dd></dl>

<dl class="py method">

<dt class="sig sig-object py" id="tensorrt_llm.layers.linear.Linear.postprocess">
<span class="sig-name descname"><span class="pre">postprocess</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">tllm_key</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">weights</span></span></em>, <em class="sig-param"><span class="o"><span class="pre">**</span></span><span class="n"><span class="pre">kwargs</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/linear.html#Linear.postprocess"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.linear.Linear.postprocess" title="Link to this definition"></a></dt>
<dd></dd></dl>

<dl class="py method">

<dt class="sig sig-object py" id="tensorrt_llm.layers.linear.Linear.tp_split_dim">
<em class="property"><span class="pre">classmethod</span><span class="w"> </span></em><span class="sig-name descname"><span class="pre">tp_split_dim</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span> <span class="sig-return"><span class="sig-return-icon">→</span> <span class="sig-return-typehint"><span class="pre">int</span></span></span><a class="reference internal" href="../_modules/tensorrt_llm/layers/linear.html#Linear.tp_split_dim"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.linear.Linear.tp_split_dim" title="Link to this definition"></a></dt>
<dd></dd></dl>

</dd></dl>

<dl class="py class">

<dt class="sig sig-object py" id="tensorrt_llm.layers.linear.LinearBase">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.layers.linear.</span></span><span class="sig-name descname"><span class="pre">LinearBase</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">local_in_features</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">local_out_features</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">bias</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_group</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">share_weight</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">strict_dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">pad_lda</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">prefer_managed_weight</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">True</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/linear.html#LinearBase"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.linear.LinearBase" title="Link to this definition"></a></dt>
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code></p>

<dl class="py method">

<dt class="sig sig-object py" id="tensorrt_llm.layers.linear.LinearBase.collect_and_bias">
<em class="property"><span class="pre">abstract</span><span class="w"> </span></em><span class="sig-name descname"><span class="pre">collect_and_bias</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span></em><span class="sig-paren">)</span> <span class="sig-return"><span class="sig-return-icon">→</span> <span class="sig-return-typehint"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span></span><a class="reference internal" href="../_modules/tensorrt_llm/layers/linear.html#LinearBase.collect_and_bias"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.linear.LinearBase.collect_and_bias" title="Link to this definition"></a></dt>
<dd></dd></dl>

<dl class="py method">

<dt class="sig sig-object py" id="tensorrt_llm.layers.linear.LinearBase.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">lora_runtime_params</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">LoraRuntimeParams</span><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">lora_hidden_state</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="o"><span class="pre">**</span></span><span class="n"><span class="pre">kwargs</span></span></em><span class="sig-paren">)</span> <span class="sig-return"><span class="sig-return-icon">→</span> <span class="sig-return-typehint"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span></span><a class="reference internal" href="../_modules/tensorrt_llm/layers/linear.html#LinearBase.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.linear.LinearBase.forward" title="Link to this definition"></a></dt>
<dd></dd></dl>

<dl class="py method">

<dt class="sig sig-object py" id="tensorrt_llm.layers.linear.LinearBase.get_weight">
<span class="sig-name descname"><span class="pre">get_weight</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span> <span class="sig-return"><span class="sig-return-icon">→</span> <span class="sig-return-typehint"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span></span><a class="reference internal" href="../_modules/tensorrt_llm/layers/linear.html#LinearBase.get_weight"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.linear.LinearBase.get_weight" title="Link to this definition"></a></dt>
<dd></dd></dl>

<dl class="py method">

<dt class="sig sig-object py" id="tensorrt_llm.layers.linear.LinearBase.multiply_and_lora">
<span class="sig-name descname"><span class="pre">multiply_and_lora</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">weight</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">gemm_plugin</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">str</span><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">low_latency_gemm_plugin</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">str</span><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_fp8</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">bool</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">alpha</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">ndarray</span><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">lora_runtime_params</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">LoraRuntimeParams</span><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">lora_hidden_state</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/linear.html#LinearBase.multiply_and_lora"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" 
href="#tensorrt_llm.layers.linear.LinearBase.multiply_and_lora" title="Link to this definition"></a></dt>
<dd></dd></dl>

<dl class="py method">

<dt class="sig sig-object py" id="tensorrt_llm.layers.linear.LinearBase.multiply_collect">
<span class="sig-name descname"><span class="pre">multiply_collect</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">weight</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">gemm_plugin</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">str</span><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">low_latency_gemm_plugin</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">str</span><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_fp8</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">bool</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">alpha</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">ndarray</span><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">lora_runtime_params</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">LoraRuntimeParams</span><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">lora_hidden_state</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="o"><span class="pre">**</span></span><span class="n"><span class="pre">kwargs</span></span></em><span class="sig-paren">)</span><a class="reference internal" 
href="../_modules/tensorrt_llm/layers/linear.html#LinearBase.multiply_collect"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.linear.LinearBase.multiply_collect" title="Link to this definition"></a></dt>
<dd></dd></dl>

<dl class="py method">

<dt class="sig sig-object py" id="tensorrt_llm.layers.linear.LinearBase.tp_split_dim">
<em class="property"><span class="pre">abstract</span><span class="w"> </span><span class="pre">classmethod</span><span class="w"> </span></em><span class="sig-name descname"><span class="pre">tp_split_dim</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span> <span class="sig-return"><span class="sig-return-icon">→</span> <span class="sig-return-typehint"><span class="pre">int</span></span></span><a class="reference internal" href="../_modules/tensorrt_llm/layers/linear.html#LinearBase.tp_split_dim"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.linear.LinearBase.tp_split_dim" title="Link to this definition"></a></dt>
<dd></dd></dl>

<dl class="py method">

<dt class="sig sig-object py" id="tensorrt_llm.layers.linear.LinearBase.weight_is_kn">
<span class="sig-name descname"><span class="pre">weight_is_kn</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/linear.html#LinearBase.weight_is_kn"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.linear.LinearBase.weight_is_kn" title="Link to this definition"></a></dt>
<dd></dd></dl>

<dl class="py method">

<dt class="sig sig-object py" id="tensorrt_llm.layers.linear.LinearBase.weight_loader">
<span class="sig-name descname"><span class="pre">weight_loader</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">mapping</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">Mapping</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">param</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">Parameter</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">loaded_weight</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">Tensor</span></span></em><span class="sig-paren">)</span> <span class="sig-return"><span class="sig-return-icon">→</span> <span class="sig-return-typehint"><span class="pre">None</span></span></span><a class="reference internal" href="../_modules/tensorrt_llm/layers/linear.html#LinearBase.weight_loader"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.linear.LinearBase.weight_loader" title="Link to this definition"></a></dt>
<dd></dd></dl>

</dd></dl>

<dl class="py class">

<dt class="sig sig-object py" id="tensorrt_llm.layers.linear.RowLinear">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.layers.linear.</span></span><span class="sig-name descname"><span class="pre">RowLinear</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">in_features</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_features</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">bias</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_group</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">strict_dtype</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">bool</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">pad_lda</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">prefer_managed_weight</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">is_expert</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/linear.html#RowLinear"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.linear.RowLinear" title="Link to this definition"></a></dt>
<dd><p>Bases: <a class="reference internal" href="#tensorrt_llm.layers.linear.LinearBase" title="tensorrt_llm.layers.linear.LinearBase"><code class="xref py py-class docutils literal notranslate"><span class="pre">LinearBase</span></code></a></p>

<dl class="py method">

<dt class="sig sig-object py" id="tensorrt_llm.layers.linear.RowLinear.collect_and_bias">
<span class="sig-name descname"><span class="pre">collect_and_bias</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em>, <em class="sig-param"><span class="o"><span class="pre">**</span></span><span class="n"><span class="pre">kwargs</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/linear.html#RowLinear.collect_and_bias"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.linear.RowLinear.collect_and_bias" title="Link to this definition"></a></dt>
<dd></dd></dl>

<dl class="py method">

<dt class="sig sig-object py" id="tensorrt_llm.layers.linear.RowLinear.tp_split_dim">
<em class="property"><span class="pre">classmethod</span><span class="w"> </span></em><span class="sig-name descname"><span class="pre">tp_split_dim</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span> <span class="sig-return"><span class="sig-return-icon">→</span> <span class="sig-return-typehint"><span class="pre">int</span></span></span><a class="reference internal" href="../_modules/tensorrt_llm/layers/linear.html#RowLinear.tp_split_dim"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.linear.RowLinear.tp_split_dim" title="Link to this definition"></a></dt>
<dd></dd></dl>

</dd></dl>
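<p>Linear (aliased as ColumnLinear) partitions its weight along the output dimension, optionally gathering the per-rank outputs (gather_output), while RowLinear partitions along the input dimension and sums the per-rank partial products. The sketch below illustrates the arithmetic of this standard Megatron-style pairing in plain NumPy; it is an illustration only, not the plugin-backed implementation:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre>import numpy as np

# Column-parallel GEMM followed by row-parallel GEMM: each rank works on its
# own shard, and a single sum at the end (an all-reduce over tp_group in
# practice) recovers the full result.
tp_size = 2
x = np.random.rand(3, 8)                   # [tokens, in_features]
w1 = np.random.rand(8, 16)                 # column-parallel weight
w2 = np.random.rand(16, 8)                 # row-parallel weight

w1_shards = np.split(w1, tp_size, axis=1)  # split by output columns
w2_shards = np.split(w2, tp_size, axis=0)  # split by input rows

partials = [(x @ w1_shards[r]) @ w2_shards[r] for r in range(tp_size)]
y = sum(partials)                          # the one communication step

assert np.allclose(y, (x @ w1) @ w2)
</pre></div></div>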
</section>

<section id="module-tensorrt_llm.layers.mlp">

<span id="mlp"></span><h2>MLP<a class="headerlink" href="#module-tensorrt_llm.layers.mlp" title="Link to this heading"></a></h2>

<dl class="py class">

<dt class="sig sig-object py" id="tensorrt_llm.layers.mlp.FusedGatedMLP">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.layers.mlp.</span></span><span class="sig-name descname"><span class="pre">FusedGatedMLP</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">hidden_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">ffn_hidden_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_act</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">bias=True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_group=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_size=1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">quant_mode=QuantMode.None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">inner_layernorm=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">eps=1e-05</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">is_expert=False</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/mlp.html#FusedGatedMLP"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.mlp.FusedGatedMLP" title="Link to this definition"></a></dt>
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code></p>

<dl class="py method">

<dt class="sig sig-object py" id="tensorrt_llm.layers.mlp.FusedGatedMLP.fc_gate">
<span class="sig-name descname"><span class="pre">fc_gate</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">hidden_states</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">lora_layer_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/mlp.html#FusedGatedMLP.fc_gate"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.mlp.FusedGatedMLP.fc_gate" title="Link to this definition"></a></dt>
<dd></dd></dl>

<dl class="py method">

<dt class="sig sig-object py" id="tensorrt_llm.layers.mlp.FusedGatedMLP.fc_gate_plugin">
<span class="sig-name descname"><span class="pre">fc_gate_plugin</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">hidden_states</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">lora_layer_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/mlp.html#FusedGatedMLP.fc_gate_plugin"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.mlp.FusedGatedMLP.fc_gate_plugin" title="Link to this definition"></a></dt>
<dd></dd></dl>

<dl class="py method">

<dt class="sig sig-object py" id="tensorrt_llm.layers.mlp.FusedGatedMLP.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">hidden_states</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">lora_layer_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">reduce_fusion_params</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.AllReduceFusionParams" title="tensorrt_llm.functional.AllReduceFusionParams"><span class="pre">AllReduceFusionParams</span></a><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/mlp.html#FusedGatedMLP.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.mlp.FusedGatedMLP.forward" title="Link to this definition"></a></dt>
<dd></dd></dl>

</dd></dl>

<dl class="py class">

<dt class="sig sig-object py" id="tensorrt_llm.layers.mlp.GatedMLP">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.layers.mlp.</span></span><span class="sig-name descname"><span class="pre">GatedMLP</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">hidden_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">ffn_hidden_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_act</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">bias=True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_group=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_size=1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">quant_mode=QuantMode.None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">inner_layernorm=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">eps=1e-05</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">is_expert=False</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/mlp.html#GatedMLP"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.mlp.GatedMLP" title="Link to this definition"></a></dt>
<dd><p>Bases: <a class="reference internal" href="#tensorrt_llm.layers.mlp.MLP" title="tensorrt_llm.layers.mlp.MLP"><code class="xref py py-class docutils literal notranslate"><span class="pre">MLP</span></code></a></p>

<dl class="py method">

<dt class="sig sig-object py" id="tensorrt_llm.layers.mlp.GatedMLP.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">hidden_states</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">lora_layer_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">reduce_fusion_params</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.AllReduceFusionParams" title="tensorrt_llm.functional.AllReduceFusionParams"><span class="pre">AllReduceFusionParams</span></a><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/mlp.html#GatedMLP.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.mlp.GatedMLP.forward" title="Link to this definition"></a></dt>
<dd></dd></dl>

</dd></dl>

<dl class="py class">

<dt class="sig sig-object py" id="tensorrt_llm.layers.mlp.MLP">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.layers.mlp.</span></span><span class="sig-name descname"><span class="pre">MLP</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">hidden_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">ffn_hidden_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_act</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">bias=True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_group=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_size=1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">quant_mode=QuantMode.None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">inner_layernorm=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">eps=1e-05</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">is_expert=False</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/mlp.html#MLP"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.mlp.MLP" title="Link to this definition"></a></dt>
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code></p>

<dl class="py method">

<dt class="sig sig-object py" id="tensorrt_llm.layers.mlp.MLP.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">hidden_states</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">lora_layer_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">gegelu_limit</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/mlp.html#MLP.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.mlp.MLP.forward" title="Link to this definition"></a></dt>
<dd></dd></dl>

</dd></dl>
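<p>The three MLP variants above differ in how the first projection is organized: MLP applies a single fc projection followed by hidden_act and the down projection; GatedMLP adds a parallel gate projection whose output multiplies the activated branch elementwise (the SwiGLU-style pattern); FusedGatedMLP evaluates fc and gate as one fused GEMM. A rough NumPy sketch of that data flow, assuming SiLU as hidden_act and hypothetical weights (which branch receives the activation is a convention that can vary between models):</p>
<div class="highlight-python notranslate"><div class="highlight"><pre>import numpy as np

def silu(v):                                   # example hidden_act
    return v / (1.0 + np.exp(-v))

hidden_size, ffn_hidden_size = 8, 16
x = np.random.rand(3, hidden_size)
w_fc = np.random.rand(hidden_size, ffn_hidden_size)
w_gate = np.random.rand(hidden_size, ffn_hidden_size)
w_proj = np.random.rand(ffn_hidden_size, hidden_size)

# GatedMLP-style flow: elementwise gate on the activated branch.
inter = silu(x @ w_fc) * (x @ w_gate)
out = inter @ w_proj

# FusedGatedMLP-style flow: fc and gate as one wider GEMM, then split.
w_fused = np.concatenate([w_fc, w_gate], axis=1)
fc_part, gate_part = np.split(x @ w_fused, 2, axis=1)
assert np.allclose(silu(fc_part) * gate_part, inter)
</pre></div></div>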
<dl class="py function">

<dt class="sig sig-object py" id="tensorrt_llm.layers.mlp.fc_gate_lora">
<span class="sig-prename descclassname"><span class="pre">tensorrt_llm.layers.mlp.</span></span><span class="sig-name descname"><span class="pre">fc_gate_lora</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">hidden_states</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">lora</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">lora_layer_params</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/mlp.html#fc_gate_lora"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.mlp.fc_gate_lora" title="Link to this definition"></a></dt>
<dd></dd></dl>

</section>

<section id="module-tensorrt_llm.layers.normalization">

<span id="normalization"></span><h2>Normalization<a class="headerlink" href="#module-tensorrt_llm.layers.normalization" title="Link to this heading"></a></h2>

<dl class="py class">

<dt class="sig sig-object py" id="tensorrt_llm.layers.normalization.GroupNorm">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.layers.normalization.</span></span><span class="sig-name descname"><span class="pre">GroupNorm</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">num_groups</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_channels</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">eps</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1e-05</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">affine</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/normalization.html#GroupNorm"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.normalization.GroupNorm" title="Link to this definition"></a></dt>
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code></p>

<dl class="py method">

<dt class="sig sig-object py" id="tensorrt_llm.layers.normalization.GroupNorm.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/normalization.html#GroupNorm.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.normalization.GroupNorm.forward" title="Link to this definition"></a></dt>
<dd></dd></dl>
</dd></dl>
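<p>For reference, the standard group-normalization computation that this layer mirrors, sketched in NumPy: statistics are taken per sample over each group of channels, followed by a per-channel affine transform when <code class="docutils literal notranslate"><span class="pre">affine=True</span></code>. A numerical illustration only, not the TensorRT-LLM graph API.</p>
<div class="highlight-python notranslate"><div class="highlight"><pre>import numpy as np

def group_norm(x, num_groups, weight, bias, eps=1e-05):
    # x: [N, C, H, W]; mean/variance per sample, per channel group
    n, c, h, w = x.shape
    g = x.reshape(n, num_groups, c // num_groups, h, w)
    mean = g.mean(axis=(2, 3, 4), keepdims=True)
    var = g.var(axis=(2, 3, 4), keepdims=True)
    x = ((g - mean) / np.sqrt(var + eps)).reshape(n, c, h, w)
    # affine parameters are per channel
    return x * weight.reshape(1, c, 1, 1) + bias.reshape(1, c, 1, 1)

x = np.random.randn(2, 8, 4, 4).astype(np.float32)
y = group_norm(x, num_groups=4, weight=np.ones(8), bias=np.zeros(8))
print(y.shape)  # (2, 8, 4, 4)
</pre></div></div>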
<dl class="py class">
<dt class="sig sig-object py" id="tensorrt_llm.layers.normalization.LayerNorm">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.layers.normalization.</span></span><span class="sig-name descname"><span class="pre">LayerNorm</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">normalized_shape</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">eps</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1e-05</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">elementwise_affine</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">bias</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_dim</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">-1</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/normalization.html#LayerNorm"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.normalization.LayerNorm" title="Link to this definition"></a></dt>
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code></p>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.layers.normalization.LayerNorm.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">normalized_shape</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/normalization.html#LayerNorm.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.normalization.LayerNorm.forward" title="Link to this definition"></a></dt>
<dd></dd></dl>
</dd></dl>
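<p>The underlying math is ordinary layer normalization: normalize over the trailing dimension, then apply the learned scale and (optional) bias. A minimal NumPy sketch of that formula, for orientation; tensor-parallel details (<code class="docutils literal notranslate"><span class="pre">tp_size</span></code>, <code class="docutils literal notranslate"><span class="pre">tp_dim</span></code>) are not modeled.</p>
<div class="highlight-python notranslate"><div class="highlight"><pre>import numpy as np

def layer_norm(x, weight, bias, eps=1e-05):
    # normalize over the last dimension, then apply the affine transform
    mean = x.mean(axis=-1, keepdims=True)
    var = x.var(axis=-1, keepdims=True)
    return (x - mean) / np.sqrt(var + eps) * weight + bias

x = np.random.randn(4, 8).astype(np.float32)
print(layer_norm(x, np.ones(8), np.zeros(8)).shape)  # (4, 8)
</pre></div></div>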
<dl class="py class">
<dt class="sig sig-object py" id="tensorrt_llm.layers.normalization.RmsNorm">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.layers.normalization.</span></span><span class="sig-name descname"><span class="pre">RmsNorm</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">normalized_shape</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_groups</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">eps</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1e-06</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">elementwise_affine</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/normalization.html#RmsNorm"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.normalization.RmsNorm" title="Link to this definition"></a></dt>
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code></p>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.layers.normalization.RmsNorm.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">normalized_shape</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/normalization.html#RmsNorm.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.normalization.RmsNorm.forward" title="Link to this definition"></a></dt>
<dd></dd></dl>
</dd></dl>
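<p>RMSNorm (Zhang &amp; Sennrich, 2019) drops the mean subtraction and bias of LayerNorm and rescales by the reciprocal root mean square over the last dimension. The formula, sketched in NumPy (illustrative only; the <code class="docutils literal notranslate"><span class="pre">num_groups</span></code> variant is not modeled here):</p>
<div class="highlight-python notranslate"><div class="highlight"><pre>import numpy as np

def rms_norm(x, weight, eps=1e-06):
    # no centering, no bias: scale by 1 / rms(x) along the last axis
    rms = np.sqrt(np.mean(x * x, axis=-1, keepdims=True) + eps)
    return x / rms * weight

x = np.random.randn(4, 8).astype(np.float32)
print(rms_norm(x, np.ones(8)).shape)  # (4, 8)
</pre></div></div>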
</section>
<section id="module-tensorrt_llm.layers.pooling">
<span id="pooling"></span><h2>Pooling<a class="headerlink" href="#module-tensorrt_llm.layers.pooling" title="Link to this heading"></a></h2>
<dl class="py class">
<dt class="sig sig-object py" id="tensorrt_llm.layers.pooling.AvgPool2d">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.layers.pooling.</span></span><span class="sig-name descname"><span class="pre">AvgPool2d</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">kernel_size</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">Tuple</span><span class="p"><span class="pre">[</span></span><span class="pre">int</span><span class="p"><span class="pre">]</span></span></span></em>, <em class="sig-param"><span class="n"><span class="pre">stride</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">Tuple</span><span class="p"><span class="pre">[</span></span><span class="pre">int</span><span class="p"><span class="pre">]</span></span><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">padding</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">Tuple</span><span class="p"><span class="pre">[</span></span><span class="pre">int</span><span class="p"><span class="pre">]</span></span><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">(0,</span> <span class="pre">0)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">ceil_mode</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">bool</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">count_include_pad</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">bool</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">True</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/pooling.html#AvgPool2d"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.pooling.AvgPool2d" title="Link to this definition"></a></dt>
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code></p>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.layers.pooling.AvgPool2d.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">input</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/layers/pooling.html#AvgPool2d.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.layers.pooling.AvgPool2d.forward" title="Link to this definition"></a></dt>
<dd></dd></dl>
</dd></dl>
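<p>As a reference for the semantics, here is a plain NumPy sketch of 2D average pooling over NCHW input, covering only the unpadded, floor-mode case (padding, <code class="docutils literal notranslate"><span class="pre">ceil_mode</span></code>, and <code class="docutils literal notranslate"><span class="pre">count_include_pad</span></code> are omitted for brevity):</p>
<div class="highlight-python notranslate"><div class="highlight"><pre>import numpy as np

def avg_pool2d(x, kernel_size, stride=None):
    # x: [N, C, H, W]; stride defaults to the kernel size
    kh, kw = kernel_size
    sh, sw = stride if stride is not None else kernel_size
    n, c, h, w = x.shape
    oh, ow = (h - kh) // sh + 1, (w - kw) // sw + 1
    out = np.empty((n, c, oh, ow), dtype=x.dtype)
    for i in range(oh):
        for j in range(ow):
            window = x[:, :, i * sh:i * sh + kh, j * sw:j * sw + kw]
            out[:, :, i, j] = window.mean(axis=(2, 3))
    return out

x = np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4)
print(avg_pool2d(x, (2, 2)))  # [[[[ 2.5  4.5] [10.5 12.5]]]]
</pre></div></div>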
</section>
</section>
</div>
</div>
<footer><div class="rst-footer-buttons" role="navigation" aria-label="Footer">
<a href="../llm-api-examples/llm_multilora.html" class="btn btn-neutral float-left" title="Generate text with multiple LoRA adapters" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left" aria-hidden="true"></span> Previous</a>
<a href="tensorrt_llm.functional.html" class="btn btn-neutral float-right" title="Functionals" accesskey="n" rel="next">Next <span class="fa fa-arrow-circle-right" aria-hidden="true"></span></a>
</div>
<hr/>
<div role="contentinfo">
<div class="footer">
<p>
Copyright © 2024 NVIDIA Corporation
</p>
<p>
<a class="Link" href="https://www.nvidia.com/en-us/about-nvidia/privacy-policy/" target="_blank" rel="noopener"
data-cms-ai="0">Privacy Policy</a> |
<a class="Link" href="https://www.nvidia.com/en-us/about-nvidia/privacy-center/" target="_blank" rel="noopener"
data-cms-ai="0">Manage My Privacy</a> |
<a class="Link" href="https://www.nvidia.com/en-us/preferences/start/" target="_blank" rel="noopener"
data-cms-ai="0">Do Not Sell or Share My Data</a> |
<a class="Link" href="https://www.nvidia.com/en-us/about-nvidia/terms-of-service/" target="_blank"
rel="noopener" data-cms-ai="0">Terms of Service</a> |
<a class="Link" href="https://www.nvidia.com/en-us/about-nvidia/accessibility/" target="_blank" rel="noopener"
data-cms-ai="0">Accessibility</a> |
<a class="Link" href="https://www.nvidia.com/en-us/about-nvidia/company-policies/" target="_blank"
rel="noopener" data-cms-ai="0">Corporate Policies</a> |
<a class="Link" href="https://www.nvidia.com/en-us/product-security/" target="_blank" rel="noopener"
data-cms-ai="0">Product Security</a> |
<a class="Link" href="https://www.nvidia.com/en-us/contact/" target="_blank" rel="noopener"
data-cms-ai="0">Contact</a>
</p>
</div>
</div>
</footer>
</div>
</div>
</section>
</div>
<script>
jQuery(function () {
SphinxRtdTheme.Navigation.enable(true);
});
</script>
</body>
</html>