Models
======

class tensorrt_llm.models.BaichuanForCausalLM(num_layers, num_heads, hidden_size, vocab_size, hidden_act, max_position_embeddings, position_embedding_type, dtype, mlp_hidden_size=None, mapping=<tensorrt_llm.mapping.Mapping object>)

    Bases: BaichuanModel, GenerationMixin
    forward(input_ids: Tensor, position_ids=None, use_cache=False, last_token_ids=None, attention_mask=None, kv_cache_params=None, attention_params=None)

    prepare_inputs(max_batch_size, max_input_len, max_new_tokens, use_cache, max_beam_width, max_num_tokens: int | None = None)

        @brief: Prepare input Tensors for the model; the given sizes are used to determine the ranges of the dimensions when using TRT dynamic shapes.

        @return: a list containing values that can be fed into self.forward().
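The prepare_inputs()/forward() pair above is how a decoder-only model is wired into the TensorRT network at build time. A minimal usage sketch, assuming model is an already-constructed BaichuanForCausalLM and using purely illustrative sizes (none of these values are defaults taken from this page)::

    # Hypothetical sketch: prepare_inputs() creates the network input Tensors for
    # the requested build-time ranges, and (per the @return note above) the
    # returned list can be unpacked positionally into forward().
    inputs = model.prepare_inputs(
        max_batch_size=8,     # illustrative
        max_input_len=1024,   # illustrative
        max_new_tokens=256,   # illustrative
        use_cache=True,
        max_beam_width=1,
    )

    # Defines the forward graph; in a full build script this runs inside the
    # network-definition context of the TensorRT-LLM builder.
    outputs = model.forward(*inputs)

The same pattern applies to the other *ForCausalLM and *HeadModel classes on this page that expose prepare_inputs().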
class tensorrt_llm.models.BertForQuestionAnswering(num_layers, num_heads, hidden_size, vocab_size, hidden_act, max_position_embeddings, type_vocab_size, num_labels=2, mapping=<tensorrt_llm.mapping.Mapping object>, dtype=None)

    Bases: Module

    forward(input_ids=None, input_lengths=None, token_type_ids=None, position_ids=None, hidden_states=None)
class tensorrt_llm.models.BertModel(num_layers, num_heads, hidden_size, vocab_size, hidden_act, max_position_embeddings, type_vocab_size, mapping=<tensorrt_llm.mapping.Mapping object>, dtype=None)

    Bases: Module

    forward(input_ids=None, input_lengths=None, token_type_ids=None, position_ids=None, hidden_states=None)
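The BERT variants expose only forward() and no prepare_inputs(), so the caller constructs the module from the hyperparameters listed above and supplies its own input Tensors when defining the network. A construction-only sketch, where the numeric values are assumptions roughly matching a BERT-base configuration rather than anything stated on this page::

    import tensorrt_llm

    # Illustrative, assumed hyperparameters (BERT-base-like); only the argument
    # names come from the documented signature above.
    bert = tensorrt_llm.models.BertModel(
        num_layers=12,
        num_heads=12,
        hidden_size=768,
        vocab_size=30522,
        hidden_act='gelu',
        max_position_embeddings=512,
        type_vocab_size=2,
    )
    # bert.forward(input_ids=..., input_lengths=..., token_type_ids=...) is then
    # called with TRT-LLM Tensors created by the surrounding build script.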
class tensorrt_llm.models.BloomForCausalLM(num_layers, num_heads, hidden_size, vocab_size, max_position_embeddings, hidden_act='gelu', dtype=None, mapping=<tensorrt_llm.mapping.Mapping object>, mlp_hidden_size=None, bias=True, quant_mode=QuantMode.None, multi_query_mode=False, use_parallel_embedding=False, embedding_sharding_dim=0, share_embedding_table=False)

    Bases: BloomModel, GenerationMixin

    forward(input_ids: Tensor, position_ids=None, use_cache=False, last_token_ids=None, attention_mask=None, kv_cache_params=None, attention_params=None)

    prepare_inputs(max_batch_size, max_input_len, max_new_tokens, use_cache, max_beam_width: int = 1)

        @brief: Prepare input Tensors for the model; the given sizes are used to determine the ranges of the dimensions when using TRT dynamic shapes.

        @return: a list containing values that can be fed into self.forward().
class tensorrt_llm.models.BloomModel(num_layers, num_heads, hidden_size, vocab_size, hidden_act, max_position_embeddings, dtype=None, mapping=<tensorrt_llm.mapping.Mapping object>, mlp_hidden_size=None, bias=True, quant_mode=QuantMode.None, multi_query_mode=False, use_parallel_embedding=False, embedding_sharding_dim=0)

    Bases: Module

    forward(input_ids: Tensor, position_ids=None, use_cache=False, attention_mask=None, kv_cache_params=None, attention_params=None)
class tensorrt_llm.models.ChatGLM2HeadModel(hidden_size, num_attention_heads, kv_channels=128, multi_query_group_num=2, apply_query_key_layer_scaling=False, attention_mask_type=AttentionMaskType.causal, qkv_bias=True, linear_bias=False, use_int8_kv_cache=False, mapping=<tensorrt_llm.mapping.Mapping object>, ffn_hiden_size=13696, num_layers=28, eps=1e-05, act_func='swiglu', dtype=<DataType.HALF: 1>, quant_mode=QuantMode.None, max_seq_length=32768, vocab_size=65024, use_cache=True, kv_cache_block_pointers=None)

    Bases: ChatGLM2Model, GenerationMixin

    forward(input_ids=None, position_ids=None, last_token_ids=None, kv_cache_params=None, attention_params=None)

    prepare_inputs(max_batch_size, max_input_len, max_new_tokens, use_cache, max_beam_width: int = 1)

        @brief: Prepare input Tensors for the model; the given sizes are used to determine the ranges of the dimensions when using TRT dynamic shapes.

        @return: a list containing values that can be fed into self.forward().
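Most ChatGLM2HeadModel constructor arguments already default to ChatGLM2-6B-sized values (for example ffn_hiden_size=13696, num_layers=28, vocab_size=65024), so only the two required arguments must be supplied. A construction sketch, with assumed ChatGLM2-6B values that are not taken from this page::

    import tensorrt_llm

    # Hypothetical sketch; hidden_size and num_attention_heads are assumed
    # ChatGLM2-6B values, everything else falls back to the defaults shown in
    # the signature above.
    chatglm2 = tensorrt_llm.models.ChatGLM2HeadModel(
        hidden_size=4096,
        num_attention_heads=32,
    )
    # prepare_inputs() and forward() are then used exactly as in the earlier
    # BaichuanForCausalLM sketch.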
class tensorrt_llm.models.ChatGLM2Model(hidden_size, num_attention_heads, kv_channels=128, multi_query_group_num=2, apply_query_key_layer_scaling=False, attention_mask_type=AttentionMaskType.causal, qkv_bias=True, linear_bias=False, use_int8_kv_cache=False, mapping=<tensorrt_llm.mapping.Mapping object>, ffn_hiden_size=13696, num_layers=28, eps=1e-05, act_func='swiglu', dtype=<DataType.HALF: 1>, quant_mode=QuantMode.None, max_seq_length=32768, vocab_size=65024)

    Bases: Module

    forward(input_ids: Tensor, position_ids, use_cache=True, kv_cache_params=None, attention_params=None)
<dl class="py class">
|
|
<dt class="sig sig-object py" id="tensorrt_llm.models.ChatGLM6BHeadModel">
|
|
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.models.</span></span><span class="sig-name descname"><span class="pre">ChatGLM6BHeadModel</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">num_layers</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_heads</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">vocab_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_act</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_position_embeddings</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">mapping=<tensorrt_llm.mapping.Mapping</span> <span class="pre">object></span></span></em>, <em class="sig-param"><span class="n"><span class="pre">apply_query_key_layer_scaling=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">inter_size=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">bias=True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">quant_mode=QuantMode.None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/chatglm6b/model.html#ChatGLM6BHeadModel"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.ChatGLM6BHeadModel" title="Permalink to this definition"></a></dt>
|
|
<dd><p>Bases: <a class="reference internal" href="#tensorrt_llm.models.ChatGLM6BModel" title="tensorrt_llm.models.chatglm6b.model.ChatGLM6BModel"><code class="xref py py-class docutils literal notranslate"><span class="pre">ChatGLM6BModel</span></code></a></p>
|
|
<dl class="py method">
|
|
<dt class="sig sig-object py" id="tensorrt_llm.models.ChatGLM6BHeadModel.forward">
|
|
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">input_ids</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">position_ids</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_cache</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">last_token_ids</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kv_cache_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/chatglm6b/model.html#ChatGLM6BHeadModel.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.ChatGLM6BHeadModel.forward" title="Permalink to this definition"></a></dt>
|
|
<dd></dd></dl>
|
|
|
|
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.models.ChatGLM6BHeadModel.prepare_inputs">
<span class="sig-name descname"><span class="pre">prepare_inputs</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">max_batch_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_input_len</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_new_tokens</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_cache</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_beam_width</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">1</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/chatglm6b/model.html#ChatGLM6BHeadModel.prepare_inputs"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.ChatGLM6BHeadModel.prepare_inputs" title="Permalink to this definition"></a></dt>
<dd><p>@brief: Prepare input tensors for the model. The given sizes are used to determine the
ranges of the tensor dimensions when using TRT dynamic shapes.</p>
<p>@return: a list of values that can be fed into self.forward()</p>
</dd></dl>
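<p>A minimal build-flow sketch, assuming the usual TensorRT-LLM pattern of tracing the model inside <code class="docutils literal notranslate"><span class="pre">net_guard()</span></code> and using illustrative ChatGLM-6B-like hyperparameters:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span>import tensorrt_llm
from tensorrt_llm.models import ChatGLM6BHeadModel

# Illustrative ChatGLM-6B-like sizes (assumed values; take them from the real checkpoint).
model = ChatGLM6BHeadModel(num_layers=28,
                           num_heads=32,
                           hidden_size=4096,
                           vocab_size=130528,
                           hidden_act='gelu',
                           max_position_embeddings=2048,
                           dtype='float16')

builder = tensorrt_llm.Builder()
network = builder.create_network()
with tensorrt_llm.net_guard(network):
    network.set_named_parameters(model.named_parameters())
    # prepare_inputs() declares input tensors whose dynamic-shape ranges are
    # derived from the sizes below; forward() then traces the graph.
    inputs = model.prepare_inputs(max_batch_size=8,
                                  max_input_len=512,
                                  max_new_tokens=128,
                                  use_cache=True,
                                  max_beam_width=1)
    model(*inputs)
</pre></div>
</div>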
</dd></dl>
<dl class="py class">
<dt class="sig sig-object py" id="tensorrt_llm.models.ChatGLM6BModel">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.models.</span></span><span class="sig-name descname"><span class="pre">ChatGLM6BModel</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">num_layers</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_heads</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">vocab_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_act</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_position_embeddings</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">mapping=<tensorrt_llm.mapping.Mapping</span> <span class="pre">object></span></span></em>, <em class="sig-param"><span class="n"><span class="pre">apply_query_key_layer_scaling=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">inter_size=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">bias=True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">quant_mode=QuantMode.None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/chatglm6b/model.html#ChatGLM6BModel"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.ChatGLM6BModel" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code></p>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.models.ChatGLM6BModel.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">input_ids</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">position_ids</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_cache</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kv_cache_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/chatglm6b/model.html#ChatGLM6BModel.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.ChatGLM6BModel.forward" title="Permalink to this definition"></a></dt>
<dd></dd></dl>
</dd></dl>
<dl class="py class">
<dt class="sig sig-object py" id="tensorrt_llm.models.DecoderModel">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.models.</span></span><span class="sig-name descname"><span class="pre">DecoderModel</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">num_layers</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_heads</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">ffn_hidden_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">encoder_num_heads</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">encoder_hidden_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">vocab_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">logits_dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'float32'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_kv_heads</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_position_embeddings</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">has_position_embedding</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">relative_attention</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_distance</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_buckets</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">type_vocab_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">has_embedding_layernorm</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">has_embedding_scale</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">q_scaling</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1.0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">has_attention_qkvo_bias</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span 
class="pre">has_mlp_bias</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">has_model_final_layernorm</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layernorm_eps</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1e-05</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layernorm_position</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">LayerNormPositionType.pre_layernorm</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layernorm_type</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">LayerNormType.LayerNorm</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_act</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'relu'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">has_lm_head_bias</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_group</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">residual_scaling</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1.0</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/enc_dec/model.html#DecoderModel"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.DecoderModel" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code></p>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.models.DecoderModel.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">decoder_input_ids</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span></em>, <em class="sig-param"><span class="n"><span class="pre">encoder_output</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span></em>, <em class="sig-param"><span class="n"><span class="pre">position_ids</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">token_type_ids</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_cache</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_mask</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">last_token_ids</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kv_cache_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/enc_dec/model.html#DecoderModel.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.DecoderModel.forward" title="Permalink to this definition"></a></dt>
<dd></dd></dl>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.models.DecoderModel.prepare_inputs">
<span class="sig-name descname"><span class="pre">prepare_inputs</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">num_layers</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_batch_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_beam_width</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_input_len</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_new_tokens</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_encoder_input_len</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/enc_dec/model.html#DecoderModel.prepare_inputs"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.DecoderModel.prepare_inputs" title="Permalink to this definition"></a></dt>
<dd><p>@brief: Prepare input tensors for the model. The given sizes are used to determine the
ranges of the tensor dimensions when using TRT dynamic shapes.</p>
<p>@return: a list of values that can be fed into self.forward()</p>
</dd></dl>
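<p>A sketch of the decoder half of an encoder-decoder model, assuming illustrative T5-small-like sizes; the trace would run inside the same <code class="docutils literal notranslate"><span class="pre">net_guard()</span></code> block as in the sketch above:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span>from tensorrt_llm.models import DecoderModel

# Illustrative T5-small-like sizes (assumed values).
decoder = DecoderModel(num_layers=6,
                       num_heads=8,
                       hidden_size=512,
                       ffn_hidden_size=2048,
                       encoder_num_heads=8,
                       encoder_hidden_size=512,
                       vocab_size=32128,
                       dtype='float16',
                       relative_attention=True,
                       num_buckets=32,
                       max_distance=128)

# Besides the decoder-side ranges, prepare_inputs() also needs the
# encoder-side input-length range for the cross-attention tensors.
inputs = decoder.prepare_inputs(num_layers=6,
                                max_batch_size=8,
                                max_beam_width=1,
                                max_input_len=1,
                                max_new_tokens=200,
                                max_encoder_input_len=512)
decoder(*inputs)
</pre></div>
</div>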
</dd></dl>
<dl class="py class">
<dt class="sig sig-object py" id="tensorrt_llm.models.EncoderModel">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.models.</span></span><span class="sig-name descname"><span class="pre">EncoderModel</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">num_layers</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_heads</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">ffn_hidden_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">vocab_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_kv_heads</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_position_embeddings</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">has_position_embedding</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">relative_attention</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_distance</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_buckets</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">type_vocab_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">has_embedding_layernorm</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">has_embedding_scale</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">q_scaling</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1.0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">has_attention_qkvo_bias</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">has_mlp_bias</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">has_model_final_layernorm</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span 
class="pre">layernorm_eps</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1e-05</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layernorm_position</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">LayerNormPositionType.pre_layernorm</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layernorm_type</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">LayerNormType.LayerNorm</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_act</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'relu'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_group</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tp_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">residual_scaling</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1.0</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/enc_dec/model.html#EncoderModel"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.EncoderModel" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code></p>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.models.EncoderModel.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">input_ids</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span></em>, <em class="sig-param"><span class="n"><span class="pre">input_lengths</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">position_ids</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">token_type_ids</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/enc_dec/model.html#EncoderModel.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.EncoderModel.forward" title="Permalink to this definition"></a></dt>
<dd></dd></dl>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.models.EncoderModel.prepare_inputs">
<span class="sig-name descname"><span class="pre">prepare_inputs</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">max_batch_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_input_len</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/enc_dec/model.html#EncoderModel.prepare_inputs"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.EncoderModel.prepare_inputs" title="Permalink to this definition"></a></dt>
<dd><p>@brief: Prepare input tensors for the model. The given sizes are used to determine the
ranges of the tensor dimensions when using TRT dynamic shapes.</p>
<p>@return: a list of values that can be fed into self.forward()</p>
</dd></dl>
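<p>A sketch of the matching encoder, again with illustrative T5-small-like sizes; the encoder has no autoregressive state, so its <code class="docutils literal notranslate"><span class="pre">prepare_inputs()</span></code> only takes the batch-size and input-length ranges:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span>from tensorrt_llm.models import EncoderModel

# Illustrative T5-small-like sizes (assumed values).
encoder = EncoderModel(num_layers=6,
                       num_heads=8,
                       hidden_size=512,
                       ffn_hidden_size=2048,
                       vocab_size=32128,
                       dtype='float16',
                       relative_attention=True,
                       num_buckets=32,
                       max_distance=128)

inputs = encoder.prepare_inputs(max_batch_size=8, max_input_len=512)
encoder(*inputs)  # traced inside a net_guard() block, as above
</pre></div>
</div>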
</dd></dl>
<dl class="py class">
<dt class="sig sig-object py" id="tensorrt_llm.models.FalconForCausalLM">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.models.</span></span><span class="sig-name descname"><span class="pre">FalconForCausalLM</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">num_layers:</span> <span class="pre">int</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_heads:</span> <span class="pre">int</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_size:</span> <span class="pre">int</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">vocab_size:</span> <span class="pre">int</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_position_embeddings:</span> <span class="pre">int</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_act:</span> <span class="pre">str</span> <span class="pre">=</span> <span class="pre">'gelu'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype:</span> <span class="pre">str</span> <span class="pre">|</span> <span class="pre">~tensorrt.tensorrt.DataType</span> <span class="pre">|</span> <span class="pre">None</span> <span class="pre">=</span> <span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_kv_heads:</span> <span class="pre">int</span> <span class="pre">|</span> <span class="pre">None</span> <span class="pre">=</span> <span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">mlp_hidden_size:</span> <span class="pre">int</span> <span class="pre">|</span> <span class="pre">None</span> <span class="pre">=</span> <span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">bias:</span> <span class="pre">bool</span> <span class="pre">=</span> <span class="pre">True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">quant_mode:</span> <span class="pre">~tensorrt_llm.quantization.mode.QuantMode</span> <span class="pre">=</span> <span class="pre">QuantMode.None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_alibi:</span> <span class="pre">bool</span> <span class="pre">=</span> <span class="pre">True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">parallel_attention:</span> <span class="pre">bool</span> <span class="pre">=</span> <span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">new_decoder_architecture:</span> <span class="pre">bool</span> <span class="pre">=</span> <span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">logits_dtype:</span> <span class="pre">str</span> <span class="pre">|</span> <span class="pre">~tensorrt.tensorrt.DataType</span> <span class="pre">=</span> <span class="pre">'float32'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">mapping=<tensorrt_llm.mapping.Mapping</span> <span class="pre">object></span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/falcon/model.html#FalconForCausalLM"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.FalconForCausalLM" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <a class="reference internal" href="#tensorrt_llm.models.FalconModel" title="tensorrt_llm.models.falcon.model.FalconModel"><code class="xref py py-class docutils literal notranslate"><span class="pre">FalconModel</span></code></a>, <code class="xref py py-class docutils literal notranslate"><span class="pre">GenerationMixin</span></code></p>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.models.FalconForCausalLM.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">input_ids</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span></em>, <em class="sig-param"><span class="n"><span class="pre">position_ids</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_cache</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">last_token_ids</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_mask</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kv_cache_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_states</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">all_reduce_workspace</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/falcon/model.html#FalconForCausalLM.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.FalconForCausalLM.forward" title="Permalink to this definition"></a></dt>
<dd></dd></dl>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.models.FalconForCausalLM.prepare_inputs">
<span class="sig-name descname"><span class="pre">prepare_inputs</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">max_batch_size</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_input_len</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_new_tokens</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_cache</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">bool</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_beam_width</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_num_tokens</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/falcon/model.html#FalconForCausalLM.prepare_inputs"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.FalconForCausalLM.prepare_inputs" title="Permalink to this definition"></a></dt>
<dd><p>@brief: Prepare input tensors for the model. The given sizes are used to determine the
ranges of the tensor dimensions when using TRT dynamic shapes.</p>
<p>@return: a list of values that can be fed into self.forward()</p>
</dd></dl>
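<p>A sketch with an illustrative Falcon-7B-like configuration (multi-query attention via <code class="docutils literal notranslate"><span class="pre">num_kv_heads=1</span></code>, rotary embeddings rather than ALiBi); the sizes below are assumptions, not values read from a checkpoint:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span>from tensorrt_llm.models import FalconForCausalLM

# Illustrative Falcon-7B-like configuration (assumed values).
model = FalconForCausalLM(num_layers=32,
                          num_heads=71,
                          hidden_size=4544,
                          vocab_size=65024,
                          max_position_embeddings=2048,
                          dtype='float16',
                          num_kv_heads=1,
                          bias=False,
                          use_alibi=False,
                          parallel_attention=True)

inputs = model.prepare_inputs(max_batch_size=8,
                              max_input_len=1024,
                              max_new_tokens=256,
                              use_cache=True,
                              max_beam_width=1,
                              max_num_tokens=8192)
model(*inputs)  # traced inside a net_guard() block
</pre></div>
</div>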
</dd></dl>
<dl class="py class">
<dt class="sig sig-object py" id="tensorrt_llm.models.FalconModel">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.models.</span></span><span class="sig-name descname"><span class="pre">FalconModel</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">num_layers:</span> <span class="pre">int</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_heads:</span> <span class="pre">int</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_size:</span> <span class="pre">int</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">vocab_size:</span> <span class="pre">int</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_act:</span> <span class="pre">int</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_position_embeddings:</span> <span class="pre">int</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype:</span> <span class="pre">str</span> <span class="pre">|</span> <span class="pre">~tensorrt.tensorrt.DataType</span> <span class="pre">|</span> <span class="pre">None</span> <span class="pre">=</span> <span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">mapping:</span> <span class="pre">~tensorrt_llm.mapping.Mapping</span> <span class="pre">=</span> <span class="pre"><tensorrt_llm.mapping.Mapping</span> <span class="pre">object></span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_kv_heads:</span> <span class="pre">int</span> <span class="pre">|</span> <span class="pre">None</span> <span class="pre">=</span> <span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">mlp_hidden_size:</span> <span class="pre">int</span> <span class="pre">|</span> <span class="pre">None</span> <span class="pre">=</span> <span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">bias:</span> <span class="pre">bool</span> <span class="pre">=</span> <span class="pre">True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">quant_mode:</span> <span class="pre">~tensorrt_llm.quantization.mode.QuantMode</span> <span class="pre">=</span> <span class="pre">QuantMode.None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_alibi:</span> <span class="pre">bool</span> <span class="pre">=</span> <span class="pre">True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">parallel_attention:</span> <span class="pre">bool</span> <span class="pre">=</span> <span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">new_decoder_architecture:</span> <span class="pre">bool</span> <span class="pre">=</span> <span class="pre">False</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/falcon/model.html#FalconModel"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.FalconModel" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code></p>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.models.FalconModel.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">input_ids</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span></em>, <em class="sig-param"><span class="n"><span class="pre">position_ids</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_cache</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_mask</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kv_cache_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_states</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">all_reduce_workspace</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/falcon/model.html#FalconModel.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.FalconModel.forward" title="Permalink to this definition"></a></dt>
<dd></dd></dl>
</dd></dl>
<dl class="py class">
<dt class="sig sig-object py" id="tensorrt_llm.models.GPTJForCausalLM">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.models.</span></span><span class="sig-name descname"><span class="pre">GPTJForCausalLM</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">num_layers</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_heads</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">vocab_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_act</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_position_embeddings</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rotary_dim</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">logits_dtype='float32'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">mapping=<tensorrt_llm.mapping.Mapping</span> <span class="pre">object></span></span></em>, <em class="sig-param"><span class="n"><span class="pre">quant_mode=QuantMode.None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/gptj/model.html#GPTJForCausalLM"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.GPTJForCausalLM" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <a class="reference internal" href="#tensorrt_llm.models.GPTJModel" title="tensorrt_llm.models.gptj.model.GPTJModel"><code class="xref py py-class docutils literal notranslate"><span class="pre">GPTJModel</span></code></a></p>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.models.GPTJForCausalLM.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">input_ids</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span></em>, <em class="sig-param"><span class="n"><span class="pre">position_ids</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_cache</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">last_token_ids</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kv_cache_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/gptj/model.html#GPTJForCausalLM.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.GPTJForCausalLM.forward" title="Permalink to this definition"></a></dt>
<dd></dd></dl>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.models.GPTJForCausalLM.prepare_inputs">
<span class="sig-name descname"><span class="pre">prepare_inputs</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">max_batch_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_input_len</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_new_tokens</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_cache</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_beam_width</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_num_tokens</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">enable_two_optimization_profiles</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">bool</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">False</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/gptj/model.html#GPTJForCausalLM.prepare_inputs"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.GPTJForCausalLM.prepare_inputs" title="Permalink to this definition"></a></dt>
<dd><p>@brief: Prepare input tensors for the model. The given sizes are used to determine the
ranges of the tensor dimensions when using TRT dynamic shapes.</p>
<p>@return: a list of values that can be fed into self.forward()</p>
</dd></dl>
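<p>A sketch with an illustrative GPT-J-6B-like configuration; <code class="docutils literal notranslate"><span class="pre">rotary_dim</span></code> selects how many head dimensions receive rotary position embeddings, and the sizes below are assumptions:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span>from tensorrt_llm.models import GPTJForCausalLM

# Illustrative GPT-J-6B-like configuration (assumed values).
model = GPTJForCausalLM(num_layers=28,
                        num_heads=16,
                        hidden_size=4096,
                        vocab_size=50400,
                        hidden_act='gelu',
                        max_position_embeddings=2048,
                        rotary_dim=64,
                        dtype='float16')

inputs = model.prepare_inputs(max_batch_size=8,
                              max_input_len=1024,
                              max_new_tokens=256,
                              use_cache=True,
                              max_beam_width=1)
model(*inputs)  # traced inside a net_guard() block
</pre></div>
</div>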
</dd></dl>
<dl class="py class">
<dt class="sig sig-object py" id="tensorrt_llm.models.GPTJModel">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.models.</span></span><span class="sig-name descname"><span class="pre">GPTJModel</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">num_layers</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_heads</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">vocab_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_act</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_position_embeddings</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rotary_dim</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">mapping=<tensorrt_llm.mapping.Mapping</span> <span class="pre">object></span></span></em>, <em class="sig-param"><span class="n"><span class="pre">quant_mode=QuantMode.None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/gptj/model.html#GPTJModel"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.GPTJModel" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code></p>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.models.GPTJModel.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">input_ids</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_cache</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kv_cache_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/gptj/model.html#GPTJModel.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.GPTJModel.forward" title="Permalink to this definition"></a></dt>
<dd></dd></dl>
</dd></dl>
<dl class="py class">
<dt class="sig sig-object py" id="tensorrt_llm.models.GPTLMHeadModel">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.models.</span></span><span class="sig-name descname"><span class="pre">GPTLMHeadModel</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">num_layers</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_heads</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">vocab_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_act</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_position_embeddings</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">logits_dtype='float32'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">mapping=<tensorrt_llm.mapping.Mapping</span> <span class="pre">object></span></span></em>, <em class="sig-param"><span class="n"><span class="pre">apply_query_key_layer_scaling=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">position_embedding_type=PositionEmbeddingType.learned_absolute</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rotary_embedding_percentage=1.0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">inter_size=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">bias=True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">quant_mode=QuantMode.None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">multi_query_mode=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_prompt_tuning=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_parallel_embedding=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">embedding_sharding_dim=0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">share_embedding_table=False</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/gpt/model.html#GPTLMHeadModel"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.GPTLMHeadModel" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <a class="reference internal" href="#tensorrt_llm.models.GPTModel" title="tensorrt_llm.models.gpt.model.GPTModel"><code class="xref py py-class docutils literal notranslate"><span class="pre">GPTModel</span></code></a>, <code class="xref py py-class docutils literal notranslate"><span class="pre">GenerationMixin</span></code></p>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.models.GPTLMHeadModel.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">input_ids</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span></em>, <em class="sig-param"><span class="n"><span class="pre">position_ids</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_cache</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">last_token_ids</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_mask</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kv_cache_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">prompt_embedding_table</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">prompt_tasks</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">prompt_vocab_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">workspace</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/gpt/model.html#GPTLMHeadModel.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.GPTLMHeadModel.forward" title="Permalink to this definition"></a></dt>
<dd></dd></dl>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.models.GPTLMHeadModel.prepare_inputs">
<span class="sig-name descname"><span class="pre">prepare_inputs</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">max_batch_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_input_len</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_new_tokens</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_cache</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_beam_width</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_num_tokens</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">prompt_embedding_table_size</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">128</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">gather_all_token_logits</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">bool</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">False</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/gpt/model.html#GPTLMHeadModel.prepare_inputs"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.GPTLMHeadModel.prepare_inputs" title="Permalink to this definition"></a></dt>
<dd><p>@brief: Prepare input tensors for the model. The given sizes are used to determine the
ranges of the tensor dimensions when using TRT dynamic shapes.</p>
<p>@return: a list of values that can be fed into self.forward()</p>
</dd></dl>
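<p>A sketch with an illustrative GPT-2-medium-like configuration and prompt tuning enabled; <code class="docutils literal notranslate"><span class="pre">prompt_embedding_table_size</span></code> sizes the table of virtual prompt-tuning tokens, and the remaining values are assumptions:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span>from tensorrt_llm.models import GPTLMHeadModel

# Illustrative GPT-2-medium-like configuration (assumed values).
model = GPTLMHeadModel(num_layers=24,
                       num_heads=16,
                       hidden_size=1024,
                       vocab_size=50257,
                       hidden_act='gelu',
                       max_position_embeddings=1024,
                       dtype='float16',
                       use_prompt_tuning=True)

inputs = model.prepare_inputs(max_batch_size=8,
                              max_input_len=512,
                              max_new_tokens=128,
                              use_cache=True,
                              max_beam_width=1,
                              prompt_embedding_table_size=128)
model(*inputs)  # traced inside a net_guard() block
</pre></div>
</div>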
</dd></dl>
<dl class="py class">
<dt class="sig sig-object py" id="tensorrt_llm.models.GPTModel">
|
|
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.models.</span></span><span class="sig-name descname"><span class="pre">GPTModel</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">num_layers</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_heads</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">vocab_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_act</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_position_embeddings</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">mapping=<tensorrt_llm.mapping.Mapping</span> <span class="pre">object></span></span></em>, <em class="sig-param"><span class="n"><span class="pre">apply_query_key_layer_scaling=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">position_embedding_type=PositionEmbeddingType.learned_absolute</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rotary_embedding_percentage=1.0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">inter_size=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">bias=True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">quant_mode=QuantMode.None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">multi_query_mode=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_prompt_tuning=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_parallel_embedding=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">embedding_sharding_dim=0</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/gpt/model.html#GPTModel"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.GPTModel" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code></p>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.models.GPTModel.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">input_ids</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">position_ids</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_cache</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_mask</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kv_cache_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">prompt_embedding_table</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">prompt_tasks</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">prompt_vocab_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">workspace</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/gpt/model.html#GPTModel.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.GPTModel.forward" title="Permalink to this definition"></a></dt>
<dd></dd></dl>
</dd></dl>
<dl class="py class">
<dt class="sig sig-object py" id="tensorrt_llm.models.GPTNeoXForCausalLM">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.models.</span></span><span class="sig-name descname"><span class="pre">GPTNeoXForCausalLM</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">num_layers</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_heads</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">vocab_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_act</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_position_embeddings</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rotary_dim</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">position_embedding_type=PositionEmbeddingType.rope_gpt_neox</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">mapping=<tensorrt_llm.mapping.Mapping</span> <span class="pre">object></span></span></em>, <em class="sig-param"><span class="n"><span class="pre">apply_query_key_layer_scaling=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_parallel_embedding=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">embedding_sharding_dim=0</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/gptneox/model.html#GPTNeoXForCausalLM"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.GPTNeoXForCausalLM" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <a class="reference internal" href="#tensorrt_llm.models.GPTNeoXModel" title="tensorrt_llm.models.gptneox.model.GPTNeoXModel"><code class="xref py py-class docutils literal notranslate"><span class="pre">GPTNeoXModel</span></code></a>, <code class="xref py py-class docutils literal notranslate"><span class="pre">GenerationMixin</span></code></p>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.models.GPTNeoXForCausalLM.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">input_ids</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span></em>, <em class="sig-param"><span class="n"><span class="pre">position_ids</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_cache</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">last_token_ids</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kv_cache_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/gptneox/model.html#GPTNeoXForCausalLM.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.GPTNeoXForCausalLM.forward" title="Permalink to this definition"></a></dt>
<dd></dd></dl>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.models.GPTNeoXForCausalLM.prepare_inputs">
<span class="sig-name descname"><span class="pre">prepare_inputs</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">max_batch_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_input_len</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_new_tokens</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_cache</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_beam_width</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/gptneox/model.html#GPTNeoXForCausalLM.prepare_inputs"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.GPTNeoXForCausalLM.prepare_inputs" title="Permalink to this definition"></a></dt>
<dd><p>@brief: Prepare the input tensors for the model. The given sizes are used to determine
the ranges of the dynamic dimensions when TRT dynamic shapes are used.</p>
<p>@return: a list of values that can be fed into self.forward()</p>
</dd></dl>
</dd></dl>
<dl class="py class">
<dt class="sig sig-object py" id="tensorrt_llm.models.GPTNeoXModel">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.models.</span></span><span class="sig-name descname"><span class="pre">GPTNeoXModel</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">num_layers</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_heads</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">vocab_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_act</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_position_embeddings</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rotary_dim</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">position_embedding_type=PositionEmbeddingType.rope_gpt_neox</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">mapping=<tensorrt_llm.mapping.Mapping</span> <span class="pre">object></span></span></em>, <em class="sig-param"><span class="n"><span class="pre">apply_query_key_layer_scaling=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_parallel_embedding=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">embedding_sharding_dim=0</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/gptneox/model.html#GPTNeoXModel"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.GPTNeoXModel" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code></p>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.models.GPTNeoXModel.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">input_ids</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span></em>, <em class="sig-param"><span class="n"><span class="pre">position_ids</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_cache</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kv_cache_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/gptneox/model.html#GPTNeoXModel.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.GPTNeoXModel.forward" title="Permalink to this definition"></a></dt>
<dd></dd></dl>
</dd></dl>
<dl class="py class">
<dt class="sig sig-object py" id="tensorrt_llm.models.LLaMAForCausalLM">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.models.</span></span><span class="sig-name descname"><span class="pre">LLaMAForCausalLM</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">num_layers</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_heads</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_kv_heads</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">vocab_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_act</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_position_embeddings</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">logits_dtype='float32'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">mlp_hidden_size=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">position_embedding_type=PositionEmbeddingType.rope_gpt_neox</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rotary_base=10000.0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rotary_scaling=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">mapping=<tensorrt_llm.mapping.Mapping</span> <span class="pre">object></span></span></em>, <em class="sig-param"><span class="n"><span class="pre">quant_mode=QuantMode.None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_parallel_embedding=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">embedding_sharding_dim=0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rms_norm_eps=1e-06</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/llama/model.html#LLaMAForCausalLM"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.LLaMAForCausalLM" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <a class="reference internal" href="#tensorrt_llm.models.LLaMAModel" title="tensorrt_llm.models.llama.model.LLaMAModel"><code class="xref py py-class docutils literal notranslate"><span class="pre">LLaMAModel</span></code></a>, <code class="xref py py-class docutils literal notranslate"><span class="pre">GenerationMixin</span></code></p>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.models.LLaMAForCausalLM.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">input_ids</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">position_ids</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_cache</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">last_token_ids</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_mask</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kv_cache_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_states</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">all_reduce_workspace</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/llama/model.html#LLaMAForCausalLM.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.LLaMAForCausalLM.forward" title="Permalink to this definition"></a></dt>
<dd></dd></dl>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.models.LLaMAForCausalLM.prepare_inputs">
<span class="sig-name descname"><span class="pre">prepare_inputs</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">max_batch_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_input_len</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_new_tokens</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_cache</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_beam_width</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_num_tokens</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">int</span><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/llama/model.html#LLaMAForCausalLM.prepare_inputs"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.LLaMAForCausalLM.prepare_inputs" title="Permalink to this definition"></a></dt>
<dd><p>@brief: Prepare the input tensors for the model. The given sizes are used to determine
the ranges of the dynamic dimensions when TRT dynamic shapes are used.</p>
<p>@return: a list of values that can be fed into self.forward()</p>
</dd></dl>
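<p>A hedged construction sketch using only the constructor and <code class="docutils literal notranslate"><span class="pre">prepare_inputs()</span></code> arguments documented above. The sizes are illustrative (roughly 7B-style), the string form of <code class="docutils literal notranslate"><span class="pre">dtype</span></code> is assumed to be accepted, and the builder/network plumbing around the trace is elided.</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span>from tensorrt_llm.models import LLaMAForCausalLM

# Illustrative sizes only; every keyword below appears in the documented signature.
model = LLaMAForCausalLM(num_layers=32,
                         num_heads=32,
                         num_kv_heads=32,
                         hidden_size=4096,
                         vocab_size=32000,
                         hidden_act='silu',
                         max_position_embeddings=2048,
                         dtype='float16',   # assumed string form; adjust to your version
                         mlp_hidden_size=11008)
inputs = model.prepare_inputs(max_batch_size=4,
                              max_input_len=1024,
                              max_new_tokens=512,
                              use_cache=True,
                              max_beam_width=1)
model(*inputs)  # trace the graph with the prepared placeholder tensors
</pre></div></div>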
</dd></dl>
<dl class="py class">
<dt class="sig sig-object py" id="tensorrt_llm.models.LLaMAModel">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.models.</span></span><span class="sig-name descname"><span class="pre">LLaMAModel</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">num_layers</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_heads</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_kv_heads</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">vocab_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_act</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_position_embeddings</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">mlp_hidden_size=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">position_embedding_type=PositionEmbeddingType.rope_gpt_neox</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rotary_base=10000.0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rotary_scaling=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">mapping=<tensorrt_llm.mapping.Mapping</span> <span class="pre">object></span></span></em>, <em class="sig-param"><span class="n"><span class="pre">quant_mode=QuantMode.None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_parallel_embedding=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">embedding_sharding_dim=0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rms_norm_eps=1e-06</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/llama/model.html#LLaMAModel"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.LLaMAModel" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code></p>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.models.LLaMAModel.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">input_ids</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">position_ids</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_cache</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_mask</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kv_cache_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_states</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">all_reduce_workspace</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/llama/model.html#LLaMAModel.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.LLaMAModel.forward" title="Permalink to this definition"></a></dt>
<dd></dd></dl>
</dd></dl>
<dl class="py class">
<dt class="sig sig-object py" id="tensorrt_llm.models.OPTLMHeadModel">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.models.</span></span><span class="sig-name descname"><span class="pre">OPTLMHeadModel</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">num_layers</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_heads</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">vocab_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_act</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_position_embeddings</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">mapping=<tensorrt_llm.mapping.Mapping</span> <span class="pre">object></span></span></em>, <em class="sig-param"><span class="n"><span class="pre">pre_norm=True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">do_layer_norm_before=True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_prompt_tuning=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_parallel_embedding=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">embedding_sharding_dim=0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">share_embedding_table=False</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/opt/model.html#OPTLMHeadModel"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.OPTLMHeadModel" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <a class="reference internal" href="#tensorrt_llm.models.OPTModel" title="tensorrt_llm.models.opt.model.OPTModel"><code class="xref py py-class docutils literal notranslate"><span class="pre">OPTModel</span></code></a>, <code class="xref py py-class docutils literal notranslate"><span class="pre">GenerationMixin</span></code></p>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.models.OPTLMHeadModel.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">input_ids</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span></em>, <em class="sig-param"><span class="n"><span class="pre">position_ids</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_cache</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">last_token_ids</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_mask</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kv_cache_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">prompt_embedding_table</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">prompt_tasks</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">prompt_vocab_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/opt/model.html#OPTLMHeadModel.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.OPTLMHeadModel.forward" title="Permalink to this definition"></a></dt>
<dd></dd></dl>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.models.OPTLMHeadModel.prepare_inputs">
<span class="sig-name descname"><span class="pre">prepare_inputs</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">max_batch_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_input_len</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_new_tokens</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_cache</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_beam_width</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">prompt_embedding_table_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">32</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/opt/model.html#OPTLMHeadModel.prepare_inputs"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.OPTLMHeadModel.prepare_inputs" title="Permalink to this definition"></a></dt>
<dd><p>@brief: Prepare the input tensors for the model. The given sizes are used to determine
the ranges of the dynamic dimensions when TRT dynamic shapes are used.</p>
<p>@return: a list of values that can be fed into self.forward()</p>
</dd></dl>
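<p>Prompt tuning follows the same pattern. A minimal sketch, assuming <code class="docutils literal notranslate"><span class="pre">model</span></code> is an <code class="docutils literal notranslate"><span class="pre">OPTLMHeadModel</span></code> constructed with <code class="docutils literal notranslate"><span class="pre">use_prompt_tuning=True</span></code>; the sizes are illustrative.</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span># Sketch only: `model` is assumed to be a constructed OPTLMHeadModel built with
# use_prompt_tuning=True.
inputs = model.prepare_inputs(max_batch_size=8,
                              max_input_len=512,
                              max_new_tokens=128,
                              use_cache=True,
                              max_beam_width=1,
                              prompt_embedding_table_size=32)
# With prompt tuning enabled, the returned list is expected to also carry the
# prompt_embedding_table / prompt_tasks / prompt_vocab_size placeholders that
# forward() accepts.
model(*inputs)
</pre></div></div>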
</dd></dl>
<dl class="py class">
<dt class="sig sig-object py" id="tensorrt_llm.models.OPTModel">
<em class="property"><span class="pre">class</span><span class="w"> </span></em><span class="sig-prename descclassname"><span class="pre">tensorrt_llm.models.</span></span><span class="sig-name descname"><span class="pre">OPTModel</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">num_layers</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_heads</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">vocab_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">hidden_act</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_position_embeddings</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype=None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">mapping=<tensorrt_llm.mapping.Mapping</span> <span class="pre">object></span></span></em>, <em class="sig-param"><span class="n"><span class="pre">pre_norm=True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">do_layer_norm_before=True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_prompt_tuning=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_parallel_embedding=False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">embedding_sharding_dim=0</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/opt/model.html#OPTModel"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.OPTModel" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">Module</span></code></p>
<dl class="py method">
<dt class="sig sig-object py" id="tensorrt_llm.models.OPTModel.forward">
<span class="sig-name descname"><span class="pre">forward</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">input_ids</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.functional.html#tensorrt_llm.functional.Tensor" title="tensorrt_llm.functional.Tensor"><span class="pre">Tensor</span></a></span></em>, <em class="sig-param"><span class="n"><span class="pre">position_ids</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">use_cache</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_mask</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kv_cache_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">attention_params</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">prompt_embedding_table</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">prompt_tasks</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">prompt_vocab_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/opt/model.html#OPTModel.forward"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.OPTModel.forward" title="Permalink to this definition"></a></dt>
<dd></dd></dl>
</dd></dl>
<dl class="py function">
<dt class="sig sig-object py" id="tensorrt_llm.models.fp8_quantize">
<span class="sig-prename descclassname"><span class="pre">tensorrt_llm.models.</span></span><span class="sig-name descname"><span class="pre">fp8_quantize</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">model</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">quant_mode</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference internal" href="tensorrt_llm.quantization.html#tensorrt_llm.quantization.QuantMode" title="tensorrt_llm.quantization.mode.QuantMode"><span class="pre">QuantMode</span></a></span></em>, <em class="sig-param"><span class="n"><span class="pre">quant_scales</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><span class="pre">dict</span><span class="w"> </span><span class="p"><span class="pre">|</span></span><span class="w"> </span><span class="pre">None</span></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/quantized/quant.html#fp8_quantize"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.fp8_quantize" title="Permalink to this definition"></a></dt>
<dd></dd></dl>
<dl class="py function">
<dt class="sig sig-object py" id="tensorrt_llm.models.smooth_quantize">
<span class="sig-prename descclassname"><span class="pre">tensorrt_llm.models.</span></span><span class="sig-name descname"><span class="pre">smooth_quantize</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">model</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">quant_mode</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/quantized/quant.html#smooth_quantize"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.smooth_quantize" title="Permalink to this definition"></a></dt>
<dd></dd></dl>
<dl class="py function">
<dt class="sig sig-object py" id="tensorrt_llm.models.weight_only_groupwise_quantize">
<span class="sig-prename descclassname"><span class="pre">tensorrt_llm.models.</span></span><span class="sig-name descname"><span class="pre">weight_only_groupwise_quantize</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">model</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">quant_mode</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">group_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">128</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">pre_quant_scale</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">zero</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">exclude_modules</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">current_key_name</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/quantized/quant.html#weight_only_groupwise_quantize"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.weight_only_groupwise_quantize" title="Permalink to this definition"></a></dt>
<dd></dd></dl>
<dl class="py function">
<dt class="sig sig-object py" id="tensorrt_llm.models.weight_only_quantize">
<span class="sig-prename descclassname"><span class="pre">tensorrt_llm.models.</span></span><span class="sig-name descname"><span class="pre">weight_only_quantize</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">model</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">quant_mode</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">exclude_modules</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">current_key_name</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/tensorrt_llm/models/quantized/quant.html#weight_only_quantize"><span class="viewcode-link"><span class="pre">[source]</span></span></a><a class="headerlink" href="#tensorrt_llm.models.weight_only_quantize" title="Permalink to this definition"></a></dt>
<dd></dd></dl>
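<p>These helpers rewrite an existing model according to a <code class="docutils literal notranslate"><span class="pre">QuantMode</span></code>. A minimal sketch for the weight-only path is shown below; it assumes <code class="docutils literal notranslate"><span class="pre">model</span></code> is an already-constructed TensorRT-LLM model and that <code class="docutils literal notranslate"><span class="pre">QuantMode.use_weight_only()</span></code> is available in <code class="docutils literal notranslate"><span class="pre">tensorrt_llm.quantization</span></code> (check that module for the exact flags in your version). <code class="docutils literal notranslate"><span class="pre">fp8_quantize()</span></code> and <code class="docutils literal notranslate"><span class="pre">smooth_quantize()</span></code> follow the same call shape with their respective modes.</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span>from tensorrt_llm.models import weight_only_quantize
from tensorrt_llm.quantization import QuantMode

# Sketch only: `model` is assumed to be an already-constructed TensorRT-LLM model.
# QuantMode.use_weight_only() is assumed to produce an INT8 weight-only mode.
quant_mode = QuantMode.use_weight_only(use_int4_weights=False)
model = weight_only_quantize(model, quant_mode)  # keep the returned, rewritten model
</pre></div></div>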
</section>
</div>
</div>
<footer><div class="rst-footer-buttons" role="navigation" aria-label="Footer">
<a href="tensorrt_llm.functional.html" class="btn btn-neutral float-left" title="Functionals" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left" aria-hidden="true"></span> Previous</a>
<a href="tensorrt_llm.plugin.html" class="btn btn-neutral float-right" title="Plugin" accesskey="n" rel="next">Next <span class="fa fa-arrow-circle-right" aria-hidden="true"></span></a>
</div>
<hr/>
<div role="contentinfo">
<p>© Copyright 2023, NVIDIA.</p>
</div>
Built with <a href="https://www.sphinx-doc.org/">Sphinx</a> using a
<a href="https://github.com/readthedocs/sphinx_rtd_theme">theme</a>
provided by <a href="https://readthedocs.org">Read the Docs</a>.
</footer>
</div>
</div>
</section>
</div>
<script>
jQuery(function () {
SphinxRtdTheme.Navigation.enable(true);
});
</script>
</body>
</html>