Mirror of https://github.com/NVIDIA/TensorRT-LLM.git, synced 2026-01-14 06:27:45 +08:00
# Source code for tensorrt_llm.plugin.plugin

```python
# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import ctypes
import platform
from collections import OrderedDict
from dataclasses import dataclass, fields
from enum import IntEnum
from pathlib import Path
from typing import List, Optional, Tuple, Union

import tensorrt as trt

from tensorrt_llm.logger import logger

from .._ipc_utils import IpcMemory
from ..mapping import Mapping

TRT_LLM_PLUGIN_NAMESPACE = 'tensorrt_llm'


def plugin_lib_path() -> str:
    project_dir = Path(__file__).parent.parent.absolute()
    dyn_lib = ("libnvinfer_plugin_tensorrt_llm.so"
               if platform.system() != "Windows" else
               "nvinfer_plugin_tensorrt_llm.dll")
    return str(project_dir.joinpath("libs", dyn_lib))
```
```python
def _load_plugin_lib():
    winmode = 0 if platform.system() == "Windows" else None
    handle = ctypes.CDLL(plugin_lib_path(),
                         mode=ctypes.RTLD_GLOBAL,
                         winmode=winmode)
    try:
        handle.initTrtLlmPlugins.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
        handle.initTrtLlmPlugins.restype = ctypes.c_bool
    except AttributeError as err:
        raise ImportError('TensorRT-LLM Plugin is unavailable') from err
    assert handle.initTrtLlmPlugins(None,
                                    TRT_LLM_PLUGIN_NAMESPACE.encode('utf-8'))
```
```python
class ContextFMHAType(IntEnum):
    disabled = 0
    # FP16 I/O, FP16 Accumulation
    enabled = 1
    # FP16 I/O, FP32 Accumulation
    enabled_with_fp32_acc = 2


@dataclass
class PluginConfig:

    # Plugins
    bert_attention_plugin: str = "float16"
    gpt_attention_plugin: str = "float16"
    gemm_plugin: str = None
    smooth_quant_gemm_plugin: str = None
    identity_plugin: str = None
    layernorm_quantization_plugin: str = None
    rmsnorm_quantization_plugin: str = None
    nccl_plugin: str = "float16"
    lookup_plugin: str = None
    lora_plugin: str = None
    weight_only_groupwise_quant_matmul_plugin: str = None
    weight_only_quant_matmul_plugin: str = None
    quantize_per_token_plugin: bool = False
    quantize_tensor_plugin: bool = False
    moe_plugin: str = "float16"
    mamba_conv1d_plugin: str = "float16"

    # Features
    context_fmha: bool = True
    context_fmha_fp32_acc: bool = False  # will use fp16 if disabled
    paged_kv_cache: bool = True
    remove_input_padding: bool = True
    # TODO[kevin]: smart strategy to choose all reduce plugin
    use_custom_all_reduce: bool = True
    multi_block_mode: bool = False
    enable_xqa: bool = True
    attention_qk_half_accumulation: bool = False
    tokens_per_block: int = 128
    use_paged_context_fmha: bool = False
    use_fp8_context_fmha: bool = False
    use_context_fmha_for_generation: bool = False
    multiple_profiles: bool = False
    paged_state: bool = True
    streamingllm: bool = False

    def set_plugin(self, name: str, value: Union[str, bool, int]):
        assert hasattr(self, name), f"Plugin name doesn't exist: {name}"
        if value is not None and getattr(self, name) is not None:
            target_type = type(getattr(self, name))
            assert type(value) == target_type, \
                f"Plugin {name} expects {target_type}, got {type(value)}"
        setattr(self, name, value)
        logger.info(f"Set {name} to {value}.")
```
```python
    def update_from_dict(self, config: dict):
        for name in config.keys():
            if hasattr(self, name):
                value_to_be_update = config[name]
                if type(getattr(self, name)) == bool:
                    if value_to_be_update is True or \
                            value_to_be_update == "enable":
                        value_to_be_update = True
                    elif value_to_be_update is False or \
                            value_to_be_update == "disable":
                        value_to_be_update = False
                    else:
                        raise RuntimeError(
                            f"Unexpected value ({value_to_be_update}) to be updated for {name}."
                        )
                elif value_to_be_update == "disable":
                    value_to_be_update = None
                self.set_plugin(name, value_to_be_update)

    @classmethod
    def from_dict(cls, config: dict):
        plugin_config = cls()
        plugin_config.update_from_dict(config)
        return plugin_config

    @classmethod
    def from_arguments(cls, args: argparse.Namespace):
        return cls.from_dict(vars(args))
```
```python
    def to_legacy_setting(self):
        '''Legacy setting means that all of the plugins and features are
        disabled; this is needed for the legacy `build.py` script, which will be
        migrated to the centralized build script `tensorrt_llm/commands/build.py`.

        After the migration is done, this function may be removed.
        '''
        for field in fields(self):
            if field.type == str:
                self.set_plugin(field.name, None)
            elif field.type == bool:
                self.set_plugin(field.name, False)

    @property
    def context_fmha_type(self):
        if self.context_fmha:
            if self.context_fmha_fp32_acc:
                return ContextFMHAType.enabled_with_fp32_acc
            else:
                return ContextFMHAType.enabled
        else:
            return ContextFMHAType.disabled

    @context_fmha_type.setter
    def context_fmha_type(self, value):
        if value == ContextFMHAType.disabled:
            self.set_plugin("context_fmha", False)
        else:
            self.set_plugin("context_fmha", True)
            if value == ContextFMHAType.enabled:
                self.set_plugin("context_fmha_fp32_acc", False)
            elif value == ContextFMHAType.enabled_with_fp32_acc:
                self.set_plugin("context_fmha_fp32_acc", True)
```
```python
    def set_smooth_quant_plugins(self, dtype: str = "float16"):
        self.set_plugin("smooth_quant_gemm_plugin", dtype)
        self.set_plugin("rmsnorm_quantization_plugin", dtype)
        self.set_plugin("layernorm_quantization_plugin", dtype)
        self.set_plugin("quantize_per_token_plugin", True)
        self.set_plugin("quantize_tensor_plugin", True)

    def enable_qk_half_accum(self):
        self.set_plugin("attention_qk_half_accumulation", True)
        return self

    def set_context_fmha(self, context_fmha_type=ContextFMHAType.enabled):
        assert type(context_fmha_type) == ContextFMHAType
        self.context_fmha_type = context_fmha_type
        return self

    def enable_remove_input_padding(self):
        self.set_plugin("remove_input_padding", True)
        return self

    def enable_paged_kv_cache(self, tokens_per_block=128):
        self.set_plugin("paged_kv_cache", True)
        self.set_plugin("tokens_per_block", tokens_per_block)
        return self

    def set_gpt_attention_plugin(self, dtype='float16'):
        self.set_plugin("gpt_attention_plugin", dtype)
        return self

    def enable_mmha_multi_block_mode(self):
        self.set_plugin("multi_block_mode", True)
        return self

    def enable_xqa_optimization(self):
        self.set_plugin("enable_xqa", True)
        return self

    def set_bert_attention_plugin(self, dtype='float16'):
        self.set_plugin("bert_attention_plugin", dtype)
        return self

    def set_identity_plugin(self, dtype='float16'):
        self.set_plugin("identity_plugin", dtype)
        return self

    def set_gemm_plugin(self, dtype='float16'):
        self.set_plugin("gemm_plugin", dtype)
        return self

    def set_moe_plugin(self, dtype='float16'):
        self.moe_plugin = dtype
        return self

    def set_mamba_conv1d_plugin(self, dtype='float16'):
        self.mamba_conv1d_plugin = dtype
        return self

    def set_smooth_quant_gemm_plugin(self, dtype='float16'):
        self.set_plugin("smooth_quant_gemm_plugin", dtype)
        return self

    def set_layernorm_quantization_plugin(self, dtype='float16'):
        self.set_plugin("layernorm_quantization_plugin", dtype)
        return self

    def set_rmsnorm_quantization_plugin(self, dtype='float16'):
        self.set_plugin("rmsnorm_quantization_plugin", dtype)
        return self

    def set_weight_only_quant_matmul_plugin(self, dtype='float16'):
        self.set_plugin("weight_only_quant_matmul_plugin", dtype)
        return self

    def set_weight_only_groupwise_quant_matmul_plugin(self, dtype='float16'):
        self.set_plugin("weight_only_groupwise_quant_matmul_plugin", dtype)
        return self

    def set_nccl_plugin(self,
                        dtype='float16',
                        use_custom_all_reduce: bool = False):
        self.set_plugin("nccl_plugin", dtype)
        self.set_plugin("use_custom_all_reduce", use_custom_all_reduce)
        if use_custom_all_reduce:
            init_all_reduce_helper()
        return self

    def set_quantize_per_token_plugin(self):
        self.set_plugin("quantize_per_token_plugin", True)
        return self

    def set_quantize_tensor_plugin(self):
        self.set_plugin("quantize_tensor_plugin", True)
        return self

    def set_lookup_plugin(self, dtype='float16'):
        self.set_plugin("lookup_plugin", dtype)
        return self

    def set_lora_plugin(self, dtype='float16'):
        self.set_plugin("lora_plugin", dtype)
        return self

    def set_paged_context_fmha(self):
        self.set_plugin("use_paged_context_fmha", True)
        return self

    def set_context_fmha_for_generation(self):
        self.set_plugin("use_context_fmha_for_generation", True)
        return self

    def set_streamingllm(self):
        self.set_plugin("streamingllm", True)
        return self
```
```python
cli_plugin_args = [
    # Plugins
    "bert_attention_plugin",
    "gpt_attention_plugin",
    "gemm_plugin",
    "lookup_plugin",
    "lora_plugin",
    "moe_plugin",
    "mamba_conv1d_plugin",

    # Features
    "context_fmha",
    "context_fmha_fp32_acc",
    "paged_kv_cache",
    "remove_input_padding",
    "use_custom_all_reduce",
    "multi_block_mode",
    "enable_xqa",
    "attention_qk_half_accumulation",
    "tokens_per_block",
    "use_paged_context_fmha",
    "use_fp8_context_fmha",
    "use_context_fmha_for_generation",
    "multiple_profiles",
    "paged_state",
    "streamingllm",
]

plugin_options = ["float16", "float32", "bfloat16", "disable"]


def add_plugin_argument(parser):
    plugin_config = PluginConfig()
    for field in fields(plugin_config):
        if field.name not in cli_plugin_args:
            continue
        if field.type == str:
            parser.add_argument(
                "--" + field.name,
                type=str,
                default=field.default if field.default is not None else None,
                choices=plugin_options)
        elif field.type == bool:
            parser.add_argument(
                "--" + field.name,
                type=str,
                default="enable" if field.default else "disable",
                choices=["enable", "disable"])
        else:
            parser.add_argument("--" + field.name,
                                type=field.type,
                                default=field.default)
    return parser
```
|
|
|
|
|
|
<span class="k">class</span> <span class="nc">CustomAllReduceHelper</span><span class="p">:</span>
|
|
<span class="w"> </span><span class="sd">"""</span>
|
|
<span class="sd"> Globally visible class to help usage of custom_all_reduce plugin.</span>
|
|
<span class="sd"> Provides the following utilities:</span>
|
|
|
|
<span class="sd"> gen_id: int</span>
|
|
<span class="sd"> Used for synchronization with custom kernels. Plugins instances MUST have the same</span>
|
|
<span class="sd"> id across GPUs. I.e.: GPU#0's allreduce after MLP at layer i must have the same id as</span>
|
|
<span class="sd"> GPU#1, GPU#2... Also, ids MUST be unique per model. There should not be two allreduce instances</span>
|
|
<span class="sd"> in GPU#0 that have the same id.</span>
|
|
|
|
<span class="sd"> workspace: Tensor</span>
|
|
<span class="sd"> When using CUSTOM or AUTO mode, a tensor containing pointers to memory</span>
|
|
<span class="sd"> visible to all GPUs. It should be 3 pointers per TP rank -</span>
|
|
<span class="sd"> ptr to data buffer, ptr to barriers in, ptr to barriers out.</span>
|
|
<span class="sd"> It must be initialized using IpcMemory class.</span>
|
|
|
|
<span class="sd"> Usage:</span>
|
|
<span class="sd"> - Use `init_all_reduce_helper` to reset the id counter. This must be done in main model class.</span>
|
|
<span class="sd"> - Set custom_all_reduce_helper.workspace with the required tensor.</span>
|
|
<span class="sd"> Then, each instance of allreduce will reference that tensor automatically.</span>
|
|
<span class="sd"> """</span>
|
|
<span class="n">POINTERS_PER_RANK</span> <span class="o">=</span> <span class="mi">4</span>
|
|
|
|
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-></span> <span class="kc">None</span><span class="p">:</span>
|
|
<span class="bp">self</span><span class="o">.</span><span class="n">current_id</span><span class="p">:</span> <span class="nb">int</span> <span class="o">=</span> <span class="mi">1</span>
|
|
<span class="bp">self</span><span class="o">.</span><span class="n">workspace</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Tensor</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span>
|
|
|
|
<span class="k">def</span> <span class="nf">gen_id</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-></span> <span class="nb">int</span><span class="p">:</span>
|
|
<span class="n">result</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">current_id</span>
|
|
<span class="bp">self</span><span class="o">.</span><span class="n">current_id</span> <span class="o">+=</span> <span class="mi">1</span>
|
|
<span class="k">return</span> <span class="n">result</span>
|
|
|
|
<span class="k">def</span> <span class="nf">set_workspace_tensor</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span>
|
|
<span class="n">mapping</span><span class="p">:</span> <span class="n">Mapping</span><span class="p">,</span>
|
|
<span class="n">num_profiles</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">int</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">):</span>
|
|
<span class="kn">from</span> <span class="nn">..functional</span> <span class="kn">import</span> <span class="n">Tensor</span>
|
|
<span class="n">workspace_size</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">POINTERS_PER_RANK</span> <span class="o">*</span> <span class="n">mapping</span><span class="o">.</span><span class="n">tp_size</span>
|
|
|
|
<span class="n">dim_range</span> <span class="o">=</span> <span class="kc">None</span>
|
|
<span class="k">if</span> <span class="n">num_profiles</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
|
|
<span class="n">dim_range</span> <span class="o">=</span> <span class="n">OrderedDict</span><span class="p">([(</span><span class="s1">'all_reduce_size'</span><span class="p">,</span>
|
|
<span class="p">[</span><span class="n">workspace_size</span><span class="p">]</span> <span class="o">*</span> <span class="n">num_profiles</span><span class="p">)])</span>
|
|
|
|
<span class="bp">self</span><span class="o">.</span><span class="n">workspace</span> <span class="o">=</span> <span class="n">Tensor</span><span class="p">(</span>
|
|
<span class="n">name</span><span class="o">=</span><span class="s1">'all_reduce_workspace'</span><span class="p">,</span>
|
|
<span class="n">dtype</span><span class="o">=</span><span class="n">trt</span><span class="o">.</span><span class="n">int64</span><span class="p">,</span>
|
|
<span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="n">workspace_size</span><span class="p">],</span>
|
|
<span class="n">dim_range</span><span class="o">=</span><span class="n">dim_range</span><span class="p">,</span>
|
|
<span class="p">)</span>
|
|
|
|
<span class="nd">@staticmethod</span>
|
|
<span class="k">def</span> <span class="nf">max_workspace_size_auto</span><span class="p">(</span><span class="n">tp_size</span><span class="p">:</span> <span class="nb">int</span><span class="p">)</span> <span class="o">-></span> <span class="nb">int</span><span class="p">:</span>
|
|
<span class="k">if</span> <span class="n">tp_size</span> <span class="o"><=</span> <span class="mi">2</span><span class="p">:</span>
|
|
<span class="k">return</span> <span class="mi">16_000_000</span>
|
|
<span class="k">return</span> <span class="mi">8_000_000</span>
|
|
|
|
<span class="nd">@staticmethod</span>
|
|
<span class="k">def</span> <span class="nf">allocate_workspace</span><span class="p">(</span><span class="n">mapping</span><span class="p">:</span> <span class="n">Mapping</span><span class="p">,</span>
|
|
<span class="n">size</span><span class="p">:</span> <span class="nb">int</span><span class="p">)</span> <span class="o">-></span> <span class="n">Tuple</span><span class="p">[</span><span class="n">List</span><span class="p">[</span><span class="n">IpcMemory</span><span class="p">],</span> <span class="s2">"torch.tensor"</span><span class="p">]:</span>
|
|
<span class="kn">import</span> <span class="nn">torch</span>
|
|
<span class="n">ipc_buffers_ping</span> <span class="o">=</span> <span class="n">IpcMemory</span><span class="p">(</span><span class="n">mapping</span><span class="p">,</span> <span class="n">size</span> <span class="o">*</span> <span class="n">mapping</span><span class="o">.</span><span class="n">world_size</span><span class="p">)</span>
|
|
<span class="n">ipc_buffers_pong</span> <span class="o">=</span> <span class="n">IpcMemory</span><span class="p">(</span><span class="n">mapping</span><span class="p">,</span> <span class="n">size</span> <span class="o">*</span> <span class="n">mapping</span><span class="o">.</span><span class="n">world_size</span><span class="p">)</span>
|
|
<span class="n">ipc_barriers_in</span> <span class="o">=</span> <span class="n">IpcMemory</span><span class="p">(</span>
|
|
<span class="n">mapping</span><span class="p">,</span> <span class="n">IpcMemory</span><span class="o">.</span><span class="n">IPC_BARRIERS_SIZE_PER_GPU</span> <span class="o">*</span> <span class="n">mapping</span><span class="o">.</span><span class="n">tp_size</span><span class="p">)</span>
|
|
<span class="n">ipc_barriers_out</span> <span class="o">=</span> <span class="n">IpcMemory</span><span class="p">(</span>
|
|
<span class="n">mapping</span><span class="p">,</span> <span class="n">IpcMemory</span><span class="o">.</span><span class="n">IPC_BARRIERS_SIZE_PER_GPU</span> <span class="o">*</span> <span class="n">mapping</span><span class="o">.</span><span class="n">tp_size</span><span class="p">)</span>
|
|
<span class="n">buffers</span> <span class="o">=</span> <span class="p">[</span>
|
|
<span class="n">ipc_buffers_ping</span><span class="p">,</span>
|
|
<span class="n">ipc_buffers_pong</span><span class="p">,</span>
|
|
<span class="n">ipc_barriers_in</span><span class="p">,</span>
|
|
<span class="n">ipc_barriers_out</span><span class="p">,</span>
|
|
<span class="p">]</span>
|
|
|
|
<span class="k">return</span> <span class="n">buffers</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">tensor</span><span class="p">(</span>
|
|
<span class="n">ipc_buffers_ping</span><span class="o">.</span><span class="n">serialize</span><span class="p">()</span> <span class="o">+</span> <span class="n">ipc_buffers_pong</span><span class="o">.</span><span class="n">serialize</span><span class="p">()</span> <span class="o">+</span>
|
|
<span class="n">ipc_barriers_in</span><span class="o">.</span><span class="n">serialize</span><span class="p">()</span> <span class="o">+</span> <span class="n">ipc_barriers_out</span><span class="o">.</span><span class="n">serialize</span><span class="p">(),</span>
|
|
<span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">int64</span><span class="p">,</span>
|
|
<span class="n">device</span><span class="o">=</span><span class="s2">"cpu"</span><span class="p">)</span>


custom_all_reduce_helper = None


def init_all_reduce_helper():
    global custom_all_reduce_helper
    custom_all_reduce_helper = CustomAllReduceHelper()


def current_all_reduce_helper():
    global custom_all_reduce_helper
    assert custom_all_reduce_helper is not None, "You must call `init_all_reduce_helper` first"
    return custom_all_reduce_helper