Mirror of https://github.com/NVIDIA/TensorRT-LLM.git (synced 2026-01-14 06:27:45 +08:00)

Source code for tensorrt_llm.models.llama.model

# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<span class="kn">import</span> <span class="nn">os</span>
|
|
<span class="kn">from</span> <span class="nn">typing</span> <span class="kn">import</span> <span class="n">Optional</span><span class="p">,</span> <span class="n">Union</span>
|
|
|
|
<span class="kn">import</span> <span class="nn">transformers</span>
|
|
|
|
<span class="kn">from</span> <span class="nn">..._common</span> <span class="kn">import</span> <span class="n">default_net</span>
|
|
<span class="kn">from</span> <span class="nn">..._utils</span> <span class="kn">import</span> <span class="n">pad_vocab_size</span>
|
|
<span class="kn">from</span> <span class="nn">...functional</span> <span class="kn">import</span> <span class="p">(</span><span class="n">AllReduceFusionOp</span><span class="p">,</span> <span class="n">AllReduceParams</span><span class="p">,</span> <span class="n">Tensor</span><span class="p">,</span>
|
|
<span class="n">allgather</span><span class="p">,</span> <span class="n">concat</span><span class="p">,</span> <span class="n">constant</span><span class="p">,</span> <span class="n">div</span><span class="p">,</span> <span class="n">non_gated_version</span><span class="p">,</span>
|
|
<span class="n">recv</span><span class="p">,</span> <span class="n">send</span><span class="p">,</span> <span class="n">unsqueeze</span><span class="p">)</span>
|
|
<span class="kn">from</span> <span class="nn">...layers</span> <span class="kn">import</span> <span class="p">(</span><span class="n">MOE</span><span class="p">,</span> <span class="n">Attention</span><span class="p">,</span> <span class="n">AttentionMaskType</span><span class="p">,</span> <span class="n">ColumnLinear</span><span class="p">,</span>
|
|
<span class="n">Embedding</span><span class="p">,</span> <span class="n">FusedGatedMLP</span><span class="p">,</span> <span class="n">GatedMLP</span><span class="p">,</span>
|
|
<span class="n">PositionEmbeddingType</span><span class="p">,</span> <span class="n">RmsNorm</span><span class="p">)</span>
|
|
<span class="kn">from</span> <span class="nn">...lora_manager</span> <span class="kn">import</span> <span class="n">LoraConfig</span><span class="p">,</span> <span class="n">use_lora</span>
|
|
<span class="kn">from</span> <span class="nn">...mapping</span> <span class="kn">import</span> <span class="n">Mapping</span>
|
|
<span class="kn">from</span> <span class="nn">...module</span> <span class="kn">import</span> <span class="n">Module</span>
|
|
<span class="kn">from</span> <span class="nn">...quantization.functional</span> <span class="kn">import</span> <span class="n">fused_layernorm</span>
|
|
<span class="kn">from</span> <span class="nn">..convert_utils</span> <span class="kn">import</span> <span class="n">has_safetensors</span>
|
|
<span class="kn">from</span> <span class="nn">..model_weights_loader</span> <span class="kn">import</span> <span class="n">ModelWeightsLoader</span>
|
|
<span class="kn">from</span> <span class="nn">..modeling_utils</span> <span class="kn">import</span> <span class="p">(</span><span class="n">DecoderLayerList</span><span class="p">,</span> <span class="n">DecoderModelForCausalLM</span><span class="p">,</span>
|
|
<span class="n">QuantConfig</span><span class="p">)</span>
|
|
<span class="kn">from</span> <span class="nn">.config</span> <span class="kn">import</span> <span class="n">LLaMAConfig</span>
|
|
<span class="kn">from</span> <span class="nn">.convert</span> <span class="kn">import</span> <span class="p">(</span><span class="n">load_hf_llama</span><span class="p">,</span> <span class="n">load_weights_from_deepcompressor</span><span class="p">,</span>
|
|
<span class="n">load_weights_from_gptq</span><span class="p">,</span> <span class="n">load_weights_from_hf_by_shard</span><span class="p">,</span>
|
|
<span class="n">load_weights_from_hf_model</span><span class="p">,</span>
|
|
<span class="n">load_weights_from_hf_safetensors</span><span class="p">,</span>
|
|
<span class="n">load_weights_from_meta_ckpt</span><span class="p">)</span>
|
|
|
|
|
|
<span class="k">class</span> <span class="nc">LLaMADecoderLayer</span><span class="p">(</span><span class="n">Module</span><span class="p">):</span>
|
|
|
|
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">config</span><span class="p">:</span> <span class="n">LLaMAConfig</span><span class="p">,</span> <span class="n">layer_idx</span><span class="p">:</span> <span class="nb">int</span><span class="p">):</span>
|
|
<span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
|
|
<span class="bp">self</span><span class="o">.</span><span class="n">layer_idx</span> <span class="o">=</span> <span class="n">layer_idx</span>
|
|
<span class="n">layer_idx</span> <span class="o">+=</span> <span class="n">config</span><span class="o">.</span><span class="n">layer_idx_offset</span>
|
|
<span class="bp">self</span><span class="o">.</span><span class="n">config</span> <span class="o">=</span> <span class="n">config</span>
|
|
<span class="bp">self</span><span class="o">.</span><span class="n">mapping</span> <span class="o">=</span> <span class="n">config</span><span class="o">.</span><span class="n">mapping</span>
|
|
|
|
<span class="k">if</span> <span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">config</span><span class="o">.</span><span class="n">use_input_layernorm_in_first_layer</span>
|
|
<span class="ow">and</span> <span class="bp">self</span><span class="o">.</span><span class="n">layer_idx</span> <span class="o">==</span> <span class="mi">0</span><span class="p">)</span> <span class="ow">or</span> <span class="bp">self</span><span class="o">.</span><span class="n">layer_idx</span> <span class="o">></span> <span class="mi">0</span><span class="p">:</span>
|
|
<span class="bp">self</span><span class="o">.</span><span class="n">input_layernorm</span> <span class="o">=</span> <span class="n">RmsNorm</span><span class="p">(</span><span class="n">normalized_shape</span><span class="o">=</span><span class="n">config</span><span class="o">.</span><span class="n">hidden_size</span><span class="p">,</span>
|
|
<span class="n">eps</span><span class="o">=</span><span class="n">config</span><span class="o">.</span><span class="n">norm_epsilon</span><span class="p">,</span>
|
|
<span class="n">dtype</span><span class="o">=</span><span class="n">config</span><span class="o">.</span><span class="n">dtype</span><span class="p">)</span>
|
|
|
|
<span class="n">layers_range</span> <span class="o">=</span> <span class="n">config</span><span class="o">.</span><span class="n">mapping</span><span class="o">.</span><span class="n">pp_layers</span><span class="p">(</span><span class="n">config</span><span class="o">.</span><span class="n">num_hidden_layers</span><span class="p">)</span>
|
|
<span class="bp">self</span><span class="o">.</span><span class="n">local_layer_idx</span> <span class="o">=</span> <span class="n">layer_idx</span> <span class="o">-</span> <span class="n">layers_range</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span>
|
|
<span class="bp">self</span><span class="o">.</span><span class="n">is_last_local_layer</span> <span class="o">=</span> <span class="n">layer_idx</span> <span class="o">==</span> <span class="n">layers_range</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span>
|
|
<span class="bp">self</span><span class="o">.</span><span class="n">attention</span> <span class="o">=</span> <span class="n">Attention</span><span class="p">(</span>
|
|
<span class="n">local_layer_idx</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">local_layer_idx</span><span class="p">,</span>
|
|
<span class="n">hidden_size</span><span class="o">=</span><span class="n">config</span><span class="o">.</span><span class="n">hidden_size</span><span class="p">,</span>
|
|
<span class="n">attention_head_size</span><span class="o">=</span><span class="n">config</span><span class="o">.</span><span class="n">head_size</span><span class="p">,</span>
|
|
<span class="n">num_attention_heads</span><span class="o">=</span><span class="n">config</span><span class="o">.</span><span class="n">num_attention_heads</span><span class="p">,</span>
|
|
<span class="n">num_kv_heads</span><span class="o">=</span><span class="n">config</span><span class="o">.</span><span class="n">num_key_value_heads</span><span class="p">,</span>
|
|
<span class="n">max_position_embeddings</span><span class="o">=</span><span class="n">config</span><span class="o">.</span><span class="n">max_position_embeddings</span><span class="p">,</span>
|
|
<span class="n">dtype</span><span class="o">=</span><span class="n">config</span><span class="o">.</span><span class="n">dtype</span><span class="p">,</span>
|
|
<span class="n">attention_mask_type</span><span class="o">=</span><span class="n">AttentionMaskType</span><span class="o">.</span><span class="n">causal</span><span class="p">,</span>
|
|
<span class="n">bias</span><span class="o">=</span><span class="n">config</span><span class="o">.</span><span class="n">attn_bias</span><span class="p">,</span>
|
|
<span class="n">position_embedding_type</span><span class="o">=</span><span class="n">PositionEmbeddingType</span><span class="o">.</span><span class="n">rope_gpt_neox</span><span class="p">,</span>
|
|
<span class="n">rotary_embedding_base</span><span class="o">=</span><span class="n">config</span><span class="o">.</span><span class="n">rotary_base</span><span class="p">,</span>
|
|
<span class="n">rotary_embedding_scaling</span><span class="o">=</span><span class="n">config</span><span class="o">.</span><span class="n">rotary_scaling</span><span class="p">,</span>
|
|
<span class="n">tp_group</span><span class="o">=</span><span class="n">config</span><span class="o">.</span><span class="n">mapping</span><span class="o">.</span><span class="n">tp_group</span><span class="p">,</span>
|
|
<span class="n">tp_size</span><span class="o">=</span><span class="n">config</span><span class="o">.</span><span class="n">mapping</span><span class="o">.</span><span class="n">tp_size</span><span class="p">,</span>
|
|
<span class="n">tp_rank</span><span class="o">=</span><span class="n">config</span><span class="o">.</span><span class="n">mapping</span><span class="o">.</span><span class="n">tp_rank</span><span class="p">,</span>
|
|
<span class="n">q_scaling</span><span class="o">=</span><span class="mf">1.0</span> <span class="o">/</span> <span class="n">config</span><span class="o">.</span><span class="n">attention_multiplier</span><span class="p">,</span>
|
|
<span class="n">quant_mode</span><span class="o">=</span><span class="n">config</span><span class="o">.</span><span class="n">quant_mode</span><span class="p">,</span>
|
|
<span class="n">cp_group</span><span class="o">=</span><span class="n">config</span><span class="o">.</span><span class="n">mapping</span><span class="o">.</span><span class="n">cp_group</span><span class="p">,</span>
|
|
<span class="n">cp_size</span><span class="o">=</span><span class="n">config</span><span class="o">.</span><span class="n">mapping</span><span class="o">.</span><span class="n">cp_size</span><span class="p">,</span>
|
|
<span class="n">cp_rank</span><span class="o">=</span><span class="n">config</span><span class="o">.</span><span class="n">mapping</span><span class="o">.</span><span class="n">cp_rank</span><span class="p">)</span>

        mlp_hidden_size = config.hidden_size * 4 if config.intermediate_size is None else config.intermediate_size

        ClsMLP = GatedMLP
        mlp_kwargs = {}
        if config.moe.has_moe():
            ClsMLP = MOE
            mlp_kwargs = {
                "moe_config": config.moe,
                "mapping": config.mapping,
            }
        self.mlp = ClsMLP(hidden_size=config.hidden_size,
                          ffn_hidden_size=mlp_hidden_size,
                          hidden_act=config.hidden_act,
                          dtype=config.dtype,
                          bias=config.mlp_bias,
                          tp_group=config.mapping.tp_group,
                          tp_size=config.mapping.tp_size,
                          quant_mode=config.quant_mode,
                          **mlp_kwargs)

        self.post_layernorm = RmsNorm(normalized_shape=config.hidden_size,
                                      eps=config.norm_epsilon,
                                      dtype=config.dtype)

        # Residual MLP that applies to the pre-attention input.
        # TODO: change to self.has_residual_mlp = self.config.residual_mlp
        # after the ModelOpt quantize config is updated.
        self.has_residual_mlp = False
        if hasattr(self.config,
                   "residual_mlp") and self.config.residual_mlp is True:
            self.has_residual_mlp = True

        if self.has_residual_mlp:
            self.residual_layernorm = RmsNorm(
                normalized_shape=config.hidden_size,
                eps=config.norm_epsilon,
                dtype=config.dtype)
            ClsMLP = GatedMLP  # TODO: may use FusedGatedMLP for a further speedup
            self.residual_mlp = ClsMLP(
                hidden_size=config.hidden_size,
                ffn_hidden_size=config.hidden_size,  # the residual MLP uses hidden_size
                hidden_act=non_gated_version(
                    config.hidden_act),  # back to non-gated
                dtype=config.dtype,
                bias=config.mlp_bias,
                tp_group=config.mapping.tp_group,
                tp_size=config.mapping.tp_size,
                quant_mode=config.quant_mode)

    def forward(self,
                hidden_states,
                attention_mask=None,
                use_cache=False,
                spec_decoding_params=None,
                kv_cache_params=None,
                attention_params=None,
                lora_layer_params=None,
                next_layer_input_layernorm_args=None):
        assert not (
            default_net().plugin_config.reduce_fusion and self.has_residual_mlp
        ), "Custom all-reduce and residual MLP can't be enabled at the same time."
        assert not (
            default_net().plugin_config.reduce_fusion
            and default_net().plugin_config.user_buffer
            and default_net().plugin_config.pp_reduce_scatter
        ), "User buffer reduce fusion with PP reduce scatter is not supported yet."
        assert not (
            default_net().plugin_config.reduce_fusion
            and default_net().plugin_config.norm_quant_fusion
        ), "Reduce fusion and quant fusion can't be enabled at the same time."
        if default_net(
        ).plugin_config.reduce_fusion and self.local_layer_idx > 0:
            hidden_states, residual = hidden_states
        elif default_net(
        ).plugin_config.norm_quant_fusion and self.local_layer_idx > 0:
            hidden_states, residual = hidden_states
        else:
            residual = hidden_states
            if (self.config.use_input_layernorm_in_first_layer
                    and self.layer_idx == 0) or self.layer_idx > 0:
                hidden_states = self.input_layernorm(hidden_states)

        reduce_fusion_op = AllReduceFusionOp.NONE
        if default_net().plugin_config.reduce_fusion:
            if default_net().plugin_config.user_buffer:
                if self.config.quant_mode.has_fp8_qdq():
                    reduce_fusion_op = AllReduceFusionOp.RESIDUAL_RMS_NORM_QUANT_FP8
                elif self.config.quant_mode.has_nvfp4():
                    assert default_net(
                    ).plugin_config.gemm_plugin == "nvfp4", "UB with an nvfp4 model must use the nvfp4 gemm plugin"
                    reduce_fusion_op = AllReduceFusionOp.RESIDUAL_RMS_NORM_QUANT_NVFP4
                else:
                    assert False, "UB must be enabled with an fp8 or nvfp4 model"
            else:
                reduce_fusion_op = AllReduceFusionOp.RESIDUAL_RMS_NORM

        reduce_fusion_scale = None
        if default_net().plugin_config.reduce_fusion and default_net(
        ).plugin_config.user_buffer:
            if isinstance(self.mlp, FusedGatedMLP):
                if self.config.quant_mode.has_fp8_qdq():
                    reduce_fusion_scale = constant(
                        self.mlp.fused_fc.activation_scaling_factor.raw_value.
                        copy())
                elif self.config.quant_mode.has_nvfp4():
                    reduce_fusion_scale = constant(
                        self.mlp.fused_fc.activation_global_scaling_factor.
                        raw_value.copy())
            else:
                if self.config.quant_mode.has_fp8_qdq():
                    reduce_fusion_scale = constant(
                        self.mlp.fc.activation_scaling_factor.raw_value.copy())
                elif self.config.quant_mode.has_nvfp4():
                    reduce_fusion_scale = constant(
                        self.mlp.fc.activation_global_scaling_factor.raw_value.
                        copy())
        attention_output = self.attention(
            hidden_states,
            attention_mask=attention_mask,
            use_cache=use_cache,
            spec_decoding_params=spec_decoding_params,
            kv_cache_params=kv_cache_params,
            attention_params=attention_params,
            lora_layer_params=lora_layer_params,
            all_reduce_params=AllReduceParams(
                fusion_op=reduce_fusion_op,
                residual=residual,
                norm_weight=self.post_layernorm.weight.value,
                scale=reduce_fusion_scale,
                eps=self.post_layernorm.eps))
        if use_cache:
            attention_output, presents = attention_output

        if self.has_residual_mlp:
            # Arctic-style layer with a residual MLP.
            hidden_states = residual + attention_output
            residual_attn = hidden_states

            # Residual MLP branch.
            hidden_states = self.residual_layernorm(hidden_states)
            hidden_states = self.residual_mlp(hidden_states)
            residual_mlp = residual_attn + hidden_states

            # Parallel MoE branch: it applies to the PRE-ATTENTION input
            # residual, which enables pre-fetching and better parallelism.
            hidden_states = self.post_layernorm(residual)
            hidden_states = self.mlp(hidden_states,
                                     lora_layer_params=lora_layer_params)
            hidden_states = residual_mlp + hidden_states
        else:
            if default_net().plugin_config.reduce_fusion:
                hidden_states, residual = attention_output
            elif default_net().plugin_config.norm_quant_fusion:
                hidden_states, residual, act_per_block_scale = fused_layernorm(
                    input=attention_output,
                    normalized_shape=self.config.hidden_size,
                    residual=residual,
                    weight=self.post_layernorm.weight.value,
                    scale=div(
                        1, self.mlp.fc.activation_global_scaling_factor.value)
                    if self.mlp.fc.activation_global_scaling_factor.value else
                    None,
                    eps=self.post_layernorm.eps,
                    p_dtype=self.config.dtype)

                hidden_states, residual = (hidden_states,
                                           act_per_block_scale), residual
                assert isinstance(hidden_states, tuple)
            else:
                hidden_states = residual + attention_output * self.config.residual_multiplier
                residual = hidden_states
                hidden_states = self.post_layernorm(hidden_states)
            if next_layer_input_layernorm_args is not None:
                # This is a middle layer.
                hidden_states = self.mlp(
                    hidden_states,
                    lora_layer_params=lora_layer_params,
                    all_reduce_params=AllReduceParams(
                        fusion_op=reduce_fusion_op,
                        residual=residual,
                        norm_weight=next_layer_input_layernorm_args[0],
                        scale=next_layer_input_layernorm_args[2],
                        eps=next_layer_input_layernorm_args[1]))
                if default_net().plugin_config.norm_quant_fusion:
                    hidden_states, residual, act_per_block_scale = fused_layernorm(
                        input=hidden_states,
                        normalized_shape=self.config.hidden_size,
                        residual=residual,
                        weight=next_layer_input_layernorm_args[0],
                        scale=div(1, next_layer_input_layernorm_args[2])
                        if next_layer_input_layernorm_args[2] else None,
                        eps=next_layer_input_layernorm_args[1],
                        p_dtype=self.config.dtype)
                    hidden_states = (hidden_states,
                                     act_per_block_scale), residual
            else:
                if default_net(
                ).plugin_config.pp_reduce_scatter and self.is_last_local_layer and not self.mapping.is_last_pp_rank(
                ):
                    hidden_states = self.mlp(
                        hidden_states,
                        lora_layer_params=lora_layer_params,
                        last_local_layer_residual=residual)
                else:
                    if (default_net().plugin_config.reduce_fusion
                            and default_net().plugin_config.user_buffer):
                        hidden_states, residual = self.mlp(
                            hidden_states,
                            lora_layer_params=lora_layer_params,
                            all_reduce_params=AllReduceParams(
                                fusion_op=AllReduceFusionOp.LAST_PROCESS_FOR_UB,
                                residual=residual))
                    else:
                        hidden_states = self.mlp(
                            hidden_states,
                            lora_layer_params=lora_layer_params)
                        hidden_states = residual + hidden_states * self.config.residual_multiplier
        if use_cache:
            return (hidden_states, presents)
        return hidden_states
|
|
|
|
|
|
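# [Editor's note -- usage sketch, not part of this module] The fusion branches
# above are gated by PluginConfig flags. A hypothetical way to enable them
# before building an engine; the attribute names come from the checks above,
# while the import path and the plain attribute assignment are assumptions:
#
#     from tensorrt_llm.plugin import PluginConfig
#
#     plugin_config = PluginConfig()
#     plugin_config.reduce_fusion = True      # fuse the residual add into the all-reduce
#     plugin_config.user_buffer = True        # enables the LAST_PROCESS_FOR_UB path
#     plugin_config.norm_quant_fusion = True  # takes the fused_layernorm() branch
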
<div class="viewcode-block" id="LLaMAModel">
|
|
<a class="viewcode-back" href="../../../../python-api/tensorrt_llm.models.html#tensorrt_llm.models.LLaMAModel">[docs]</a>
|
|
<span class="k">class</span> <span class="nc">LLaMAModel</span><span class="p">(</span><span class="n">Module</span><span class="p">):</span>
|
|
|
|
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">config</span><span class="p">:</span> <span class="n">LLaMAConfig</span><span class="p">)</span> <span class="o">-></span> <span class="kc">None</span><span class="p">:</span>
|
|
<span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
|
|
|
|
<span class="bp">self</span><span class="o">.</span><span class="n">mapping</span> <span class="o">=</span> <span class="n">config</span><span class="o">.</span><span class="n">mapping</span>
|
|
<span class="bp">self</span><span class="o">.</span><span class="n">vocab_size</span> <span class="o">=</span> <span class="n">config</span><span class="o">.</span><span class="n">vocab_size</span>
|
|
<span class="bp">self</span><span class="o">.</span><span class="n">has_partial_lora_mask</span> <span class="o">=</span> <span class="n">config</span><span class="o">.</span><span class="n">has_partial_lora_mask</span>
|
|
<span class="bp">self</span><span class="o">.</span><span class="n">hidden_size</span> <span class="o">=</span> <span class="n">config</span><span class="o">.</span><span class="n">hidden_size</span>
|
|
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">mapping</span><span class="o">.</span><span class="n">is_first_pp_rank</span><span class="p">():</span>
|
|
<span class="bp">self</span><span class="o">.</span><span class="n">vocab_embedding</span> <span class="o">=</span> <span class="n">Embedding</span><span class="p">(</span><span class="n">config</span><span class="o">.</span><span class="n">vocab_size</span><span class="p">,</span>
|
|
<span class="n">config</span><span class="o">.</span><span class="n">hidden_size</span><span class="p">,</span>
|
|
<span class="n">dtype</span><span class="o">=</span><span class="n">config</span><span class="o">.</span><span class="n">dtype</span><span class="p">)</span>
|
|
<span class="bp">self</span><span class="o">.</span><span class="n">embedding_multiplier</span> <span class="o">=</span> <span class="n">config</span><span class="o">.</span><span class="n">embedding_multiplier</span>
|
|
|
|
<span class="bp">self</span><span class="o">.</span><span class="n">layers</span> <span class="o">=</span> <span class="n">DecoderLayerList</span><span class="p">(</span><span class="n">LLaMADecoderLayer</span><span class="p">,</span> <span class="n">config</span><span class="p">)</span>
|
|
|
|
<span class="k">if</span> <span class="n">config</span><span class="o">.</span><span class="n">fc_after_embed</span><span class="p">:</span>
|
|
<span class="bp">self</span><span class="o">.</span><span class="n">fc</span> <span class="o">=</span> <span class="n">ColumnLinear</span><span class="p">(</span><span class="mi">2</span> <span class="o">*</span> <span class="n">config</span><span class="o">.</span><span class="n">hidden_size</span><span class="p">,</span>
|
|
<span class="n">config</span><span class="o">.</span><span class="n">hidden_size</span><span class="p">,</span>
|
|
<span class="n">bias</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span>
|
|
<span class="n">dtype</span><span class="o">=</span><span class="n">config</span><span class="o">.</span><span class="n">dtype</span><span class="p">,</span>
|
|
<span class="n">tp_group</span><span class="o">=</span><span class="n">config</span><span class="o">.</span><span class="n">mapping</span><span class="o">.</span><span class="n">tp_group</span><span class="p">,</span>
|
|
<span class="n">tp_size</span><span class="o">=</span><span class="n">config</span><span class="o">.</span><span class="n">mapping</span><span class="o">.</span><span class="n">tp_size</span><span class="p">,</span>
|
|
<span class="n">gather_output</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
|
|
|
|
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">mapping</span><span class="o">.</span><span class="n">is_last_pp_rank</span><span class="p">():</span>
|
|
<span class="bp">self</span><span class="o">.</span><span class="n">ln_f</span> <span class="o">=</span> <span class="kc">None</span>
|
|
<span class="k">if</span> <span class="n">config</span><span class="o">.</span><span class="n">use_last_layernorm</span><span class="p">:</span>
|
|
<span class="bp">self</span><span class="o">.</span><span class="n">ln_f</span> <span class="o">=</span> <span class="n">RmsNorm</span><span class="p">(</span><span class="n">normalized_shape</span><span class="o">=</span><span class="n">config</span><span class="o">.</span><span class="n">hidden_size</span><span class="p">,</span>
|
|
<span class="n">eps</span><span class="o">=</span><span class="n">config</span><span class="o">.</span><span class="n">norm_epsilon</span><span class="p">,</span>
|
|
<span class="n">dtype</span><span class="o">=</span><span class="n">config</span><span class="o">.</span><span class="n">dtype</span><span class="p">)</span>
|
|
|
|
<div class="viewcode-block" id="LLaMAModel.forward">
|
|
<a class="viewcode-back" href="../../../../python-api/tensorrt_llm.models.html#tensorrt_llm.models.LLaMAModel.forward">[docs]</a>
|
|
<span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span>
|
|
<span class="n">input_ids</span><span class="p">,</span>
|
|
<span class="n">position_ids</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
|
|
<span class="n">use_cache</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
|
|
<span class="n">attention_mask</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
|
|
<span class="n">spec_decoding_params</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
|
|
<span class="n">kv_cache_params</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
|
|
<span class="n">attention_params</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
|
|
<span class="n">hidden_states</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
|
|
<span class="n">hidden_states_for_embed</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
|
|
<span class="n">prompt_embedding_table</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Tensor</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
|
|
<span class="n">prompt_tasks</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Tensor</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
|
|
<span class="n">prompt_vocab_size</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Tensor</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
|
|
<span class="n">lora_params</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
|
|
|
|
<span class="n">ptuning_args</span> <span class="o">=</span> <span class="p">[</span>
|
|
<span class="n">prompt_embedding_table</span><span class="p">,</span> <span class="n">prompt_tasks</span><span class="p">,</span> <span class="n">prompt_vocab_size</span>
|
|
<span class="p">]</span> <span class="k">if</span> <span class="n">prompt_embedding_table</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span> <span class="k">else</span> <span class="p">[]</span>
|
|
|
|
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">mapping</span><span class="o">.</span><span class="n">is_first_pp_rank</span><span class="p">():</span>
|
|
<span class="n">hidden_states</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">vocab_embedding</span><span class="p">(</span><span class="n">input_ids</span><span class="p">,</span> <span class="o">*</span><span class="n">ptuning_args</span><span class="p">)</span>
|
|
<span class="n">hidden_states</span> <span class="o">*=</span> <span class="bp">self</span><span class="o">.</span><span class="n">embedding_multiplier</span>
|
|
<span class="k">else</span><span class="p">:</span>
|
|
<span class="n">hidden_states</span> <span class="o">=</span> <span class="n">recv</span><span class="p">(</span><span class="n">hidden_states</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">mapping</span><span class="o">.</span><span class="n">prev_pp_rank</span><span class="p">())</span>
|
|
<span class="k">if</span> <span class="n">default_net</span><span class="p">()</span><span class="o">.</span><span class="n">plugin_config</span><span class="o">.</span><span class="n">pp_reduce_scatter</span><span class="p">:</span>
|
|
<span class="n">hidden_states</span> <span class="o">=</span> <span class="n">allgather</span><span class="p">(</span><span class="n">hidden_states</span><span class="p">,</span>
|
|
<span class="bp">self</span><span class="o">.</span><span class="n">mapping</span><span class="o">.</span><span class="n">tp_group</span><span class="p">,</span>
|
|
<span class="n">gather_dim</span><span class="o">=</span><span class="mi">0</span><span class="p">)</span>
|
|
<span class="c1"># reshape to (-1, hidden_size)</span>
|
|
<span class="n">hidden_states</span> <span class="o">=</span> <span class="n">hidden_states</span><span class="o">.</span><span class="n">view</span><span class="p">(</span>
|
|
<span class="n">concat</span><span class="p">([</span><span class="o">-</span><span class="mi">1</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">hidden_size</span><span class="p">]))</span>
|
|
|
|
<span class="k">if</span> <span class="n">hidden_states_for_embed</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
|
|
<span class="n">hidden_states</span> <span class="o">=</span> <span class="n">concat</span><span class="p">([</span><span class="n">hidden_states</span><span class="p">,</span> <span class="n">hidden_states_for_embed</span><span class="p">],</span>
|
|
<span class="n">dim</span><span class="o">=-</span><span class="mi">1</span><span class="p">)</span>
|
|
<span class="n">hidden_states</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">fc</span><span class="p">(</span><span class="n">hidden_states</span><span class="p">)</span>
|
|
|
|
<span class="k">if</span> <span class="n">lora_params</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span> <span class="ow">and</span> <span class="bp">self</span><span class="o">.</span><span class="n">has_partial_lora_mask</span><span class="p">:</span>
|
|
<span class="n">partial_lora_mask</span> <span class="o">=</span> <span class="n">input_ids</span> <span class="o">></span> <span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">vocab_size</span> <span class="o">-</span> <span class="mi">1</span><span class="p">)</span>
|
|
<span class="n">lora_params</span><span class="o">.</span><span class="n">partial_lora_mask</span> <span class="o">=</span> <span class="n">unsqueeze</span><span class="p">(</span><span class="n">partial_lora_mask</span><span class="p">,</span> <span class="o">-</span><span class="mi">1</span><span class="p">)</span>
|
|
|
|
<span class="n">hidden_states</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">layers</span><span class="o">.</span><span class="n">forward</span><span class="p">(</span>
|
|
<span class="n">hidden_states</span><span class="p">,</span>
|
|
<span class="n">use_cache</span><span class="o">=</span><span class="n">use_cache</span><span class="p">,</span>
|
|
<span class="n">attention_mask</span><span class="o">=</span><span class="n">attention_mask</span><span class="p">,</span>
|
|
<span class="n">kv_cache_params</span><span class="o">=</span><span class="n">kv_cache_params</span><span class="p">,</span>
|
|
<span class="n">attention_params</span><span class="o">=</span><span class="n">attention_params</span><span class="p">,</span>
|
|
<span class="n">lora_params</span><span class="o">=</span><span class="n">lora_params</span><span class="p">,</span>
|
|
<span class="n">spec_decoding_params</span><span class="o">=</span><span class="n">spec_decoding_params</span><span class="p">)</span>
|
|
|
|
<span class="k">if</span> <span class="n">use_cache</span><span class="p">:</span>
|
|
<span class="n">hidden_states</span><span class="p">,</span> <span class="n">presents</span> <span class="o">=</span> <span class="n">hidden_states</span>
|
|
|
|
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">mapping</span><span class="o">.</span><span class="n">is_last_pp_rank</span><span class="p">():</span>
|
|
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">ln_f</span><span class="p">:</span>
|
|
<span class="n">hidden_states</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">ln_f</span><span class="p">(</span><span class="n">hidden_states</span><span class="p">)</span>
|
|
<span class="k">else</span><span class="p">:</span>
|
|
<span class="n">hidden_states</span> <span class="o">=</span> <span class="n">send</span><span class="p">(</span><span class="n">hidden_states</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">mapping</span><span class="o">.</span><span class="n">next_pp_rank</span><span class="p">())</span>
|
|
|
|
<span class="k">if</span> <span class="n">use_cache</span><span class="p">:</span>
|
|
<span class="k">return</span> <span class="p">(</span><span class="n">hidden_states</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">(</span><span class="n">presents</span><span class="p">))</span>
|
|
<span class="k">return</span> <span class="n">hidden_states</span></div>
|
|
</div>
|
|
|
|
|
|
|
|
<div class="viewcode-block" id="LLaMAForCausalLM">
|
|
<a class="viewcode-back" href="../../../../python-api/tensorrt_llm.models.html#tensorrt_llm.models.LLaMAForCausalLM">[docs]</a>
|
|
<span class="k">class</span> <span class="nc">LLaMAForCausalLM</span><span class="p">(</span><span class="n">DecoderModelForCausalLM</span><span class="p">):</span>
|
|
<span class="n">config_class</span> <span class="o">=</span> <span class="n">LLaMAConfig</span>
|
|
|
|
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">config</span><span class="p">:</span> <span class="n">LLaMAConfig</span><span class="p">):</span>
|
|
<span class="n">transformer</span> <span class="o">=</span> <span class="n">LLaMAModel</span><span class="p">(</span><span class="n">config</span><span class="p">)</span>
|
|
<span class="n">vocab_size_padded</span> <span class="o">=</span> <span class="n">pad_vocab_size</span><span class="p">(</span><span class="n">config</span><span class="o">.</span><span class="n">vocab_size</span><span class="p">,</span>
|
|
<span class="n">config</span><span class="o">.</span><span class="n">mapping</span><span class="o">.</span><span class="n">tp_size</span><span class="p">)</span>
|
|
<span class="k">if</span> <span class="n">config</span><span class="o">.</span><span class="n">mapping</span><span class="o">.</span><span class="n">is_last_pp_rank</span><span class="p">():</span>
|
|
<span class="n">lm_head</span> <span class="o">=</span> <span class="n">ColumnLinear</span><span class="p">(</span><span class="n">config</span><span class="o">.</span><span class="n">hidden_size</span><span class="p">,</span>
|
|
<span class="n">vocab_size_padded</span><span class="p">,</span>
|
|
<span class="n">bias</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
|
|
<span class="n">dtype</span><span class="o">=</span><span class="n">config</span><span class="o">.</span><span class="n">dtype</span><span class="p">,</span>
|
|
<span class="n">tp_group</span><span class="o">=</span><span class="n">config</span><span class="o">.</span><span class="n">mapping</span><span class="o">.</span><span class="n">tp_group</span><span class="p">,</span>
|
|
<span class="n">tp_size</span><span class="o">=</span><span class="n">config</span><span class="o">.</span><span class="n">mapping</span><span class="o">.</span><span class="n">tp_size</span><span class="p">,</span>
|
|
<span class="n">gather_output</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
|
|
<span class="k">else</span><span class="p">:</span>
|
|
<span class="n">lm_head</span> <span class="o">=</span> <span class="kc">None</span>
|
|
<span class="bp">self</span><span class="o">.</span><span class="n">quant_mode</span> <span class="o">=</span> <span class="n">config</span><span class="o">.</span><span class="n">quant_mode</span>
|
|
<span class="bp">self</span><span class="o">.</span><span class="n">mapping</span> <span class="o">=</span> <span class="n">config</span><span class="o">.</span><span class="n">mapping</span>
|
|
<span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">config</span><span class="p">,</span> <span class="n">transformer</span><span class="p">,</span> <span class="n">lm_head</span><span class="p">)</span>
|
|
|
|
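# [Editor's note -- usage sketch, not part of this module] The from_* factory
# methods below all follow the same constructor pattern used in from_meta_ckpt():
# build the network from a LLaMAConfig, then load converted weights into it.
# The checkpoint directory is a placeholder.
#
#     config = LLaMAConfig.from_hugging_face("./llama-7b-hf")
#     model = LLaMAForCausalLM(config)
#     model.load(weights)  # weights: dict produced by one of the loaders below
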
<div class="viewcode-block" id="LLaMAForCausalLM.from_hugging_face">
|
|
<a class="viewcode-back" href="../../../../python-api/tensorrt_llm.models.html#tensorrt_llm.models.LLaMAForCausalLM.from_hugging_face">[docs]</a>
|
|
<span class="nd">@classmethod</span>
|
|
<span class="k">def</span> <span class="nf">from_hugging_face</span><span class="p">(</span>
|
|
<span class="bp">cls</span><span class="p">,</span>
|
|
<span class="n">hf_model_or_dir</span><span class="p">:</span> <span class="n">Union</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="s1">'transformers.PreTrainedModel'</span><span class="p">],</span>
|
|
<span class="n">dtype</span><span class="p">:</span> <span class="nb">str</span> <span class="o">=</span> <span class="s1">'auto'</span><span class="p">,</span>
|
|
<span class="n">mapping</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Mapping</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
|
|
<span class="n">quant_config</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">QuantConfig</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
|
|
<span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
|
|
<span class="w"> </span><span class="sd">''' Create a LLaMAForCausalLM object from give parameters</span>
|
|
<span class="sd"> '''</span>
|
|
<span class="kn">import</span> <span class="nn">transformers</span>
|
|
|
|
<span class="n">load_by_shard</span> <span class="o">=</span> <span class="n">kwargs</span><span class="o">.</span><span class="n">pop</span><span class="p">(</span><span class="s1">'load_by_shard'</span><span class="p">,</span> <span class="kc">False</span><span class="p">)</span>
|
|
<span class="n">load_model_on_cpu</span> <span class="o">=</span> <span class="n">kwargs</span><span class="o">.</span><span class="n">pop</span><span class="p">(</span><span class="s1">'load_model_on_cpu'</span><span class="p">,</span> <span class="kc">False</span><span class="p">)</span>
|
|
<span class="n">quant_ckpt_path</span> <span class="o">=</span> <span class="n">kwargs</span><span class="o">.</span><span class="n">pop</span><span class="p">(</span><span class="s1">'quant_ckpt_path'</span><span class="p">,</span> <span class="kc">None</span><span class="p">)</span>
|
|
<span class="n">use_autoawq</span> <span class="o">=</span> <span class="n">kwargs</span><span class="o">.</span><span class="n">pop</span><span class="p">(</span><span class="s1">'use_autoawq'</span><span class="p">,</span> <span class="kc">None</span><span class="p">)</span>
|
|
<span class="k">if</span> <span class="n">os</span><span class="o">.</span><span class="n">environ</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="s2">"TRTLLM_DISABLE_UNIFIED_CONVERTER"</span>
|
|
<span class="p">)</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span> <span class="ow">and</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span>
|
|
<span class="n">hf_model_or_dir</span><span class="p">,</span> <span class="n">transformers</span><span class="o">.</span><span class="n">PreTrainedModel</span><span class="p">):</span>
|
|
<span class="k">if</span> <span class="s2">"vila"</span> <span class="ow">in</span> <span class="n">hf_model_or_dir</span> <span class="ow">or</span> <span class="s2">"llava"</span> <span class="ow">in</span> <span class="n">hf_model_or_dir</span><span class="p">:</span>
|
|
<span class="n">hf_model_or_dir</span> <span class="o">=</span> <span class="n">load_hf_llama</span><span class="p">(</span><span class="n">hf_model_or_dir</span><span class="p">,</span>
|
|
<span class="n">load_model_on_cpu</span><span class="p">)</span>
|
|
<span class="k">elif</span> <span class="ow">not</span> <span class="n">load_by_shard</span> <span class="ow">and</span> <span class="ow">not</span> <span class="n">has_safetensors</span><span class="p">(</span>
|
|
<span class="n">hf_model_or_dir</span>
|
|
<span class="p">)</span> <span class="ow">and</span> <span class="ow">not</span> <span class="n">quant_config</span><span class="o">.</span><span class="n">quant_mode</span><span class="o">.</span><span class="n">has_any_quant</span><span class="p">():</span>
|
|
<span class="n">hf_model_or_dir</span> <span class="o">=</span> <span class="n">load_hf_llama</span><span class="p">(</span><span class="n">hf_model_or_dir</span><span class="p">,</span>
|
|
<span class="n">load_model_on_cpu</span><span class="p">)</span>
|
|
|
|
<span class="k">assert</span> <span class="n">hf_model_or_dir</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span>
|
|
<span class="n">use_preloading</span> <span class="o">=</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">hf_model_or_dir</span><span class="p">,</span>
|
|
<span class="n">transformers</span><span class="o">.</span><span class="n">PreTrainedModel</span><span class="p">)</span>
|
|
<span class="k">if</span> <span class="n">use_preloading</span><span class="p">:</span>
|
|
<span class="n">hf_model</span> <span class="o">=</span> <span class="n">hf_model_or_dir</span>
|
|
<span class="n">hf_config_or_dir</span> <span class="o">=</span> <span class="n">hf_model</span><span class="o">.</span><span class="n">config</span>
|
|
<span class="k">else</span><span class="p">:</span>
|
|
<span class="n">hf_model_dir</span> <span class="o">=</span> <span class="n">hf_model_or_dir</span>
|
|
<span class="n">hf_config_or_dir</span> <span class="o">=</span> <span class="n">hf_model_or_dir</span>
|
|
|
|
<span class="n">config</span> <span class="o">=</span> <span class="n">LLaMAConfig</span><span class="o">.</span><span class="n">from_hugging_face</span><span class="p">(</span><span class="n">hf_config_or_dir</span><span class="p">,</span>
|
|
<span class="n">dtype</span><span class="o">=</span><span class="n">dtype</span><span class="p">,</span>
|
|
<span class="n">mapping</span><span class="o">=</span><span class="n">mapping</span><span class="p">,</span>
|
|
<span class="n">quant_config</span><span class="o">=</span><span class="n">quant_config</span><span class="p">,</span>
|
|
<span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
|
|
<span class="k">if</span> <span class="n">config</span><span class="o">.</span><span class="n">remove_duplicated_kv_heads</span><span class="p">:</span>
|
|
<span class="n">config</span><span class="o">.</span><span class="n">num_key_value_heads</span> <span class="o">=</span> <span class="n">config</span><span class="o">.</span><span class="n">num_key_value_heads</span> <span class="o">//</span> <span class="mi">2</span>
|
|
<span class="k">if</span> <span class="n">os</span><span class="o">.</span><span class="n">environ</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="s2">"TRTLLM_DISABLE_UNIFIED_CONVERTER"</span><span class="p">)</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
|
|
<span class="n">custom_dict</span> <span class="o">=</span> <span class="p">{}</span>
|
|
<span class="n">model_name</span> <span class="o">=</span> <span class="n">hf_model</span><span class="o">.</span><span class="n">config</span><span class="o">.</span><span class="n">model_type</span> <span class="k">if</span> <span class="n">use_preloading</span> <span class="k">else</span> <span class="n">hf_model_or_dir</span>
|
|
<span class="k">if</span> <span class="s2">"llava"</span> <span class="ow">in</span> <span class="n">model_name</span><span class="p">:</span>
|
|
<span class="n">custom_dict</span> <span class="o">=</span> <span class="p">{</span>
|
|
<span class="s2">"transformer"</span><span class="p">:</span> <span class="s2">"language_model.model"</span><span class="p">,</span>
|
|
<span class="s2">"lm_head"</span><span class="p">:</span> <span class="s2">"language_model.lm_head"</span>
|
|
<span class="p">}</span>
|
|
<span class="k">elif</span> <span class="s2">"vila"</span> <span class="ow">in</span> <span class="n">model_name</span><span class="p">:</span>
|
|
<span class="n">hf_model_dir</span> <span class="o">+=</span> <span class="s2">"/llm"</span>
|
|
<span class="k">elif</span> <span class="s2">"exaone"</span> <span class="ow">in</span> <span class="n">model_name</span><span class="p">:</span>
|
|
<span class="n">custom_dict</span> <span class="o">=</span> <span class="p">{</span>
|
|
<span class="s2">"transformer"</span><span class="p">:</span> <span class="s2">"transformer"</span><span class="p">,</span>
|
|
<span class="s2">"layers"</span><span class="p">:</span> <span class="s2">"h"</span><span class="p">,</span>
|
|
<span class="s2">"vocab_embedding"</span><span class="p">:</span> <span class="s2">"wte"</span><span class="p">,</span>
|
|
<span class="s2">"lm_head"</span><span class="p">:</span> <span class="s2">"lm_head"</span><span class="p">,</span>
|
|
<span class="s2">"ln_f"</span><span class="p">:</span> <span class="s2">"ln_f"</span><span class="p">,</span>
|
|
<span class="s2">"attention"</span><span class="p">:</span> <span class="s2">"attn.attention"</span><span class="p">,</span>
|
|
<span class="s2">"dense"</span><span class="p">:</span> <span class="s2">"out_proj"</span><span class="p">,</span>
|
|
<span class="s2">"gate"</span><span class="p">:</span> <span class="s2">"c_fc_1"</span><span class="p">,</span>
|
|
<span class="s2">"proj"</span><span class="p">:</span> <span class="s2">"c_proj"</span><span class="p">,</span>
|
|
<span class="s2">"fc"</span><span class="p">:</span> <span class="s2">"c_fc_0"</span><span class="p">,</span>
|
|
<span class="s2">"input_layernorm"</span><span class="p">:</span> <span class="s2">"ln_1"</span><span class="p">,</span>
|
|
<span class="s2">"post_layernorm"</span><span class="p">:</span> <span class="s2">"ln_2"</span><span class="p">,</span>
|
|
<span class="p">}</span>
|
|
<span class="k">elif</span> <span class="n">config</span><span class="o">.</span><span class="n">tie_word_embeddings</span><span class="p">:</span>
|
|
<span class="n">custom_dict</span> <span class="o">=</span> <span class="p">{</span><span class="s2">"lm_head"</span><span class="p">:</span> <span class="s2">"model.embed_tokens"</span><span class="p">}</span>
|
|
|
|
<span class="k">if</span> <span class="n">quant_ckpt_path</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
|
|
<span class="n">hf_model_dir</span> <span class="o">=</span> <span class="n">quant_ckpt_path</span>
|
|
<span class="n">arg_dict</span> <span class="o">=</span> <span class="p">{</span><span class="s2">"use_autoawq"</span><span class="p">:</span> <span class="kc">True</span><span class="p">}</span> <span class="k">if</span> <span class="n">use_autoawq</span> <span class="k">else</span> <span class="p">{}</span>
|
|
|
|
<span class="n">loader</span> <span class="o">=</span> <span class="n">ModelWeightsLoader</span><span class="p">(</span><span class="n">hf_model_dir</span><span class="p">,</span> <span class="n">custom_dict</span><span class="p">)</span>
|
|
<span class="n">model</span> <span class="o">=</span> <span class="bp">cls</span><span class="p">(</span><span class="n">config</span><span class="p">)</span>
|
|
<span class="n">loader</span><span class="o">.</span><span class="n">generate_tllm_weights</span><span class="p">(</span><span class="n">model</span><span class="p">,</span> <span class="n">arg_dict</span><span class="p">)</span>
|
|
<span class="k">else</span><span class="p">:</span>
|
|
<span class="k">if</span> <span class="n">use_preloading</span><span class="p">:</span>
|
|
<span class="k">assert</span> <span class="ow">not</span> <span class="n">load_by_shard</span>
|
|
<span class="n">weights</span> <span class="o">=</span> <span class="n">load_weights_from_hf_model</span><span class="p">(</span><span class="n">hf_model</span><span class="p">,</span> <span class="n">config</span><span class="p">)</span>
|
|
<span class="k">elif</span> <span class="n">load_by_shard</span><span class="p">:</span>
|
|
<span class="n">weights</span> <span class="o">=</span> <span class="n">load_weights_from_hf_by_shard</span><span class="p">(</span><span class="n">hf_model_dir</span><span class="p">,</span> <span class="n">config</span><span class="p">)</span>
|
|
<span class="k">elif</span> <span class="n">has_safetensors</span><span class="p">(</span>
|
|
<span class="n">hf_model_dir</span><span class="p">)</span> <span class="ow">and</span> <span class="ow">not</span> <span class="n">config</span><span class="o">.</span><span class="n">quant_mode</span><span class="o">.</span><span class="n">has_any_quant</span><span class="p">():</span>
|
|
<span class="n">weights</span> <span class="o">=</span> <span class="n">load_weights_from_hf_safetensors</span><span class="p">(</span><span class="n">hf_model_dir</span><span class="p">,</span> <span class="n">config</span><span class="p">)</span>
|
|
<span class="k">elif</span> <span class="n">quant_ckpt_path</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
|
|
<span class="k">if</span> <span class="n">quant_config</span><span class="o">.</span><span class="n">quant_mode</span><span class="o">.</span><span class="n">is_int4_weight_only</span><span class="p">():</span>
|
|
<span class="n">weights</span> <span class="o">=</span> <span class="n">load_weights_from_gptq</span><span class="p">(</span><span class="n">quant_ckpt_path</span><span class="p">,</span> <span class="n">config</span><span class="p">)</span>
|
|
<span class="k">elif</span> <span class="n">quant_config</span><span class="o">.</span><span class="n">quant_mode</span><span class="o">.</span><span class="n">is_qserve_w4a8</span><span class="p">():</span>
|
|
<span class="n">weights</span> <span class="o">=</span> <span class="n">load_weights_from_deepcompressor</span><span class="p">(</span>
|
|
<span class="n">quant_ckpt_path</span><span class="p">,</span> <span class="n">config</span><span class="p">)</span>
|
|
<span class="k">else</span><span class="p">:</span>
|
|
<span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span>
|
|
<span class="s2">"quant_ckpt_path should be specified only for GPTQ or QServe"</span>
|
|
<span class="p">)</span>
|
|
<span class="k">else</span><span class="p">:</span>
|
|
<span class="n">hf_model</span> <span class="o">=</span> <span class="n">load_hf_llama</span><span class="p">(</span><span class="n">hf_model_dir</span><span class="p">,</span> <span class="n">load_model_on_cpu</span><span class="p">)</span>
|
|
<span class="n">weights</span> <span class="o">=</span> <span class="n">load_weights_from_hf_model</span><span class="p">(</span><span class="n">hf_model</span><span class="p">,</span> <span class="n">config</span><span class="p">)</span>
|
|
<span class="n">model</span> <span class="o">=</span> <span class="bp">cls</span><span class="p">(</span><span class="n">config</span><span class="p">)</span>
|
|
<span class="n">model</span><span class="o">.</span><span class="n">load</span><span class="p">(</span><span class="n">weights</span><span class="p">)</span>
|
|
<span class="k">return</span> <span class="n">model</span></div>
|
|
|
|
|
|
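# [Editor's note -- usage sketch, not part of this module] A typical call to
# from_hugging_face(); the checkpoint id and the parallel mapping are
# placeholders.
#
#     model = LLaMAForCausalLM.from_hugging_face(
#         "meta-llama/Llama-2-7b-hf",                # HF hub id or local directory
#         dtype="float16",
#         mapping=Mapping(world_size=2, tp_size=2))  # optional 2-way tensor parallel
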
<div class="viewcode-block" id="LLaMAForCausalLM.default_plugin_config">
|
|
<a class="viewcode-back" href="../../../../python-api/tensorrt_llm.models.html#tensorrt_llm.models.LLaMAForCausalLM.default_plugin_config">[docs]</a>
|
|
<span class="k">def</span> <span class="nf">default_plugin_config</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
|
|
<span class="n">plugin_config</span> <span class="o">=</span> <span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="n">default_plugin_config</span><span class="p">(</span><span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
|
|
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">quant_mode</span><span class="o">.</span><span class="n">is_int4_weight_only_per_group</span><span class="p">():</span>
|
|
<span class="n">plugin_config</span><span class="o">.</span><span class="n">weight_only_groupwise_quant_matmul_plugin</span> <span class="o">=</span> <span class="s1">'auto'</span>
|
|
<span class="k">return</span> <span class="n">plugin_config</span></div>
|
|
|
|
|
|
<div class="viewcode-block" id="LLaMAForCausalLM.from_meta_ckpt">
|
|
<a class="viewcode-back" href="../../../../python-api/tensorrt_llm.models.html#tensorrt_llm.models.LLaMAForCausalLM.from_meta_ckpt">[docs]</a>
|
|
<span class="nd">@classmethod</span>
|
|
<span class="k">def</span> <span class="nf">from_meta_ckpt</span><span class="p">(</span><span class="bp">cls</span><span class="p">,</span>
|
|
<span class="n">meta_ckpt_dir</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span>
|
|
<span class="n">dtype</span><span class="p">:</span> <span class="nb">str</span> <span class="o">=</span> <span class="s1">'auto'</span><span class="p">,</span>
|
|
<span class="n">mapping</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Mapping</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
|
|
<span class="n">quant_config</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">QuantConfig</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
|
|
<span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
|
|
<span class="n">config</span> <span class="o">=</span> <span class="n">LLaMAConfig</span><span class="o">.</span><span class="n">from_meta_ckpt</span><span class="p">(</span><span class="n">meta_ckpt_dir</span><span class="p">,</span>
|
|
<span class="n">dtype</span><span class="o">=</span><span class="n">dtype</span><span class="p">,</span>
|
|
<span class="n">mapping</span><span class="o">=</span><span class="n">mapping</span><span class="p">,</span>
|
|
<span class="n">quant_config</span><span class="o">=</span><span class="n">quant_config</span><span class="p">,</span>
|
|
<span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
|
|
|
|
<span class="n">weights</span> <span class="o">=</span> <span class="n">load_weights_from_meta_ckpt</span><span class="p">(</span><span class="n">meta_ckpt_dir</span><span class="p">,</span> <span class="n">config</span><span class="p">)</span>
|
|
|
|
<span class="n">model</span> <span class="o">=</span> <span class="bp">cls</span><span class="p">(</span><span class="n">config</span><span class="p">)</span>
|
|
<span class="n">model</span><span class="o">.</span><span class="n">load</span><span class="p">(</span><span class="n">weights</span><span class="p">)</span>
|
|
<span class="k">return</span> <span class="n">model</span></div>
|
|
|
|
|
|
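# [Editor's note -- usage sketch, not part of this module] Loading the original
# Meta (consolidated *.pth) checkpoint layout; the directory is a placeholder.
#
#     model = LLaMAForCausalLM.from_meta_ckpt("./llama-2-7b-meta", dtype="float16")
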
<div class="viewcode-block" id="LLaMAForCausalLM.quantize">
|
|
<a class="viewcode-back" href="../../../../python-api/tensorrt_llm.models.html#tensorrt_llm.models.LLaMAForCausalLM.quantize">[docs]</a>
|
|
<span class="nd">@classmethod</span>
|
|
<span class="k">def</span> <span class="nf">quantize</span><span class="p">(</span>
|
|
<span class="bp">cls</span><span class="p">,</span>
|
|
<span class="n">hf_model_dir</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span>
|
|
<span class="n">output_dir</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span>
|
|
<span class="n">dtype</span><span class="p">:</span> <span class="nb">str</span> <span class="o">=</span> <span class="s1">'auto'</span><span class="p">,</span>
|
|
<span class="n">mapping</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Mapping</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
|
|
<span class="n">quant_config</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">QuantConfig</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
|
|
<span class="o">*</span><span class="p">,</span>
|
|
<span class="n">device</span><span class="p">:</span> <span class="nb">str</span> <span class="o">=</span> <span class="s1">'cuda'</span><span class="p">,</span>
|
|
<span class="n">calib_dataset</span><span class="p">:</span> <span class="nb">str</span> <span class="o">=</span> <span class="s1">'cnn_dailymail'</span><span class="p">,</span>
|
|
<span class="n">calib_batches</span><span class="p">:</span> <span class="nb">int</span> <span class="o">=</span> <span class="mi">512</span><span class="p">,</span>
|
|
<span class="n">calib_batch_size</span><span class="p">:</span> <span class="nb">int</span> <span class="o">=</span> <span class="mi">1</span><span class="p">,</span>
|
|
<span class="n">calib_max_seq_length</span><span class="p">:</span> <span class="nb">int</span> <span class="o">=</span> <span class="mi">512</span><span class="p">,</span>
|
|
<span class="n">random_seed</span><span class="p">:</span> <span class="nb">int</span> <span class="o">=</span> <span class="mi">1234</span><span class="p">,</span>
|
|
<span class="n">tokenizer_max_seq_length</span><span class="p">:</span> <span class="nb">int</span> <span class="o">=</span> <span class="mi">2048</span><span class="p">,</span>
|
|
<span class="o">**</span><span class="n">kwargs</span><span class="p">,</span>
|
|
<span class="p">):</span>
|
|
<span class="k">if</span> <span class="n">quant_config</span><span class="o">.</span><span class="n">requires_modelopt_quantization</span><span class="p">:</span>
|
|
<span class="c1"># modelopt quantization flow</span>
|
|
<span class="nb">super</span><span class="p">()</span><span class="o">.</span><span class="n">quantize</span><span class="p">(</span><span class="n">hf_model_dir</span><span class="p">,</span>
|
|
<span class="n">output_dir</span><span class="p">,</span>
|
|
<span class="n">dtype</span><span class="o">=</span><span class="n">dtype</span><span class="p">,</span>
|
|
<span class="n">mapping</span><span class="o">=</span><span class="n">mapping</span><span class="p">,</span>
|
|
<span class="n">quant_config</span><span class="o">=</span><span class="n">quant_config</span><span class="p">,</span>
|
|
<span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">,</span>
|
|
<span class="n">calib_dataset</span><span class="o">=</span><span class="n">calib_dataset</span><span class="p">,</span>
|
|
<span class="n">calib_batches</span><span class="o">=</span><span class="n">calib_batches</span><span class="p">,</span>
|
|
<span class="n">calib_batch_size</span><span class="o">=</span><span class="n">calib_batch_size</span><span class="p">,</span>
|
|
<span class="n">calib_max_seq_length</span><span class="o">=</span><span class="n">calib_max_seq_length</span><span class="p">,</span>
|
|
<span class="n">random_seed</span><span class="o">=</span><span class="n">random_seed</span><span class="p">,</span>
|
|
<span class="n">tokenizer_max_seq_length</span><span class="o">=</span><span class="n">tokenizer_max_seq_length</span><span class="p">)</span>
|
|
<span class="k">elif</span> <span class="n">quant_config</span><span class="o">.</span><span class="n">requires_calibration</span><span class="p">:</span>
|
|
<span class="c1"># non-modelopt quantization flow</span>
|
|
<span class="kn">from</span> <span class="nn">.</span> <span class="kn">import</span> <span class="n">convert</span>
|
|
|
|
<span class="n">config</span> <span class="o">=</span> <span class="n">LLaMAConfig</span><span class="o">.</span><span class="n">from_hugging_face</span><span class="p">(</span><span class="n">hf_model_dir</span><span class="p">,</span>
|
|
<span class="n">dtype</span><span class="o">=</span><span class="n">dtype</span><span class="p">,</span>
|
|
<span class="n">mapping</span><span class="o">=</span><span class="n">mapping</span><span class="p">,</span>
|
|
<span class="n">quant_config</span><span class="o">=</span><span class="n">quant_config</span><span class="p">,</span>
|
|
<span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
|
|
<span class="n">trust_remote_code</span> <span class="o">=</span> <span class="n">kwargs</span><span class="o">.</span><span class="n">pop</span><span class="p">(</span><span class="s2">"trust_remote_code"</span><span class="p">,</span> <span class="kc">True</span><span class="p">)</span>
|
|
|
|
<span class="n">convert</span><span class="o">.</span><span class="n">quantize</span><span class="p">(</span><span class="n">hf_model_dir</span><span class="p">,</span>
|
|
<span class="n">output_dir</span><span class="p">,</span>
|
|
<span class="n">config</span><span class="o">=</span><span class="n">config</span><span class="p">,</span>
|
|
<span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">,</span>
|
|
<span class="n">calib_dataset</span><span class="o">=</span><span class="n">calib_dataset</span><span class="p">,</span>
|
|
<span class="n">trust_remote_code</span><span class="o">=</span><span class="n">trust_remote_code</span><span class="p">,</span>
|
|
<span class="n">calib_batches</span><span class="o">=</span><span class="n">calib_batches</span><span class="p">,</span>
|
|
<span class="n">calib_max_seq_length</span><span class="o">=</span><span class="n">calib_max_seq_length</span><span class="p">)</span>
|
|
<span class="k">else</span><span class="p">:</span>
|
|
<span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span>
|
|
<span class="sa">f</span><span class="s2">"The quant_config (</span><span class="si">{</span><span class="n">quant_config</span><span class="si">}</span><span class="s2">) does not require calibration, try </span><span class="si">{</span><span class="bp">cls</span><span class="o">.</span><span class="vm">__name__</span><span class="si">}</span><span class="s2">.from_hugging_face instead."</span>
|
|
<span class="p">)</span></div>
|
|
|
|
|
|
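# [Editor's note -- usage sketch, not part of this module] quantize() calibrates
# the model and writes a TRT-LLM checkpoint to output_dir. The QuantAlgo import
# and the QuantConfig field name are assumptions based on other TensorRT-LLM
# examples; the directories are placeholders.
#
#     from tensorrt_llm.quantization import QuantAlgo
#
#     LLaMAForCausalLM.quantize(
#         "./llama-2-7b-hf",    # HF model directory
#         "./llama-2-7b-sq",    # output checkpoint directory
#         dtype="float16",
#         quant_config=QuantConfig(quant_algo=QuantAlgo.W8A8_SQ_PER_CHANNEL))
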
<div class="viewcode-block" id="LLaMAForCausalLM.use_lora">
|
|
<a class="viewcode-back" href="../../../../python-api/tensorrt_llm.models.html#tensorrt_llm.models.LLaMAForCausalLM.use_lora">[docs]</a>
|
|
<span class="k">def</span> <span class="nf">use_lora</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">lora_config</span><span class="p">:</span> <span class="n">LoraConfig</span><span class="p">):</span>
|
|
<span class="n">use_lora</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">lora_config</span><span class="p">)</span></div>
|
|
</div>
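
# [Editor's note -- usage sketch, not part of this module] Attaching LoRA
# weights at build time; the adapter directory and the LoraConfig field are
# placeholders/assumptions.
#
#     from tensorrt_llm.lora_manager import LoraConfig
#
#     model.use_lora(LoraConfig(lora_dir=["./my-llama-lora"]))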
</pre></div>
           </div>
          </div>
          <footer>
            <hr/>
            <div role="contentinfo">
<div class="footer">
|
|
<p>
|
|
Copyright © 2024 NVIDIA Corporation
|
|
</p>
|
|
<p>
|
|
<a class="Link" href="https://www.nvidia.com/en-us/about-nvidia/privacy-policy/" target="_blank" rel="noopener"
|
|
data-cms-ai="0">Privacy Policy</a> |
|
|
<a class="Link" href="https://www.nvidia.com/en-us/about-nvidia/privacy-center/" target="_blank" rel="noopener"
|
|
data-cms-ai="0">Manage My Privacy</a> |
|
|
<a class="Link" href="https://www.nvidia.com/en-us/preferences/start/" target="_blank" rel="noopener"
|
|
data-cms-ai="0">Do Not Sell or Share My Data</a> |
|
|
<a class="Link" href="https://www.nvidia.com/en-us/about-nvidia/terms-of-service/" target="_blank"
|
|
rel="noopener" data-cms-ai="0">Terms of Service</a> |
|
|
<a class="Link" href="https://www.nvidia.com/en-us/about-nvidia/accessibility/" target="_blank" rel="noopener"
|
|
data-cms-ai="0">Accessibility</a> |
|
|
<a class="Link" href="https://www.nvidia.com/en-us/about-nvidia/company-policies/" target="_blank"
|
|
rel="noopener" data-cms-ai="0">Corporate Policies</a> |
|
|
<a class="Link" href="https://www.nvidia.com/en-us/product-security/" target="_blank" rel="noopener"
|
|
data-cms-ai="0">Product Security</a> |
|
|
<a class="Link" href="https://www.nvidia.com/en-us/contact/" target="_blank" rel="noopener"
|
|
data-cms-ai="0">Contact</a>
|
|
</p>
|
|
</div>
|
|
|
|
|
|
</div>
|
|
|
|
|
|
|
|
</footer>
|
|
</div>
|
|
</div>
|
|
</section>
|
|
</div>
|
|
<script>
|
|
jQuery(function () {
|
|
SphinxRtdTheme.Navigation.enable(true);
|
|
});
|
|
</script>
|
|
|
|
</body>
|
|
</html> |