<section id="kv-cache-reuse">
|
|
<span id="id1"></span><h1>KV cache reuse<a class="headerlink" href="#kv-cache-reuse" title="Link to this heading">#</a></h1>
|
|
<p>This document describes how kv cache pages can be shared and reused by requests that start with the same prompt. This can greatly lower first token latency, the time it takes before the first output token is generated. Many use cases can benefit from this, including multi-turn requests and system prompts.</p>
|
|
<section id="how-to-enable-kv-cache-reuse">
|
|
<h2>How to enable kv cache reuse<a class="headerlink" href="#how-to-enable-kv-cache-reuse" title="Link to this heading">#</a></h2>
|
|
<p>There are two steps to enabling kv cache reuse.</p>
|
|
<ol class="arabic simple">
|
|
<li><p>Model must support it</p></li>
|
|
</ol>
|
|
<p>KV cache reuse requires the model to be built for paged context attention. This is done with <code class="docutils literal notranslate"><span class="pre">trtllm-build</span></code>:</p>
|
|
<p><code class="docutils literal notranslate"><span class="pre">trtllm-build</span> <span class="pre">--use_paged_context_fmha</span> <span class="pre">enable</span></code></p>
|
|
<ol class="arabic simple" start="2">
|
|
<li><p>KV cache reuse is enabled by default in KVCacheManager</p></li>
|
|
</ol>
|
|
<p>If you are running gptManagerBenchmark application, you can disable kv cache reuse with a command-line switch:</p>
|
|
<p><code class="docutils literal notranslate"><span class="pre">gptManagerBenchmark</span> <span class="pre">--enable_kv_cache_reuse</span> <span class="pre">enable=false</span></code></p>
|
|
<p>If you are running a Triton server, you can enable kv cache reuse with a parameter:</p>
|
|
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">parameters</span><span class="p">:</span> <span class="p">{</span>
|
|
<span class="n">key</span><span class="p">:</span> <span class="s2">"enable_kv_cache_reuse"</span>
|
|
<span class="n">value</span><span class="p">:</span> <span class="p">{</span>
|
|
<span class="n">string_value</span><span class="p">:</span> <span class="s2">"true"</span>
|
|
<span class="p">}</span>
|
|
<span class="p">}</span>
|
|
</pre></div>
|
|
</div>
|
|
<p>If you are writing your own application using Executor API, you can enable kv cache reuse by including <code class="docutils literal notranslate"><span class="pre">enableBlockReuse=true</span></code> when you create the <code class="docutils literal notranslate"><span class="pre">KvCacheConfig</span></code> object. Note that this is the default, if you wish to disable kv cache reuse, pass <code class="docutils literal notranslate"><span class="pre">enableBlockReuse=false</span></code> instead.</p>
GptSession is scheduled to be deprecated and does not support KV cache reuse.

### Enable KV cache reuse for p-tuning

When using p-tuning, different requests may use the same fake input ids (that is, prompt ids whose values are larger than the vocabulary size). This can lead to incorrect KV cache reuse, since TRT-LLM cannot distinguish these requests by their input ids alone. To enable KV cache reuse for p-tuning correctly, users should provide an extra id (uint64) for each input id. Extra ids for normal input ids (that is, text token ids) should always be 0, while fake input ids should have extra ids that are larger than 0. Requests using the same prompt embeddings should use the same extra ids, while requests using different prompt embeddings should use different extra ids.

Example: Assume the vocabulary size is 100, which means normal text token ids are in the range [0, 99] and prompt ids start from 100.
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="c1"># Request 1 uses prompt embedding table 1</span>
|
|
<span class="n">input_ids</span> <span class="o">=</span> <span class="p">[</span><span class="mi">100</span><span class="p">,</span> <span class="mi">101</span><span class="p">,</span> <span class="mi">102</span><span class="p">,</span> <span class="mi">103</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">]</span>
|
|
<span class="n">extra_ids</span> <span class="o">=</span> <span class="p">[</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">]</span>
|
|
|
|
<span class="c1"># Request 2 uses prompt embedding table 2</span>
|
|
<span class="n">input_ids</span> <span class="o">=</span> <span class="p">[</span><span class="mi">100</span><span class="p">,</span> <span class="mi">101</span><span class="p">,</span> <span class="mi">102</span><span class="p">,</span> <span class="mi">103</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">]</span>
|
|
<span class="n">extra_ids</span> <span class="o">=</span> <span class="p">[</span><span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">]</span>
|
|
|
|
<span class="c1"># Request 3 uses prompt embedding table 1 and different text tokens</span>
|
|
<span class="n">input_ids</span> <span class="o">=</span> <span class="p">[</span><span class="mi">100</span><span class="p">,</span> <span class="mi">101</span><span class="p">,</span> <span class="mi">102</span><span class="p">,</span> <span class="mi">103</span><span class="p">,</span> <span class="mi">5</span><span class="p">,</span> <span class="mi">6</span><span class="p">,</span> <span class="mi">7</span><span class="p">,</span> <span class="mi">8</span><span class="p">]</span>
|
|
<span class="n">extra_ids</span> <span class="o">=</span> <span class="p">[</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">]</span>
|
|
</pre></div>
|
|
</div>
## Performance expectations

KV cache state can be reused when two requests start with the same partial prompt. This reduces first token latency, the time it takes until the first output token is generated. Bigger savings are realized when the shared prompt is longer relative to the overall prompt length. The biggest saving is realized when two identical requests are run back-to-back, in which case the latency for the first output token approaches the latency for subsequent tokens.

## Situations that can prevent KV cache reuse

There are a few pitfalls that can prevent KV cache reuse even when it seems possible. KV cache state only becomes reusable after the request that computed it terminates. If you have a shared system prompt, the first request will compute KV cache state for the system prompt and the second request will reuse it, but only if the second request launches after the first has completed. If you run with a large batch size, it is likely that many requests sharing a common system prompt will be launched before the first request has terminated. No reuse will occur until one of the requests terminates; subsequently scheduled requests can then reuse its blocks.

KV cache state for system prompts remains reusable until memory is needed for launching a new request or propagating an existing one. When this happens, reusable blocks are evicted on an LRU basis. System prompts that are frequently used have a better chance of remaining reusable, but there is no guarantee, since launching new requests takes priority over possible reuse. Running with a larger batch size or longer output sequence lengths, for example, reduces the probability of KV cache blocks being reused, since it increases memory needs.

KV cache state is stored in blocks, and each block holds multiple tokens. Only full blocks can be shared by multiple requests, so the block size matters. The block size is a trade-off: a larger block size may improve the efficiency of compute kernels, but it reduces the likelihood of KV cache state reuse. The block size defaults to 128 tokens. It can be changed when the model is built with the `trtllm-build` command, for example

```bash
trtllm-build --tokens_per_block 32 ...
```

will create a model where one KV cache block can hold 32 tokens. Note that `tokens_per_block` must be a power of 2.
## Offloading to host memory

Offloading to host memory increases the likelihood of KV cache reuse. Reusable blocks whose memory is needed for higher priority tasks, like propagating an already running request, are copied to a buffer in host memory instead of being evicted. This greatly extends the amount of memory available for reuse, allowing blocks to remain reusable much longer. On the other hand, offloading blocks (and onboarding them again when a block is reused) has some cost, since the blocks must be copied between GPU and host memory. This cost is negligible on Grace Hopper machines, and small enough to yield a net benefit for many use cases on x86 machines with Hopper GPUs. Offloading is unlikely to yield benefits on older architectures because of the (relatively) slow link between GPU and host memory.

If you are running gptManagerBenchmark, you can enable offloading with a command-line switch. For example,

```bash
gptManagerBenchmark --kv_host_cache_bytes 45000000000
```

will create a 45 GB offloading buffer in host memory. Note that this buffer is pinned memory; allocating a lot of pinned memory on x86 machines can take a substantial amount of time (tens of seconds). This is a one-time cost.

If you are running a Triton server, you can enable offloading to host memory with the `kv_cache_host_memory_bytes` parameter. For example, adding this to your model config file will create a 45 GB offloading buffer in host memory:

```
parameters: {
  key: "kv_cache_host_memory_bytes"
  value: {
    string_value: "45000000000"
  }
}
```

If you are writing your own application using the Executor API, you can enable offloading to host memory by passing `hostCacheSize=45000000000` when you create the `KvCacheConfig` object. This will create a 45 GB offloading buffer in host memory.
GptSession is scheduled to be deprecated and does not support KV cache block offloading.