Merge branch 'main' into fix_spec_gate

Signed-off-by: Zheyu Fu <zheyuf@nvidia.com>
Zheyu Fu 2025-12-19 17:24:31 -08:00 committed by GitHub
commit ec7d5ef574
339 changed files with 6124 additions and 1952 deletions

.github/CODEOWNERS (13 changes)

@@ -1,5 +1,18 @@
# This file defines code ownership rules for the repository.
## TensorRT-LLM QA
### Integration Tests
/tests/integration/test_lists/qa @NVIDIA/trt-llm-qa
/tests/integration/defs/examples/test_ray.py @NVIDIA/trt-llm-qa-function
/tests/integration/defs/examples/test_redrafter.py @NVIDIA/trt-llm-qa-function
/tests/integration/defs/accuracy @NVIDIA/trt-llm-qa-function
/tests/integration/defs/stress_test @NVIDIA/trt-llm-qa-function
/tests/integration/defs/triton_server @NVIDIA/trt-llm-qa-function
/tests/integration/defs/test_e2e.py @NVIDIA/trt-llm-qa-function
/tests/integration/defs/disaggregated @NVIDIA/trt-llm-qa-serving
/tests/integration/defs/sysinfo @NVIDIA/trt-llm-qa-perf
/tests/integration/defs/perf @NVIDIA/trt-llm-qa-perf
/tests/integration/defs/perf/disagg @NVIDIA/trt-llm-qa-serving
## TensorRT-LLM Infra
### CI

.gitignore (2 changes)

@@ -56,7 +56,7 @@ tensorrt_llm/scripts
docs/source/**/*.rst
!docs/source/examples/index.rst
!docs/source/deployment-guide/config_table.rst
!docs/source/deployment-guide/note_sections.rst
!docs/source/_includes/note_sections.rst
*.swp
# Testing


@@ -1468,7 +1468,8 @@ public:
DEFAULT = 0,
MPI = 1,
UCX = 2,
NIXL = 3
NIXL = 3,
MOONCAKE = 4
};
explicit CacheTransceiverConfig(std::optional<BackendType> backendType = std::nullopt,
std::optional<size_t> maxNumTokens = std::nullopt, std::optional<int> kvTransferTimeoutMs = std::nullopt,
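As a usage note (not part of the patch): with the new enumerator, the backend can also be requested explicitly through this config rather than via environment variables. A minimal sketch, assuming CacheTransceiverConfig is reachable through the executor API header and that the remaining (elided) constructor parameters keep their defaults.

#include "tensorrt_llm/executor/executor.h" // assumed location of CacheTransceiverConfig

namespace tle = tensorrt_llm::executor;

tle::CacheTransceiverConfig makeMooncakeCacheTransceiverConfig()
{
    // Only the backend type is set; the other defaulted parameters stay std::nullopt.
    return tle::CacheTransceiverConfig(tle::CacheTransceiverConfig::BackendType::MOONCAKE);
}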


@@ -391,6 +391,14 @@ template <typename... Args>
"libtensorrt_llm_nixl_wrapper.so", "createNixlTransferAgent");
return func(std::forward<Args>(args)...);
}
if (backend == "mooncake")
{
auto& loader = DynLibLoader::getInstance();
using CreateMooncakeFuncType = std::unique_ptr<BaseTransferAgent> (*)(BaseAgentConfig const*);
auto* func = loader.getFunctionPointer<CreateMooncakeFuncType>(
"libtensorrt_llm_mooncake_wrapper.so", "createMooncakeTransferAgent");
return func(std::forward<Args>(args)...);
}
TLLM_THROW("Unknown backend name.");
}
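For orientation, a caller-side sketch of how the new branch is reached. It assumes makeTransferAgent is declared in the transferAgent.h header included elsewhere in this diff, and the two-field BaseAgentConfig initializer mirrors the one used by the tests below; field names are not assumed.

#include "tensorrt_llm/executor/transferAgent.h" // header used by the Mooncake wrapper below

#include <memory>

using namespace tensorrt_llm::executor::kv_cache;

std::unique_ptr<BaseTransferAgent> createMooncakeAgentViaFactory()
{
    // "mooncake" now resolves createMooncakeTransferAgent from
    // libtensorrt_llm_mooncake_wrapper.so, exactly like the "nixl" branch above
    // resolves createNixlTransferAgent.
    BaseAgentConfig config{"agent0", true};
    return makeTransferAgent("mooncake", &config);
}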


@@ -159,6 +159,10 @@ if(NIXL_ROOT)
set(NIXL_WRAPPER_TARGET tensorrt_llm_nixl_wrapper)
endif()
if(MOONCAKE_ROOT)
set(MOONCAKE_WRAPPER_TARGET tensorrt_llm_mooncake_wrapper)
endif()
add_subdirectory(executor)
find_package(Threads REQUIRED)
@@ -272,6 +276,11 @@ if(TARGET ${NIXL_WRAPPER_TARGET})
add_dependencies(${SHARED_TARGET} ${NIXL_WRAPPER_TARGET})
endif()
if(TARGET ${MOONCAKE_WRAPPER_TARGET})
target_link_libraries(${MOONCAKE_WRAPPER_TARGET} INTERFACE ${SHARED_TARGET})
add_dependencies(${SHARED_TARGET} ${MOONCAKE_WRAPPER_TARGET})
endif()
if(NOT WIN32)
# Load libraries at $PREFIX/lib from
# $PREFIX/lib/python3.12/site-packages/tensorrt_llm/libs


@@ -81,6 +81,11 @@ std::unique_ptr<BaseCacheTransceiver> CacheTransceiverFactory::createCacheTransc
backendType = executor::CacheTransceiverConfig::BackendType::NIXL;
TLLM_LOG_INFO("Enable NIXL KV cache transport.");
}
else if (common::getEnvUseMooncakeKvCache())
{
backendType = executor::CacheTransceiverConfig::BackendType::MOONCAKE;
TLLM_LOG_INFO("Enable MOONCAKE KV cache transport.");
}
else if (common::getEnvUseMPIKvCache())
{
backendType = executor::CacheTransceiverConfig::BackendType::MPI;
@@ -203,9 +208,15 @@ CacheTransceiver::CacheTransceiver(kv_cache_manager::BaseKVCacheManager* cacheMa
else if (backendType.value() == executor::CacheTransceiverConfig::BackendType::NIXL)
{
mManager = std::make_unique<tensorrt_llm::executor::kv_cache::AgentConnectionManager>(
mCacheTransBufferManagerPtrs, *mCacheState);
mCacheTransBufferManagerPtrs, *mCacheState, "nixl");
TLLM_LOG_INFO("NIXL Connection Manager created");
}
else if (backendType.value() == executor::CacheTransceiverConfig::BackendType::MOONCAKE)
{
mManager = std::make_unique<tensorrt_llm::executor::kv_cache::AgentConnectionManager>(
mCacheTransBufferManagerPtrs, *mCacheState, "mooncake");
TLLM_LOG_INFO("MOONCAKE Connection Manager created");
}
else if (backendType.value() == executor::CacheTransceiverConfig::BackendType::MPI)
{
mMpiWorldComm = std::addressof(tensorrt_llm::mpi::MpiComm::world());


@@ -281,6 +281,12 @@ bool getEnvUseNixlKvCache()
return useNixlKvCache;
}
bool getEnvUseMooncakeKvCache()
{
static bool const useMooncakeKvCache = getBoolEnv("TRTLLM_USE_MOONCAKE_KVCACHE");
return useMooncakeKvCache;
}
bool getEnvUseRoundRobinBlockDistForCP()
{
static bool const useRoundRobinBlockDistForCP = getBoolEnv("TRTLLM_USE_ROUND_ROBIN_BLOCK_DIST_FOR_CP");
@@ -343,6 +349,23 @@ std::string getEnvNixlBackend()
return nixlBackend;
}
std::string getEnvMooncakeInterface()
{
static std::once_flag flag;
static std::string mooncakeInterface;
std::call_once(flag,
[&]()
{
char const* mooncake_interface = std::getenv("TRTLLM_MOONCAKE_INTERFACE");
if (mooncake_interface)
{
mooncakeInterface = mooncake_interface;
}
});
return mooncakeInterface;
}
bool getEnvDisaggLayerwise()
{
static bool const disaggLayerwise = getBoolEnv("TRTLLM_DISAGG_LAYERWISE");
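Taken together, the two helpers added above mean the Mooncake path is driven entirely by environment variables. A minimal sketch of setting them programmatically, the same way the agentCommTest further down calls setenv; the interface name is a placeholder.

#include <cstdlib> // POSIX setenv

void configureMooncakeEnv()
{
    // Read by getEnvUseMooncakeKvCache(): selects the MOONCAKE backend when none
    // is configured explicitly.
    setenv("TRTLLM_USE_MOONCAKE_KVCACHE", "1", /*overwrite=*/1);
    // Read by getEnvMooncakeInterface(): the NIC whose address is advertised as
    // the local segment name ("eth0" is illustrative).
    setenv("TRTLLM_MOONCAKE_INTERFACE", "eth0", /*overwrite=*/1);
}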


@@ -83,8 +83,11 @@ inline void launchWithPdlWhenEnabled(char const* name, KernelFn kernelFn, dim3 g
bool getEnvUseUCXKvCache();
bool getEnvUseMPIKvCache();
bool getEnvUseNixlKvCache();
bool getEnvUseMooncakeKvCache();
bool getEnvUseRoundRobinBlockDistForCP();
std::string getEnvUCXInterface();
@@ -93,6 +96,8 @@ std::string getEnvNixlInterface();
std::string getEnvNixlBackend();
std::string getEnvMooncakeInterface();
bool getEnvDisaggLayerwise();
bool getEnvParallelCacheSend();


@@ -0,0 +1,226 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ipUtils.h"
#include "tensorrt_llm/common/logger.h"
#include <arpa/inet.h>
#include <dirent.h>
#include <fcntl.h>
#include <ifaddrs.h>
#include <net/if.h>
#include <netdb.h>
#include <netinet/in.h>
#include <cstring>
#include <string>
#include <sys/socket.h>
#include <unistd.h>
TRTLLM_NAMESPACE_BEGIN
namespace common
{
std::string getLocalIpByNic(std::string const& interface, int rank)
{
struct ifaddrs* ifaddr = nullptr;
if (getifaddrs(&ifaddr) == -1)
{
TLLM_LOG_ERROR(rank,
"getLocalIpByNic: Can't get local ip from NIC Interface. Please check whether corresponding INTERFACE is "
"set "
"correctly.");
return std::string{};
}
for (struct ifaddrs* ifa = ifaddr; ifa != nullptr; ifa = ifa->ifa_next)
{
if (ifa->ifa_addr == nullptr)
{
continue;
}
if (ifa->ifa_name == interface)
{
if (ifa->ifa_addr->sa_family == AF_INET)
{
char ip[INET_ADDRSTRLEN]{};
void* addr = &((reinterpret_cast<struct sockaddr_in*>(ifa->ifa_addr))->sin_addr);
if ((inet_ntop(AF_INET, addr, ip, sizeof(ip)) != nullptr) && std::strcmp(ip, "0.0.0.0") != 0)
{
freeifaddrs(ifaddr);
return std::string(ip);
}
}
else if (ifa->ifa_addr->sa_family == AF_INET6)
{
char ip[INET6_ADDRSTRLEN]{};
void* addr = &((reinterpret_cast<struct sockaddr_in6*>(ifa->ifa_addr))->sin6_addr);
if ((inet_ntop(AF_INET6, addr, ip, sizeof(ip)) != nullptr) && std::strncmp(ip, "fe80::", 6) != 0
&& std::strcmp(ip, "::1") != 0)
{
freeifaddrs(ifaddr);
return std::string(ip);
}
}
}
}
freeifaddrs(ifaddr);
TLLM_LOG_ERROR(
rank, "Can't get local ip from NIC Interface. Please check whether corresponding INTERFACE is set correctly.");
return std::string{};
}
std::string getLocalIpByHostname(int rank)
{
char hostname[256]{};
if (gethostname(hostname, sizeof(hostname)) == -1)
{
TLLM_LOG_ERROR(rank, "getLocalIpByHostname: Can't get hostname");
return std::string{};
}
struct addrinfo hints = {};
hints.ai_family = AF_UNSPEC;
hints.ai_socktype = SOCK_STREAM;
hints.ai_flags = AI_CANONNAME;
struct addrinfo* res = nullptr;
if (getaddrinfo(hostname, nullptr, &hints, &res) != 0)
{
TLLM_LOG_WARNING(rank, "getLocalIpByHostname: Can't get address info for hostname");
return std::string{};
}
for (struct addrinfo* p = res; p != nullptr; p = p->ai_next)
{
if (p->ai_family == AF_INET)
{ // IPv4
char ip[INET_ADDRSTRLEN]{};
struct sockaddr_in* ipv4 = reinterpret_cast<struct sockaddr_in*>(p->ai_addr);
void* addr = &(ipv4->sin_addr);
if ((inet_ntop(AF_INET, addr, ip, sizeof(ip)) != nullptr) && std::strcmp(ip, "127.0.0.1") != 0
&& std::strcmp(ip, "0.0.0.0") != 0)
{
freeaddrinfo(res);
return std::string(ip);
}
}
else if (p->ai_family == AF_INET6)
{ // IPv6
char ip[INET6_ADDRSTRLEN]{};
struct sockaddr_in6* ipv6 = reinterpret_cast<struct sockaddr_in6*>(p->ai_addr);
void* addr = &(ipv6->sin6_addr);
if ((inet_ntop(AF_INET6, addr, ip, sizeof(ip)) != nullptr) && std::strncmp(ip, "fe80::", 6) != 0
&& std::strcmp(ip, "::1") != 0)
{
freeaddrinfo(res);
return std::string(ip);
}
}
}
freeaddrinfo(res);
TLLM_LOG_WARNING(rank, "getLocalIpByHostname: Can't get local ip from hostname");
return std::string{};
}
std::string getLocalIpByRemoteOrHostName(int rank)
{
// Try IPv4
struct sockaddr_in addr
{
};
addr.sin_family = AF_INET;
addr.sin_port = htons(80);
// use Google's public DNS server to find a local IP that is reachable from remote hosts
char const* dns_ip_v4 = "8.8.8.8";
inet_pton(AF_INET, dns_ip_v4, &addr.sin_addr);
int sock = socket(AF_INET, SOCK_DGRAM, 0);
if (sock != -1)
{
if (connect(sock, reinterpret_cast<struct sockaddr*>(&addr), sizeof(addr)) != -1)
{
socklen_t addr_len = sizeof(addr);
if (getsockname(sock, reinterpret_cast<struct sockaddr*>(&addr), &addr_len) != -1)
{
char ip[INET_ADDRSTRLEN]{};
inet_ntop(AF_INET, &addr.sin_addr, ip, sizeof(ip));
close(sock);
return std::string(ip);
}
}
close(sock);
}
// Try IPv6
struct sockaddr_in6 addr6
{
};
addr6.sin6_family = AF_INET6;
addr6.sin6_port = htons(80);
// use Google's public DNS server (IPv6)
char const* dns_ipv6 = "2001:4860:4860::8888";
inet_pton(AF_INET6, dns_ipv6, &addr6.sin6_addr);
sock = socket(AF_INET6, SOCK_DGRAM, 0);
if (sock != -1)
{
if (connect(sock, reinterpret_cast<struct sockaddr*>(&addr6), sizeof(addr6)) != -1)
{
socklen_t addr_len = sizeof(addr6);
if (getsockname(sock, reinterpret_cast<struct sockaddr*>(&addr6), &addr_len) != -1)
{
char ip[INET6_ADDRSTRLEN]{};
inet_ntop(AF_INET6, &addr6.sin6_addr, ip, sizeof(ip));
close(sock);
return std::string(ip);
}
}
close(sock);
}
// Try hostname
return getLocalIpByHostname(rank);
}
std::string getLocalIp(std::string interface, int rank)
{
std::string localIP = {};
if (!interface.empty())
{
localIP = getLocalIpByNic(interface, rank);
}
if (localIP.empty())
{
localIP = getLocalIpByRemoteOrHostName(rank);
}
// check whether the localIP is valid
if (localIP.empty())
{
TLLM_THROW("getLocalIp: Can't get local ip");
}
return localIP;
}
} // namespace common
TRTLLM_NAMESPACE_END
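A short usage sketch of the helper above, mirroring how the Mooncake transfer agent later in this diff picks its segment address: an explicit TLLM_MOONCAKE_IP_ADDR wins, otherwise the NIC named by TRTLLM_MOONCAKE_INTERFACE is queried, and getLocalIp() itself falls back to the UDP-connect and hostname probes, throwing only if nothing is usable.

#include "tensorrt_llm/common/envUtils.h"
#include "tensorrt_llm/common/ipUtils.h"

#include <cstdlib>
#include <string>

std::string resolveSegmentName(int rank)
{
    // Explicit override, as in MooncakeTransferAgent's constructor below.
    if (char const* fixed = std::getenv("TLLM_MOONCAKE_IP_ADDR"))
    {
        return std::string(fixed);
    }
    // An empty interface name makes getLocalIp() skip the NIC lookup and go
    // straight to the remote-socket / hostname fallbacks.
    return tensorrt_llm::common::getLocalIp(tensorrt_llm::common::getEnvMooncakeInterface(), rank);
}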


@@ -0,0 +1,28 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "tensorrt_llm/common/config.h"
#include <string>
TRTLLM_NAMESPACE_BEGIN
namespace common
{
std::string getLocalIp(std::string interface, int rank);
} // namespace common
TRTLLM_NAMESPACE_END


@@ -91,3 +91,4 @@ target_compile_definitions(${EXECUTOR_STATIC_TARGET}
add_subdirectory(cache_transmission/ucx_utils)
add_subdirectory(cache_transmission/nixl_utils)
add_subdirectory(cache_transmission/mooncake_utils)


@@ -236,7 +236,7 @@ bool AgentConnection::recvReadySignal(DataContext const& ctx) const
AgentConnectionManager::AgentConnectionManager(
std::vector<batch_manager::kv_cache_manager::CacheTransBufferManager*> cacheTransBufferManagers,
CacheState cacheState)
CacheState cacheState, std::string const& backendType)
: mCacheState(std::move(cacheState))
, mCacheTransBufferManagers(std::move(cacheTransBufferManagers))
, mRegMemDescs(MemoryType::kVRAM, {})
@@ -247,7 +247,7 @@ AgentConnectionManager::AgentConnectionManager(
mAgentName = genUniqueAgentName();
// Create Agent
BaseAgentConfig config{mAgentName, true};
m_Agent = makeTransferAgent("nixl", &config);
m_Agent = makeTransferAgent(backendType, &config);
TLLM_CHECK(!mCacheTransBufferManagers.empty());
std::vector<MemoryDesc> memDescs;
for (auto* cacheTransBufferManager : mCacheTransBufferManagers)


@@ -277,7 +277,7 @@ class AgentConnectionManager : public ConnectionManager
public:
AgentConnectionManager(
std::vector<batch_manager::kv_cache_manager::CacheTransBufferManager*> cacheTransBufferManagers,
CacheState cacheState);
CacheState cacheState, std::string const& backendType);
~AgentConnectionManager();
AgentConnection* recvConnect(DataContext const& ctx, void* data, size_t size) override;
[[nodiscard]] std::vector<Connection const*> getConnections(CommState const& state) override;


@@ -0,0 +1,40 @@
# SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION &
# AFFILIATES. All rights reserved. SPDX-License-Identifier: NVIDIA TensorRT
# Source Code License Agreement
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this material and related documentation without an express
# license agreement from NVIDIA CORPORATION or its affiliates is strictly
# prohibited.
# MOONCAKE is not supported on Rocky8 for now
set(IS_ROCKY8 FALSE)
if(EXISTS "/etc/redhat-release")
set(IS_ROCKY8 TRUE)
endif()
if(MOONCAKE_ROOT AND NOT IS_ROCKY8)
find_library(TRANSFER_ENGINE_LIB transfer_engine ${MOONCAKE_ROOT}/lib)
find_path(TRANSFER_ENGINE_INCLUDE_DIR transfer_engine_c.h
${MOONCAKE_ROOT}/include)
message(STATUS "Find transfer engine results:")
message(STATUS " TRANSFER_ENGINE_LIB = ${TRANSFER_ENGINE_LIB}")
message(
STATUS " TRANSFER_ENGINE_INCLUDE_DIR = ${TRANSFER_ENGINE_INCLUDE_DIR}")
if(TRANSFER_ENGINE_LIB AND TRANSFER_ENGINE_INCLUDE_DIR)
set(MOONCAKE_WRAPPER_TARGET "tensorrt_llm_mooncake_wrapper")
add_library(${MOONCAKE_WRAPPER_TARGET} SHARED transferAgent.cpp)
target_compile_options(${MOONCAKE_WRAPPER_TARGET} PRIVATE -Wno-error)
target_include_directories(${MOONCAKE_WRAPPER_TARGET}
PRIVATE ${TRANSFER_ENGINE_INCLUDE_DIR})
target_link_libraries(${MOONCAKE_WRAPPER_TARGET}
PRIVATE ${TRANSFER_ENGINE_LIB} CUDA::cudart)
endif()
endif()


@@ -0,0 +1,546 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "tensorrt_llm/executor/cache_transmission/mooncake_utils/transferAgent.h"
#include "tensorrt_llm/common/envUtils.h"
#include "tensorrt_llm/common/ipUtils.h"
#include "tensorrt_llm/common/logger.h"
#include "tensorrt_llm/executor/transferAgent.h"
#include "tensorrt_llm/runtime/utils/mpiUtils.h"
#include <algorithm>
#include <arpa/inet.h>
#include <array>
#include <cctype>
#include <chrono>
#include <dirent.h>
#include <fcntl.h>
#include <ifaddrs.h>
#include <net/if.h>
#include <netdb.h>
#include <netinet/in.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <thread>
#include <unistd.h>
namespace tensorrt_llm::executor::kv_cache
{
MooncakeTransferStatus::MooncakeTransferStatus(transfer_engine_t engine, uint64_t batchId, size_t requestCount)
: mEngine{engine}
, mBatchId{batchId}
, mRequestCount{requestCount}
{
TLLM_CHECK(mEngine);
}
void MooncakeTransferStatus::wait() const
{
while (!isCompleted())
{
std::this_thread::sleep_for(std::chrono::milliseconds(1));
}
}
[[nodiscard]] bool MooncakeTransferStatus::isCompleted() const
{
if (mBatchFreed)
{
return true;
}
bool has_failed = false;
for (size_t index = 0; index < mRequestCount; ++index)
{
transfer_status_t status;
int rc = getTransferStatus(mEngine, mBatchId, index, &status);
if (rc || status.status == STATUS_FAILED)
{
has_failed = true;
if (rc)
{
TLLM_LOG_ERROR(
"Failed to get transfer status for batch %lu, task %zu: error code %d", mBatchId, index, rc);
}
else
{
TLLM_LOG_ERROR("Transfer failed for batch %lu, task %zu: status %d", mBatchId, index, status.status);
}
}
else if (status.status == STATUS_PENDING || status.status == STATUS_WAITING)
{
TLLM_LOG_DEBUG("Transfer is pending for batch %lu, task %zu", mBatchId, index);
return false;
}
}
if (!has_failed)
{
// Each batch ID is allocated with a fixed batch size and cannot carry more
// requests than that size, so free the batch ID here to work around the issue
// where the same batch ID could otherwise be reused to post multiple transfers.
freeBatchID(mEngine, mBatchId);
mBatchFreed = true;
TLLM_LOG_DEBUG("Batch ID %lu freed, future calls will return true directly", mBatchId);
}
// Currently, failed and completed transfers cannot be distinguished from the return value alone.
TLLM_LOG_DEBUG("Transfer is completed for batch %lu", mBatchId);
return true;
}
const std::string MooncakeBase64Helper::STANDARD_CHARS
= "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz"
"0123456789+/";
std::string MooncakeBase64Helper::encode(std::vector<uint8_t> const& data)
{
return encodeInternal(data, STANDARD_CHARS);
}
std::string MooncakeBase64Helper::encode(std::string const& data)
{
std::vector<uint8_t> vec(data.begin(), data.end());
return encode(vec);
}
std::vector<uint8_t> MooncakeBase64Helper::decode(std::string const& encoded)
{
return decodeInternal(encoded, STANDARD_CHARS);
}
std::string MooncakeBase64Helper::decodeToString(std::string const& encoded)
{
auto vec = decode(encoded);
return std::string(vec.begin(), vec.end());
}
std::string MooncakeBase64Helper::encodeInternal(std::vector<uint8_t> const& data, std::string const& chars)
{
std::string encoded;
size_t i = 0;
size_t j = 0;
std::array<uint8_t, 3> charArray3{};
std::array<uint8_t, 4> charArray4{};
size_t dataLen = data.size();
uint8_t const* bytes = data.data();
while (dataLen--)
{
charArray3[i++] = *(bytes++);
if (i == 3)
{
charArray4[0] = (charArray3[0] & 0xfc) >> 2;
charArray4[1] = ((charArray3[0] & 0x03) << 4) + ((charArray3[1] & 0xf0) >> 4);
charArray4[2] = ((charArray3[1] & 0x0f) << 2) + ((charArray3[2] & 0xc0) >> 6);
charArray4[3] = charArray3[2] & 0x3f;
for (i = 0; i < 4; i++)
{
encoded += chars[charArray4[i]];
}
i = 0;
}
}
if (i > 0)
{
for (j = i; j < 3; j++)
{
charArray3[j] = '\0';
}
charArray4[0] = (charArray3[0] & 0xfc) >> 2;
charArray4[1] = ((charArray3[0] & 0x03) << 4) + ((charArray3[1] & 0xf0) >> 4);
charArray4[2] = ((charArray3[1] & 0x0f) << 2) + ((charArray3[2] & 0xc0) >> 6);
charArray4[3] = charArray3[2] & 0x3f;
for (j = 0; j < i + 1; j++)
{
encoded += chars[charArray4[j]];
}
while (i++ < 3)
{
encoded += '=';
}
}
return encoded;
}
std::vector<uint8_t> MooncakeBase64Helper::decodeInternal(std::string const& encoded, std::string const& chars)
{
size_t encodedLen = encoded.size();
size_t i = 0;
size_t j = 0;
size_t in_ = 0;
std::array<uint8_t, 3> charArray3{};
std::array<uint8_t, 4> charArray4{};
std::vector<uint8_t> decoded;
std::string cleanEncoded;
for (char c : encoded)
{
if (!isWhitespace(c))
{
cleanEncoded += c;
}
}
encodedLen = cleanEncoded.size();
while (encodedLen-- && cleanEncoded[in_] != '=' && isBase64(cleanEncoded[in_], chars))
{
charArray4[i++] = cleanEncoded[in_];
in_++;
if (i == 4)
{
for (i = 0; i < 4; i++)
{
charArray4[i] = chars.find(charArray4[i]);
}
charArray3[0] = (charArray4[0] << 2) + ((charArray4[1] & 0x30) >> 4);
charArray3[1] = ((charArray4[1] & 0xf) << 4) + ((charArray4[2] & 0x3c) >> 2);
charArray3[2] = ((charArray4[2] & 0x3) << 6) + charArray4[3];
for (i = 0; i < 3; i++)
{
decoded.push_back(charArray3[i]);
}
i = 0;
}
}
if (i > 0)
{
for (j = i; j < 4; j++)
{
charArray4[j] = 0;
}
for (j = 0; j < 4; j++)
{
charArray4[j] = chars.find(charArray4[j]);
}
charArray3[0] = (charArray4[0] << 2) + ((charArray4[1] & 0x30) >> 4);
charArray3[1] = ((charArray4[1] & 0xf) << 4) + ((charArray4[2] & 0x3c) >> 2);
charArray3[2] = ((charArray4[2] & 0x3) << 6) + charArray4[3];
for (j = 0; j < i - 1; j++)
{
decoded.push_back(charArray3[j]);
}
}
return decoded;
}
bool MooncakeBase64Helper::isBase64(uint8_t c, std::string const& chars)
{
return (isalnum(c) || (c == chars[62]) || (c == chars[63]));
}
bool MooncakeBase64Helper::isWhitespace(uint8_t c)
{
return (c == ' ' || c == '\n' || c == '\r' || c == '\t');
}
MooncakeTransferAgent::MooncakeTransferAgent(BaseAgentConfig const& config)
{
mLocalAgentName = config.mName;
std::string segmentName = "127.0.0.1";
if (getenv("TLLM_MOONCAKE_IP_ADDR"))
{
segmentName = std::string(getenv("TLLM_MOONCAKE_IP_ADDR"));
}
else
{
auto ip = common::getLocalIp(common::getEnvMooncakeInterface(), mpi::MpiComm::session().getRank());
if (!ip.empty())
segmentName = ip;
}
mEngine = createTransferEngine("P2PHANDSHAKE", segmentName.c_str(), "", 0, true);
}
void MooncakeTransferAgent::registerMemory(RegisterDescs const& descs)
{
TLLM_LOG_DEBUG("MooncakeTransferAgent::registerMemory");
std::lock_guard<std::mutex> lock(mMutex);
for (auto const& desc : descs.getDescs())
{
auto it = mMemRegInfo.find(desc.getAddr());
if (it != mMemRegInfo.end())
{
it->second->addRef();
continue;
}
int err = registerLocalMemory(mEngine, reinterpret_cast<void*>(desc.getAddr()), desc.getLen(), "*", 1);
TLLM_CHECK_WITH_INFO(err == 0, "registerLocalMemory failed, addr: %p, len: %lu",
reinterpret_cast<void*>(desc.getAddr()), desc.getLen());
auto mooncakeDesc = std::make_shared<MooncakeMemoryDesc>(desc);
mMemRegInfo[desc.getAddr()] = std::move(mooncakeDesc);
}
}
void MooncakeTransferAgent::deregisterMemory(RegisterDescs const& descs)
{
TLLM_LOG_DEBUG("MooncakeTransferAgent::deregisterMemory");
std::lock_guard<std::mutex> lock(mMutex);
for (auto const& desc : descs.getDescs())
{
auto it = mMemRegInfo.find(desc.getAddr());
if (it != mMemRegInfo.end())
{
auto const& mooncakeDesc = it->second;
mooncakeDesc->releaseRef();
if (mooncakeDesc->getRefCount())
continue;
int err = unregisterLocalMemory(mEngine, reinterpret_cast<void*>(desc.getAddr()));
TLLM_CHECK_WITH_INFO(
err == 0, "unregisterLocalMemory failed, addr: %p", reinterpret_cast<void*>(desc.getAddr()));
mMemRegInfo.erase(desc.getAddr());
}
}
}
void MooncakeTransferAgent::loadRemoteAgent(std::string const& name, AgentDesc const& agentDesc)
{
TLLM_LOG_DEBUG("MooncakeTransferAgent::loadRemoteAgent");
// Do the same thing as loadRemoteAgent(std::string const& name, ConnectionInfoType const& connectionInfo)
loadRemoteAgent(name, std::move(agentDesc.getBackendAgentDesc()));
}
void MooncakeTransferAgent::loadRemoteAgent(std::string const& name, ConnectionInfoType const& connectionInfo)
{
TLLM_LOG_DEBUG(mpi::MpiComm::world().getRank(),
"MooncakeTransferAgent::loadRemoteAgent: connecting to %s, remote agent name: %s", connectionInfo.c_str(),
name.c_str());
std::lock_guard<std::mutex> lock(mMutex);
auto segmentId = openSegment(mEngine, connectionInfo.c_str());
TLLM_CHECK_WITH_INFO(
segmentId >= 0, "loadRemoteAgent openSegment failed, connectionInfo: %s", connectionInfo.c_str());
mConnectedAgents[name].segmentId = segmentId;
}
void MooncakeTransferAgent::invalidateRemoteAgent(std::string const& name)
{
TLLM_LOG_DEBUG("MooncakeTransferAgent::invalidateRemoteAgent");
}
AgentDesc MooncakeTransferAgent::getLocalAgentDesc()
{
TLLM_LOG_DEBUG("MooncakeTransferAgent::getLocalAgentDesc");
// Using connection info as agent desc
const static size_t kBufLen = 64;
char connectionInfo[kBufLen];
int ret = getLocalIpAndPort(mEngine, connectionInfo, kBufLen);
TLLM_CHECK_WITH_INFO(ret == 0, "MooncakeTransferAgent::getLocalAgentDesc::getLocalIpAndPort failed");
return AgentDesc{std::string(connectionInfo)};
}
ConnectionInfoType MooncakeTransferAgent::getLocalConnectionInfo()
{
TLLM_LOG_DEBUG("MooncakeTransferAgent::getLocalConnectionInfo");
const static size_t kBufLen = 64;
char connectionInfo[kBufLen];
int ret = getLocalIpAndPort(mEngine, connectionInfo, kBufLen);
TLLM_CHECK_WITH_INFO(ret == 0, "MooncakeTransferAgent::getLocalAgentDesc::getLocalConnectionInfo failed");
return std::string(connectionInfo);
}
[[nodiscard]] std::unique_ptr<TransferStatus> MooncakeTransferAgent::submitTransferRequests(
TransferRequest const& request)
{
TLLM_LOG_DEBUG("MooncakeTransferAgent::submitTransferRequests");
bool hasNotif = false;
std::string syncMessage;
if (request.getSyncMessage().has_value())
{
hasNotif = true;
syncMessage = request.getSyncMessage().value();
}
const static size_t kMaxRequestCount = 1024;
uint64_t batchId = allocateBatchID(mEngine, kMaxRequestCount);
TLLM_CHECK_WITH_INFO(batchId != INVALID_BATCH, "allocateBatchID failed");
int segmentId;
{
std::lock_guard<std::mutex> lock(mMutex);
std::string remoteName = request.getRemoteName();
auto it = mConnectedAgents.find(remoteName);
if (it == mConnectedAgents.end())
{
std::string error = "Remote agent " + remoteName + "not found";
TLLM_THROW(error);
}
auto const& agentInfo = it->second;
segmentId = agentInfo.segmentId;
}
auto localDescs = request.getSrcDescs().getDescs();
auto remoteDescs = request.getDstDescs().getDescs();
TLLM_CHECK_WITH_INFO(localDescs.size() == remoteDescs.size(), "Number of local and remote memory must match");
size_t requestCount = localDescs.size();
std::vector<transfer_request_t> transferRequests(requestCount);
for (size_t index = 0; index < requestCount; ++index)
{
TLLM_CHECK_WITH_INFO(
localDescs[index].getLen() == remoteDescs[index].getLen(), "Length of local and remote memory must match");
transferRequests[index].opcode = (request.getOp() == TransferOp::kREAD) ? OPCODE_READ : OPCODE_WRITE;
transferRequests[index].source = reinterpret_cast<void*>(localDescs[index].getAddr());
transferRequests[index].target_offset = remoteDescs[index].getAddr();
transferRequests[index].length = localDescs[index].getLen();
transferRequests[index].target_id = segmentId;
}
int rc = 0;
if (hasNotif)
{
notify_msg_t notifyMsg;
notifyMsg.name = const_cast<char*>(mLocalAgentName.c_str());
notifyMsg.msg = const_cast<char*>(syncMessage.c_str());
rc = submitTransferWithNotify(mEngine, batchId, transferRequests.data(), requestCount, notifyMsg);
}
else
{
rc = submitTransfer(mEngine, batchId, transferRequests.data(), requestCount);
}
TLLM_CHECK_WITH_INFO(rc == 0, "submitTransfer failed with status: %d", rc);
return std::make_unique<MooncakeTransferStatus>(mEngine, batchId, requestCount);
}
void MooncakeTransferAgent::notifySyncMessage(std::string const& name, SyncMessage const& syncMessage)
{
TLLM_LOG_DEBUG("MooncakeTransferAgent::notifySyncMessage");
int segmentId;
{
std::lock_guard<std::mutex> lock(mMutex);
auto it = mConnectedAgents.find(name);
if (it == mConnectedAgents.end())
{
TLLM_LOG_WARNING("Remote agent %s not found", name.c_str());
return;
}
auto const& agentInfo = it->second;
segmentId = agentInfo.segmentId;
}
notify_msg_t notifyMsg;
notifyMsg.name = const_cast<char*>(mLocalAgentName.c_str());
std::string encoded = MooncakeBase64Helper::encode(syncMessage);
notifyMsg.msg = const_cast<char*>(encoded.c_str());
TLLM_LOG_DEBUG("MooncakeTransferAgent::notifySyncMessage notifyMsg.name: %s, notifyMsg.msg: %s", notifyMsg.name,
notifyMsg.msg);
int ret = genNotifyInEngine(mEngine, segmentId, notifyMsg);
TLLM_CHECK_WITH_INFO(ret == 0, "genNotifyInEngine failed with status: %d", ret);
}
[[nodiscard]] std::unordered_map<std::string, std::vector<SyncMessage>> MooncakeTransferAgent::getNotifiedSyncMessages()
{
std::unordered_map<std::string, std::vector<SyncMessage>> notifs;
int size = 0;
notify_msg_t* notifyMsgs = getNotifsFromEngine(mEngine, &size);
TLLM_CHECK_WITH_INFO(size >= 0, "getNotifsFromEngine returned negative size: %d", size);
for (int i = 0; i < size; i++)
{
if (notifyMsgs[i].msg == nullptr)
{
TLLM_LOG_WARNING("Message pointer is null for: %s", notifyMsgs[i].name);
continue;
}
std::string decoded = MooncakeBase64Helper::decodeToString(notifyMsgs[i].msg);
notifs[notifyMsgs[i].name].emplace_back(std::move(decoded));
TLLM_LOG_DEBUG("MooncakeTransferAgent::getNotifiedSyncMessages getNotifsFromEngine: %s, %s", notifyMsgs[i].name,
notifyMsgs[i].msg);
}
freeNotifsMsgBuf(notifyMsgs, size);
return notifs;
}
bool MooncakeTransferAgent::checkRemoteDescs(std::string const& name, MemoryDescs const& memoryDescs)
{
TLLM_LOG_DEBUG("MooncakeTransferAgent::checkRemoteDescs");
return true;
}
MooncakeTransferAgent::~MooncakeTransferAgent()
{
destroyTransferEngine(mEngine);
TLLM_LOG_DEBUG("MooncakeTransferAgent::~MooncakeTransferAgent");
}
#if defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wreturn-type-c-linkage"
#endif
extern "C"
{
std::unique_ptr<BaseTransferAgent> createMooncakeTransferAgent(BaseAgentConfig const* config)
{
TLLM_CHECK(config);
return std::make_unique<MooncakeTransferAgent>(*config);
}
}
} // namespace tensorrt_llm::executor::kv_cache
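Because notify_msg_t carries plain char* strings, notifySyncMessage() Base64-encodes the payload (which may contain arbitrary bytes, e.g. a serialized CommState) and getNotifiedSyncMessages() decodes it again. A small round-trip sketch of the helper; the message contents are illustrative.

#include "tensorrt_llm/executor/cache_transmission/mooncake_utils/transferAgent.h"

#include <cassert>
#include <string>

void base64RoundTrip()
{
    using tensorrt_llm::executor::kv_cache::MooncakeBase64Helper;
    // The embedded NUL is why the payload cannot travel through a char* unencoded.
    std::string const message("agent_sync_message\0payload", 26);
    std::string const encoded = MooncakeBase64Helper::encode(message);
    assert(MooncakeBase64Helper::decodeToString(encoded) == message);
}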


@@ -0,0 +1,165 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <atomic>
#include <mutex>
#include <thread>
#include <vector>
#include "tensorrt_llm/executor/transferAgent.h"
#include "transfer_engine_c.h"
namespace tensorrt_llm::executor::kv_cache
{
class MooncakeTransferStatus final : public TransferStatus
{
public:
MooncakeTransferStatus(transfer_engine_t engine, uint64_t batchId, size_t requestCount);
[[nodiscard]] bool isCompleted() const override;
void wait() const override;
private:
transfer_engine_t mEngine;
uint64_t mBatchId;
size_t mRequestCount;
mutable bool mBatchFreed = false;
};
class MooncakeMemoryDesc
{
public:
MooncakeMemoryDesc(MemoryDesc desc)
: mDesc{std::move(desc)}
, mRefCnt{0}
{
}
MooncakeMemoryDesc(MooncakeMemoryDesc const& other)
: mDesc{other.mDesc}
, mRefCnt{0}
{
}
MooncakeMemoryDesc& operator=(MooncakeMemoryDesc const&) = delete;
~MooncakeMemoryDesc() = default;
void addRef() noexcept
{
++mRefCnt;
}
int releaseRef() noexcept
{
return --mRefCnt;
}
int getRefCount() const noexcept
{
return mRefCnt;
}
MemoryDesc const& getDesc() const noexcept
{
return mDesc;
}
private:
MemoryDesc mDesc;
int mRefCnt;
};
class MooncakeBase64Helper
{
public:
static std::string encode(std::vector<uint8_t> const& data);
static std::string encode(std::string const& data);
static std::vector<uint8_t> decode(std::string const& encoded);
static std::string decodeToString(std::string const& encoded);
private:
static const std::string STANDARD_CHARS;
static std::string encodeInternal(std::vector<uint8_t> const& data, std::string const& chars);
static std::vector<uint8_t> decodeInternal(std::string const& encoded, std::string const& chars);
static inline bool isBase64(uint8_t c, std::string const& chars);
static inline bool isWhitespace(uint8_t c);
};
class MooncakeTransferAgent final : public BaseTransferAgent
{
public:
MooncakeTransferAgent(BaseAgentConfig const& config);
~MooncakeTransferAgent();
void registerMemory(RegisterDescs const& descs) override;
void deregisterMemory(RegisterDescs const& descs) override;
void loadRemoteAgent(std::string const& name, AgentDesc const& agentDesc) override;
void loadRemoteAgent(std::string const& name, ConnectionInfoType const& connectionInfo) override;
void invalidateRemoteAgent(std::string const& name) override;
AgentDesc getLocalAgentDesc() override;
ConnectionInfoType getLocalConnectionInfo() override;
[[nodiscard]] std::unique_ptr<TransferStatus> submitTransferRequests(TransferRequest const& request) override;
void notifySyncMessage(std::string const& name, SyncMessage const& syncMessage) override;
[[nodiscard]] std::unordered_map<std::string, std::vector<SyncMessage>> getNotifiedSyncMessages() override;
bool checkRemoteDescs(std::string const& name, MemoryDescs const& memoryDescs) override;
private:
struct AgentInfo
{
int segmentId;
};
mutable std::mutex mMutex;
transfer_engine_t mEngine;
std::unordered_map<uintptr_t, std::shared_ptr<MooncakeMemoryDesc>> mMemRegInfo;
std::unordered_map<std::string, AgentInfo> mConnectedAgents;
std::string mLocalAgentName;
};
#if defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wreturn-type-c-linkage"
#endif
extern "C"
{
[[nodiscard]] std::unique_ptr<BaseTransferAgent> createMooncakeTransferAgent(BaseAgentConfig const* config);
}
#if defined(__clang__)
#pragma clang diagnostic pop
#endif
} // namespace tensorrt_llm::executor::kv_cache
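Caller-side sketch for the status object declared above: submitTransferRequests() returns a TransferStatus that can either be blocked on with wait() or polled with isCompleted(), as the tests below do with status->wait(). The agent and request are assumed to be set up as shown elsewhere in this diff.

#include "tensorrt_llm/executor/transferAgent.h"

#include <chrono>
#include <thread>

namespace kvc = tensorrt_llm::executor::kv_cache;

void submitAndPoll(kvc::BaseTransferAgent& agent, kvc::TransferRequest const& request)
{
    auto status = agent.submitTransferRequests(request);
    while (!status->isCompleted())
    {
        // MooncakeTransferStatus::wait() runs the same 1 ms sleep loop internally.
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
    }
}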


@@ -606,8 +606,8 @@ static __global__ __launch_bounds__(kNumThreadsPerBlock) void topKPerRowPrefill(
int rowEnd = rowEnds[rowIdx];
// Local pointers to this block
outIndices += rowIdx * topK;
logits += rowIdx * stride0;
outIndices += static_cast<int64_t>(rowIdx) * topK;
logits += static_cast<int64_t>(rowIdx) * stride0;
topKPerRowJob<kNumThreadsPerBlock, kNumBins, useRadixSort>(
nullptr, logits, rowStart, rowEnd, outIndices, nullptr, stride1, topK);
@@ -638,23 +638,23 @@ static __global__ __launch_bounds__(kNumThreadsPerBlock) void topKPerRowDecode(f
// Local pointers to this block
if constexpr (!multipleBlocksPerRow && !mergeBlocks)
{
outIndices += rowIdx * topK;
outIndices += static_cast<int64_t>(rowIdx) * topK;
}
else if constexpr (multipleBlocksPerRow)
{
auto const blockSize = rowEnd / gridDim.y; // 16384 / 2 = 8192
rowStart = blockSize * blockIdx.y; // 8192 * 1 = 8192
rowEnd = gridDim.y == blockIdx.y + 1 ? rowEnd : rowStart + blockSize;
outIndices += rowIdx * gridDim.y * topK + blockIdx.y * topK;
outLogits += rowIdx * gridDim.y * topK + blockIdx.y * topK;
outIndices += static_cast<int64_t>(rowIdx) * gridDim.y * topK + blockIdx.y * topK;
outLogits += static_cast<int64_t>(rowIdx) * gridDim.y * topK + blockIdx.y * topK;
}
else if constexpr (mergeBlocks)
{
rowEnd = numBlocksToMerge * topK;
indices += rowIdx * numBlocksToMerge * topK;
outIndices += rowIdx * topK;
indices += static_cast<int64_t>(rowIdx) * numBlocksToMerge * topK;
outIndices += static_cast<int64_t>(rowIdx) * topK;
}
logits += rowIdx * stride0;
logits += static_cast<int64_t>(rowIdx) * stride0;
topKPerRowJob<kNumThreadsPerBlock, kNumBins, useRadixSort, multipleBlocksPerRow, mergeBlocks>(
indices, logits, rowStart, rowEnd, outIndices, outLogits, stride1, topK);
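The change in this hunk is purely about arithmetic width: the offsets such as rowIdx * topK and rowIdx * stride0 were previously computed in 32-bit arithmetic, which can overflow for large row counts or strides before the pointer addition happens. A tiny standalone illustration with made-up sizes.

#include <cassert>
#include <cstdint>

int main()
{
    int32_t rowIdx = 70000;
    int32_t stride0 = 131072;
    // 70000 * 131072 = 9'175'040'000, well past INT32_MAX (2'147'483'647),
    // so the multiplication must be widened before it is performed.
    int64_t offset = static_cast<int64_t>(rowIdx) * stride0;
    assert(offset == 9175040000LL);
    return 0;
}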


@@ -449,6 +449,7 @@ void initConfigBindings(nb::module_& m)
.value("MPI", tle::CacheTransceiverConfig::BackendType::MPI)
.value("UCX", tle::CacheTransceiverConfig::BackendType::UCX)
.value("NIXL", tle::CacheTransceiverConfig::BackendType::NIXL)
.value("MOONCAKE", tle::CacheTransceiverConfig::BackendType::MOONCAKE)
.def("from_string",
[](std::string const& str)
{
@@ -460,6 +461,8 @@ void initConfigBindings(nb::module_& m)
return tle::CacheTransceiverConfig::BackendType::UCX;
if (str == "NIXL" || str == "nixl")
return tle::CacheTransceiverConfig::BackendType::NIXL;
if (str == "MOONCAKE" || str == "mooncake")
return tle::CacheTransceiverConfig::BackendType::MOONCAKE;
throw std::runtime_error("Invalid backend type: " + str);
});


@@ -431,6 +431,7 @@ void initConfigBindings(pybind11::module_& m)
.value("MPI", tle::CacheTransceiverConfig::BackendType::MPI)
.value("UCX", tle::CacheTransceiverConfig::BackendType::UCX)
.value("NIXL", tle::CacheTransceiverConfig::BackendType::NIXL)
.value("MOONCAKE", tle::CacheTransceiverConfig::BackendType::MOONCAKE)
.def("from_string",
[](std::string const& str)
{
@@ -442,6 +443,8 @@ void initConfigBindings(pybind11::module_& m)
return tle::CacheTransceiverConfig::BackendType::UCX;
if (str == "NIXL" || str == "nixl")
return tle::CacheTransceiverConfig::BackendType::NIXL;
if (str == "MOONCAKE" || str == "mooncake")
return tle::CacheTransceiverConfig::BackendType::MOONCAKE;
throw std::runtime_error("Invalid backend type: " + str);
});


@@ -38,10 +38,31 @@ add_gtest(ucxCommTest ucxCommTest.cpp)
target_link_libraries(ucxCommTest PRIVATE ${Python3_LIBRARIES})
target_link_libraries(serializeUtilsTest PRIVATE ${Python3_LIBRARIES})
if(NIXL_ROOT)
add_gtest(transferAgentTest transferAgentTest.cpp)
add_gtest(agentCommTest agentCommTest.cpp)
target_link_libraries(transferAgentTest PRIVATE tensorrt_llm_nixl_wrapper)
target_link_libraries(agentCommTest PRIVATE tensorrt_llm_nixl_wrapper
${Python3_LIBRARIES})
# Skip MOONCAKE related tests on Rocky8
set(IS_ROCKY8 FALSE)
if(EXISTS "/etc/redhat-release")
set(IS_ROCKY8 TRUE)
endif()
if(NIXL_ROOT OR (MOONCAKE_ROOT AND NOT IS_ROCKY8))
add_gtest(agentCommTest agentCommTest.cpp)
add_gtest(transferAgentTest transferAgentTest.cpp)
if(NIXL_ROOT)
target_link_libraries(transferAgentTest PRIVATE tensorrt_llm_nixl_wrapper)
target_link_libraries(agentCommTest PRIVATE tensorrt_llm_nixl_wrapper
${Python3_LIBRARIES})
target_compile_definitions(transferAgentTest PRIVATE TEST_NIXL_BACKEND=1)
target_compile_definitions(agentCommTest PRIVATE TEST_NIXL_BACKEND=1)
endif()
if(MOONCAKE_ROOT)
target_link_libraries(transferAgentTest
PRIVATE tensorrt_llm_mooncake_wrapper)
target_link_libraries(agentCommTest PRIVATE tensorrt_llm_mooncake_wrapper
${Python3_LIBRARIES})
target_compile_definitions(transferAgentTest
PRIVATE TEST_MOONCAKE_BACKEND=1)
target_compile_definitions(agentCommTest PRIVATE TEST_MOONCAKE_BACKEND=1)
endif()
endif()


@@ -22,22 +22,54 @@ using namespace tensorrt_llm::batch_manager::kv_cache_manager;
using namespace tensorrt_llm::runtime;
using namespace tensorrt_llm::executor::kv_cache;
bool needSkipTest(std::string& skipReason)
std::vector<std::string> getAvailableBackends()
{
std::vector<std::string> backends;
#ifdef TEST_NIXL_BACKEND
backends.push_back("nixl");
#endif
#ifdef TEST_MOONCAKE_BACKEND
backends.push_back("mooncake");
#endif
return backends;
}
bool needSkipTest(std::string const& backend, std::string& skipReason)
{
bool skip = false;
try
{
auto& loader = tensorrt_llm::executor::kv_cache::DynLibLoader::getInstance();
using CreateNixlFuncType = std::unique_ptr<tensorrt_llm::executor::kv_cache::BaseTransferAgent> (*)(
tensorrt_llm::executor::kv_cache::BaseAgentConfig const*);
auto* func = loader.getFunctionPointer<CreateNixlFuncType>(
"libtensorrt_llm_nixl_wrapper.so", "createNixlTransferAgent");
if (backend == "nixl")
{
using CreateNixlFuncType = std::unique_ptr<tensorrt_llm::executor::kv_cache::BaseTransferAgent> (*)(
tensorrt_llm::executor::kv_cache::BaseAgentConfig const*);
auto* func = loader.getFunctionPointer<CreateNixlFuncType>(
"libtensorrt_llm_nixl_wrapper.so", "createNixlTransferAgent");
}
else if (backend == "mooncake")
{
using CreateMooncakeFuncType = std::unique_ptr<tensorrt_llm::executor::kv_cache::BaseTransferAgent> (*)(
tensorrt_llm::executor::kv_cache::BaseAgentConfig const*);
auto* func = loader.getFunctionPointer<CreateMooncakeFuncType>(
"libtensorrt_llm_mooncake_wrapper.so", "createMooncakeTransferAgent");
}
else
{
skip = true;
skipReason = "Unknown backend: " + backend;
}
}
catch (std::exception const& e)
{
std::string error = e.what();
if (error.find("libtensorrt_llm_nixl_wrapper.so") != std::string::npos)
std::string libName
= (backend == "nixl") ? "libtensorrt_llm_nixl_wrapper.so" : "libtensorrt_llm_mooncake_wrapper.so";
if (error.find(libName) != std::string::npos)
{
skip = true;
skipReason = error;
@@ -46,17 +78,26 @@ bool needSkipTest(std::string& skipReason)
return skip;
}
class AgentCommTest : public ::testing::Test
class AgentCommTest : public ::testing::TestWithParam<std::string>
{
protected:
void SetUp() override
{
backend = GetParam();
std::string skipReason;
if (needSkipTest(skipReason))
if (needSkipTest(backend, skipReason))
{
GTEST_SKIP() << skipReason;
}
setenv("TRTLLM_USE_NIXL_KVCACHE", "1", 1);
if (backend == "nixl")
{
setenv("TRTLLM_USE_NIXL_KVCACHE", "1", 1);
}
else if (backend == "mooncake")
{
setenv("TRTLLM_USE_MOONCAKE_KVCACHE", "1", 1);
}
auto constexpr numLayers = 8;
auto constexpr numHeads = 16;
@@ -106,15 +147,16 @@ protected:
mCacheState.reset();
}
std::string backend;
std::unique_ptr<CacheTransBufferManager> mTransBufferManager;
std::unique_ptr<KVCacheManager> mCacheManager;
std::unique_ptr<CacheState> mCacheState;
};
TEST_F(AgentCommTest, AgentConnectionManagerBasic)
TEST_P(AgentCommTest, AgentConnectionManagerBasic)
{
std::vector<CacheTransBufferManager*> bufferManagers{mTransBufferManager.get()};
auto connectionManager = std::make_unique<AgentConnectionManager>(bufferManagers, *mCacheState);
auto connectionManager = std::make_unique<AgentConnectionManager>(bufferManagers, *mCacheState, backend);
ASSERT_TRUE(connectionManager != nullptr);
ASSERT_EQ(connectionManager->getCacheTransBufferManagers().size(), bufferManagers.size());
ASSERT_TRUE(connectionManager->getCacheTransBufferManagers().front() != nullptr);
@@ -126,11 +168,11 @@ TEST_F(AgentCommTest, AgentConnectionManagerBasic)
ASSERT_EQ(commState.getAgentState().size(), 1);
}
TEST_F(AgentCommTest, AgentConnectionManagerConnect)
TEST_P(AgentCommTest, AgentConnectionManagerConnect)
{
std::vector<CacheTransBufferManager*> bufferManagers{mTransBufferManager.get()};
auto connectionManager0 = std::make_unique<AgentConnectionManager>(bufferManagers, *mCacheState);
auto connectionManager1 = std::make_unique<AgentConnectionManager>(bufferManagers, *mCacheState);
auto connectionManager0 = std::make_unique<AgentConnectionManager>(bufferManagers, *mCacheState, backend);
auto connectionManager1 = std::make_unique<AgentConnectionManager>(bufferManagers, *mCacheState, backend);
auto agentName0 = connectionManager0->getAgentName();
auto agentName1 = connectionManager1->getAgentName();
ASSERT_TRUE(!agentName0.empty());
@@ -189,3 +231,6 @@ TEST_F(AgentCommTest, AgentConnectionManagerConnect)
}
TLLM_LOG_INFO("after finish");
}
INSTANTIATE_TEST_SUITE_P(AvailableBackends, AgentCommTest, ::testing::ValuesIn(getAvailableBackends()),
[](::testing::TestParamInfo<AgentCommTest::ParamType> const& info) { return info.param; });


@@ -22,11 +22,27 @@
#include <gtest/gtest.h>
#include <filesystem>
#include <vector>
namespace fs = std::filesystem;
using namespace tensorrt_llm::executor::kv_cache;
std::vector<std::string> getAvailableBackends()
{
std::vector<std::string> backends;
#ifdef TEST_NIXL_BACKEND
backends.push_back("nixl");
#endif
#ifdef TEST_MOONCAKE_BACKEND
backends.push_back("mooncake");
#endif
return backends;
}
class RegisteredHostMemory
{
public:
@@ -54,100 +70,105 @@ private:
BaseTransferAgent* mAgentPtr{};
};
class TransferAgentTest : public ::testing::Test // NOLINT(cppcoreguidelines-pro-type-member-init)
class TransferAgentTest : public ::testing::TestWithParam<std::string> // NOLINT(cppcoreguidelines-pro-type-member-init)
{
public:
void SetUp() override {}
void SetUp() override
{
backend = GetParam();
}
void TearDown() override {}
[[nodiscard]] std::unique_ptr<BaseTransferAgent> makeTransferAgent(BaseAgentConfig const& config)
{
return tensorrt_llm::executor::kv_cache::makeTransferAgent("nixl", &config);
return tensorrt_llm::executor::kv_cache::makeTransferAgent(backend, &config);
}
std::string backend;
};
TEST_F(TransferAgentTest, Basic)
TEST_P(TransferAgentTest, Basic)
{
std::string const agent0{"agent0"}, agent1{"agent1"};
BaseAgentConfig config0{agent0, true}, config1{agent1, true};
auto nixlAgent0 = makeTransferAgent(config0);
auto nixlAgent1 = makeTransferAgent(config1);
auto xferAgent0 = makeTransferAgent(config0);
auto xferAgent1 = makeTransferAgent(config1);
TLLM_CHECK(nixlAgent0);
TLLM_CHECK(nixlAgent1);
TLLM_CHECK(xferAgent0);
TLLM_CHECK(xferAgent1);
std::vector<char> memory0(100, 10);
std::vector<char> memory1(100, 1);
RegisteredHostMemory regMem0(MemoryDescs{MemoryType::kDRAM, {MemoryDesc{memory0}}}, nixlAgent0.get());
RegisteredHostMemory regMem1(MemoryDescs{MemoryType::kDRAM, {MemoryDesc{memory1}}}, nixlAgent1.get());
RegisteredHostMemory regMem0(MemoryDescs{MemoryType::kDRAM, {MemoryDesc{memory0}}}, xferAgent0.get());
RegisteredHostMemory regMem1(MemoryDescs{MemoryType::kDRAM, {MemoryDesc{memory1}}}, xferAgent1.get());
// nixlAgent0->loadRemoteAgent(agent1);
auto connectionInfo = nixlAgent1->getLocalConnectionInfo();
nixlAgent0->loadRemoteAgent(agent1, connectionInfo);
// xferAgent0->loadRemoteAgent(agent1);
auto connectionInfo = xferAgent1->getLocalConnectionInfo();
xferAgent0->loadRemoteAgent(agent1, connectionInfo);
bool checked = false;
do
{
checked = nixlAgent0->checkRemoteDescs(agent1, regMem1.getDescs());
// wait for regMem is unpacked by nixlAgent0
checked = xferAgent0->checkRemoteDescs(agent1, regMem1.getDescs());
// wait for regMem is unpacked by xferAgent0
} while (!checked);
TransferRequest writeReq{TransferOp::kWRITE, regMem0.getDescs(), regMem1.getDescs(), agent1};
auto status = nixlAgent0->submitTransferRequests(writeReq);
auto status = xferAgent0->submitTransferRequests(writeReq);
status->wait();
TLLM_CHECK(memory0 == memory1);
nixlAgent0->invalidateRemoteAgent(agent1);
xferAgent0->invalidateRemoteAgent(agent1);
}
TEST_F(TransferAgentTest, Basic2)
TEST_P(TransferAgentTest, Basic2)
{
std::string const agent0{"agent0"}, agent1{"agent1"};
BaseAgentConfig config0{agent0, true}, config1{agent1, true};
auto nixlAgent0 = makeTransferAgent(config0);
auto nixlAgent1 = makeTransferAgent(config1);
auto xferAgent0 = makeTransferAgent(config0);
auto xferAgent1 = makeTransferAgent(config1);
TLLM_CHECK(nixlAgent0);
TLLM_CHECK(nixlAgent1);
TLLM_CHECK(xferAgent0);
TLLM_CHECK(xferAgent1);
std::vector<char> memory0(100, 10);
std::vector<char> memory1(100, 1);
RegisteredHostMemory regMem0(MemoryDescs{MemoryType::kDRAM, {MemoryDesc{memory0}}}, nixlAgent0.get());
RegisteredHostMemory regMem1(MemoryDescs{MemoryType::kDRAM, {MemoryDesc{memory1}}}, nixlAgent1.get());
RegisteredHostMemory regMem0(MemoryDescs{MemoryType::kDRAM, {MemoryDesc{memory0}}}, xferAgent0.get());
RegisteredHostMemory regMem1(MemoryDescs{MemoryType::kDRAM, {MemoryDesc{memory1}}}, xferAgent1.get());
// nixlAgent0->loadRemoteAgent(agent1);
auto connectionInfo = nixlAgent1->getLocalConnectionInfo();
nixlAgent0->loadRemoteAgent(agent1, connectionInfo);
// xferAgent0->loadRemoteAgent(agent1);
auto connectionInfo = xferAgent1->getLocalConnectionInfo();
xferAgent0->loadRemoteAgent(agent1, connectionInfo);
bool checked = false;
do
{
checked = nixlAgent0->checkRemoteDescs(agent1, regMem1.getDescs());
checked = xferAgent0->checkRemoteDescs(agent1, regMem1.getDescs());
} while (!checked);
TransferRequest readReq{TransferOp::kREAD, regMem0.getDescs(), regMem1.getDescs(), agent1};
auto status = nixlAgent0->submitTransferRequests(readReq);
auto status = xferAgent0->submitTransferRequests(readReq);
status->wait();
TLLM_CHECK(memory0 == memory1);
nixlAgent0->invalidateRemoteAgent(agent1);
xferAgent0->invalidateRemoteAgent(agent1);
}
TEST_F(TransferAgentTest, DeviceMemory)
TEST_P(TransferAgentTest, DeviceMemory)
{
std::string const agent0{"agent0"}, agent1{"agent1"};
BaseAgentConfig config0{agent0, true}, config1{agent1, true};
auto nixlAgent0 = makeTransferAgent(config0);
auto nixlAgent1 = makeTransferAgent(config1);
auto xferAgent0 = makeTransferAgent(config0);
auto xferAgent1 = makeTransferAgent(config1);
TLLM_CHECK(nixlAgent0);
TLLM_CHECK(nixlAgent1);
TLLM_CHECK(xferAgent0);
TLLM_CHECK(xferAgent1);
char* dev_ptr0;
char* dev_ptr1;
size_t size = 100;
@@ -159,20 +180,20 @@ TEST_F(TransferAgentTest, DeviceMemory)
cudaMemcpy(dev_ptr0, memory0.data(), size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_ptr1, memory1.data(), size, cudaMemcpyHostToDevice);
RegisteredHostMemory regMem0(
MemoryDescs{MemoryType::kVRAM, {MemoryDesc{dev_ptr0, size, deviceId}}}, nixlAgent0.get());
MemoryDescs{MemoryType::kVRAM, {MemoryDesc{dev_ptr0, size, deviceId}}}, xferAgent0.get());
RegisteredHostMemory regMem1(
MemoryDescs{MemoryType::kVRAM, {MemoryDesc{dev_ptr1, size, deviceId}}}, nixlAgent1.get());
MemoryDescs{MemoryType::kVRAM, {MemoryDesc{dev_ptr1, size, deviceId}}}, xferAgent1.get());
// nixlAgent0->loadRemoteAgent(agent1);
auto connectionInfo = nixlAgent1->getLocalConnectionInfo();
nixlAgent0->loadRemoteAgent(agent1, connectionInfo);
// xferAgent0->loadRemoteAgent(agent1);
auto connectionInfo = xferAgent1->getLocalConnectionInfo();
xferAgent0->loadRemoteAgent(agent1, connectionInfo);
bool checked = false;
do
{
checked = nixlAgent0->checkRemoteDescs(agent1, regMem1.getDescs());
checked = xferAgent0->checkRemoteDescs(agent1, regMem1.getDescs());
} while (!checked);
TransferRequest writeReq{TransferOp::kWRITE, regMem0.getDescs(), regMem1.getDescs(), agent1};
auto status = nixlAgent0->submitTransferRequests(writeReq);
auto status = xferAgent0->submitTransferRequests(writeReq);
status->wait();
cudaMemcpy(memory0.data(), dev_ptr0, size, cudaMemcpyDeviceToHost);
@@ -181,98 +202,99 @@ TEST_F(TransferAgentTest, DeviceMemory)
TLLM_CHECK(memory0 == memory1);
TLLM_CUDA_CHECK(cudaFree(dev_ptr0));
TLLM_CUDA_CHECK(cudaFree(dev_ptr1));
nixlAgent0->invalidateRemoteAgent(agent1);
xferAgent0->invalidateRemoteAgent(agent1);
}
TEST_F(TransferAgentTest, Connect)
TEST_P(TransferAgentTest, Connect)
{
std::string const agent0{"agent0"}, agent1{"agent1"}, agent2{"agent2"};
BaseAgentConfig config0{agent0, true}, config1{agent1, true}, config2{agent2, true};
auto nixlAgent0 = makeTransferAgent(config0);
auto nixlAgent1 = makeTransferAgent(config1);
auto nixlAgent2 = makeTransferAgent(config2);
auto xferAgent0 = makeTransferAgent(config0);
auto xferAgent1 = makeTransferAgent(config1);
auto xferAgent2 = makeTransferAgent(config2);
TLLM_CHECK(nixlAgent0);
TLLM_CHECK(nixlAgent1);
TLLM_CHECK(xferAgent0);
TLLM_CHECK(xferAgent1);
std::vector<char> memory0(100, 10);
std::vector<char> memory1(100, 1);
MemoryDescs memDescs0{MemoryType::kDRAM, {MemoryDesc{memory0}}};
MemoryDescs memDescs1{MemoryType::kDRAM, {MemoryDesc{memory1}}};
nixlAgent0->registerMemory(memDescs0);
nixlAgent1->registerMemory(memDescs1);
nixlAgent2->registerMemory(memDescs0);
xferAgent0->registerMemory(memDescs0);
xferAgent1->registerMemory(memDescs1);
xferAgent2->registerMemory(memDescs0);
// nixlAgent0->loadRemoteAgent(agent1);
auto connectionInfo = nixlAgent1->getLocalConnectionInfo();
nixlAgent0->loadRemoteAgent(agent1, connectionInfo);
// xferAgent0->loadRemoteAgent(agent1);
auto connectionInfo = xferAgent1->getLocalConnectionInfo();
xferAgent0->loadRemoteAgent(agent1, connectionInfo);
bool checked = false;
do
{
checked = nixlAgent0->checkRemoteDescs(agent1, memDescs1);
checked = xferAgent0->checkRemoteDescs(agent1, memDescs1);
} while (!checked);
TransferRequest writeReq{TransferOp::kWRITE, memDescs0, memDescs1, agent1};
auto status = nixlAgent0->submitTransferRequests(writeReq);
auto status = xferAgent0->submitTransferRequests(writeReq);
status->wait();
TLLM_CHECK(memory0 == memory1);
nixlAgent2->loadRemoteAgent(agent1, connectionInfo);
xferAgent2->loadRemoteAgent(agent1, connectionInfo);
checked = false;
do
{
checked = nixlAgent2->checkRemoteDescs(agent1, memDescs1);
checked = xferAgent2->checkRemoteDescs(agent1, memDescs1);
} while (!checked);
TransferRequest writeReq2{TransferOp::kWRITE, memDescs0, memDescs1, agent1};
auto status2 = nixlAgent2->submitTransferRequests(writeReq2);
auto status2 = xferAgent2->submitTransferRequests(writeReq2);
status2->wait();
TLLM_CHECK(memory0 == memory1);
nixlAgent0->invalidateRemoteAgent(agent1);
nixlAgent2->invalidateRemoteAgent(agent1);
nixlAgent0->deregisterMemory(memDescs0);
nixlAgent1->deregisterMemory(memDescs1);
nixlAgent2->deregisterMemory(memDescs0);
xferAgent0->invalidateRemoteAgent(agent1);
xferAgent2->invalidateRemoteAgent(agent1);
xferAgent0->deregisterMemory(memDescs0);
xferAgent1->deregisterMemory(memDescs1);
xferAgent2->deregisterMemory(memDescs0);
}
TEST_F(TransferAgentTest, SyncMessage)
TEST_P(TransferAgentTest, SyncMessage)
{
constexpr std::size_t MAX_QUERY_TIMES = std::numeric_limits<size_t>::max();
std::string const agent0{"agent0"}, agent1{"agent1"};
BaseAgentConfig config0{agent0, true}, config1{agent1, true};
auto nixlAgent0 = makeTransferAgent(config0);
auto nixlAgent1 = makeTransferAgent(config1);
auto xferAgent0 = makeTransferAgent(config0);
auto xferAgent1 = makeTransferAgent(config1);
TLLM_CHECK(nixlAgent0);
TLLM_CHECK(nixlAgent1);
TLLM_CHECK(xferAgent0);
TLLM_CHECK(xferAgent1);
std::vector<char> memory0(100, 10);
std::vector<char> memory1(100, 1);
RegisteredHostMemory regMem0(MemoryDescs{MemoryType::kDRAM, {MemoryDesc{memory0}}}, nixlAgent0.get());
RegisteredHostMemory regMem1(MemoryDescs{MemoryType::kDRAM, {MemoryDesc{memory1}}}, nixlAgent0.get());
RegisteredHostMemory regMem0(MemoryDescs{MemoryType::kDRAM, {MemoryDesc{memory0}}}, xferAgent0.get());
RegisteredHostMemory regMem1(MemoryDescs{MemoryType::kDRAM, {MemoryDesc{memory1}}}, xferAgent0.get());
RegisteredHostMemory regMem2(MemoryDescs{MemoryType::kDRAM, {MemoryDesc{memory0}}}, nixlAgent1.get());
RegisteredHostMemory regMem3(MemoryDescs{MemoryType::kDRAM, {MemoryDesc{memory1}}}, nixlAgent1.get());
RegisteredHostMemory regMem2(MemoryDescs{MemoryType::kDRAM, {MemoryDesc{memory0}}}, xferAgent1.get());
RegisteredHostMemory regMem3(MemoryDescs{MemoryType::kDRAM, {MemoryDesc{memory1}}}, xferAgent1.get());
// nixlAgent0->loadRemoteAgent(agent1);
auto connectionInfo = nixlAgent1->getLocalConnectionInfo();
nixlAgent0->loadRemoteAgent(agent1, connectionInfo);
// xferAgent0->loadRemoteAgent(agent1);
auto connectionInfo = xferAgent1->getLocalConnectionInfo();
xferAgent0->loadRemoteAgent(agent1, connectionInfo);
bool checked = false;
do
{
checked = nixlAgent0->checkRemoteDescs(agent1, regMem3.getDescs());
checked = xferAgent0->checkRemoteDescs(agent1, regMem3.getDescs());
} while (!checked);
auto syncMessage = std::string("agent_sync_message");
TransferRequest writeReq{TransferOp::kWRITE, regMem0.getDescs(), regMem3.getDescs(), agent1};
auto status = nixlAgent0->submitTransferRequests(writeReq);
nixlAgent0->notifySyncMessage(agent1, syncMessage);
auto status = xferAgent0->submitTransferRequests(writeReq);
xferAgent0->notifySyncMessage(agent1, syncMessage);
auto notif = nixlAgent1->getNotifiedSyncMessages();
auto notif = xferAgent1->getNotifiedSyncMessages();
for (std::size_t i = 0; i < MAX_QUERY_TIMES && notif.size() == 0; i++)
{
notif = nixlAgent1->getNotifiedSyncMessages();
notif = xferAgent1->getNotifiedSyncMessages();
}
status->wait();
TLLM_CHECK(status->isCompleted());
TLLM_CHECK(notif.size() == 1);
TLLM_CHECK(notif[agent0].size() == 1);
@@ -281,25 +303,25 @@ TEST_F(TransferAgentTest, SyncMessage)
TLLM_CHECK(memory0 == memory1);
std::string syncMessage2 = "two_agent_sync_message";
nixlAgent0->notifySyncMessage(agent1, syncMessage2);
auto notif2 = nixlAgent1->getNotifiedSyncMessages();
xferAgent0->notifySyncMessage(agent1, syncMessage2);
auto notif2 = xferAgent1->getNotifiedSyncMessages();
for (std::size_t i = 0; i < MAX_QUERY_TIMES && notif2.size() == 0; i++)
{
notif2 = nixlAgent1->getNotifiedSyncMessages();
notif2 = xferAgent1->getNotifiedSyncMessages();
}
TLLM_CHECK(notif2.size() == 1);
TLLM_CHECK(notif2[agent0].size() == 1);
TLLM_CHECK(notif2[agent0][0] == syncMessage2);
// nixlAgent1->loadRemoteAgent(agent0);
auto connectionInfo2 = nixlAgent0->getLocalConnectionInfo();
nixlAgent1->loadRemoteAgent(agent0, connectionInfo2);
// xferAgent1->loadRemoteAgent(agent0);
auto connectionInfo2 = xferAgent0->getLocalConnectionInfo();
xferAgent1->loadRemoteAgent(agent0, connectionInfo2);
std::string syncMessage3 = "three_agent_sync_message";
nixlAgent1->notifySyncMessage(agent0, syncMessage3);
auto notif3 = nixlAgent0->getNotifiedSyncMessages();
xferAgent1->notifySyncMessage(agent0, syncMessage3);
auto notif3 = xferAgent0->getNotifiedSyncMessages();
for (std::size_t i = 0; i < MAX_QUERY_TIMES && notif3.size() == 0; i++)
{
notif3 = nixlAgent0->getNotifiedSyncMessages();
notif3 = xferAgent0->getNotifiedSyncMessages();
}
TLLM_CHECK(notif3.size() == 1);
TLLM_CHECK(notif3[agent1].size() == 1);
@ -308,19 +330,20 @@ TEST_F(TransferAgentTest, SyncMessage)
bool checked2 = false;
do
{
checked2 = nixlAgent0->checkRemoteDescs(agent1, regMem1.getDescs());
checked2 = xferAgent0->checkRemoteDescs(agent1, regMem1.getDescs());
} while (!checked2);
std::string syncMessage4 = "four_agent_sync_message";
TransferRequest writeReq1{TransferOp::kWRITE, regMem2.getDescs(), regMem1.getDescs(), agent0};
auto status1 = nixlAgent1->submitTransferRequests(writeReq1);
nixlAgent1->notifySyncMessage(agent0, syncMessage4);
auto status1 = xferAgent1->submitTransferRequests(writeReq1);
xferAgent1->notifySyncMessage(agent0, syncMessage4);
auto notif4 = nixlAgent0->getNotifiedSyncMessages();
auto notif4 = xferAgent0->getNotifiedSyncMessages();
for (std::size_t i = 0; i < MAX_QUERY_TIMES && notif4.size() == 0; i++)
{
notif4 = nixlAgent0->getNotifiedSyncMessages();
notif4 = xferAgent0->getNotifiedSyncMessages();
}
status1->wait();
TLLM_CHECK(status1->isCompleted());
TLLM_CHECK(notif4.size() == 1);
TLLM_CHECK(notif4[agent1].size() == 1);
@ -335,11 +358,11 @@ TEST_F(TransferAgentTest, SyncMessage)
std::stringstream ss;
Serialization::serialize(state, ss);
std::string serializedState = ss.str();
nixlAgent0->notifySyncMessage(agent1, serializedState);
auto notif5 = nixlAgent1->getNotifiedSyncMessages();
xferAgent0->notifySyncMessage(agent1, serializedState);
auto notif5 = xferAgent1->getNotifiedSyncMessages();
for (size_t i = 0; i < MAX_QUERY_TIMES && notif5.size() == 0; i++)
{
notif5 = nixlAgent1->getNotifiedSyncMessages();
notif5 = xferAgent1->getNotifiedSyncMessages();
}
TLLM_CHECK(notif5.size() == 1);
TLLM_CHECK(notif5[agent0].size() == 1);
@ -348,10 +371,16 @@ TEST_F(TransferAgentTest, SyncMessage)
auto state2 = Serialization::deserializeCommState(ss2);
TLLM_CHECK(state2 == state);
nixlAgent0->invalidateRemoteAgent(agent1);
nixlAgent1->invalidateRemoteAgent(agent0);
xferAgent0->invalidateRemoteAgent(agent1);
xferAgent1->invalidateRemoteAgent(agent0);
}
INSTANTIATE_TEST_SUITE_P(AvailableBackends, TransferAgentTest, ::testing::ValuesIn(getAvailableBackends()),
[](::testing::TestParamInfo<TransferAgentTest::ParamType> const& info) { return info.param; });
// Skip LoopbackAgentTest for mooncake backend for now
#ifdef TEST_NIXL_BACKEND
class LoopbackAgentTest : public ::testing::Test,
public ::testing::WithParamInterface<bool> // NOLINT(cppcoreguidelines-pro-type-member-init)
{
@ -466,3 +495,5 @@ TEST_P(LoopbackAgentTest, GpuToFile)
}
INSTANTIATE_TEST_SUITE_P(, LoopbackAgentTest, ::testing::Values(true, false));
#endif // TEST_NIXL_BACKEND

View File

@ -46,6 +46,7 @@
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <filesystem>
#include <memory>
#include <random>
#include <tensorrt_llm/batch_manager/cacheTransBuffer.h>
@ -713,7 +714,7 @@ protected:
return;
}
else if (tensorrt_llm::common::getEnvUseMPIKvCache() || tensorrt_llm::common::getEnvUseUCXKvCache()
|| tensorrt_llm::common::getEnvUseNixlKvCache())
|| tensorrt_llm::common::getEnvUseNixlKvCache() || tensorrt_llm::common::getEnvUseMooncakeKvCache())
{
int maxNumTokens = 2048;
mCacheTransBufferManagers.clear();
@ -729,7 +730,15 @@ protected:
}
bool isUcx = tensorrt_llm::common::getEnvUseUCXKvCache();
bool isNixl = tensorrt_llm::common::getEnvUseNixlKvCache();
TLLM_LOG_INFO("Enable %s KV cache transport.", isUcx ? "UCX" : isNixl ? "NIXL" : "MPI");
bool isMooncake = tensorrt_llm::common::getEnvUseMooncakeKvCache();
// Skip tests for MOONCAKE when on Rocky8
bool isRocky8 = std::filesystem::exists("/etc/redhat-release");
isMooncake = isMooncake && !isRocky8;
TLLM_LOG_INFO("Enable %s KV cache transport.",
isUcx ? "UCX"
: isNixl ? "NIXL"
: isMooncake ? "MOONCAKE"
: "MPI");
if (isUcx)
{
@ -756,7 +765,12 @@ protected:
setenv("TRTLLM_NIXL_PORT", std::to_string(port).c_str(), 1);
mConnectionManager
= std::make_unique<texec::kv_cache::AgentConnectionManager>(bufferManagers, *mCacheState);
= std::make_unique<texec::kv_cache::AgentConnectionManager>(bufferManagers, *mCacheState, "nixl");
}
else if (isMooncake)
{
mConnectionManager = std::make_unique<texec::kv_cache::AgentConnectionManager>(
bufferManagers, *mCacheState, "mooncake");
}
else
{
@ -783,7 +797,7 @@ protected:
std::vector<int> contextRankVec(mContextRankSize);
std::iota(contextRankVec.begin(), contextRankVec.end(), 0);
if (isUcx || isNixl)
if (isUcx || isNixl || isMooncake)
{
auto commState = mConnectionManager->getCommState();
namespace su = tensorrt_llm::executor::serialize_utils;
@ -1286,9 +1300,9 @@ TEST_P(AsymmetricalCacheTest, TestCase)
int indexerDimPerHead = std::get<17>(param);
int indexerKCacheQuantBlockSize = std::get<18>(param);
if (genCp > 1 && tensorrt_llm::common::getEnvUseNixlKvCache())
if (genCp > 1 && (tensorrt_llm::common::getEnvUseNixlKvCache() || tensorrt_llm::common::getEnvUseMooncakeKvCache()))
{
GTEST_SKIP() << "Temporarily skipping cache transceiver tests with NIXL backend for CP.";
GTEST_SKIP() << "Temporarily skipping cache transceiver tests with NIXL and MOONCAKE backend for CP.";
}
std::vector<int> lenList = {30, 10, 60, 80};
if (genCp > 1)
@ -1410,9 +1424,9 @@ TEST_P(AsymmetricalCacheTestWithDP, TestCase)
int indexerDimPerHead = std::get<17>(param);
int indexerKCacheQuantBlockSize = std::get<18>(param);
if (genCp > 1 && tensorrt_llm::common::getEnvUseNixlKvCache())
if (genCp > 1 && (tensorrt_llm::common::getEnvUseNixlKvCache() || tensorrt_llm::common::getEnvUseMooncakeKvCache()))
{
GTEST_SKIP() << "Temporarily skipping cache transceiver tests with NIXL backend for CP.";
GTEST_SKIP() << "Temporarily skipping cache transceiver tests with NIXL and MOONCAKE backend for CP.";
}
setUpCommunicator(contextTp, contextPp, contextCp, genTp, genPp, genCp, isMLA, contextDP, generationDP);

View File

@ -1,11 +1,20 @@
..
Reusable note sections for deployment guides.
Reusable note sections for docs.
Include specific notes using:
.. include:: note_sections.rst
.. include:: <path-to>/note_sections.rst
:start-after: .. start-note-<name>
:end-before: .. end-note-<name>
.. start-note-config-flag-alias
.. note::
**Non-breaking**: ``--config <file.yaml>`` is the preferred flag for passing a :ref:`YAML configuration file <configuring-with-yaml-files>`.
Existing workflows using ``--extra_llm_api_options <file.yaml>`` continue to work; it is an equivalent alias.
.. end-note-config-flag-alias
.. start-note-traffic-patterns
.. note::

View File

@ -139,7 +139,7 @@ To do the benchmark, run the following command:
```bash
YOUR_DATA_PATH=<your dataset file following the format>
cat >./extra-llm-api-config.yml<<EOF
cat >./config.yml<<EOF
moe_config:
backend: TRTLLM
speculative_config:
@ -157,7 +157,7 @@ trtllm-bench --model nvidia/DeepSeek-R1-FP4 \
--max_batch_size 1 \
--tp 8 \
--ep 2 \
--extra_llm_api_options ./extra-llm-api-config.yml
--config ./config.yml
```
Explanation:
@ -168,7 +168,7 @@ Explanation:
- `--max_batch_size`: Max batch size in each rank.
- `--tp`: Tensor parallel size.
- `--ep`: Expert parallel size.
- `--extra_llm_api_options`: Used to specify some extra config. The content of the file is as follows:
- `--config`: Used to specify extra YAML configuration. The content of the file is as follows:
#### Expected Results
The perf can be different when using different datasets and different machines.
@ -195,7 +195,7 @@ We are seeing meaningful speedup using FP8 KV cache, thus refreshing the numbers
#### Benchmark
```bash
cat >./extra-llm-api-config.yml <<EOF
cat >./config.yml <<EOF
cuda_graph_config:
enable_padding: true
batch_sizes:
@ -218,7 +218,7 @@ trtllm-bench --model nvidia/DeepSeek-R1-0528-FP4
throughput
--dataset ${YOUR_DATA_PATH}
--tp 8 --ep 8
--extra_llm_api_options ./extra-llm-api-config.yml
--config ./config.yml
--max_batch_size 896
--max_num_tokens 2048
--kv_cache_free_gpu_mem_fraction 0.93
@ -261,7 +261,7 @@ trtllm-bench --model nvidia/DeepSeek-R1-FP4 \
YOUR_DATA_PATH=./dataset.txt
cat >./extra-llm-api-config.yml <<EOF
cat >./config.yml <<EOF
cuda_graph_config:
enable_padding: true
batch_sizes:
@ -290,7 +290,7 @@ trtllm-bench -m nvidia/DeepSeek-R1-FP4 \
--num_requests 49152 \
--concurrency 3072 \
--kv_cache_free_gpu_mem_fraction 0.85 \
--extra_llm_api_options ./extra-llm-api-config.yml
--config ./config.yml
```
#### Expected Result Format
@ -315,7 +315,7 @@ To do the benchmark, run the following command:
```bash
YOUR_DATA_PATH=<your dataset file following the format>
cat >./extra-llm-api-config.yml<<EOF
cat >./config.yml<<EOF
speculative_config:
decoding_type: MTP
num_nextn_predict_layers: 3
@ -329,7 +329,7 @@ trtllm-bench --model deepseek-ai/DeepSeek-R1 \
--tp 8 \
--ep 4 \
--concurrency 1 \
--extra_llm_api_options ./extra-llm-api-config.yml
--config ./config.yml
```
#### Expected Result Format
@ -363,7 +363,7 @@ trtllm-bench --model nvidia/DeepSeek-R1-FP4 \
YOUR_DATA_PATH=./dataset.txt
cat >./extra-llm-api-config.yml<<EOF
cat >./config.yml<<EOF
cuda_graph_config:
batch_sizes:
- 128
@ -384,7 +384,7 @@ trtllm-bench -m deepseek-ai/DeepSeek-R1 \
--num_requests 5120 \
--concurrency 1024 \
--kv_cache_free_gpu_mem_fraction 0.8 \
--extra_llm_api_options ./extra-llm-api-config.yml
--config ./config.yml
```
#### Expected Result Format
@ -408,7 +408,7 @@ Average request latency (ms): 181540.5739
To benchmark TensorRT LLM on DeepSeek models with more ISL/OSL combinations, you can use the `trtllm-bench prepare-dataset` subcommand to generate the dataset and use similar commands mentioned in the previous section. TensorRT LLM is working on enhancements that can make the benchmark process smoother.
### WIP: Enable more features by default
Currently, there are some features that need to be enabled through a user-defined file `extra-llm-api-config.yml`, such as attention dp. We're working on to enable those features by default, so that users can get good out-of-the-box performance on DeepSeek models.
Currently, there are some features that need to be enabled through a user-defined file `config.yml`, such as attention dp. We're working on enabling those features by default, so that users can get good out-of-the-box performance on DeepSeek models.
Note that `max_batch_size` and `max_num_tokens` can easily affect performance. Their default values are carefully chosen and should deliver good performance in most cases; however, you may still need to tune them for peak performance.

View File

@ -105,7 +105,7 @@ Notes:
Run the following command inside the container to start the endpoint:
```bash
TRTLLM_ENABLE_PDL=1 trtllm-serve /config/models/gpt-oss-120b --host 0.0.0.0 --port 8000 --max_batch_size 10 --tp_size 8 --ep_size 4 --trust_remote_code --extra_llm_api_options /config/models/eagle/eagle.yaml --max_num_tokens 131072 --max_seq_len 131072
TRTLLM_ENABLE_PDL=1 trtllm-serve /config/models/gpt-oss-120b --host 0.0.0.0 --port 8000 --max_batch_size 10 --tp_size 8 --ep_size 4 --trust_remote_code --config /config/models/eagle/eagle.yaml --max_num_tokens 131072 --max_seq_len 131072
```
The server initializes, loads, and optimizes the models. After it is ready, it listens on port 8000.

View File

@ -122,7 +122,7 @@ To benchmark min-latency performance with MTP, you need to follow [this document
```bash
YOUR_DATA_PATH=<your dataset file following the format>
cat >./extra-llm-api-config.yml<<EOF
cat >./config.yml<<EOF
cuda_graph_config: {}
moe_config:
backend: TRTLLM
@ -142,7 +142,7 @@ trtllm-bench --model nvidia/DeepSeek-R1-FP4 \
--max_batch_size 1 \
--tp 8 \
--ep 2 \
--extra_llm_api_options ./extra-llm-api-config.yml
--config ./config.yml
```
## MTP optimization - Relaxed Acceptance
@ -178,7 +178,7 @@ To benchmark min-latency performance with MTP Relaxed Acceptance, you need to fo
```bash
YOUR_DATA_PATH=<your dataset file following the format>
cat >./extra-llm-api-config.yml<<EOF
cat >./config.yml<<EOF
cuda_graph_config: {}
moe_config:
backend: TRTLLM
@ -201,7 +201,7 @@ trtllm-bench --model nvidia/DeepSeek-R1-FP4 \
--max_batch_size 1 \
--tp 8 \
--ep 2 \
--extra_llm_api_options ./extra-llm-api-config.yml
--config ./config.yml
```
## Evaluation

View File

@ -541,7 +541,7 @@ Prepare a dataset following the [benchmarking documentation](https://github.com/
Run 32-way expert parallelism inference on the prepared dataset. Please refer to the [LLM API MGMN example](https://github.com/NVIDIA/TensorRT-LLM/blob/main/examples/llm-api/llm_mgmn_trtllm_bench.sh) for details on running `trtllm-bench` on Slurm.
```bash
cat > ./extra_llm_api_options.yaml <<EOF
cat > ./config.yaml <<EOF
enable_attention_dp: true
EOF
@ -551,7 +551,7 @@ trtllm-bench --model ${MODEL_NAME} \
throughput \
--tp 32 \
--ep 32 \
--extra_llm_api_options ./extra_llm_api_options.yaml \
--config ./config.yaml \
--kv_cache_free_gpu_mem_fraction 0.75 \
--backend pytorch \
--dataset ./dataset.json \
@ -621,7 +621,7 @@ export EXPERT_STATISTIC_ITER_RANGE=100-200
Run 36-way expert parallelism inference with the EPLB configuration incorporated:
```bash
cat > ./extra_llm_api_options_eplb.yaml <<EOF
cat > ./config_eplb.yaml <<EOF
enable_attention_dp: true
moe_config:
load_balancer: ./moe_load_balancer.yaml
@ -633,7 +633,7 @@ trtllm-bench --model ${MODEL_NAME} \
throughput \
--tp 36 \
--ep 36 \
--extra_llm_api_options ./extra_llm_api_options_eplb.yaml \
--config ./config_eplb.yaml \
--kv_cache_free_gpu_mem_fraction 0.75 \
--backend pytorch \
--dataset ./dataset.json \

View File

@ -73,7 +73,7 @@ docker run -d --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 \
trtllm-serve /config/models/maverick \
--host 0.0.0.0 --port 8000 \
--tp_size 8 --ep_size 1 \
--trust_remote_code --extra_llm_api_options c.yaml \
--trust_remote_code --config c.yaml \
--kv_cache_free_gpu_memory_fraction 0.75"
```

View File

@ -86,7 +86,7 @@ trtllm-bench \
--backend pytorch \
--tp ${num_gpus} \
--ep 1 \
--extra_llm_api_options low_latency.yaml \
--config low_latency.yaml \
--dataset gpt-oss-120b-1k2k.txt \
--max_batch_size ${max_batch_size} \
--concurrency ${max_batch_size} \
@ -149,7 +149,7 @@ trtllm-bench \
--backend pytorch \
--tp ${num_gpus} \
--ep ${num_gpus} \
--extra_llm_api_options max_throughput.yaml \
--config max_throughput.yaml \
--dataset gpt-oss-120b-1k2k.txt \
--max_batch_size ${max_batch_size} \
--concurrency $((max_batch_size * num_gpus)) \
@ -171,7 +171,7 @@ Currently, the best throughput **19.5k tps/gpu** is achieved with DP4EP4 using 4
## Launch the TensorRT-LLM Server
We can use `trtllm-serve` to serve the model by translating the benchmark commands above. For low-latency configuration, run:
We can use `trtllm-serve` to serve the model by translating the benchmark commands above. For low-latency configuration, run:
**Note:** You can also point to a local path containing the model weights instead of the HF repo (e.g., `${local_model_path}`).
```bash
@ -184,7 +184,7 @@ trtllm-serve openai/gpt-oss-120b \
--ep_size 8 \
--max_batch_size 640 \
--trust_remote_code \
--extra_llm_api_options max_throughput.yaml \
--config max_throughput.yaml \
--kv_cache_free_gpu_memory_fraction 0.9
```
</details>
@ -201,7 +201,7 @@ trtllm-serve \
--ep_size 4 \
--max_batch_size 640 \
--trust_remote_code \
--extra_llm_api_options max_throughput.yaml \
--config max_throughput.yaml \
--kv_cache_free_gpu_memory_fraction 0.9
```
</details>
@ -223,7 +223,7 @@ OpenAI ships a set of Triton kernels optimized for its MoE models. TensorRT LLM
### Selecting Triton as the MoE backend
To use the Triton MoE backend with **trtllm-serve** (or other similar commands) add this snippet to the YAML file passed via `--extra_llm_api_options`:
To use the Triton MoE backend with **trtllm-serve** (or other similar commands) add this snippet to the YAML file passed via `--config`:
```yaml
moe_config:
@ -347,7 +347,7 @@ OpenAI ships a set of Triton kernels optimized for its MoE models. TensorRT-LLM
### Selecting Triton as the MoE backend
To use the Triton MoE backend with **trtllm-serve** (or other commands), add this snippet to the YAML file passed via `--extra_llm_api_options`:
To use the Triton MoE backend with **trtllm-serve** (or other commands), add this snippet to the YAML file passed via `--config`:
```yaml
moe_config:

View File

@ -3,9 +3,12 @@ trtllm-bench
trtllm-bench is a comprehensive benchmarking tool for TensorRT LLM engines. It provides three main subcommands for different benchmarking scenarios:
**Common Options for All Commands:**
.. include:: ../_includes/note_sections.rst
:start-after: .. start-note-config-flag-alias
:end-before: .. end-note-config-flag-alias
**Usage:**
Syntax
------
.. click:: tensorrt_llm.commands.bench:main
:prog: trtllm-bench
@ -14,8 +17,11 @@ trtllm-bench is a comprehensive benchmarking tool for TensorRT LLM engines. It p
Dataset preparation
------------------
prepare_dataset.py
===========================
^^^^^^^^^^^^^^^^^^
trtllm-bench is designed to work with the `prepare_dataset.py <https://github.com/NVIDIA/TensorRT-LLM/blob/main/benchmarks/cpp/prepare_dataset.py>`_ script, which generates benchmark datasets in the required format. The prepare_dataset script supports:
@ -38,7 +44,7 @@ trtllm-bench is designed to work with the `prepare_dataset.py <https://github.co
**Usage:**
prepare_dataset
-------------------
"""""""""""""""
.. code-block:: bash
@ -72,7 +78,7 @@ prepare_dataset
- Logging level: info or debug (default: info)
dataset
-------------------
"""""""
Process real datasets from various sources.
@ -103,7 +109,7 @@ Process real datasets from various sources.
token_norm_dist
-------------------
"""""""""""""""
Generate synthetic datasets with normal token distribution.
@ -134,7 +140,7 @@ Generate synthetic datasets with normal token distribution.
token_unif_dist
-------------------
"""""""""""""""
Generate synthetic datasets with uniform token distribution

View File

@ -79,6 +79,10 @@ Alternatively, the ``--model`` argument also accepts a local path to pre-built T
For more details, see ``trtllm-eval --help`` and ``trtllm-eval <task> --help``.
.. include:: ../_includes/note_sections.rst
:start-after: .. start-note-config-flag-alias
:end-before: .. end-note-config-flag-alias
Syntax

View File

@ -3,30 +3,11 @@
TensorRT LLM provides the OpenAI-compatible API via `trtllm-serve` command.
A complete reference for the API is available in the [OpenAI API Reference](https://platform.openai.com/docs/api-reference).
This step-by-step tutorial covers the following topics for running online serving benchmarking with Llama 3.1 70B and Qwen2.5-VL-7B for multimodal models:
* Methodology Introduction
* Launch the OpenAI-Compatible Server with NGC container
* Run the performance benchmark
* Using `extra_llm_api_options`
* Multimodal Serving and Benchmarking
## Table of Contents
- [Run benchmarking with `trtllm-serve`](#run-benchmarking-with-trtllm-serve)
- [Table of Contents](#table-of-contents)
- [Methodology Introduction](#methodology-introduction)
- [Preparation](#preparation)
- [Launch the NGC container](#launch-the-ngc-container)
- [Start the trtllm-serve service](#start-the-trtllm-serve-service)
- [Benchmark using `tensorrt_llm.serve.scripts.benchmark_serving`](#benchmark-using-tensorrt_llmservescriptsbenchmark_serving)
- [Key Metrics](#key-metrics)
- [About `extra_llm_api_options`](#about-extra_llm_api_options)
- [`kv_cache_config`](#kv_cache_config)
- [`cuda_graph_config`](#cuda_graph_config)
- [`moe_config`](#moe_config)
- [`attention_backend`](#attention_backend)
- [Multimodal Serving and Benchmarking](#multimodal-serving-and-benchmarking)
- [Setting up Multimodal Serving](#setting-up-multimodal-serving)
- [Multimodal Benchmarking](#multimodal-benchmarking)
```{contents} Table of Contents
:local:
:depth: 3
```
## Methodology Introduction
@ -57,9 +38,9 @@ For benchmarking purposes, first create a bash script using the following code a
```bash
#! /bin/bash
model_path=/path/to/llama3.1_70B
extra_llm_api_file=/tmp/extra-llm-api-config.yml
config_file=/tmp/config.yml
cat << EOF > ${extra_llm_api_file}
cat << EOF > ${config_file}
enable_attention_dp: false
print_iter_log: true
cuda_graph_config:
@ -77,7 +58,7 @@ trtllm-serve ${model_path} \
--tp_size 1 \
--ep_size 1 \
--trust_remote_code \
--extra_llm_api_options ${extra_llm_api_file}
--config ${config_file}
```
> [!NOTE]
> The trtllm-llmapi-launch is a script that launches the LLM-API code on
@ -215,17 +196,24 @@ $$
To get more detailed metrics besides the key metrics above, there is an [experimental tool](https://github.com/NVIDIA/TensorRT-LLM/tree/main/tensorrt_llm/serve/scripts/time_breakdown) for request time breakdown.
## About `extra_llm_api_options`
trtllm-serve provides `extra_llm_api_options` knob to **overwrite** the parameters specified by trtllm-serve.
Generally, We create a YAML file that contains various performance switches.
e.g
```yaml
cuda_graph_config:
padding_enabled: true
print_iter_log: true
kv_cache_dtype: fp8
enable_attention_dp: true
```
## About `--config`
```{eval-rst}
.. include:: ../../_includes/note_sections.rst
:start-after: .. start-note-config-flag-alias
:end-before: .. end-note-config-flag-alias
```
`trtllm-serve` provides `--config` to **overwrite** the parameters specified by `trtllm-serve`.
Generally, we create a YAML file that contains various performance switches. For example:
```yaml
cuda_graph_config:
padding_enabled: true
print_iter_log: true
kv_cache_dtype: fp8
enable_attention_dp: true
```
The following is a list of common performance switches.
#### `kv_cache_config`
@ -274,7 +262,7 @@ The following is a list of common performance switches.
&emsp;**Default**: TRTLLM
See the [TorchLlmArgs class](https://nvidia.github.io/TensorRT-LLM/llm-api/reference.html#tensorrt_llm.llmapi.TorchLlmArgs) for the full list of options which can be used in the extra\_llm\_api\_options`.`
See the [TorchLlmArgs class](https://nvidia.github.io/TensorRT-LLM/llm-api/reference.html#tensorrt_llm.llmapi.TorchLlmArgs) for the full list of options that can be used with `--config`.
## Multimodal Serving and Benchmarking

View File

@ -98,7 +98,7 @@ First, create a configuration file:
.. code-block:: bash
cat >./extra-llm-api-config.yml<<EOF
cat >./config.yml<<EOF
kv_cache_config:
enable_block_reuse: false
EOF
@ -108,7 +108,7 @@ Then, start the server with the configuration file:
.. code-block:: bash
trtllm-serve Qwen/Qwen2-VL-7B-Instruct \
--extra_llm_api_options ./extra-llm-api-config.yml
--config ./config.yml
Multimodal Chat API
~~~~~~~~~~~~~~~~~~~
@ -201,7 +201,7 @@ You can deploy `DeepSeek-V3 <https://huggingface.co/deepseek-ai/DeepSeek-V3>`_ m
.. code-block:: bash
echo -e "enable_attention_dp: true\npytorch_backend_config:\n enable_overlap_scheduler: true" > extra-llm-api-config.yml
echo -e "enable_attention_dp: true\npytorch_backend_config:\n enable_overlap_scheduler: true" > config.yml
srun -N 2 -w [NODES] \
--output=benchmark_2node.log \
@ -210,7 +210,7 @@ You can deploy `DeepSeek-V3 <https://huggingface.co/deepseek-ai/DeepSeek-V3>`_ m
--container-image=<CONTAINER_IMG> \
--container-mounts=/workspace:/workspace \
--container-workdir /workspace \
bash -c "trtllm-llmapi-launch trtllm-serve deepseek-ai/DeepSeek-V3 --max_batch_size 161 --max_num_tokens 1160 --tp_size 16 --ep_size 4 --kv_cache_free_gpu_memory_fraction 0.95 --extra_llm_api_options ./extra-llm-api-config.yml"
bash -c "trtllm-llmapi-launch trtllm-serve deepseek-ai/DeepSeek-V3 --max_batch_size 161 --max_num_tokens 1160 --tp_size 16 --ep_size 4 --kv_cache_free_gpu_memory_fraction 0.95 --config ./config.yml"
See `the source code <https://github.com/NVIDIA/TensorRT-LLM/blob/main/tensorrt_llm/llmapi/trtllm-llmapi-launch>`_ of ``trtllm-llmapi-launch`` for more details.
@ -234,11 +234,11 @@ For the default PyTorch backend, iteration statistics logging is enabled by sett
# config.yaml
enable_iter_perf_stats: true
Start the server and specify the ``--extra_llm_api_options`` argument with the path to the YAML file:
Start the server and specify the ``--config`` argument with the path to the YAML file:
.. code-block:: bash
trtllm-serve "TinyLlama/TinyLlama-1.1B-Chat-v1.0" --extra_llm_api_options extra_llm_config.yaml
trtllm-serve "TinyLlama/TinyLlama-1.1B-Chat-v1.0" --config config.yaml
After sending at least one inference request to the server, you can fetch runtime iteration statistics by polling the ``/metrics`` endpoint.
Since the statistics are stored in an internal queue and removed once retrieved, it's recommended to poll the endpoint shortly after each request and store the results if needed.
@ -272,10 +272,16 @@ Example output:
}
]
.. _configuring-with-yaml-files:
Configuring with YAML Files
----------------------------
You can configure various options of ``trtllm-serve`` using YAML files by setting the ``--extra_llm_api_options`` option to the path of a YAML file, the arguments in the file will override the corresponding command line arguments.
You can configure various options of ``trtllm-serve`` using YAML files by setting the ``--config`` option to the path of a YAML file. The arguments in the file override the corresponding command line arguments.
.. include:: ../../_includes/note_sections.rst
:start-after: .. start-note-config-flag-alias
:end-before: .. end-note-config-flag-alias
The YAML file provides the configuration for `tensorrt_llm.llmapi.LlmArgs <https://nvidia.github.io/TensorRT-LLM/llm-api/reference.html#tensorrt_llm.llmapi.TorchLlmArgs>`_. The class has multiple levels of hierarchy; top-level arguments such as ``max_batch_size`` are set directly at the root of the YAML file.
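
A minimal sketch, with hypothetical placeholder values, might look like:

.. code-block:: yaml

   # Illustrative only; see the LlmArgs reference above for the full option list
   max_batch_size: 256
   max_num_tokens: 4096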

File diff suppressed because it is too large

View File

@ -115,7 +115,7 @@ append: EOF
Below is an example command to launch the TensorRT LLM server with the DeepSeek-R1 model from within the container. The command is specifically configured for the 1024/1024 Input/Output Sequence Length test. The explanation of each flag is shown in the “LLM API Options (YAML Configuration)” section.
```shell
trtllm-serve deepseek-ai/DeepSeek-R1-0528 --host 0.0.0.0 --port 8000 --extra_llm_api_options ${EXTRA_LLM_API_FILE}
trtllm-serve deepseek-ai/DeepSeek-R1-0528 --host 0.0.0.0 --port 8000 --config ${EXTRA_LLM_API_FILE}
```
After the server is set up, the client can now send prompt requests to the server and receive results.
@ -124,7 +124,7 @@ After the server is set up, the client can now send prompt requests to the serve
<!-- TODO: this section is duplicated across the deployment guides; they should be consolidated to a central file and imported as needed, or we can remove this and link to LLM API reference -->
These options provide control over TensorRT LLM's behavior and are set within the YAML file passed to the `trtllm-serve` command via the `--extra_llm_api_options` argument.
These options provide control over TensorRT LLM's behavior and are set within the YAML file passed to the `trtllm-serve` command via the `--config` argument.
#### `tensor_parallel_size`
@ -200,7 +200,7 @@ These options provide control over TensorRT LLM's behavior and are set within th
* **Default**: `TRTLLM`
See the [`TorchLlmArgs` class](https://nvidia.github.io/TensorRT-LLM/llm-api/reference.html#tensorrt_llm.llmapi.TorchLlmArgs) for the full list of options which can be used in the `extra_llm_api_options`.
See the [`TorchLlmArgs` class](https://nvidia.github.io/TensorRT-LLM/llm-api/reference.html#tensorrt_llm.llmapi.TorchLlmArgs) for the full list of options which can be used in the YAML configuration file.
### Wide Expert Parallelism
@ -435,7 +435,7 @@ $$
The following tables list recommended configurations from the comprehensive database for different performance profiles.
```{eval-rst}
.. include:: note_sections.rst
.. include:: ../_includes/note_sections.rst
:start-after: .. start-note-traffic-patterns
:end-before: .. end-note-traffic-patterns

View File

@ -113,7 +113,7 @@ append: EOF
Below is an example command to launch the TensorRT LLM server with the GPT-OSS model from within the container. The command is specifically configured for the 1024/1024 Input/Output Sequence Length test. The explanation of each flag is shown in the “LLM API Options (YAML Configuration)” section.
```shell
trtllm-serve openai/gpt-oss-120b --host 0.0.0.0 --port 8000 --extra_llm_api_options ${EXTRA_LLM_API_FILE}
trtllm-serve openai/gpt-oss-120b --host 0.0.0.0 --port 8000 --config ${EXTRA_LLM_API_FILE}
```
After the server is set up, the client can now send prompt requests to the server and receive results.
@ -122,7 +122,7 @@ After the server is set up, the client can now send prompt requests to the serve
<!-- TODO: this section is duplicated across the deployment guides; they should be consolidated to a central file and imported as needed, or we can remove this and link to LLM API reference -->
These options provide control over TensorRT LLM's behavior and are set within the YAML file passed to the `trtllm-serve` command via the `--extra_llm_api_options` argument.
These options provide control over TensorRT LLM's behavior and are set within the YAML file passed to the `trtllm-serve` command via the `--config` argument.
#### `tensor_parallel_size`
@ -178,7 +178,7 @@ These options provide control over TensorRT LLM's behavior and are set within th
* `backend`: The backend to use for MoE operations.
**Default**: `CUTLASS`
See the [`TorchLlmArgs` class](https://nvidia.github.io/TensorRT-LLM/llm-api/reference.html#tensorrt_llm.llmapi.TorchLlmArgs) for the full list of options which can be used in the `extra_llm_api_options`.
See the [`TorchLlmArgs` class](https://nvidia.github.io/TensorRT-LLM/llm-api/reference.html#tensorrt_llm.llmapi.TorchLlmArgs) for the full list of options which can be used in the YAML configuration file.
## Testing API Endpoint
@ -383,7 +383,7 @@ $$
The following table lists recommended configurations from the comprehensive database for different performance profiles.
```{eval-rst}
.. include:: note_sections.rst
.. include:: ../_includes/note_sections.rst
:start-after: .. start-note-traffic-patterns
:end-before: .. end-note-traffic-patterns

View File

@ -60,7 +60,7 @@ With the `EXTRA_OPTIONS_YAML_FILE`, use the following example command to launch
```bash
trtllm-serve nvidia/Kimi-K2-Thinking-NVFP4 \
--host 0.0.0.0 --port 8000 \
--extra_llm_api_options ${EXTRA_OPTIONS_YAML_FILE}
--config ${EXTRA_OPTIONS_YAML_FILE}
```
TensorRT LLM will load weights and select the best kernels during startup. The server is successfully launched when the following log is shown:

View File

@ -83,7 +83,7 @@ append: EOF
Below is an example command to launch the TensorRT LLM server with the Llama-3.3-70B-Instruct-FP8 model from within the container. The command is specifically configured for the 1024/1024 Input/Output Sequence Length test. The explanation of each flag is shown in the “LLM API Options (YAML Configuration)” section.
```shell
trtllm-serve nvidia/Llama-3.3-70B-Instruct-FP8 --host 0.0.0.0 --port 8000 --extra_llm_api_options ${EXTRA_LLM_API_FILE}
trtllm-serve nvidia/Llama-3.3-70B-Instruct-FP8 --host 0.0.0.0 --port 8000 --config ${EXTRA_LLM_API_FILE}
```
After the server is set up, the client can now send prompt requests to the server and receive results.
@ -92,7 +92,7 @@ After the server is set up, the client can now send prompt requests to the serve
<!-- TODO: this section is duplicated across the deployment guides; they should be consolidated to a central file and imported as needed, or we can remove this and link to LLM API reference -->
These options provide control over TensorRT LLM's behavior and are set within the YAML file passed to the `trtllm-serve` command via the `--extra_llm_api_options` argument.
These options provide control over TensorRT LLM's behavior and are set within the YAML file passed to the `trtllm-serve` command via the `--config` argument.
#### `tensor_parallel_size`
@ -170,7 +170,7 @@ These options provide control over TensorRT LLM's behavior and are set within th
&emsp;**Default**: TRTLLM
See the [TorchLlmArgs](https://nvidia.github.io/TensorRT-LLM/llm-api/reference.html#tensorrt_llm.llmapi.TorchLlmArgs) class for the full list of options which can be used in the `extra_llm_api_options`.
See the [TorchLlmArgs](https://nvidia.github.io/TensorRT-LLM/llm-api/reference.html#tensorrt_llm.llmapi.TorchLlmArgs) class for the full list of options which can be used in the YAML configuration file.
## Testing API Endpoint

View File

@ -82,7 +82,7 @@ append: EOF
Below is an example command to launch the TensorRT LLM server with the Llama-4-Scout-17B-16E-Instruct-FP8 model from within the container. The command is specifically configured for the 1024/1024 Input/Output Sequence Length test. The explanation of each flag is shown in the “LLM API Options (YAML Configuration)” section.
```shell
trtllm-serve nvidia/Llama-4-Scout-17B-16E-Instruct-FP8 --host 0.0.0.0 --port 8000 --extra_llm_api_options ${EXTRA_LLM_API_FILE}
trtllm-serve nvidia/Llama-4-Scout-17B-16E-Instruct-FP8 --host 0.0.0.0 --port 8000 --config ${EXTRA_LLM_API_FILE}
```
After the server is set up, the client can now send prompt requests to the server and receive results.
@ -91,7 +91,7 @@ After the server is set up, the client can now send prompt requests to the serve
<!-- TODO: this section is duplicated across the deployment guides; they should be consolidated to a central file and imported as needed, or we can remove this and link to LLM API reference -->
These options provide control over TensorRT LLM's behavior and are set within the YAML file passed to the `trtllm-serve` command via the `--extra_llm_api_options` argument.
These options provide control over TensorRT LLM's behavior and are set within the YAML file passed to the `trtllm-serve` command via the `--config` argument.
#### `tensor_parallel_size`
@ -166,7 +166,7 @@ These options provide control over TensorRT LLM's behavior and are set within th
* **Default**: `TRTLLM`
See the [TorchLlmArgs](https://nvidia.github.io/TensorRT-LLM/llm-api/reference.html#tensorrt_llm.llmapi.TorchLlmArgs) class for the full list of options which can be used in the `extra_llm_api_options`.
See the [TorchLlmArgs](https://nvidia.github.io/TensorRT-LLM/llm-api/reference.html#tensorrt_llm.llmapi.TorchLlmArgs) class for the full list of options which can be used in the YAML configuration file.
## Testing API Endpoint

View File

@ -61,7 +61,7 @@ append: EOF
Below is an example command to launch the TensorRT LLM server with the Qwen3-Next model from within the container.
```shell
trtllm-serve Qwen/Qwen3-Next-80B-A3B-Thinking --host 0.0.0.0 --port 8000 --extra_llm_api_options ${EXTRA_LLM_API_FILE}
trtllm-serve Qwen/Qwen3-Next-80B-A3B-Thinking --host 0.0.0.0 --port 8000 --config ${EXTRA_LLM_API_FILE}
```
After the server is set up, the client can now send prompt requests to the server and receive results.
@ -70,7 +70,7 @@ After the server is set up, the client can now send prompt requests to the serve
<!-- TODO: this section is duplicated across the deployment guides; they should be consolidated to a central file and imported as needed, or we can remove this and link to LLM API reference -->
These options provide control over TensorRT LLM's behavior and are set within the YAML file passed to the `trtllm-serve` command via the `--extra_llm_api_options` argument.
These options provide control over TensorRT LLM's behavior and are set within the YAML file passed to the `trtllm-serve` command via the `--config` argument.
#### `tensor_parallel_size`
@ -127,7 +127,7 @@ These options provide control over TensorRT LLM's behavior and are set within th
* `backend`: The backend to use for MoE operations.
**Default**: `CUTLASS`
See the [`TorchLlmArgs` class](https://nvidia.github.io/TensorRT-LLM/llm-api/reference.html#tensorrt_llm.llmapi.TorchLlmArgs) for the full list of options which can be used in the `extra_llm_api_options`.
See the [`TorchLlmArgs` class](https://nvidia.github.io/TensorRT-LLM/llm-api/reference.html#tensorrt_llm.llmapi.TorchLlmArgs) for the full list of options which can be used in the YAML configuration file.
## Testing API Endpoint
@ -220,7 +220,7 @@ If you want to save the results to a file add the following options.
--result-filename "concurrency_${concurrency}.json"
```
For more benchmarking options see [benchmark_serving.py](https://github.com/NVIDIA/TensorRT-LLM/blob/main/tensorrt_llm/serve/scripts/benchmark_serving.py)
For more benchmarking options see [benchmark_serving.py](https://github.com/NVIDIA/TensorRT-LLM/blob/main/tensorrt_llm/serve/scripts/benchmark_serving.py)
Run `bench.sh` to begin a serving benchmark. This will take a long time if you run all the concurrencies mentioned in the above `bench.sh` script.

View File

@ -66,7 +66,7 @@ append: EOF
Below is an example command to launch the TensorRT LLM server with the Qwen3 model from within the container.
```shell
trtllm-serve Qwen/Qwen3-30B-A3B --host 0.0.0.0 --port 8000 --extra_llm_api_options ${EXTRA_LLM_API_FILE}
trtllm-serve Qwen/Qwen3-30B-A3B --host 0.0.0.0 --port 8000 --config ${EXTRA_LLM_API_FILE}
```
After the server is set up, the client can now send prompt requests to the server and receive results.
@ -75,7 +75,7 @@ After the server is set up, the client can now send prompt requests to the serve
<!-- TODO: this section is duplicated across the deployment guides; they should be consolidated to a central file and imported as needed, or we can remove this and link to LLM API reference -->
These options provide control over TensorRT LLM's behavior and are set within the YAML file passed to the `trtllm-serve` command via the `--extra_llm_api_options` argument.
These options provide control over TensorRT LLM's behavior and are set within the YAML file passed to the `trtllm-serve` command via the `--config` argument.
#### `tensor_parallel_size`
@ -127,10 +127,10 @@ These options provide control over TensorRT LLM's behavior and are set within th
* **Options**:
* `backend`: The backend to use for MoE operations.
**Default**: `CUTLASS`
See the [`TorchLlmArgs` class](https://nvidia.github.io/TensorRT-LLM/llm-api/reference.html#tensorrt_llm.llmapi.TorchLlmArgs) for the full list of options which can be used in the `extra_llm_api_options`.
See the [`TorchLlmArgs` class](https://nvidia.github.io/TensorRT-LLM/llm-api/reference.html#tensorrt_llm.llmapi.TorchLlmArgs) for the full list of options which can be used in the YAML configuration file.
## Testing API Endpoint
@ -247,7 +247,7 @@ If you want to save the results to a file add the following options.
--result-filename "concurrency_${concurrency}.json"
```
For more benchmarking options see [benchmark_serving.py](https://github.com/NVIDIA/TensorRT-LLM/blob/main/tensorrt_llm/serve/scripts/benchmark_serving.py)
For more benchmarking options see [benchmark_serving.py](https://github.com/NVIDIA/TensorRT-LLM/blob/main/tensorrt_llm/serve/scripts/benchmark_serving.py)
Run `bench.sh` to begin a serving benchmark. This will take a long time if you run all the concurrencies mentioned in the above `bench.sh` script.

View File

@ -17,7 +17,7 @@ The TensorRT LLM Docker container makes these config files available at ``/app/t
export TRTLLM_DIR="/app/tensorrt_llm" # path to the TensorRT LLM repo in your local environment
.. include:: note_sections.rst
.. include:: ../_includes/note_sections.rst
:start-after: .. start-note-quick-start-isl-osl
:end-before: .. end-note-quick-start-isl-osl
@ -36,52 +36,52 @@ This table is designed to provide a straightforward starting point; for detailed
- H100, H200
- Max Throughput
- `deepseek-r1-throughput.yaml <https://github.com/NVIDIA/TensorRT-LLM/blob/main/examples/configs/curated/deepseek-r1-throughput.yaml>`_
- ``trtllm-serve deepseek-ai/DeepSeek-R1-0528 --extra_llm_api_options ${TRTLLM_DIR}/examples/configs/curated/deepseek-r1-throughput.yaml``
- ``trtllm-serve deepseek-ai/DeepSeek-R1-0528 --config ${TRTLLM_DIR}/examples/configs/curated/deepseek-r1-throughput.yaml``
* - `DeepSeek-R1 <https://huggingface.co/deepseek-ai/DeepSeek-R1-0528>`_
- B200, GB200
- Max Throughput
- `deepseek-r1-deepgemm.yaml <https://github.com/NVIDIA/TensorRT-LLM/blob/main/examples/configs/curated/deepseek-r1-deepgemm.yaml>`_
- ``trtllm-serve deepseek-ai/DeepSeek-R1-0528 --extra_llm_api_options ${TRTLLM_DIR}/examples/configs/curated/deepseek-r1-deepgemm.yaml``
- ``trtllm-serve deepseek-ai/DeepSeek-R1-0528 --config ${TRTLLM_DIR}/examples/configs/curated/deepseek-r1-deepgemm.yaml``
* - `DeepSeek-R1 (NVFP4) <https://huggingface.co/nvidia/DeepSeek-R1-FP4>`_
- B200, GB200
- Max Throughput
- `deepseek-r1-throughput.yaml <https://github.com/NVIDIA/TensorRT-LLM/blob/main/examples/configs/curated/deepseek-r1-throughput.yaml>`_
- ``trtllm-serve nvidia/DeepSeek-R1-FP4 --extra_llm_api_options ${TRTLLM_DIR}/examples/configs/curated/deepseek-r1-throughput.yaml``
- ``trtllm-serve nvidia/DeepSeek-R1-FP4 --config ${TRTLLM_DIR}/examples/configs/curated/deepseek-r1-throughput.yaml``
* - `DeepSeek-R1 (NVFP4) <https://huggingface.co/nvidia/DeepSeek-R1-FP4-v2>`_
- B200, GB200
- Min Latency
- `deepseek-r1-latency.yaml <https://github.com/NVIDIA/TensorRT-LLM/blob/main/examples/configs/curated/deepseek-r1-latency.yaml>`_
- ``trtllm-serve nvidia/DeepSeek-R1-FP4-v2 --extra_llm_api_options ${TRTLLM_DIR}/examples/configs/curated/deepseek-r1-latency.yaml``
- ``trtllm-serve nvidia/DeepSeek-R1-FP4-v2 --config ${TRTLLM_DIR}/examples/configs/curated/deepseek-r1-latency.yaml``
* - `gpt-oss-120b <https://huggingface.co/openai/gpt-oss-120b>`_
- Any
- Max Throughput
- `gpt-oss-120b-throughput.yaml <https://github.com/NVIDIA/TensorRT-LLM/blob/main/examples/configs/curated/gpt-oss-120b-throughput.yaml>`_
- ``trtllm-serve openai/gpt-oss-120b --extra_llm_api_options ${TRTLLM_DIR}/examples/configs/curated/gpt-oss-120b-throughput.yaml``
- ``trtllm-serve openai/gpt-oss-120b --config ${TRTLLM_DIR}/examples/configs/curated/gpt-oss-120b-throughput.yaml``
* - `gpt-oss-120b <https://huggingface.co/openai/gpt-oss-120b>`_
- Any
- Min Latency
- `gpt-oss-120b-latency.yaml <https://github.com/NVIDIA/TensorRT-LLM/blob/main/examples/configs/curated/gpt-oss-120b-latency.yaml>`_
- ``trtllm-serve openai/gpt-oss-120b --extra_llm_api_options ${TRTLLM_DIR}/examples/configs/curated/gpt-oss-120b-latency.yaml``
- ``trtllm-serve openai/gpt-oss-120b --config ${TRTLLM_DIR}/examples/configs/curated/gpt-oss-120b-latency.yaml``
* - `Qwen3-Next-80B-A3B-Thinking <https://huggingface.co/Qwen/Qwen3-Next-80B-A3B-Thinking>`_
- Any
- Max Throughput
- `qwen3-next.yaml <https://github.com/NVIDIA/TensorRT-LLM/blob/main/examples/configs/curated/qwen3-next.yaml>`_
- ``trtllm-serve Qwen/Qwen3-Next-80B-A3B-Thinking --extra_llm_api_options ${TRTLLM_DIR}/examples/configs/curated/qwen3-next.yaml``
- ``trtllm-serve Qwen/Qwen3-Next-80B-A3B-Thinking --config ${TRTLLM_DIR}/examples/configs/curated/qwen3-next.yaml``
* - Qwen3 family (e.g. `Qwen3-30B-A3B <https://huggingface.co/Qwen/Qwen3-30B-A3B>`_)
- Any
- Max Throughput
- `qwen3.yaml <https://github.com/NVIDIA/TensorRT-LLM/blob/main/examples/configs/curated/qwen3.yaml>`_
- ``trtllm-serve Qwen/Qwen3-30B-A3B --extra_llm_api_options ${TRTLLM_DIR}/examples/configs/curated/qwen3.yaml`` (swap to another Qwen3 model name as needed)
- ``trtllm-serve Qwen/Qwen3-30B-A3B --config ${TRTLLM_DIR}/examples/configs/curated/qwen3.yaml`` (swap to another Qwen3 model name as needed)
* - `Llama-3.3-70B (FP8) <https://huggingface.co/nvidia/Llama-3.3-70B-Instruct-FP8>`_
- Any
- Max Throughput
- `llama-3.3-70b.yaml <https://github.com/NVIDIA/TensorRT-LLM/blob/main/examples/configs/curated/llama-3.3-70b.yaml>`_
- ``trtllm-serve nvidia/Llama-3.3-70B-Instruct-FP8 --extra_llm_api_options ${TRTLLM_DIR}/examples/configs/curated/llama-3.3-70b.yaml``
- ``trtllm-serve nvidia/Llama-3.3-70B-Instruct-FP8 --config ${TRTLLM_DIR}/examples/configs/curated/llama-3.3-70b.yaml``
* - `Llama 4 Scout (FP8) <https://huggingface.co/nvidia/Llama-4-Scout-17B-16E-Instruct-FP8>`_
- Any
- Max Throughput
- `llama-4-scout.yaml <https://github.com/NVIDIA/TensorRT-LLM/blob/main/examples/configs/curated/llama-4-scout.yaml>`_
- ``trtllm-serve nvidia/Llama-4-Scout-17B-16E-Instruct-FP8 --extra_llm_api_options ${TRTLLM_DIR}/examples/configs/curated/llama-4-scout.yaml``
- ``trtllm-serve nvidia/Llama-4-Scout-17B-16E-Instruct-FP8 --config ${TRTLLM_DIR}/examples/configs/curated/llama-4-scout.yaml``
Model-Specific Deployment Guides
---------------------------------

View File

@ -2,6 +2,13 @@
# TensorRT LLM Benchmarking
```{eval-rst}
.. include:: ../_includes/note_sections.rst
:start-after: .. start-note-config-flag-alias
:end-before: .. end-note-config-flag-alias
```
TensorRT LLM provides the `trtllm-bench` CLI, a packaged benchmarking utility that aims to make it
easier for users to reproduce our officially published [performance overview](./perf-overview.md#throughput-measurements). `trtllm-bench` provides the following:
@ -176,7 +183,7 @@ trtllm-bench --model meta-llama/Llama-3.1-8B prepare-dataset --output /tmp/synth
To benchmark the PyTorch backend (`tensorrt_llm._torch`), use the following command with [dataset](#preparing-a-dataset) generated from previous steps. The `throughput` benchmark initializes the backend by tuning against the dataset provided via `--dataset` (or the other build mode settings described above).
Note that CUDA graph is enabled by default. You can add additional pytorch config with `--extra_llm_api_options` followed by the path to a YAML file. For more details, please refer to the help text by running the command with `--help`.
Note that CUDA graph is enabled by default. You can add additional pytorch config with `--config` followed by the path to a YAML file. For more details, please refer to the help text by running the command with `--help`.
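
As a minimal illustration, such a YAML file might combine options that appear elsewhere in this document (a hypothetical sketch, not a tuned recommendation):

```yaml
# Hypothetical sketch reusing options shown elsewhere in this documentation
cuda_graph_config:
  enable_padding: true
kv_cache_config:
  enable_block_reuse: false
print_iter_log: true
```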
```{tip}
The command below specifies the `--model_path` option. The model path is optional and used only when you want to run a locally
@ -289,7 +296,7 @@ The generated dataset will include LoRA request metadata. Below is an example of
**LoRA Configuration**
Create an `extra-llm-api-options.yaml` file with LoRA configuration:
Create a `config.yaml` file with LoRA configuration:
```yaml
lora_config:
@ -314,7 +321,7 @@ trtllm-bench --model /path/to/base/model \
throughput \
--dataset synthetic_lora_data.json \
--backend pytorch \
--extra_llm_api_options extra-llm-api-options.yaml
--config config.yaml
```
```{note}

View File

@ -269,7 +269,7 @@ Testing was performed using the PyTorch backend - this workflow does not require
| Stage | Description | Command |
| :- | - | - |
| [Dataset](#preparing-a-dataset) | Create a synthetic dataset | `python benchmarks/cpp/prepare_dataset.py --tokenizer=$model_name --stdout token-norm-dist --num-requests=$num_requests --input-mean=$isl --output-mean=$osl --input-stdev=0 --output-stdev=0 > $dataset_file` |
| [Run](#running-the-benchmark) | Run a benchmark with a dataset | `trtllm-bench --model $model_name throughput --dataset $dataset_file --backend pytorch --extra_llm_api_options $llm_options` |
| [Run](#running-the-benchmark) | Run a benchmark with a dataset | `trtllm-bench --model $model_name throughput --dataset $dataset_file --backend pytorch --config $llm_options` |
### Variables
@ -323,7 +323,7 @@ a model name (HuggingFace reference or path to a local model), a [generated data
For dense / non-MoE models:
```shell
trtllm-bench --tp $tp_size --pp $pp_size --model $model_name throughput --dataset $dataset_file --backend pytorch --extra_llm_api_options $llm_options
trtllm-bench --tp $tp_size --pp $pp_size --model $model_name throughput --dataset $dataset_file --backend pytorch --config $llm_options
```
Llama 3.3
@ -337,7 +337,7 @@ cuda_graph_config:
For MoE models:
```shell
trtllm-bench --tp $tp_size --pp $pp_size --ep $ep_size --model $model_name throughput --dataset $dataset_file --backend pytorch --extra_llm_api_options $llm_options
trtllm-bench --tp $tp_size --pp $pp_size --ep $ep_size --model $model_name throughput --dataset $dataset_file --backend pytorch --config $llm_options
```
GPT-OSS:

View File

@ -24,7 +24,13 @@ As in the PyTorch workflow, AutoDeploy does not require a separate `trtllm-bench
## Advanced Configuration
For more granular control over AutoDeploy's behavior during benchmarking, use the `--extra_llm_api_options` flag with a YAML configuration file:
For more granular control over AutoDeploy's behavior during benchmarking, use the `--config` flag with a YAML configuration file:
```{eval-rst}
.. include:: ../../../_includes/note_sections.rst
:start-after: .. start-note-config-flag-alias
:end-before: .. end-note-config-flag-alias
```
```bash
trtllm-bench \
@ -32,7 +38,7 @@ trtllm-bench \
throughput \
--dataset /tmp/synthetic_128_128.txt \
--backend _autodeploy \
--extra_llm_api_options autodeploy_config.yaml
--config autodeploy_config.yaml
```
### Configuration Examples

View File

@ -1,4 +1,4 @@
# Disaggregated Serving
# Disaggregated Serving
- [Motivation](#Motivation)
- [KV Cache Exchange](#KV-Cache-Exchange)
@ -100,6 +100,12 @@ For more information on how to use Dynamo with TensorRT-LLM, please refer to [th
The second approach to evaluate disaggregated LLM inference with TensorRT LLM involves launching a separate OpenAI-compatible server per context and generation instance using `trtllm-serve`. An additional server, referred to as the "disaggregated" server, is also launched with `trtllm-serve` and acts as an orchestrator which receives client requests and dispatches them to the appropriate context and generation servers via OpenAI REST API. Figure 6 below illustrates the disaggregated serving workflow when using this approach. When a context instance is done generating the KV blocks associated with the prompt, it returns a response to the disaggregated server. This response includes the prompt tokens, the first generated token and metadata associated with the context request and context instance. This metadata is referred to as context parameters (`ctx_params` in Figure 6). These parameters are then used by the generation instances to establish communication with the context instance and retrieve the KV cache blocks associated with the request.
```{eval-rst}
.. include:: ../_includes/note_sections.rst
:start-after: .. start-note-config-flag-alias
:end-before: .. end-note-config-flag-alias
```
<div align="center">
<figure>
<img src="https://github.com/NVIDIA/TensorRT-LLM/raw/main/docs/source/blogs/media/tech_blog5_Picture3.png" width="800" height="auto">
@ -126,19 +132,19 @@ For example, you could launch two context servers and one generation servers as
```
# Generate context_extra-llm-api-config.yml
# Generate context_config.yml
# Overlap scheduler for context servers are disabled because it's not supported for disaggregated context servers yet
echo -e "disable_overlap_scheduler: True\ncache_transceiver_config:\n backend: UCX\n max_tokens_in_buffer: 2048" > context_extra-llm-api-config.yml
echo -e "disable_overlap_scheduler: True\ncache_transceiver_config:\n backend: UCX\n max_tokens_in_buffer: 2048" > context_config.yml
# Start Context servers
CUDA_VISIBLE_DEVICES=0 trtllm-serve TinyLlama/TinyLlama-1.1B-Chat-v1.0 --host localhost --port 8001 --backend pytorch --extra_llm_api_options ./context_extra-llm-api-config.yml &> log_ctx_0 &
CUDA_VISIBLE_DEVICES=1 trtllm-serve TinyLlama/TinyLlama-1.1B-Chat-v1.0 --host localhost --port 8002 --backend pytorch --extra_llm_api_options ./context_extra-llm-api-config.yml &> log_ctx_1 &
CUDA_VISIBLE_DEVICES=0 trtllm-serve TinyLlama/TinyLlama-1.1B-Chat-v1.0 --host localhost --port 8001 --backend pytorch --config ./context_config.yml &> log_ctx_0 &
CUDA_VISIBLE_DEVICES=1 trtllm-serve TinyLlama/TinyLlama-1.1B-Chat-v1.0 --host localhost --port 8002 --backend pytorch --config ./context_config.yml &> log_ctx_1 &
# Generate gen_extra-llm-api-config.yml
echo -e "cache_transceiver_config:\n backend: UCX\n max_tokens_in_buffer: 2048" > gen_extra-llm-api-config.yml
# Generate gen_config.yml
echo -e "cache_transceiver_config:\n backend: UCX\n max_tokens_in_buffer: 2048" > gen_config.yml
# Start Generation servers
CUDA_VISIBLE_DEVICES=2 trtllm-serve TinyLlama/TinyLlama-1.1B-Chat-v1.0 --host localhost --port 8003 --backend pytorch --extra_llm_api_options ./gen_extra-llm-api-config.yml &> log_gen_0 &
CUDA_VISIBLE_DEVICES=2 trtllm-serve TinyLlama/TinyLlama-1.1B-Chat-v1.0 --host localhost --port 8003 --backend pytorch --config ./gen_config.yml &> log_gen_0 &
```
Once the context and generation servers are launched, you can launch the disaggregated
server, which will accept requests from clients and do the orchestration between context

View File

@ -9,14 +9,20 @@ TensorRT LLM supports two grammar backends:
## Online API: `trtllm-serve`
If you are using `trtllm-serve`, enable guided decoding by specifying `guided_decoding_backend` with `xgrammar` or `llguidance` in the YAML configuration file, and pass it to `--extra_llm_api_options`. For example,
If you are using `trtllm-serve`, enable guided decoding by specifying `guided_decoding_backend` with `xgrammar` or `llguidance` in the YAML configuration file, and pass it to `--config`. For example,
```{eval-rst}
.. include:: ../_includes/note_sections.rst
:start-after: .. start-note-config-flag-alias
:end-before: .. end-note-config-flag-alias
```
```bash
cat > extra_llm_api_options.yaml <<EOF
cat > config.yaml <<EOF
guided_decoding_backend: xgrammar
EOF
trtllm-serve nvidia/Llama-3.1-8B-Instruct-FP8 --extra_llm_api_options extra_llm_api_options.yaml
trtllm-serve nvidia/Llama-3.1-8B-Instruct-FP8 --config config.yaml
```
You should see a log like the following, which indicates the grammar backend is successfully enabled.

View File

@ -157,7 +157,13 @@ llm = LLM(
### YAML Configuration
Create an `extra_llm_api_options.yaml` file:
```{eval-rst}
.. include:: ../_includes/note_sections.rst
:start-after: .. start-note-config-flag-alias
:end-before: .. end-note-config-flag-alias
```
Create a `config.yaml` file:
```yaml
lora_config:
@ -168,7 +174,7 @@ lora_config:
```bash
python -m tensorrt_llm.commands.serve
/path/to/model \
--extra_llm_api_options extra_llm_api_options.yaml
--config config.yaml
```
### Client Usage
@ -196,7 +202,13 @@ response = client.completions.create(
### YAML Configuration
Create an `extra_llm_api_options.yaml` file:
```{eval-rst}
.. include:: ../_includes/note_sections.rst
:start-after: .. start-note-config-flag-alias
:end-before: .. end-note-config-flag-alias
```
Create a `config.yaml` file:
```yaml
lora_config:
@ -216,5 +228,5 @@ lora_config:
```
### Run trtllm-bench
```bash
trtllm-bench --model $model_path throughput --dataset $dataset_path --extra_llm_api_options extra_llm_api_options.yaml --num_requests 64 --concurrency 16
trtllm-bench --model $model_path throughput --dataset $dataset_path --config config.yaml --num_requests 64 --concurrency 16
```

View File

@ -80,7 +80,7 @@ enable_attention_dp: true
EOF
```
then set `--extra_llm_api_options parallel_config.yaml` in `trtllm-serve` or `trtllm-bench`.
then set `--config parallel_config.yaml` in `trtllm-serve` or `trtllm-bench`.
### FFN Module

View File

@ -122,7 +122,13 @@ llm = LLM("/path/to/target_model", speculative_config=speculative_config)
## Usage with `trtllm-bench` and `trtllm-serve`
Speculative decoding options must be specified via `--extra_llm_api_options config.yaml` for both `trtllm-bench` and `trtllm-serve`. All speculative decoding options can be specified in this YAML file. An additional `decoding_type` option is used to specify the type of speculation to use. The available options are:
```{eval-rst}
.. include:: ../_includes/note_sections.rst
:start-after: .. start-note-config-flag-alias
:end-before: .. end-note-config-flag-alias
```
Speculative decoding options must be specified via `--config config.yaml` for both `trtllm-bench` and `trtllm-serve`. All speculative decoding options can be specified in this YAML file. An additional `decoding_type` option is used to specify the type of speculation to use. The available options are:
* `MTP`
* `Eagle` (for EAGLE 3)
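
For instance, patterned on the MTP example used elsewhere in this documentation (the value of `num_nextn_predict_layers` is a placeholder), the YAML file could contain:

```yaml
# Hypothetical MTP sketch; decoding_type selects the speculation algorithm
speculative_config:
  decoding_type: MTP
  num_nextn_predict_layers: 3
```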

View File

@ -31,7 +31,13 @@ Piecewise CUDA Graph is a technique that runs cudagraph-unsupported components (
## Usage
To enable torch.compile and Piecewise CUDA Graph, add the following configuration to `extra_config.yml`. Typically, the `extra_config.yml` can be used by adding launching args `--extra_llm_api_options extra_config.yml` to `trtllm-serve` or `trtllm-bench`.
To enable torch.compile and Piecewise CUDA Graph, add the following configuration to `config.yml`. Typically, `config.yml` is passed by adding the launch argument `--config config.yml` to `trtllm-serve` or `trtllm-bench`.
```{eval-rst}
.. include:: ../_includes/note_sections.rst
:start-after: .. start-note-config-flag-alias
:end-before: .. end-note-config-flag-alias
```
```yaml
... # Other extra config
@ -50,7 +56,7 @@ Piecewise CUDA Graph only handles context-only and mixed context+generation iter
```yaml
cuda_graph_config:
enable_padding: true
max_batch_size: 1024 # Specify max capture batch size for generation only cuda graph. By default, TensorRT LLM will generate a capture list based on it.
torch_compile_config:
capture_num_tokens: '${capture_num_tokens}' # Specify capture_num_tokens for piecewise cuda graph
@ -72,7 +78,7 @@ Guidelines for `capture_num_tokens`:
- Define bounds:
- Lower bound: base it on typical context lengths. In low-latency workflows with KV-cache reuse, it can be as small as <10 tokens.
- Upper bound: set by hardware and model configuration—choose the largest token count that still provides a measurable benefit from Piecewise CUDA Graph even after padding.
- Choose step size: pick step sizes that balance coverage and memory overhead. Use denser steps for smaller token ranges, and a fixed step (e.g., 256) for larger ranges.
- Manage trade-offs: more capture points reduce padding but increase memory use and can lower max concurrency; fewer points save memory but increase padding and compute cost.
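As an illustration of these guidelines (the values below are hypothetical, not a recommendation):

```yaml
torch_compile_config:
  # Denser steps in the small-token range, then a fixed 256-token step.
  capture_num_tokens: [8, 16, 32, 64, 128, 256, 512, 768, 1024, 1280, 1536, 1792, 2048]
```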
@ -80,7 +86,7 @@ Even with Piecewise CUDA Graph enabled, you may still observe bubbles in the con
## Known Issue
Torch compile cannot work with a multi-ModelEngine config.
1. Speculative Decoding in Two-Model Style
@ -104,14 +110,14 @@ Currently, TRT-LLM mainly relies on torch.compile **fullgraph** mode to enable P
#### Custom Op
For ops that cannot be represented by a torch native op, developers need to wrap them into a custom op so that they can work properly with torch.compile. A custom op mainly contains two parts: Op forward implementation & Fake kernel.
1. Op forward implementation: Define how this op performs its forward computation, including any custom CUDA kernels, etc.
2. Fake kernel: Help torch.compile to do the output tensor dtype/shape inference.
After wrapping the op into a torch custom op, the implementation is a complete **black box** for torch.compile. Instead, torch.compile fully relies on the fake kernel to do the tracing.
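As a generic illustration of this two-part pattern (the op name and namespace below are hypothetical, not TensorRT-LLM ops):

```python
import torch

@torch.library.custom_op("mylib::scaled_silu", mutates_args=())
def scaled_silu(x: torch.Tensor, scale: float) -> torch.Tensor:
    # Forward implementation: in practice this may call a custom CUDA kernel.
    return torch.nn.functional.silu(x) * scale

@scaled_silu.register_fake
def _(x: torch.Tensor, scale: float) -> torch.Tensor:
    # Fake kernel: only infers output shape/dtype; no real computation.
    return torch.empty_like(x)
```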
Below is a simple example of a flashinfer op's fake kernel.
```python
@torch.library.custom_op("trtllm::flashinfer_silu_and_mul", mutates_args=())
@ -127,9 +133,9 @@ For more examples, please refer to `tensorrt_llm/_torch/custom_ops`.
#### Current Status
For hot models like deepseek/qwen/llama, we've already wrapped some large modules into custom ops to avoid trace failures/graph breaks and to exclude the output projection & MTP from torch.compile's scope.
This means that developers working inside the attention custom op, the MoE routed expert, and the MTP parts don't need to worry about complex torch.compile constraints, since these parts are treated as a black box by torch.compile. Developers should only make sure the fake kernels of the attention custom op and the routed expert are aligned with the actual implementation.
<div align="center">
@ -158,21 +164,21 @@ For the op outside of attention and MLP, the developer should obey the torch.com
</div>
<p align="center"><sub><em>Figure 2. TensorRT LLM Custom torch.compile Backend Overview</em></sub></p>
Above is the overview of the TensorRT LLM custom backend for `torch.compile`.
#### Torch IR Optimization
Torch IR is the Fx graph that is directly traced by Torch Dynamo. It has several important features for us to do some graph rewriting and get information:
1. Preserve the operations as is: We can easily find a specific operation and then transform it to arbitrary operations. No need to deal with `auto_functionalize`, etc.
2. Preserve original variable tensor name in the Fx graph: For Piecewise CUDA Graph, it needs to find the correct `SymInt` which represents the token number. Hence, we rely on the `input_ids`'s shape to make it find the `SymInt` correctly.
#### ATen IR Optimization
We get ATen IR after explicitly calling `aot_module_simplified` on the Fx graph. ATen IR is
1. In SSA format (no input mutations)
2. Strict subset of aten op (<250): In Torch IR, Python native add op, `torch.Tensor().add()`, `torch.aten.add.Tensor` could be three different ops. After the transform, they will be the same op.
3. Guaranteed metadata information, e.g., dtype and shape propagation
At this IR level, TensorRT LLM performs the following optimizations
@ -183,16 +189,16 @@ All fusions are located in `tensorrt_llm/_torch/compilation/patterns` and implem
1. Inadequate handling of scalars and lists:
- Scalars get specialized into the traced pattern, forcing one pattern per value—impractical and non-general.
- Lists are flattened, turning elements into separate input arguments, making it impossible to match the original operation.
2. Trace-driven pitfalls: Because it's trace-based, the generated source patterns may not meet our needs and can introduce additional issues as we expand pattern coverage.
We mainly do the operation fusion for AllReduce & RMSNorm.
1. AllReduce related fusion: Fuse the following operations into one AllReduce op.
+ AllReduce + Residual + RMSNorm
+ AllReduce + Residual + RMSNorm + FP8 Quantization
+ AllReduce + Residual + RMSNorm + FP4 Quantization
2. AllReduce with User Buffer: Converts AllReduce operations to use userbuffers to avoid extra copy overhead.
We enable these fusions in torch.compile because they're difficult to express in eager mode. For the AllReduce + RMSNorm fusion, which is cross-module, implementing it in eager mode would require moving code between modules, leading to redundant, complex, and hard-to-maintain logic.
@ -204,7 +210,7 @@ Because ATen IR is SSA, in-place operations are rewritten as out-of-place via a
##### Auto Multi-stream
Currently torch.compile won't create a subgraph for a user-defined CUDA stream. Instead, it will convert it to `set_stream`. The `set_stream` op doesn't have any consumers, so it will be removed in the Torch IR to ATen IR transformation, thus losing all the multi-stream scheduling.
To address this, we implemented an auto multi-stream scheduler:
@ -214,7 +220,7 @@ To address this, we implemented an auto multi-stream scheduler:
3. Schedules nodes onto up to `max_num_streams` specified by user config
4. Insert multi-stream related custom ops: since the Fx graph executes operators in list order, we insert streaming-control operators directly into the graph. Moreover, as these operators have no users, we cannot perform dead-code elimination after multi-stream scheduling. Below is an example of multi-stream scheduling, in which `trtllm.dsv3_router_gemm_op.default` and `trtllm.silu_and_mul.default` + `trtllm.fp4_quantize.default` execute in parallel.
```
call_function record_event trtllm.record_event (1,) {}
@ -238,7 +244,7 @@ To address this, we implemented an auto multi-stream scheduler:
call_function record_stream_1 trtllm.record_stream (mm_1, 1) {}
call_function record_event_4 trtllm.record_event (2,) {}
call_function set_stream_1 trtllm.set_stream (0,) {}
call_function wait_event_2 trtllm.wait_event (2,)
```
#### Piecewise CUDA Graph
@ -254,14 +260,14 @@ In the current design, we assume the attention block is the only non-capturable
Notes:
1. Attention **MUST NOT** have any output. The output tensor should be allocated by CUDA Graph.
2. Each sub-cudagraph **MUST** have at least one input tensor that contains the number of tokens in the shape.
3. Only allow dynamic shape for `num_of_tokens` dim.
### Common Trace Failure
1. Custom op fake kernel: For every custom op, developers must implement a correct fake kernel. **Make sure to update the corresponding fake kernel when the custom op is changed**
2. Dynamic Iteration Number Loop: This is technically not a trace failure, but it will introduce long tracing times that are generally not acceptable. When torch.compile converts PyTorch modeling code to an Fx graph, it tries to unroll loops. For a loop with a large, dynamic iteration count and a large loop body, the tracing process will take a long time to do the unrolling.
1. If the IO of the loop can be easily written into a custom op format, try to replace it with a custom op
2. If the loop num is unchanged during the whole inference service lifetime, then it is ok to leave the loop as is. (e.g., Model decoder layer loop)
@ -276,30 +282,30 @@ Notes:
+ `torch.nonzeros()`: Produce data-dependent dynamic shape tensor
+ `torch.sym_min`: `SymInt` aware min
+ `torch.Tensor.tolist()`, `torch.Tensor.item()`
+ **Solution:** Use them inside a custom op if these operators don't get involved in producing the custom op's output tensor.
2. Use of a custom object's method: For a class like the mapping config, we cannot directly use its methods such as `has_pp()` in the model forward.
+ **Solution**: We should convert it to a bool in the model init and use the bool.
```python
class Mapping(object):
def __init__(self, ...):
...
def has_pp(self): # Cannot use this method in torch.compile
return self.pp_size > 1
```
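A minimal sketch of the suggested fix (the attribute name is illustrative): compute the bool once in `__init__` and read that attribute in `forward` instead of calling the method.

```python
class Mapping:
    def __init__(self, pp_size: int = 1):
        self.pp_size = pp_size
        # Pre-computed plain bool; safe to read inside torch.compile'd forward code.
        self.has_pp_flag = self.pp_size > 1
```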
3. Data-Dependent Control (DDC) flow involved in the code
+ **Solution**: Try to avoid DDC in the code. Try to pre-compute the result outside of torch.compile's scope. For the following example, try to pre-compute the `torch.sum(data)` at the data preparation stage, and pass the result to the `forward`.
```python
class TestCase(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, data):
y = x ** 2
if torch.sum(data) >= 4: # Data Dependent Control Here!
@ -308,7 +314,7 @@ Notes:
t = y / 2
t = t + 10
return t
test_case = TestCase()
test_case = torch.compile(test_case, backend=Backend())
x = torch.randn(5).cuda()
@ -320,15 +326,15 @@ Notes:
### Recompilation
1. Try not to use data-dependent dynamic shapes in the model forward (e.g., slicing a tensor based on an input value). This will introduce 0/1 specialization to the model and can trigger recompiles.
1. **0/1 specialization**: torch.compile will recompile the model if a dynamic tensor's dim equals 0 or 1. In the worst case, it will recompile 3 times for one dimension: 0, 1, and >=2.
2. For an int argument that would change during runtime, use `SymInt` rather than int in the C++ custom op definition. Otherwise, it will trigger a recompile when the value changes.
```c++
TORCH_LIBRARY_FRAGMENT(trtllm, m)
{
m.def("allgather(Tensor input, SymInt[]? sizes, int[] group) -> Tensor");
m.def("allgather_list(Tensor[] input_list, SymInt[]? sizes, int[] group) -> Tensor[]");
}
@ -340,13 +346,13 @@ Notes:
2. Control Flow based on dynamic shape
3. Next power of two: Previously, we used `bit_length()` to implement the next-power-of-2 function. However, it caused a recompile for every int value. The code has now been rewritten to be torch.compile-friendly.
```python
def next_positive_power_of_2(x: int) -> int:
if x < 1:
return 1
# Following code is equivalent to 1 << (x - 1).bit_length()
# But this impl does not contain bit_length(), so it can be used by torch compile.
# It can correctly handle 64-bit numbers, which should be enough for now.
@ -359,5 +365,3 @@ Notes:
n |= n >> 32
return n + 1
```
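For reference, since the diff hunk above elides the middle of the function, here is a reconstructed sketch of the complete torch.compile-friendly helper (a reconstruction, not a verbatim copy of the source):

```python
def next_positive_power_of_2(x: int) -> int:
    if x < 1:
        return 1
    # Equivalent to 1 << (x - 1).bit_length(), but written without bit_length()
    # so torch.compile does not recompile for every distinct int value.
    n = x - 1
    n |= n >> 1
    n |= n >> 2
    n |= n >> 4
    n |= n >> 8
    n |= n >> 16
    n |= n >> 32
    return n + 1
```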

View File

@ -358,15 +358,20 @@ def update_version():
docs_source_dir = Path(__file__).parent.resolve()
md_files = list(docs_source_dir.rglob("*.md"))
# Default is to replace `release:x.y.z` placeholders; set to 0 to disable.
if os.environ.get("TRTLLM_DOCS_REPLACE_CONTAINER_TAG", "1") != "1":
return
for file_path in md_files:
with open(file_path, "r") as f:
content = f.read()
content = content.replace(
updated = content.replace(
"nvcr.io/nvidia/tensorrt-llm/release:x.y.z",
f"nvcr.io/nvidia/tensorrt-llm/release:{version}",
)
with open(file_path, "w") as f:
f.write(content)
if updated != content:
with open(file_path, "w") as f:
f.write(updated)
if __name__ == "__main__":

View File

@ -415,11 +415,17 @@ Total Latency (ms): 13525.6862
### Running with the PyTorch Workflow
```{eval-rst}
.. include:: ../../_includes/note_sections.rst
:start-after: .. start-note-config-flag-alias
:end-before: .. end-note-config-flag-alias
```
To benchmark the PyTorch backend (`tensorrt_llm._torch`), use the following command with [dataset](#preparing-a-dataset) generated from previous steps. With the PyTorch flow, you will not need to
run `trtllm-bench build`; the `throughput` benchmark initializes the backend by tuning against the
dataset provided via `--dataset` (or the other build mode settings described [above](#other-build-modes)).
Note that CUDA graph is enabled by default. You can add additional pytorch config with
`--extra_llm_api_options` followed by the path to a YAML file. For more details, please refer to the
`--config` followed by the path to a YAML file. For more details, please refer to the
help text by running the command with `--help`.
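For instance, a hedged sketch of such a YAML and invocation (the file name, model, and option values here are illustrative):

```bash
cat > pytorch_config.yaml <<EOF
cuda_graph_config:
  enable_padding: true
kv_cache_config:
  free_gpu_memory_fraction: 0.9
EOF

trtllm-bench --model meta-llama/Llama-3.1-8B-Instruct \
  throughput \
  --dataset /tmp/synthetic_128_128.txt \
  --config pytorch_config.yaml
```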
```{tip}
@ -511,7 +517,7 @@ The generated dataset will include LoRA request metadata. Below is an example of
**LoRA Configuration**
Create an `extra-llm-api-options.yaml` file with LoRA configuration:
Create a `config.yaml` file with LoRA configuration:
```yaml
lora_config:
@ -535,7 +541,7 @@ lora_config:
trtllm-bench --model /path/to/base/model \
throughput \
--dataset synthetic_lora_data.json \
--extra_llm_api_options extra-llm-api-options.yaml
--config config.yaml
```
```{note}

View File

@ -24,7 +24,7 @@ As in the PyTorch workflow, AutoDeploy does not require a separate `trtllm-bench
## Advanced Configuration
For more granular control over AutoDeploy's behavior during benchmarking, use the `--extra_llm_api_options` flag with a YAML configuration file:
For more granular control over AutoDeploy's behavior during benchmarking, use the `--config` flag with a YAML configuration file:
```bash
trtllm-bench \
@ -32,7 +32,7 @@ trtllm-bench \
throughput \
--dataset /tmp/synthetic_128_128.txt \
--backend _autodeploy \
--extra_llm_api_options autodeploy_config.yaml
--config autodeploy_config.yaml
```
### Configuration Examples

View File

@ -30,13 +30,13 @@ curl -s http://localhost:8000/v1/chat/completions \
## Configuration via YAML
Use `--extra_llm_api_options` to supply a YAML file that augments or overrides server/runtime settings.
Use `--config` to supply a YAML file that augments or overrides server/runtime settings.
```bash
trtllm-serve \
meta-llama/Llama-3.1-8B \
--backend _autodeploy \
--extra_llm_api_options autodeploy_config.yaml
--config autodeploy_config.yaml
```
Example `autodeploy_config.yaml`:

View File

@ -157,7 +157,7 @@ llm = LLM(
### YAML Configuration
Create an `extra_llm_api_options.yaml` file:
Create a `config.yaml` file:
```yaml
lora_config:
@ -170,7 +170,7 @@ lora_config:
```bash
python -m tensorrt_llm.commands.serve
/path/to/model \
--extra_llm_api_options extra_llm_api_options.yaml
--config config.yaml
```
### Client Usage
@ -198,7 +198,7 @@ response = client.completions.create(
### YAML Configuration
Create an `extra_llm_api_options.yaml` file:
Create a `config.yaml` file:
```yaml
lora_config:
@ -220,5 +220,5 @@ lora_config:
### Run trtllm-bench
```bash
trtllm-bench --model $model_path throughput --dataset $dataset_path --extra_llm_api_options extra-llm-api-options.yaml --num_requests 64 --concurrency 16
trtllm-bench --model $model_path throughput --dataset $dataset_path --config config.yaml --num_requests 64 --concurrency 16
```

14
examples/__init__.py Normal file
View File

@ -0,0 +1,14 @@
# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -6,3 +6,4 @@ benchmark_results.json
*.yaml
!nano_v3.yaml
!nemotron_flash.yaml
!model_registry/configs/*.yaml

View File

@ -0,0 +1,160 @@
# AutoDeploy Model Registry
The AutoDeploy model registry provides a comprehensive, maintainable list of supported models for testing and coverage tracking.
## Format
**Version: 2.0** (Flat format with composable configurations)
### Structure
```yaml
version: '2.0'
description: AutoDeploy Model Registry - Flat format with composable configs
models:
- name: meta-llama/Llama-3.1-8B-Instruct
yaml_extra: [dashboard_default.yaml, world_size_2.yaml]
- name: meta-llama/Llama-3.3-70B-Instruct
yaml_extra: [dashboard_default.yaml, world_size_4.yaml, llama-3.3-70b.yaml]
- name: deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B
yaml_extra: [dashboard_default.yaml, world_size_2.yaml, demollm_triton.yaml]
```
### Key Concepts
- **Flat list**: Models are in a single flat list (not grouped)
- **Composable configs**: Each model references YAML config files via `yaml_extra`
- **Deep merging**: Config files are merged in order (later files override earlier ones)
- **No inline args**: All configuration is in YAML files for reusability
## Configuration Files
Config files are stored in `configs/` subdirectory and define runtime parameters:
### Core Configs
| File | Purpose | Example Use |
|------|---------|-------------|
| `dashboard_default.yaml` | Baseline settings for all models | Always first in yaml_extra |
| `world_size_N.yaml` | GPU count (1, 2, 4, 8) | Defines tensor_parallel_size |
### Runtime Configs
| File | Purpose |
|------|---------|
| `multimodal.yaml` | Vision + text models |
| `demollm_triton.yaml` | DemoLLM runtime with Triton backend |
| `simple_shard_only.yaml` | Large models requiring simple sharding |
### Model-Specific Configs
| File | Purpose |
|------|---------|
| `llama-3.3-70b.yaml` | Optimized settings for Llama 3.3 70B |
| `nano_v3.yaml` | Settings for Nemotron Nano V3 |
| `llama-4-scout.yaml` | Settings for Llama 4 Scout |
| `openelm.yaml` | Apple OpenELM (custom tokenizer) |
| `gemma3_1b.yaml` | Gemma 3 1B (sequence length) |
| `deepseek_v3_lite.yaml` | DeepSeek V3/R1 (reduced layers) |
| `llama4_maverick_lite.yaml` | Llama 4 Maverick (reduced layers) |
## Adding a New Model
### Simple Model (Standard Config)
```yaml
- name: organization/my-new-model-7b
yaml_extra: [dashboard_default.yaml, world_size_2.yaml]
```
### Model with Special Requirements
```yaml
- name: organization/my-multimodal-model
yaml_extra: [dashboard_default.yaml, world_size_4.yaml, multimodal.yaml]
```
### Model with Custom Config
1. Create `configs/my_model.yaml`:
```yaml
# Custom settings for my model
max_batch_size: 2048
kv_cache_free_gpu_memory_fraction: 0.95
cuda_graph_config:
enable_padding: true
```
2. Reference it in `models.yaml`:
```yaml
- name: organization/my-custom-model
yaml_extra: [dashboard_default.yaml, world_size_8.yaml, my_model.yaml]
```
## Config Merging
Configs are merged in order. Example:
```yaml
yaml_extra:
- dashboard_default.yaml # baseline: runtime=trtllm, benchmark_enabled=true
- world_size_2.yaml # adds: tensor_parallel_size=2
- openelm.yaml # overrides: tokenizer=llama-2, benchmark_enabled=false
```
**Result**: `runtime=trtllm, tensor_parallel_size=2, tokenizer=llama-2, benchmark_enabled=false`
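A minimal sketch of this merge behavior (helper names and paths are illustrative; this is not the dashboard's actual implementation):

```python
from pathlib import Path

import yaml  # PyYAML


def deep_merge(base: dict, override: dict) -> dict:
    """Recursively merge `override` into `base`; values from `override` win."""
    merged = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = deep_merge(merged[key], value)
        else:
            merged[key] = value
    return merged


def resolve_model_config(yaml_extra: list[str], config_dir: Path) -> dict:
    """Merge config files in the order listed; later files override earlier ones."""
    resolved: dict = {}
    for name in yaml_extra:
        with open(config_dir / name) as f:
            resolved = deep_merge(resolved, yaml.safe_load(f) or {})
    return resolved
```

Calling `resolve_model_config(["dashboard_default.yaml", "world_size_2.yaml", "openelm.yaml"], Path("configs"))` would produce the merged result described above.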
## World Size Guidelines
| World Size | Model Size Range | Example Models |
|------------|------------------|----------------|
| 1 | \< 2B params | TinyLlama, Qwen 0.5B, Phi-4-mini |
| 2 | 2-15B params | Llama 3.1 8B, Qwen 7B, Mistral 7B |
| 4 | 20-80B params | Llama 3.3 70B, QwQ 32B, Gemma 27B |
| 8 | 80B+ params | DeepSeek V3, Llama 405B, Nemotron Ultra |
## Model Coverage
The registry contains models distributed across different GPU configurations (world sizes 1, 2, 4, and 8), including both text-only and multimodal models.
**To verify current model counts and coverage:**
```bash
cd /path/to/autodeploy-dashboard
python3 scripts/prepare_model_coverage_v2.py \
--source local \
--local-path /path/to/TensorRT-LLM \
--output /tmp/model_coverage.yaml
# View summary
grep -E "^- name:|yaml_extra:" /path/to/TensorRT-LLM/examples/auto_deploy/model_registry/models.yaml | wc -l
```
When adding or removing models, use `prepare_model_coverage_v2.py` to validate the registry structure and coverage.
## Best Practices
1. **Always include `dashboard_default.yaml` first** - it provides baseline settings
1. **Always include a `world_size_N.yaml`** - defines GPU count
1. **Add special configs after world_size** - they override defaults
1. **Create reusable configs** - if 3+ models need same settings, make a config file
1. **Use model-specific configs sparingly** - only for unique requirements
1. **Test before committing** - verify with `prepare_model_coverage_v2.py`
## Testing Changes
```bash
# Generate workload from local changes
cd /path/to/autodeploy-dashboard
python3 scripts/prepare_model_coverage_v2.py \
--source local \
--local-path /path/to/TensorRT-LLM \
--output /tmp/test_workload.yaml
# Verify output
cat /tmp/test_workload.yaml
```

View File

@ -0,0 +1,9 @@
# Default configuration for all AutoDeploy dashboard tests
# These are baseline settings that apply to all models unless overridden
runtime: trtllm
attn_backend: flashinfer
compile_backend: torch-compile
model_factory: AutoModelForCausalLM
skip_loading_weights: false
max_seq_len: 512

View File

@ -0,0 +1,4 @@
# Configuration for DeepSeek V3 and R1 with reduced layers
# Full models are too large, so we test with limited layers
model_kwargs:
num_hidden_layers: 10

View File

@ -0,0 +1,4 @@
# Configuration for DemoLLM runtime with Triton backend
# Used for experimental or specific model requirements
runtime: demollm
attn_backend: triton

View File

@ -0,0 +1,3 @@
# Configuration for Gemma 3 1B model
# Specific sequence length requirement due to small attention window
max_seq_len: 511

View File

@ -0,0 +1,10 @@
# Configuration for Llama 3.3 70B
# AutoDeploy-specific settings for large Llama models
max_batch_size: 1024
max_num_tokens: 2048
free_mem_ratio: 0.9
trust_remote_code: true
cuda_graph_batch_sizes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 768, 1024]
kv_cache_config:
dtype: fp8

View File

@ -0,0 +1,5 @@
# Configuration for Llama 4 Maverick with reduced layers
# Full model is too large for testing
model_kwargs:
text_config:
num_hidden_layers: 5

View File

@ -0,0 +1,10 @@
# Configuration for Llama 4 Scout (VLM)
# AutoDeploy-specific settings for Llama 4 Scout MoE vision model
max_batch_size: 1024
max_num_tokens: 2048
free_mem_ratio: 0.9
trust_remote_code: true
cuda_graph_batch_sizes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 768, 1024]
kv_cache_config:
dtype: fp8

View File

@ -0,0 +1,2 @@
# Configuration for multimodal (vision + text) models
model_factory: AutoModelForImageTextToText

View File

@ -0,0 +1,3 @@
# Configuration for Apple OpenELM models
# These models require Llama-2 tokenizer
tokenizer: meta-llama/Llama-2-7b-hf

View File

@ -0,0 +1,5 @@
# Configuration for models that require simple sharding only
# Used for very large models with specific sharding requirements
transforms:
detect_sharding:
simple_shard_only: true

View File

@ -0,0 +1,2 @@
# Configuration for single GPU models
world_size: 1

View File

@ -0,0 +1,2 @@
# Configuration for 2 GPU models
world_size: 2

View File

@ -0,0 +1,2 @@
# Configuration for 4 GPU models
world_size: 4

View File

@ -0,0 +1,2 @@
# Configuration for 8 GPU models
world_size: 8

View File

@ -0,0 +1,248 @@
version: '2.0'
description: AutoDeploy Model Registry - Flat format with composable configs
models:
- name: TinyLlama/TinyLlama-1.1B-Chat-v1.0
yaml_extra: ['dashboard_default.yaml', 'world_size_1.yaml']
- name: Qwen/Qwen2.5-0.5B-Instruct
yaml_extra: ['dashboard_default.yaml', 'world_size_1.yaml']
- name: Qwen/Qwen3-0.6B
yaml_extra: ['dashboard_default.yaml', 'world_size_1.yaml']
# DISABLED: TorchDynamo compilation error - fake tensor dispatch failure
# - name: apple/OpenELM-270M-Instruct
# yaml_extra: ['dashboard_default.yaml', 'world_size_1.yaml', 'openelm.yaml']
# DISABLED: TorchDynamo compilation error - fake tensor dispatch failure
# - name: apple/OpenELM-1_1B-Instruct
# yaml_extra: ['dashboard_default.yaml', 'world_size_1.yaml', 'openelm.yaml']
# DISABLED: TorchDynamo compilation error - fake tensor dispatch failure
# - name: apple/OpenELM-3B-Instruct
# yaml_extra: ['dashboard_default.yaml', 'world_size_1.yaml', 'openelm.yaml']
- name: microsoft/Phi-4-mini-instruct
yaml_extra: ['dashboard_default.yaml', 'world_size_1.yaml']
- name: microsoft/Phi-4-mini-reasoning
yaml_extra: ['dashboard_default.yaml', 'world_size_1.yaml']
- name: google/gemma-3-1b-it
yaml_extra: ['dashboard_default.yaml', 'world_size_1.yaml', 'gemma3_1b.yaml']
# DISABLED: SLURM cluster cancellation - infrastructure issue
# - name: meta-llama/Llama-3.1-8B-Instruct
# yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
# DISABLED: SLURM cluster cancellation - infrastructure issue
# - name: casperhansen/llama-3-8b-instruct-awq
# yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
# DISABLED: SLURM cluster cancellation - infrastructure issue
# - name: meta-llama/Llama-3.2-1B-Instruct
# yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
# DISABLED: SLURM cluster cancellation - infrastructure issue
# - name: meta-llama/Llama-3.2-3B-Instruct
# yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
- name: Qwen/Qwen2.5-1.5B-Instruct
yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
- name: Qwen/Qwen2.5-3B-Instruct
yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
# DISABLED: SLURM cluster cancellation - infrastructure issue
# - name: Qwen/Qwen2.5-7B-Instruct
# yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
- name: Qwen/Qwen2.5-7B-Instruct-AWQ
yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
- name: Qwen/Qwen3-4B
yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
- name: Qwen/Qwen3-8B
yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
- name: microsoft/phi-4
yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
- name: microsoft/Phi-4-reasoning
yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
- name: microsoft/Phi-4-reasoning-plus
yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
- name: google/gemma-1.1-7b-it
yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
# DISABLED: SLURM cluster cancellation - infrastructure issue
# - name: google/gemma-2-2b-it
# yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
# DISABLED: SLURM cluster cancellation - infrastructure issue
# - name: google/gemma-2-9b-it
# yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
- name: google/codegemma-7b-it
yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
# DISABLED: SLURM cluster cancellation - infrastructure issue
# - name: mistralai/Mistral-7B-Instruct-v0.2
# yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
# DISABLED: SLURM cluster cancellation - infrastructure issue
# - name: mistralai/Mistral-7B-Instruct-v0.3
# yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
- name: TheBloke/Mistral-7B-Instruct-v0.2-GPTQ
yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
- name: bigcode/starcoder2-7b
yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
- name: bigcode/starcoder2-15b-instruct-v0.1
yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
# DISABLED: SLURM cluster cancellation - infrastructure issue
# - name: deepseek-ai/DeepSeek-Prover-V1.5-SFT
# yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
# DISABLED: SLURM cluster cancellation - infrastructure issue
# - name: deepseek-ai/DeepSeek-Prover-V2-7B
# yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
# DISABLED: SLURM cluster cancellation - infrastructure issue
# - name: deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B
# yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
- name: ibm-granite/granite-3.1-2b-instruct
yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
# DISABLED: SLURM cluster cancellation - infrastructure issue
# - name: ibm-granite/granite-3.1-8b-instruct
# yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
- name: ibm-granite/granite-3.3-2b-instruct
yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
- name: ibm-granite/granite-3.3-8b-instruct
yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
- name: ibm-granite/granite-guardian-3.1-2b
yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
- name: ibm-granite/granite-guardian-3.2-5b
yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
- name: meta-llama/CodeLlama-7b-Instruct-hf
yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
- name: meta-llama/CodeLlama-7b-Python-hf
yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
# DISABLED: SLURM cluster cancellation - infrastructure issue
# - name: meta-llama/Llama-2-7b-chat-hf
# yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
# DISABLED: FakeTensorMode error in unified_attn export
# - name: nvidia/Llama-3.1-8B-Instruct-FP8
# yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
# DISABLED: SLURM cluster cancellation - infrastructure issue
# - name: nvidia/Llama-3.1-Minitron-4B-Depth-Base
# yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
# DISABLED: SLURM cluster cancellation - infrastructure issue
# - name: nvidia/Llama-3.1-Minitron-4B-Width-Base
# yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
- name: nvidia/Llama-3.1-Nemotron-Nano-8B-v1
yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
- name: nvidia/Mistral-NeMo-Minitron-8B-Base
yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
- name: openai/gpt-oss-20b
yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
# DISABLED: Custom op error - append_paged_kv_cache missing Float kernel
# - name: bigcode/starcoder2-15b
# yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
# DISABLED: SLURM cluster cancellation - infrastructure issue
# - name: ibm-granite/granite-3.0-8b-instruct
# yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
# DISABLED: SLURM cluster cancellation - infrastructure issue
# - name: mistralai/Ministral-8B-Instruct-2410
# yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
- name: nvidia/NVIDIA-Nemotron-Nano-9B-v2-FP8
yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
- name: nvidia/NVIDIA-Nemotron-Nano-9B-v2-NVFP4
yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
- name: nvidia/NVIDIA-Nemotron-Nano-12B-v2-VL-FP8
yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml', 'multimodal.yaml']
- name: google/gemma-3-27b-it
yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml', 'multimodal.yaml']
- name: google/gemma-3-2b-it
yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
- name: deepseek-ai/DeepSeek-V2.5
yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
# DISABLED: Network timeout downloading from Hugging Face
# - name: ai21labs/AI21-Jamba-1.5-Mini
# yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml']
# DISABLED: SLURM cluster cancellation - infrastructure issue
# - name: THUDM/glm-4v-9b
# yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml', 'multimodal.yaml']
# DISABLED: SLURM cluster cancellation - infrastructure issue
# - name: meta-llama/Llama-3.2-11B-Vision-Instruct
# yaml_extra: ['dashboard_default.yaml', 'world_size_2.yaml', 'multimodal.yaml']
# DISABLED: Auto-deploy compilation error
# - name: meta-llama/Llama-3.3-70B-Instruct
# yaml_extra: ['dashboard_default.yaml', 'world_size_4.yaml', 'llama3_3_70b.yaml']
- name: meta-llama/CodeLlama-34b-Instruct-hf
yaml_extra: ['dashboard_default.yaml', 'world_size_4.yaml']
- name: meta-llama/Llama-2-13b-chat-hf
yaml_extra: ['dashboard_default.yaml', 'world_size_4.yaml']
- name: microsoft/Phi-3-medium-128k-instruct
yaml_extra: ['dashboard_default.yaml', 'world_size_4.yaml']
- name: microsoft/Phi-3-medium-4k-instruct
yaml_extra: ['dashboard_default.yaml', 'world_size_4.yaml']
# DISABLED: SLURM cluster cancellation - infrastructure issue
# - name: mistralai/Codestral-22B-v0.1
# yaml_extra: ['dashboard_default.yaml', 'world_size_4.yaml']
# DISABLED: Graph transformation error in auto-deploy
# - name: neuralmagic/Mixtral-8x7B-Instruct-v0.1-FP8
# yaml_extra: ['dashboard_default.yaml', 'world_size_4.yaml']
- name: TheBloke/falcon-40b-instruct-GPTQ
yaml_extra: ['dashboard_default.yaml', 'world_size_4.yaml']
- name: Qwen/QwQ-32B
yaml_extra: ['dashboard_default.yaml', 'world_size_4.yaml']
# DISABLED: SLURM cluster cancellation - infrastructure issue
# - name: google/gemma-2-27b-it
# yaml_extra: ['dashboard_default.yaml', 'world_size_4.yaml']
- name: perplexity-ai/r1-1776-distill-llama-70b
yaml_extra: ['dashboard_default.yaml', 'world_size_4.yaml']
- name: nvidia/NVIDIA-Nemotron-Nano-31B-A3-v3
yaml_extra: ['dashboard_default.yaml', 'world_size_4.yaml', 'nano_v3.yaml']
- name: nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-FP8
yaml_extra: ['dashboard_default.yaml', 'world_size_4.yaml']
- name: Qwen/QwQ-32B-Preview
yaml_extra: ['dashboard_default.yaml', 'world_size_4.yaml']
- name: Qwen/Qwen3-Coder-32B-Instruct
yaml_extra: ['dashboard_default.yaml', 'world_size_4.yaml']
- name: Qwen/Qwen3-235B-A22B-Instruct-2507
yaml_extra: ['dashboard_default.yaml', 'world_size_4.yaml']
# DISABLED: Network timeout downloading from Hugging Face
# - name: ai21labs/AI21-Jamba-1.5-Large
# yaml_extra: ['dashboard_default.yaml', 'world_size_4.yaml']
- name: nvidia/OpenReasoning-Nemotron-32B
yaml_extra: ['dashboard_default.yaml', 'world_size_4.yaml']
# DISABLED: Auto-deploy compilation error
# - name: mistralai/Mistral-Large-Instruct-v2.1
# yaml_extra: ['dashboard_default.yaml', 'world_size_4.yaml']
# DISABLED: SLURM cluster cancellation - infrastructure issue
# - name: deepseek-ai/DeepSeek-R1-Distill-Llama-70B
# yaml_extra: ['dashboard_default.yaml', 'world_size_8.yaml']
# DISABLED: Auto-deploy compilation error
# - name: deepseek-ai/DeepSeek-R1-Distill-Qwen-32B
# yaml_extra: ['dashboard_default.yaml', 'world_size_8.yaml']
# DISABLED: Graph transformation error in auto-deploy
# - name: mistralai/Mixtral-8x22B-Instruct-v0.1
# yaml_extra: ['dashboard_default.yaml', 'world_size_8.yaml']
# DISABLED: FakeTensorMode error in unified_attn export
# - name: nvidia/Llama-3.1-70B-Instruct-FP8
# yaml_extra: ['dashboard_default.yaml', 'world_size_8.yaml']
# DISABLED: FakeTensorMode error in unified_attn export
# - name: nvidia/Llama-3.1-405B-Instruct-FP8
# yaml_extra: ['dashboard_default.yaml', 'world_size_8.yaml']
- name: nvidia/Llama-3.1-Nemotron-70B-Instruct-HF
yaml_extra: ['dashboard_default.yaml', 'world_size_8.yaml']
# DISABLED: Model loading failure - dynamic module registry issue
# - name: nvidia/Llama-3_1-Nemotron-51B-Instruct
# yaml_extra: ['dashboard_default.yaml', 'world_size_8.yaml', 'simple_shard_only.yaml']
- name: nvidia/Llama-3_1-Nemotron-Ultra-253B-v1
yaml_extra: ['dashboard_default.yaml', 'world_size_8.yaml', 'simple_shard_only.yaml']
- name: nvidia/Llama-3_1-Nemotron-Ultra-253B-v1-FP8
yaml_extra: ['dashboard_default.yaml', 'world_size_8.yaml', 'simple_shard_only.yaml']
- name: nvidia/Llama-3_3-Nemotron-Super-49B-v1
yaml_extra: ['dashboard_default.yaml', 'world_size_8.yaml', 'simple_shard_only.yaml']
- name: Qwen/Qwen3-30B-A3B
yaml_extra: ['dashboard_default.yaml', 'world_size_8.yaml', 'simple_shard_only.yaml']
- name: Qwen/Qwen3-235B-A22B
yaml_extra: ['dashboard_default.yaml', 'world_size_8.yaml', 'simple_shard_only.yaml']
- name: deepseek-ai/DeepSeek-R1
yaml_extra: ['dashboard_default.yaml', 'world_size_8.yaml', 'deepseek_v3_lite.yaml']
# DISABLED: Auto-deploy compilation error
# - name: deepseek-ai/DeepSeek-V3
# yaml_extra: ['dashboard_default.yaml', 'world_size_8.yaml', 'deepseek_v3_lite.yaml']
# DISABLED: Assertion failure in auto-deploy transform pipeline
# - name: deepseek-ai/DeepSeek-Coder-V2-Instruct
# yaml_extra: ['dashboard_default.yaml', 'world_size_8.yaml']
- name: Qwen/Qwen3-VL-8B-Instruct
yaml_extra: ['dashboard_default.yaml', 'world_size_8.yaml']
- name: Qwen/Qwen2-VL-72B-Instruct-GPTQ-Int4
yaml_extra: ['dashboard_default.yaml', 'world_size_8.yaml', 'multimodal.yaml']
# DISABLED: SLURM cluster cancellation - infrastructure issue
# - name: codellama/CodeLlama-70b-Instruct-hf
# yaml_extra: ['dashboard_default.yaml', 'world_size_8.yaml']
- name: meta-llama/Llama-3.2-90B-Vision-Instruct
yaml_extra: ['dashboard_default.yaml', 'world_size_8.yaml', 'multimodal.yaml']
- name: openai/gpt-oss-120b
yaml_extra: ['dashboard_default.yaml', 'world_size_8.yaml']
- name: meta-llama/Llama-4-Scout-17B-16E-Instruct
yaml_extra: ['dashboard_default.yaml', 'world_size_8.yaml', 'multimodal.yaml', 'llama4_scout.yaml']
- name: meta-llama/Llama-4-Maverick-17B-128E-Instruct
yaml_extra: ['dashboard_default.yaml', 'world_size_8.yaml', 'multimodal.yaml', 'llama4_maverick_lite.yaml']

View File

@ -1,5 +1,5 @@
# Recommended LLM API Configuration Settings
This directory contains recommended [LLM API](https://nvidia.github.io/TensorRT-LLM/llm-api/) performance settings for popular models. They can be used out-of-the-box with `trtllm-serve` via the `--extra_llm_api_options` CLI flag, or you can adjust them to your specific use case.
This directory contains recommended [LLM API](https://nvidia.github.io/TensorRT-LLM/llm-api/) performance settings for popular models. They can be used out-of-the-box with `trtllm-serve` via the `--config` CLI flag, or you can adjust them to your specific use case.
For model-specific deployment guides, please refer to the [official documentation](https://nvidia.github.io/TensorRT-LLM/deployment-guide/index.html).

View File

@ -0,0 +1,14 @@
# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -13,4 +13,4 @@ stream_interval: 20
num_postprocess_workers: 4
kv_cache_config:
enable_block_reuse: false
free_gpu_memory_fraction: 0.6
free_gpu_memory_fraction: 0.9

View File

@ -0,0 +1,14 @@
# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -6,7 +6,6 @@ print_iter_log: true
kv_cache_config:
dtype: fp8
free_gpu_memory_fraction: 0.8
enable_block_reuse: false
stream_interval: 10
moe_config:
backend: DEEPGEMM

View File

@ -6,7 +6,6 @@ print_iter_log: true
kv_cache_config:
dtype: fp8
free_gpu_memory_fraction: 0.8
enable_block_reuse: false
stream_interval: 10
moe_config:
backend: DEEPGEMM

View File

@ -6,7 +6,6 @@ print_iter_log: true
kv_cache_config:
dtype: fp8
free_gpu_memory_fraction: 0.8
enable_block_reuse: false
stream_interval: 10
moe_config:
backend: DEEPGEMM

View File

@ -6,7 +6,6 @@ print_iter_log: true
kv_cache_config:
dtype: fp8
free_gpu_memory_fraction: 0.8
enable_block_reuse: false
stream_interval: 10
moe_config:
backend: DEEPGEMM

View File

@ -6,7 +6,6 @@ print_iter_log: true
kv_cache_config:
dtype: fp8
free_gpu_memory_fraction: 0.8
enable_block_reuse: false
stream_interval: 10
moe_config:
backend: DEEPGEMM

View File

@ -6,7 +6,6 @@ print_iter_log: true
kv_cache_config:
dtype: fp8
free_gpu_memory_fraction: 0.8
enable_block_reuse: false
stream_interval: 10
moe_config:
backend: DEEPGEMM

View File

@ -6,7 +6,6 @@ print_iter_log: true
kv_cache_config:
dtype: fp8
free_gpu_memory_fraction: 0.8
enable_block_reuse: false
stream_interval: 10
moe_config:
backend: DEEPGEMM

View File

@ -6,7 +6,6 @@ print_iter_log: true
kv_cache_config:
dtype: fp8
free_gpu_memory_fraction: 0.8
enable_block_reuse: false
stream_interval: 10
moe_config:
backend: DEEPGEMM

View File

@ -6,7 +6,6 @@ print_iter_log: true
kv_cache_config:
dtype: fp8
free_gpu_memory_fraction: 0.8
enable_block_reuse: false
stream_interval: 10
moe_config:
backend: DEEPGEMM

View File

@ -6,7 +6,6 @@ print_iter_log: true
kv_cache_config:
dtype: fp8
free_gpu_memory_fraction: 0.8
enable_block_reuse: false
stream_interval: 10
moe_config:
backend: DEEPGEMM

View File

@ -6,7 +6,6 @@ print_iter_log: true
kv_cache_config:
dtype: fp8
free_gpu_memory_fraction: 0.75
enable_block_reuse: false
stream_interval: 10
moe_config:
backend: CUTLASS

View File

@ -6,7 +6,6 @@ print_iter_log: true
kv_cache_config:
dtype: fp8
free_gpu_memory_fraction: 0.75
enable_block_reuse: false
stream_interval: 10
moe_config:
backend: CUTLASS

View File

@ -6,7 +6,6 @@ print_iter_log: true
kv_cache_config:
dtype: fp8
free_gpu_memory_fraction: 0.75
enable_block_reuse: false
stream_interval: 10
moe_config:
backend: CUTLASS

View File

@ -6,7 +6,6 @@ print_iter_log: true
kv_cache_config:
dtype: fp8
free_gpu_memory_fraction: 0.75
enable_block_reuse: false
stream_interval: 10
moe_config:
backend: CUTLASS

View File

@ -6,7 +6,6 @@ print_iter_log: true
kv_cache_config:
dtype: fp8
free_gpu_memory_fraction: 0.75
enable_block_reuse: false
stream_interval: 10
moe_config:
backend: CUTLASS

View File

@ -6,7 +6,6 @@ print_iter_log: true
kv_cache_config:
dtype: fp8
free_gpu_memory_fraction: 0.75
enable_block_reuse: false
stream_interval: 10
moe_config:
backend: CUTLASS

View File

@ -6,7 +6,6 @@ print_iter_log: true
kv_cache_config:
dtype: fp8
free_gpu_memory_fraction: 0.75
enable_block_reuse: false
stream_interval: 10
moe_config:
backend: CUTLASS

View File

@ -6,7 +6,6 @@ print_iter_log: true
kv_cache_config:
dtype: fp8
free_gpu_memory_fraction: 0.75
enable_block_reuse: false
stream_interval: 10
moe_config:
backend: CUTLASS

View File

@ -6,7 +6,6 @@ print_iter_log: true
kv_cache_config:
dtype: fp8
free_gpu_memory_fraction: 0.75
enable_block_reuse: false
stream_interval: 10
moe_config:
backend: CUTLASS

View File

@ -6,7 +6,6 @@ print_iter_log: true
kv_cache_config:
dtype: fp8
free_gpu_memory_fraction: 0.75
enable_block_reuse: false
stream_interval: 10
moe_config:
backend: CUTLASS

View File

@ -6,7 +6,6 @@ print_iter_log: true
kv_cache_config:
dtype: fp8
free_gpu_memory_fraction: 0.8
enable_block_reuse: false
stream_interval: 10
moe_config:
backend: TRTLLM

Some files were not shown because too many files have changed in this diff.