/*
 * SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "tensorrt_llm/batch_manager/kvCacheManager.h"
#include "tensorrt_llm/executor/executor.h"
#include "tensorrt_llm/runtime/bufferManager.h"
#include "tensorrt_llm/runtime/iTensor.h"

#include <atomic>
#include <condition_variable>
#include <map>
#include <memory>
#include <mutex>
#include <optional>

namespace tensorrt_llm::batch_manager::kv_cache_manager
{

// RAII wrapper around a fabric-backed memory allocation used for KV-cache transfer buffers.
class FabricMemory
{
public:
    explicit FabricMemory(size_t size);
    ~FabricMemory();

    FabricMemory(FabricMemory const&) = delete;
    FabricMemory& operator=(FabricMemory const&) = delete;

    FabricMemory(FabricMemory&&) noexcept;
    FabricMemory& operator=(FabricMemory&&) noexcept;

    void* getPtr() const;
    size_t getSize() const;

    static size_t getAlignedSize(size_t size);
    static bool supportFbaricMemory();

private:
    class Impl;
    std::unique_ptr<Impl> pImpl;
};

// Manages pre-allocated (or dynamically allocated) send/receive staging buffers used when
// transferring KV-cache blocks between executor instances.
class CacheTransBufferManager
{
public:
    CacheTransBufferManager(KVCacheManager::BaseKVCacheManager* cacheManager,
        std::optional<size_t> maxNumTokens = std::nullopt, bool transferIndexerKCache = false);

    static size_t preAllocBufferSize(std::map<SizeType32, SizeType32> const& cacheSizeBytesPerTokenPerWindow,
        SizeType32 tokensPerBlock,
        std::optional<executor::CacheTransceiverConfig> const& cacheTransceiverConfig = std::nullopt);

    std::optional<int> assignBufferIndexForSend();
    void freeBufferIndexForSend(std::optional<int> bufferId);
    std::optional<int> assignBufferIndexForRecv();
    void freeBufferIndexForRecv(std::optional<int> bufferId);

    std::tuple<std::vector<runtime::ITensor::SharedPtr>, size_t, bool> getOrAllocateSendBuffers(
        std::optional<int> bufferId, int targetNum, std::vector<size_t> const& requestedNumberOfElements,
        runtime::BufferManager const& bufferManagerToUse);

    std::tuple<std::vector<runtime::ITensor::SharedPtr>, size_t, bool> getOrAllocateRecvBuffers(
        std::optional<int> bufferId, int targetNum, std::vector<size_t> const& requestedNumberOfElements,
        runtime::BufferManager const& bufferManagerToUse);

    runtime::ITensor::SharedPtr getSendBuffer(std::optional<int> bufferId);
    runtime::ITensor::SharedPtr getRecvBuffer(std::optional<int> bufferId);

    size_t getRecvBufferCount();
    size_t getSendBufferCount();

    std::optional<size_t> getMaxNumTokens()
    {
        return mMaxNumTokens;
    }

private:
    // Tracks the buffers and index flags shared by concurrent send (or receive) operations.
    struct ConcurrenceResource
    {
        std::unordered_map<int, runtime::ITensor::SharedPtr> mBuffers;
        std::vector<bool> mBufferIndexFlag;
        std::mutex mBuffersMutex;
        std::condition_variable mBuffersCV;
        std::atomic<int> mConcurrence = 0;
    };

    std::tuple<std::vector<runtime::ITensor::SharedPtr>, size_t, bool> getOrAllocateBuffers(
        std::optional<int> bufferId, int targetNum, std::vector<size_t> const& requestedNumberOfElements,
        runtime::BufferManager const& bufferManagerToUse, ConcurrenceResource& concurrenceResource);

    void allocateBuffer();
    std::optional<int> assignBufferIndex(ConcurrenceResource& resource, size_t bufferCount, bool onlyUseDynamicBuffer);
    void freeBufferIndex(
        ConcurrenceResource& resource, std::optional<int> bufferId, size_t bufferCount, bool onlyUseDynamicBuffer);

    size_t mPreAllocBufferSize;
    size_t mRecvBufferCount;
    size_t mSendBufferCount;
    size_t mTransferBufferSize;
    bool mOnlyUseDynamicBuffer;
    bool mUseFabricMemory;
    size_t mNumberOfElements;
    nvinfer1::DataType mDataType;
    ConcurrenceResource mConcurrenceSendResource;
    ConcurrenceResource mConcurrenceRecvResource;
    KVCacheManager::BaseKVCacheManager* mCacheManager;
    runtime::BufferManager mBufferManager;
    std::vector<std::unique_ptr<FabricMemory>> mFabricMemory;
    std::optional<size_t> mMaxNumTokens;
};

} // namespace tensorrt_llm::batch_manager::kv_cache_manager
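
// Illustrative usage sketch (not part of this header's API surface): one plausible way a
// transceiver-side caller could reserve a send-buffer slot, obtain the staged tensors, and
// release the slot afterwards. The surrounding setup (cacheManager, bufferManager, numLayers,
// requestedNumberOfElements) is assumed context and not defined here.
//
//   CacheTransBufferManager transBufferManager(cacheManager, /*maxNumTokens=*/4096);
//   auto bufferId = transBufferManager.assignBufferIndexForSend();
//   auto [sendBuffers, totalElements, usedPreAllocated] = transBufferManager.getOrAllocateSendBuffers(
//       bufferId, /*targetNum=*/numLayers, requestedNumberOfElements, bufferManager);
//   // ... copy KV-cache blocks into sendBuffers and hand them to the transfer agent ...
//   transBufferManager.freeBufferIndexForSend(bufferId);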