TensorRT-LLMs/cpp/tensorrt_llm/kernels/communicationKernels/allReduceWorkspace.h

/*
* Copyright (c) 2022-2024, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "tensorrt_llm/common/assert.h"
#include "tensorrt_llm/common/config.h"
#include "tensorrt_llm/common/cudaUtils.h"
#include "tensorrt_llm/kernels/communicationKernels/allReduceFusionKernels.h"
#include "tensorrt_llm/runtime/ipcUtils.h"
TRTLLM_NAMESPACE_BEGIN
namespace kernels::ar_fusion
{
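//! Owns the device workspace used by the all-reduce fusion kernels declared in
//! allReduceFusionKernels.h. Judging from its members, it allocates IPC-shared
//! buffers across the `tp_size` tensor-parallel ranks (sized from `max_token_num`
//! and `hidden_dim`) on the given CUDA stream and exposes the resulting array of
//! device pointers through get_workspace().
//!
//! Illustrative usage sketch (argument values and stream construction are
//! hypothetical, not taken from this header):
//! \code
//! auto stream = std::make_shared<tensorrt_llm::runtime::CudaStream>();
//! Workspace workspace(/*rank=*/0, /*tp_size=*/8, /*max_token_num=*/4096, /*hidden_dim=*/8192, stream);
//! void** workspace_ptrs = workspace.get_workspace(); // handed to the fusion kernel parameters
//! \endcode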
class Workspace
{
public:
    Workspace(int rank, int tp_size, int max_token_num, int hidden_dim,
        std::shared_ptr<tensorrt_llm::runtime::CudaStream> stream_ptr);
    ~Workspace();
    void** get_workspace();

private:
    tensorrt_llm::runtime::WorldConfig m_world_config;
    std::shared_ptr<tensorrt_llm::runtime::BufferManager> m_buffer_mgr;
    std::vector<tensorrt_llm::runtime::IpcMemory> m_ipc_mem_handles;
    void* m_workspace;
    std::shared_ptr<tensorrt_llm::runtime::CudaStream> m_cuda_stream;
    void* m_flag_d_ptr;
    void* m_layout_d_ptr;
};
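
//! Initializes `bytes` bytes at `ptr` for the Lamport-style synchronization used by the fusion
//! kernels, asynchronously on `stream`; the exact fill pattern is defined by the implementation.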
void lamport_initialize(void* ptr, size_t bytes, cudaStream_t stream);
} // namespace kernels::ar_fusion
TRTLLM_NAMESPACE_END