TensorRT-LLMs/cpp/tensorrt_llm/batch_manager/utils/logitsThread.h
Robin Kobus 79a94a28f9
refactor: unique_ptr instead of shared_ptr (#4697)
Signed-off-by: Robin Kobus <19427718+Funatiq@users.noreply.github.com>
2025-05-29 22:49:35 +02:00

59 lines
2.1 KiB
C++

/*
* SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "tensorrt_llm/batch_manager/common.h"
#include "tensorrt_llm/executor/executor.h"
#include "tensorrt_llm/runtime/common.h"
#include "tensorrt_llm/runtime/iTensor.h"
#include "tensorrt_llm/runtime/modelConfig.h"
#include <memory>
#include <optional>
// Forward declarations: this header only passes these types around by smart pointer,
// so full definitions are not needed and including their headers is avoided to cut
// compile-time coupling.
namespace tensorrt_llm::batch_manager
{
namespace kv_cache_manager
{
// Defined in the KV-cache manager headers; used here only via std::shared_ptr.
class BaseKVCacheManager;
} // namespace kv_cache_manager
class SequenceSlotManager;
class BasePeftCacheManager;
// NOTE(review): GenerateRequestOptions is declared but not referenced by the
// declarations below — possibly a leftover from an earlier signature; confirm
// before removing.
class GenerateRequestOptions;
} // namespace tensorrt_llm::batch_manager
// Forward declaration: targetModelReceiveLogits() below takes this by const
// reference, so the full definition (in the executor headers, included above via
// executor/executor.h or elsewhere) is not required at this point.
namespace tensorrt_llm::executor
{
struct SpeculativeDecodingFastLogitsInfo;
} // namespace tensorrt_llm::executor
namespace tensorrt_llm::batch_manager::utils
{
/// @brief Thread entry point on the draft-model side of speculative decoding with
///        fast logits transfer: services requests that are waiting to send their
///        generated logits to the target model.
///        (Definition not visible in this header — behavior notes below are
///        inferred from the signature; confirm against the .cpp.)
///
/// @param draftModelThreadShouldExit Set by the owner to request shutdown;
///        presumably polled by the thread's loop — raw pointer, so the owner must
///        outlive the thread.
/// @param draftRequestsWaitingToSendLogits Work queue of draft requests whose
///        logits are pending transmission; non-owning pointer, so synchronization
///        with the producer is the caller's responsibility — TODO confirm locking
///        contract at the call site.
/// @param seqSlotManager Used to manage/release sequence slots for finished requests
///        — assumption based on the type; verify in the definition.
/// @param maxInputLen Maximum input length supported by the (draft) engine.
/// @param kvCacheManager / crossKvCacheManager KV-cache managers (self- and
///        cross-attention); cross may be null for decoder-only models — TODO confirm.
/// @param peftCacheManager PEFT/LoRA cache manager for resource cleanup — TODO confirm.
void draftModelSendLogitsThread(int device, std::atomic<bool>* draftModelThreadShouldExit,
    RequestVector* draftRequestsWaitingToSendLogits, std::shared_ptr<SequenceSlotManager> const& seqSlotManager,
    runtime::SizeType32 maxInputLen, std::shared_ptr<kv_cache_manager::BaseKVCacheManager> const& kvCacheManager,
    std::shared_ptr<kv_cache_manager::BaseKVCacheManager> const& crossKvCacheManager,
    std::shared_ptr<BasePeftCacheManager> const& peftCacheManager);

/// @brief Target-model side of fast logits transfer: receives the draft model's
///        logits described by @p fastLogitsInfo.
///
/// @param fastLogitsInfo Identifies the draft request / sender whose logits to fetch.
/// @param modelConfig Target model configuration (presumably used for logits
///        dtype/shape — confirm in the definition).
/// @return The received logits tensor, or std::nullopt when the transfer does not
///         produce one (exact failure conditions defined in the .cpp).
std::optional<runtime::ITensor::SharedPtr> targetModelReceiveLogits(
    executor::SpeculativeDecodingFastLogitsInfo const& fastLogitsInfo, runtime::ModelConfig const& modelConfig);
} // namespace tensorrt_llm::batch_manager::utils