Mirror of https://github.com/NVIDIA/TensorRT-LLM.git (synced 2026-01-14 06:27:45 +08:00)

refactor: CreateNewDecoderRequests

* refactor: Consolidate request generation in CreateNewDecoderRequests
  - Removed the GenerateRequestOptions class and integrated its functionality into CreateNewDecoderRequests.
  - Updated the constructor of CreateNewDecoderRequests to accept parameters for speculative decoding and normalization options.
  - Modified the operator() method to handle request generation directly, improving code organization and reducing redundancy.
  - Cleaned up associated includes and references throughout the codebase.
* refactor: Simplify request handling in CreateNewDecoderRequests
  - Removed the generateRequestOptions method and integrated its logic directly into the operator() method.
  - Updated the request generation process to improve clarity and reduce redundancy.
  - Adjusted the return type to streamline the handling of batch slots, decoder requests, and sampling configurations.
* refactor: Enhance createDecoderRequests method in CreateNewDecoderRequests
  - Updated the createDecoderRequests method to include additional parameters for decoder state and CUDA streams, improving flexibility in request handling.
  - Removed redundant request generation logic from the operator() method, streamlining the process.
  - Adjusted the newRequest method to utilize the updated decoder request structure, enhancing clarity and maintainability.
* refactor: Use MedusaBuffers instead of RuntimeBuffers in CreateNewDecoderRequests
  - Updated references from RuntimeBuffers to MedusaBuffers across the CreateNewDecoderRequests class and its methods, enhancing clarity in buffer management.
  - Adjusted method signatures and internal logic to accommodate the new MedusaBuffers type, ensuring compatibility with existing functionality.
  - Cleaned up unnecessary includes and improved code organization for better maintainability.
* refactor: Update CreateNewDecoderRequests to use DecoderState and CudaStream parameters
  - Modified method signatures in CreateNewDecoderRequests to replace GptDecoderBatched with runtime::decoder::DecoderState and added a separate CudaStream for the decoder.
  - Adjusted the implementation of the operator() method to accommodate the new parameters, enhancing flexibility in request handling.
  - Updated associated bindings in the pybind11 interface to reflect the changes in method signatures, ensuring consistency across the codebase.
* refactor: Update TRTLLMSampler to use refactored create_new_decoder_requests
  - Updated sampler.py to reflect changes in the request handling logic, replacing generate_request_options with create_new_decoder_requests for improved clarity and consistency.
  - Updated bindings and method signatures for decoder stream handling.
* refactor: Update gptDecoderBatchedTest to use CreateNewDecoderRequests::newRequest

Signed-off-by: Robin Kobus <19427718+Funatiq@users.noreply.github.com>
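Taken together, the commits above change how callers create decoder requests: the former GenerateRequestOptions class is gone, and its logic now lives behind CreateNewDecoderRequests::operator(). Below is a minimal sketch of a post-refactor call site, assuming only the declarations in the header that follows; the include path, the helper function, its parameter list, and the flag values are hypothetical and chosen for illustration.

#include "tensorrt_llm/batch_manager/createNewDecoderRequests.h" // assumed include path for this header

#include <optional>

using namespace tensorrt_llm;

// Hypothetical call site: construct the algorithm once with the speculative decoding
// and normalization options, then invoke operator() for each batch of new context
// requests. It returns batch slots, decoder requests and sampling configs in a single
// tuple instead of routing through the removed GenerateRequestOptions class.
void createRequestsSketch(runtime::ModelConfig const& modelConfig, runtime::WorldConfig const& worldConfig,
    executor::DecodingConfig const& decodingConfig, batch_manager::RequestVector const& contextRequests,
    runtime::BufferManager const& bufferManager, nvinfer1::DataType logitsType,
    batch_manager::DecoderInputBuffers& inputBuffers, runtime::decoder::DecoderState& decoderState,
    runtime::CudaStream const& runtimeStream, runtime::CudaStream const& decoderStream,
    runtime::SizeType32 maxSequenceLength, runtime::SizeType32 beamWidth)
{
    batch_manager::CreateNewDecoderRequests const createNewDecoderRequests{
        /*speculativeDecodingFastLogits=*/false, /*isLeaderInOrchMode=*/true, /*isNormalizeLogProbs=*/true};

    // Medusa buffers are only needed for Medusa models; pass std::nullopt otherwise
    // (assumed here: OptionalRef is constructible from std::nullopt).
    auto [batchSlots, decoderRequests, samplingConfigs] = createNewDecoderRequests(modelConfig, worldConfig,
        decodingConfig, contextRequests, bufferManager, logitsType, inputBuffers, decoderState, runtimeStream,
        decoderStream, maxSequenceLength, beamWidth, /*medusaBuffers=*/std::nullopt);

    // batchSlots, decoderRequests and samplingConfigs would then be handed to the decoder;
    // they are unused in this sketch.
}
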
/*
 * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "tensorrt_llm/batch_manager/common.h"
#include "tensorrt_llm/common/algorithm.h"
#include "tensorrt_llm/common/optionalRef.h"
#include "tensorrt_llm/runtime/bufferManager.h"
#include "tensorrt_llm/runtime/common.h"
#include "tensorrt_llm/runtime/iTensor.h"
#include "tensorrt_llm/runtime/modelConfig.h"
#include "tensorrt_llm/runtime/request.h"
#include "tensorrt_llm/runtime/worldConfig.h"

namespace tensorrt_llm::runtime
{
class DecodingInput;
class DecodingOutput;
class GptDecoderBatched;
class SamplingConfig;
class SpeculativeDecodingMode;

namespace decoder
{
class DecoderState;
} // namespace decoder

} // namespace tensorrt_llm::runtime

namespace tensorrt_llm::batch_manager
{
class MedusaBuffers;
class DecoderInputBuffers;

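//! @brief Algorithm that turns new context requests into decoder requests and initializes the decoder
//! state for them. Per the commit history above, it absorbs the functionality of the former
//! GenerateRequestOptions class.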
class CreateNewDecoderRequests : Algorithm
{
public:
    constexpr static auto name{"CreateNewDecoderRequests"};

    using SizeType32 = tensorrt_llm::runtime::SizeType32;
    using SamplingConfig = tensorrt_llm::runtime::SamplingConfig;
    using CudaStream = tensorrt_llm::runtime::CudaStream;
    using TensorPtr = runtime::ITensor::SharedPtr;
    using SharedConstPtr = runtime::ITensor::SharedConstPtr;
    using DecodingInput = runtime::DecodingInput;
    using DecodingOutput = runtime::DecodingOutput;
    using SpeculativeDecodingMode = runtime::SpeculativeDecodingMode;
    using GptDecoderBatched = runtime::GptDecoderBatched;
    template <typename T>
    using OptionalRef = tensorrt_llm::common::OptionalRef<T>;

    CreateNewDecoderRequests(bool speculativeDecodingFastLogits, bool isLeaderInOrchMode, bool isNormalizeLogProbs)
        : mSpeculativeDecodingFastLogits(speculativeDecodingFastLogits)
        , mIsLeaderInOrchMode(isLeaderInOrchMode)
        , mIsNormalizeLogProbs(isNormalizeLogProbs)
    {
    }

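    //! @brief Create decoder requests and sampling configs for the new context requests and initialize the
    //! decoder state at the assigned batch slots.
    //! @return Tuple of the filled batch slots, the decoder requests and the sampling configs, one entry
    //! per new request.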
    std::tuple<TensorPtr, std::vector<runtime::decoder_batch::Request>, std::vector<runtime::SamplingConfig>>
    operator()(runtime::ModelConfig const& modelConfig, runtime::WorldConfig const& worldConfig,
        executor::DecodingConfig const& decodingConfig, RequestVector const& contextRequests,
        runtime::BufferManager const& bufferManager, nvinfer1::DataType logitsType, DecoderInputBuffers& inputBuffers,
        runtime::decoder::DecoderState& decoderState, CudaStream const& runtimeStream, CudaStream const& decoderStream,
        SizeType32 maxSequenceLength, SizeType32 beamWidth, OptionalRef<MedusaBuffers const> medusaBuffers) const;

    //! @brief Initialize the decoder at `batchSlot` with a new `request`. Exposed only for static batching
    //! via GptDecoderBatched::newBatch().
    static void newRequest(SizeType32 batchSlot, runtime::decoder_batch::Request const& request,
        SamplingConfig const& samplingConfig, runtime::ModelConfig const& modelConfig,
        runtime::decoder::DecoderState& decoderState, CudaStream const& runtimeStream, CudaStream const& decoderStream,
        SizeType32 maxSequenceLength);

private:
    //! @brief Sets up decoder internal tensors for a new speculative decoding request.
    static void newRequestSpeculativeDecoding(SizeType32 batchIdx, runtime::decoder_batch::Request const& request,
        SamplingConfig const& samplingConfig, runtime::ModelConfig const& modelConfig,
        DecodingInput& jointDecodingInput, DecodingOutput& jointDecodingOutput, CudaStream const& runtimeStream,
        CudaStream const& decoderStream, SpeculativeDecodingMode const& speculativeDecodingMode,
        SizeType32 maxDecodingEngineTokens);

    //! @brief Sets up decoder internal tensors for a new request in draft model SpS mode.
    static void newRequestDraftTokensExternal(SizeType32 batchIdx, runtime::decoder_batch::Request const& request,
        SamplingConfig const& samplingConfig, DecodingInput& jointDecodingInput, CudaStream const& decoderStream);

    //! @brief Sets up decoder internal tensors for a new Medusa request.
    static void newRequestMedusa(SizeType32 batchIdx, runtime::decoder_batch::Request const& request,
        DecodingInput& jointDecodingInput, CudaStream const& decoderStream, SizeType32 maxDecodingEngineTokens);

    //! @brief Sets up decoder internal tensors for a new Lookahead request.
    static void newRequestLookahead(SizeType32 batchIdx, runtime::decoder_batch::Request const& request,
        DecodingInput& jointDecodingInput, DecodingOutput& jointDecodingOutput, CudaStream const& runtimeStream);

    //! @brief Sets up decoder internal tensors for a new explicit draft tokens request.
    static void newRequestExplicitDraftTokens(SizeType32 batchIdx, runtime::decoder_batch::Request const& request,
        DecodingOutput& jointDecodingOutput, CudaStream const& runtimeStream);

    //! @brief Sets up decoder internal tensors for a new Eagle request.
    static void newRequestEagle(SizeType32 batchIdx, runtime::decoder_batch::Request const& request,
        runtime::ModelConfig const& modelConfig, DecodingOutput& jointDecodingOutput, CudaStream const& runtimeStream);

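    //! @brief Create one decoder request per finished context request, based on the decoding config and
    //! the optional Medusa buffers.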
    [[nodiscard]] std::vector<runtime::decoder_batch::Request> createDecoderRequests(
        RequestVector const& finishedContextRequests, TensorPtr const& inputIds,
        executor::DecodingConfig const& decodingConfig, runtime::decoder::DecoderState& decoderState,
        runtime::BufferManager const& bufferManager, nvinfer1::DataType logitsType,
        runtime::ModelConfig const& modelConfig, runtime::WorldConfig const& worldConfig,
        runtime::CudaStream const& runtimeStream, runtime::CudaStream const& decoderStream,
        SizeType32 maxSequenceLength, OptionalRef<MedusaBuffers const> medusaBuffers) const;

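    //! @brief Retrieve the draft logits tensor, possibly fetching it via the speculative decoding
    //! fast-logits path.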
    [[nodiscard]] std::shared_ptr<runtime::ITensor> retrieveDraftLogits(runtime::ModelConfig const& modelConfig,
        runtime::WorldConfig const& worldConfig, std::shared_ptr<runtime::ITensor> const& tensor,
        runtime::BufferManager const& bufferManager) const;

    bool mSpeculativeDecodingFastLogits;
    bool mIsLeaderInOrchMode;
    bool mIsNormalizeLogProbs;
};

} // namespace tensorrt_llm::batch_manager