mirror of https://github.com/NVIDIA/TensorRT-LLM.git
synced 2026-01-14 06:27:45 +08:00
* refactor: Update ExecutorConfig to use AdditionalModelOutput type
  - Changed function signatures and member variables across multiple files, replacing std::optional<std::vector<std::string>> with std::optional<std::vector<executor::AdditionalModelOutput>> to carry a gatherContext flag for each additional output.
  - Updated the related serialization and deserialization methods to accommodate the new type.
  - Adjusted tests to reflect the changes in the output handling structure.
  This refactor enhances the flexibility and maintainability of the output configuration in the executor and batch manager components.
  Signed-off-by: Robin Kobus <19427718+Funatiq@users.noreply.github.com>
* refactor: Remove equality operator from TrtGptModelOptionalParams
  - Deleted the operator== implementation from TrtGptModelOptionalParams to simplify the class.
  - Updated the pybind11 bindings to remove the exposure of the equality operator to Python.
  This change streamlines the class definition and reduces unnecessary complexity in the bindings.
  Signed-off-by: Robin Kobus <19427718+Funatiq@users.noreply.github.com>
* refactor: Enhance copyAdditionalOutputs to utilize AdditionalModelOutput
  - Updated the copyAdditionalOutputs function to accept a vector of AdditionalModelOutput, allowing the gatherContext flag of each output to be taken into account.
  - Adjusted the logic to handle context and non-context outputs separately, improving the output handling mechanism.
  - Modified related unit tests to incorporate the new gatherContext parameter, ensuring comprehensive testing of the updated functionality.
  This refactor improves the flexibility and clarity of output management in the batch processing workflow.
  Signed-off-by: Robin Kobus <19427718+Funatiq@users.noreply.github.com>
* refactor: Introduce findOutputTensor utility function for output tensor retrieval
  - Added a new utility function, findOutputTensor, to encapsulate the logic for finding output tensors and checking their validity.
  - Refactored copyAdditionalOutputs to utilize findOutputTensor, reducing code duplication and improving clarity.
  - Enhanced error checking for additional context and generation output tensors.
  This change streamlines the output tensor retrieval process, enhancing maintainability and readability in the batch processing workflow.
  Signed-off-by: Robin Kobus <19427718+Funatiq@users.noreply.github.com>
* refactor: Check final indices of additional output tensors and update tests
  - Added checks to verify the final indices of additional output tensors for context and generation outputs.
  - Updated unit tests to verify the changes.
  - Added a lastTokenIds input tensor to the test engines.
  - The logits output now depends on the gatherContextLogits parameter.
  - Removed the gatherContextOutputs parameter from the validate method in LlmRequest; context outputs do not depend on the computeContextLogits parameter.
  Signed-off-by: Robin Kobus <19427718+Funatiq@users.noreply.github.com>
* fixup! refactor: Check final indices of additional output tensors and update tests
  Signed-off-by: Robin Kobus <19427718+Funatiq@users.noreply.github.com>
* fixup! refactor: Update ExecutorConfig to use AdditionalModelOutput type
  Signed-off-by: Robin Kobus <19427718+Funatiq@users.noreply.github.com>
* fixup! refactor: Remove equality operator from TrtGptModelOptionalParams
  Signed-off-by: Robin Kobus <19427718+Funatiq@users.noreply.github.com>
* docs: Update executor.md
  Signed-off-by: Robin Kobus <19427718+Funatiq@users.noreply.github.com>
* chore: Clean up includes
  Signed-off-by: Robin Kobus <19427718+Funatiq@users.noreply.github.com>
---------
Signed-off-by: Robin Kobus <19427718+Funatiq@users.noreply.github.com>
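The first refactor above changes what callers hand to the executor: additional model outputs are now (name, gatherContext) pairs rather than bare names. A minimal sketch of a call site under the new type — assuming OutputConfig exposes additionalModelOutputs as a public field like its other flags, and with illustrative tensor names — might look like:

    #include "tensorrt_llm/executor/executor.h"

    #include <utility>
    #include <vector>

    namespace exec = tensorrt_llm::executor;

    exec::OutputConfig makeOutputConfig()
    {
        // Request two additional engine outputs; gatherContext selects whether
        // the tensor is also gathered during the context (prefill) phase.
        std::vector<exec::AdditionalModelOutput> additionalOutputs{
            exec::AdditionalModelOutput{"topk_logits", /*gatherContext=*/true},
            exec::AdditionalModelOutput{"hidden_states", /*gatherContext=*/false}};

        exec::OutputConfig outputConfig;
        outputConfig.additionalModelOutputs = std::move(additionalOutputs);
        // The config is attached to each executor::Request, replacing the old
        // std::optional<std::vector<std::string>> name-only configuration.
        return outputConfig;
    }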
111 lines
4.0 KiB
C++
/*
 * SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "tensorrt_llm/batch_manager/common.h"
#include "tensorrt_llm/batch_manager/kvCacheManager.h"
#include "tensorrt_llm/batch_manager/peftCacheManager.h"
#include "tensorrt_llm/batch_manager/runtimeBuffers.h"
#include "tensorrt_llm/batch_manager/sequenceSlotManager.h"
#include "tensorrt_llm/common/logger.h"
#include "tensorrt_llm/common/optionalRef.h"
#include "tensorrt_llm/runtime/iTensor.h"

#include <cuda_runtime_api.h>

#include <list>
#include <memory>
#include <optional>
#include <unordered_map>
#include <utility>
#include <vector>

namespace tensorrt_llm::batch_manager::utils
{
using SizeType32 = runtime::SizeType32;
using TensorPtr = runtime::ITensor::SharedPtr;

template <typename T>
using OptionalRef = common::OptionalRef<T>;

TensorPtr collectRequestIds(RequestVector const& contextRequests, RequestVector const& generationRequests);

void sortByLoraId(ScheduledRequests& scheduledRequests);

//! @param beforeDecoder Whether the function is called before the decoder. If true, the output offset is corrected.
//! @param numDroppedTokens The number of dropped tokens for each beam (e.g., when the request finished early).
//!        Generation logits for dropped tokens are ignored.
void copyGenerationLogits(RuntimeBuffers::GenerationLogitsCache& generationLogitsCache,
    runtime::BufferManager const& bufferManager, LlmRequest& llmReq, bool beforeDecoder,
    std::vector<SizeType32> const& numDroppedTokens = {});
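//! Illustrative call shape (all names here are hypothetical, not part of this header):
//!     copyGenerationLogits(buffers.generationLogitsCache, bufferManager, llmReq,
//!         /*beforeDecoder=*/true, /*numDroppedTokens=*/{1, 0});
//! copies the cached generation logits for llmReq before the decoder runs, skipping
//! the logits of the one dropped token on the first beam.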

void copyAdditionalOutputs(std::vector<executor::AdditionalModelOutput> const& additionalModelOutputs,
    RequestVector const& contextRequests, RequestVector const& generationRequests,
    RuntimeBuffers::TensorMap const& outputMap, runtime::BufferManager const& manager);
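// Per the refactor described in the commit message above, each AdditionalModelOutput
// names an engine output tensor that is looked up in outputMap; outputs with
// gatherContext set are copied for contextRequests as well as generationRequests,
// the others for generationRequests only. (This is a summary of the commit message,
// not a normative contract stated by this header.)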

void terminateRequest(SequenceSlotManager& seqSlotManager, LlmRequest& llmRequest, SizeType32 maxInputLen,
    OptionalRef<kv_cache_manager::BaseKVCacheManager> kvCacheManager = std::nullopt,
    OptionalRef<kv_cache_manager::BaseKVCacheManager> crossKvCacheManager = std::nullopt,
    OptionalRef<BasePeftCacheManager> peftCacheManager = std::nullopt, bool pause = false);

class CudaGraphExecutor
{
public:
    CudaGraphExecutor() = default;

    ~CudaGraphExecutor()
    {
        try
        {
            clear();
        }
        catch (std::exception const& e)
        {
            TLLM_LOG_EXCEPTION(e);
        }
    }

    bool hasInstance() const
    {
        return mInstance != nullptr;
    }

    void clear();
    void prepareNextGraph(std::shared_ptr<runtime::TllmRuntime>& runtime, SizeType32 nextContextId);
    void launch(runtime::CudaStream const& stream);

private:
    void create(cudaGraph_t const& graph);
    bool update(cudaGraph_t const& graph);
    void uploadToStream(runtime::CudaStream const& stream);

    // Initialized to nullptr so that hasInstance() and the destructor's clear()
    // are well-defined on a default-constructed executor.
    cudaGraphExec_t mInstance{nullptr};
};
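// The create/update/launch split above matches the standard CUDA Graphs flow (an
// assumption based on the names; the definitions live in the corresponding .cpp):
// build or capture a cudaGraph_t, instantiate it once into mInstance with
// cudaGraphInstantiate, and on later iterations attempt cudaGraphExecUpdate to
// patch the executable graph in place, re-instantiating only when the in-place
// update fails. This keeps the costly instantiation off the steady-state path.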

/// @brief LRU cache to store CUDA graph instances.
class CudaGraphExecutorCache
{
public:
    explicit CudaGraphExecutorCache(runtime::SizeType32 capacity)
        : mCapacity(capacity)
    {
    }

    std::optional<std::shared_ptr<CudaGraphExecutor>> get(BatchState const& state);

    void put(BatchState const& state, std::shared_ptr<CudaGraphExecutor> const& value);

private:
    using BatchStateGraphExecutorPair = std::pair<BatchState, std::shared_ptr<CudaGraphExecutor>>;
    using GraphExecutorLruCache = std::list<BatchStateGraphExecutorPair>;

    SizeType32 mCapacity;
    GraphExecutorLruCache mCache;
    std::unordered_map<BatchState, GraphExecutorLruCache::iterator, BatchStateHash> mMap;
};
} // namespace tensorrt_llm::batch_manager::utils
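The std::list plus std::unordered_map pair in CudaGraphExecutorCache is the textbook O(1) LRU layout: the list keeps (key, value) pairs in recency order while the map points at list nodes for constant-time lookup. A minimal generic sketch of the get/put mechanics such a cache typically implements — an assumption inferred from the member types here, since the definitions live in the corresponding .cpp — looks like this:

    #include <cstddef>
    #include <functional>
    #include <list>
    #include <optional>
    #include <unordered_map>
    #include <utility>

    template <typename Key, typename Value, typename Hash = std::hash<Key>>
    class LruCache
    {
    public:
        explicit LruCache(std::size_t capacity)
            : mCapacity(capacity)
        {
        }

        std::optional<Value> get(Key const& key)
        {
            auto it = mMap.find(key);
            if (it == mMap.end())
            {
                return std::nullopt;
            }
            // Move the hit entry to the front. splice() relinks the list node,
            // so the iterators stored in mMap remain valid.
            mCache.splice(mCache.begin(), mCache, it->second);
            return mCache.front().second;
        }

        void put(Key const& key, Value value)
        {
            if (auto it = mMap.find(key); it != mMap.end())
            {
                // Refresh an existing entry and mark it most recently used.
                it->second->second = std::move(value);
                mCache.splice(mCache.begin(), mCache, it->second);
                return;
            }
            if (mCapacity == 0)
            {
                return; // degenerate cache: never stores anything
            }
            if (mCache.size() == mCapacity)
            {
                // Evict the least recently used entry from the back.
                mMap.erase(mCache.back().first);
                mCache.pop_back();
            }
            mCache.emplace_front(key, std::move(value));
            mMap[key] = mCache.begin();
        }

    private:
        using Entry = std::pair<Key, Value>;
        using EntryList = std::list<Entry>;

        std::size_t mCapacity;
        EntryList mCache; // front = most recently used
        std::unordered_map<Key, typename EntryList::iterator, Hash> mMap;
    };

Keyed on BatchState with BatchStateHash, the real cache can thus reuse a previously instantiated CUDA graph whenever a later iteration reproduces an already-seen batch shape, evicting the least recently used instance once mCapacity is reached.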