TensorRT-LLM/cpp/include/tensorrt_llm/runtime/promptTuningParams.h

/*
 * Copyright (c) 2022-2024, NVIDIA CORPORATION. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include "tensorrt_llm/runtime/bufferManager.h"
#include "tensorrt_llm/runtime/common.h"
#include "tensorrt_llm/runtime/iTensor.h"

#include <utility>

namespace tensorrt_llm::runtime
{

template <typename TTensor>
class GenericPromptTuningParams
{
public:
    using TensorPtr = TTensor;
    using SizeType32 = tensorrt_llm::runtime::SizeType32;

    explicit GenericPromptTuningParams(
        TensorPtr embeddingTable = TensorPtr(), TensorPtr tasks = TensorPtr(), TensorPtr vocabSize = TensorPtr())
        : embeddingTable{std::move(embeddingTable)}
        , tasks{std::move(tasks)}
        , vocabSize{std::move(vocabSize)}
    {
    }

    // The prompt embedding table
    TensorPtr embeddingTable; // [numTasks * taskVocabSize, hidden_dim], on gpu

    // In GenerationInput, the expected shape of tasks is [batchSize].
    // For context requests with non-packed inputs, the expected shape is [batchSize, 1].
    // For generation requests with non-packed inputs, the expected shape is [batchSize * beamWidth].
    // For packed inputs, the expected shape is [packedLength]
    // (note that in-flight batching currently doesn't support non-packed inputs).
    TensorPtr tasks;

    TensorPtr vocabSize; // [1], on gpu

    // [batchSize] vector of flags indicating which requests in the batch have prompt tuning enabled
    // (an illustrative sketch of populating these fields follows this class)
    std::vector<bool> promptTuningEnabled;
};
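
// Illustrative only: a minimal sketch, not part of the API, of how the fields above
// might be populated via BufferManager. The helper name and the dimension parameters
// (numTasks, taskVocabSize, hiddenDim) are assumptions for this example, and kHALF
// stands in for whatever dtype the model's embedding table actually uses.
inline GenericPromptTuningParams<ITensor::SharedPtr> makeExamplePromptTuningParams(BufferManager const& manager,
    SizeType32 numTasks, SizeType32 taskVocabSize, SizeType32 hiddenDim, SizeType32 batchSize)
{
    GenericPromptTuningParams<ITensor::SharedPtr> params{
        // [numTasks * taskVocabSize, hidden_dim], on gpu
        manager.gpu(ITensor::makeShape({numTasks * taskVocabSize, hiddenDim}), nvinfer1::DataType::kHALF),
        // one task id per request, shape [batchSize]
        manager.gpu(ITensor::makeShape({batchSize}), nvinfer1::DataType::kINT32),
        // [1], on gpu
        manager.gpu(ITensor::makeShape({1}), nvinfer1::DataType::kINT32)};
    params.promptTuningEnabled.assign(batchSize, true); // enable prompt tuning for every request
    return params;
}
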
class PromptTuningParams : public GenericPromptTuningParams<ITensor::SharedPtr>
{
public:
    using TensorPtr = ITensor::SharedPtr;
    using SizeType32 = GenericPromptTuningParams::SizeType32;

    explicit PromptTuningParams(
        TensorPtr embeddingTable = nullptr, TensorPtr tasks = nullptr, TensorPtr vocabSize = nullptr)
        : GenericPromptTuningParams(std::move(embeddingTable), std::move(tasks), std::move(vocabSize))
    {
    }

    // Fill the tasks tensor for the batch using the provided tasksHost.
    // The function assumes that the first numContextRequests requests in the batch are context
    // requests (an illustrative call sketch follows this class).
    void fillTasksTensor(TensorPtr tasksHost, SizeType32 batchSize, SizeType32 numContextRequests,
        std::vector<SizeType32> const& reqBeamWidths, std::vector<SizeType32> const& reqPromptLengths,
        BufferManager const& manager, bool packedInput);
};
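
// Illustrative only: a minimal sketch of a fillTasksTensor call, assuming one task id per
// request has already been gathered on the host. The helper name and the chosen values
// are hypothetical, not part of the API.
inline void fillExampleTasksTensor(PromptTuningParams& params, BufferManager const& manager, SizeType32 batchSize,
    SizeType32 numContextRequests, std::vector<SizeType32> const& reqBeamWidths,
    std::vector<SizeType32> const& reqPromptLengths)
{
    // Host-side staging buffer holding one task id per request; fillTasksTensor copies it
    // to the device, expanded according to the packed/non-packed layout.
    ITensor::SharedPtr tasksHost = manager.cpu(ITensor::makeShape({batchSize}), nvinfer1::DataType::kINT32);
    auto* taskIds = bufferCast<SizeType32>(*tasksHost);
    for (SizeType32 i = 0; i < batchSize; ++i)
    {
        taskIds[i] = 0; // e.g. every request uses task 0 of the embedding table
    }
    params.fillTasksTensor(std::move(tasksHost), batchSize, numContextRequests, reqBeamWidths, reqPromptLengths,
        manager, /*packedInput=*/true);
}
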
} // namespace tensorrt_llm::runtime