TensorRT-LLM/cpp/tensorrt_llm/kernels/penaltyKernels.h

/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once

#include <cuda_fp16.h>

#include "tensorrt_llm/common/cudaUtils.h"
#include "tensorrt_llm/kernels/penaltyTypes.h"

namespace tensorrt_llm
{
namespace kernels
{
// Parameters for the batched penalty kernel. Optional inputs may be nullptr.
template <typename T>
struct InvokeBatchApplyPenaltyParams
{
    T* logits;                        // in/out logits, modified in place
    const T* biases;                  // optional bias added to the logits
    int* penaltyWorkspace;            // workspace holding per-token occurrence counts
    const int* penaltyWorkspacePrev;  // workspace from the previous step (beam search)
    const float* temperatures;        // per-request temperature
    const float* repetitionPenalties; // per-request repetition penalty
    const float* presencePenalties;   // per-request presence penalty
    const float* frequencyPenalties;  // per-request frequency penalty
    const bool accumulateVocab;       // whether to accumulate token counts into the workspace
    const size_t batchSize;
    const int beamWidth;
    const int maxSeqLen;
    const size_t vocabSize;
    const size_t vocabSizePadded;
    const int** outputIdsPtr;         // per-request pointers to generated token ids
    const int** parentIdsPtr;         // per-request pointers to beam-search parent ids
    const int* inputLengths;          // per-request prompt lengths
    const int* sequenceLengths;       // per-request current sequence lengths
    const int* minLengths;            // per-request minimum generation lengths
    const int* endIds;                // per-request end-of-sequence token ids
    const int* batchSlots;            // optional mapping from batch index to batch slot
    cudaStream_t stream;              // CUDA stream to launch on
};
// Launches the batched kernel that applies temperature scaling, bias, and the
// repetition/presence/frequency penalties, plus min-length end-id masking, to the
// logits described by params, in place.
template <typename T>
void invokeBatchApplyPenalty(const InvokeBatchApplyPenaltyParams<T>& params);
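
// Illustrative usage sketch (an assumption, not code from this header): a sampling layer
// might aggregate-initialize InvokeBatchApplyPenaltyParams in field-declaration order and
// launch the kernel. The device buffers below (dLogits, dPenaltyWorkspace, dTemperatures,
// dRepetitionPenalties, dOutputIdsPtr, dInputLengths, dSequenceLengths, dMinLengths, dEndIds)
// are hypothetical names; unused penalties and optional inputs are passed as nullptr.
//
//     InvokeBatchApplyPenaltyParams<float> params{dLogits, /*biases=*/nullptr, dPenaltyWorkspace,
//         /*penaltyWorkspacePrev=*/nullptr, dTemperatures, dRepetitionPenalties,
//         /*presencePenalties=*/nullptr, /*frequencyPenalties=*/nullptr, /*accumulateVocab=*/true,
//         batchSize, /*beamWidth=*/1, maxSeqLen, vocabSize, vocabSizePadded, dOutputIdsPtr,
//         /*parentIdsPtr=*/nullptr, dInputLengths, dSequenceLengths, dMinLengths, dEndIds,
//         /*batchSlots=*/nullptr, stream};
//     invokeBatchApplyPenalty(params);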
} // namespace kernels
} // namespace tensorrt_llm