// File: cpp/tensorrt_llm/executor/speculativeDecodingConfig.cpp
// (snapshot metadata: 2025-03-11 21:13:42 +08:00, 42 lines, 1.3 KiB, C++)

/*
* SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "tensorrt_llm/executor/executor.h"
namespace tensorrt_llm::executor
{
SpeculativeDecodingConfig::SpeculativeDecodingConfig(bool inFastLogits)
: fastLogits(inFastLogits)
{
}
/// @brief Equality comparison: two configs are equal iff their
///        fast-logits flags match (the only field compared here).
bool SpeculativeDecodingConfig::operator==(SpeculativeDecodingConfig const& other) const
{
    bool const sameFastLogits = (other.fastLogits == fastLogits);
    return sameFastLogits;
}
/// @brief Serializes this struct into a 1 x N FP32 CPU tensor by bit-copying
///        its object representation, so it can travel through the logits path.
/// @return A CPU tensor of kFP32 whose leading sizeof(*this) bytes are a
///         byte-for-byte copy of this object.
Tensor SpeculativeDecodingFastLogitsInfo::toTensor() const
{
    // Ceiling division: smallest number of float elements that can hold all
    // bytes of *this. The previous "(sizeof(*this) + 1) / sizeof(float)"
    // under-counts by one element whenever sizeof(*this) % sizeof(float) is
    // 1 or 2, which would make the memcpy below overflow the tensor buffer.
    // For sizes that are a multiple of sizeof(float) (the padded common case)
    // the result is unchanged.
    size_t const numLogitsNeeded = (sizeof(*this) + sizeof(float) - 1) / sizeof(float);
    auto tensor = Tensor::cpu(DataType::kFP32, {1, numLogitsNeeded});
    std::memcpy(tensor.getData(), this, sizeof(*this));
    return tensor;
}
} // namespace tensorrt_llm::executor