/*
 * Copyright (c) 2022-2024, NVIDIA CORPORATION. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "tests/layers/baseSamplingLayerTest.h"

namespace tensorrt_llm::tests::layers::sampling
{

using namespace tensorrt_llm::runtime;
using namespace tensorrt_llm::layers;
using namespace tensorrt_llm::common;

namespace tk = tensorrt_llm::kernels;
namespace trk = tensorrt_llm::runtime::kernels;

template <typename T>
void BaseSamplingLayerTest<T>::setup(uint64_t seed, TestSamplingParams const& params)
{
    auto const dataType = TRTDataType<T>::value;
    auto const ptrType = TRTDataType<T*>::value;

    // clang-format off

    // The unmasked logits are log-probabilities: exp(-0.9163) ~ 0.4, exp(-1.2040) ~ 0.3,
    // exp(-1.6094) ~ 0.2, exp(-2.3026) ~ 0.1; -FLT_MAX masks a token out (probability 0).
    // prob = (0.0, 0.0, 0.0, 0.0, 0.4, 0.3, 0.2, 0.1)
    mTestLogitsInit = {
            -FLT_MAX, -FLT_MAX, -FLT_MAX, -FLT_MAX, -0.9163, -1.2040, -1.6094, -2.3026, // step 0
            -0.9163, -1.2040, -1.6094, -2.3026, -FLT_MAX, -FLT_MAX, -FLT_MAX, -FLT_MAX, // step 1
            -FLT_MAX, -FLT_MAX, -0.9163, -1.2040, -1.6094, -2.3026, -FLT_MAX, -FLT_MAX, // step 2
            -0.9163, -1.2040, -1.6094, -2.3026, -FLT_MAX, -FLT_MAX, -FLT_MAX, -FLT_MAX  // step 3
    };

    // clang-format on

    if (mComputeProbs)
    {
        computeProb(mTestLogitsInit.data(), mTestLogitsInit.data(), 4, mVocabSize);
    }

    mSeqLengthsDevice = mBufferManager->gpu(ITensor::makeShape({mMaxBatchSize}), nvinfer1::DataType::kINT32);
    mContextLengthDevice = mBufferManager->gpu(ITensor::makeShape({mMaxBatchSize}), nvinfer1::DataType::kINT32);
    mFinishedDevice = mBufferManager->gpu(
        ITensor::makeShape({mMaxBatchSize}), TRTDataType<tk::FinishedState::UnderlyingType>::value);
    mOutputIdsDevice
        = mBufferManager->gpu(ITensor::makeShape({mMaxBatchSize, mMaxSeqLen}), nvinfer1::DataType::kINT32);
    mEndIdsDevice = mBufferManager->gpu(ITensor::makeShape({mMaxBatchSize}), nvinfer1::DataType::kINT32);
    mIdsPtrHost = mBufferManager->pinned(ITensor::makeShape({mMaxBatchSize}), ptrType);

    mCumLogProbsDevice = mBufferManager->gpu(ITensor::makeShape({mMaxBatchSize}), nvinfer1::DataType::kFLOAT);
    mOutputLogProbsDevice
        = mBufferManager->gpu(ITensor::makeShape({mMaxBatchSize, mMaxSeqLen}), nvinfer1::DataType::kFLOAT);

    mBatchSlots = mBufferManager->pinned(ITensor::makeShape({mBatchSize}), nvinfer1::DataType::kINT32);

    mCurandStatesDevice
        = mBufferManager->gpu(ITensor::makeShape({mMaxBatchSize, sizeof(curandState_t)}), nvinfer1::DataType::kINT8);

    auto const workspaceSize = mSamplingLayer->getWorkspaceSize();

    trk::invokeFill(*mSeqLengthsDevice, int32_t{0}, *mStream);
    trk::invokeFill(*mContextLengthDevice, int32_t{0}, *mStream);
    trk::invokeFill(*mFinishedDevice, uint8_t{0}, *mStream);
    trk::invokeFill(*mOutputIdsDevice, int32_t{0}, *mStream);
    trk::invokeFill(*mCumLogProbsDevice, float{0.0f}, *mStream);
    trk::invokeFill(*mOutputLogProbsDevice, float{0.0f}, *mStream);
    trk::invokeFill(*mEndIdsDevice, int32_t{mEndId}, *mStream);

    tk::invokeCurandInitialize(reinterpret_cast<curandState_t*>(bufferCast<int8_t>(*mCurandStatesDevice)), nullptr,
        mMaxBatchSize, seed, mStream->get());

    auto batchSlotsPtr = bufferCast<int32_t>(*mBatchSlots);
    for (SizeType32 bi = 0; bi < mBatchSize; ++bi)
    {
        batchSlotsPtr[bi] = 2 * bi;
    }

    auto idsPtrHostPtr = BufferRange<void*>(*mIdsPtrHost);
    auto outputIdsDevicePtr = bufferCast<int32_t>(*mOutputIdsDevice);
    for (SizeType32 bi = 0; bi < mMaxBatchSize; bi++)
    {
        idsPtrHostPtr[bi] = outputIdsDevicePtr + bi * mMaxSeqLen;
    }

    auto setupParams = std::make_shared<SamplingSetupParams>();
    setupParams->randomSeed = std::make_optional<std::vector<uint64_t>>({seed});
    setupParams->runtimeTopK
        = params.topKs.size() ? std::make_optional<std::vector<SizeType32>>(params.topKs) : std::nullopt;
    setupParams->runtimeTopP
        = params.topPs.size() ? std::make_optional<std::vector<float>>(params.topPs) : std::nullopt;
    setupParams->topPDecay
        = params.decay.size() ? std::make_optional<std::vector<float>>(params.decay) : std::nullopt;
    setupParams->topPMin
        = params.minTopP.size() ? std::make_optional<std::vector<float>>(params.minTopP) : std::nullopt;
    setupParams->topPResetIds = params.topPResetIds.size()
        ? std::make_optional<std::vector<TokenIdType>>(params.topPResetIds)
        : std::nullopt;

    mDecodingWorkspace->setDeviceBatchSlots(mBatchSlots);
    mDecodingWorkspace->getDeviceRuntimeLogits()->reshape(ITensor::makeShape({mBatchSize, mVocabSize}));
    mSamplingLayer->setup(mBatchSize, mBeamWidth, mBatchSlots, setupParams, mDecodingWorkspace);

    mStream->synchronize();
}
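
// The helper factories below assemble the parameter structs consumed by the sampling layer's
// forwardAsync(): the inputs reference the end ids, context lengths, finished states, curand
// states, and the runtime logits owned by the decoding workspace; the outputs reference the
// output ids, sequence lengths, and (cumulative) log-prob buffers allocated in setup().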
template <typename T>
std::shared_ptr<DecodingInputs> BaseSamplingLayerTest<T>::createInputTensors(int32_t step)
{
    constexpr int32_t ite = 0;
    auto decodeInputTensors = std::make_shared<SamplingInputs>(mEndIdsDevice, mBatchSlots, step, ite, mBatchSize);

    decodeInputTensors->logits = mDecodingWorkspace->getDeviceRuntimeLogits();
    decodeInputTensors->inputLengths = mContextLengthDevice;
    decodeInputTensors->finished = mFinishedDevice;
    decodeInputTensors->probsComputed = mComputeProbs;
    decodeInputTensors->curandStates = reinterpret_cast<curandState_t*>(bufferCast<int8_t>(*mCurandStatesDevice));

    return decodeInputTensors;
}

template <typename T>
std::shared_ptr<BaseDecodingOutputs> BaseSamplingLayerTest<T>::createOutputTensors()
{
    auto decodeOutputs = std::make_shared<BaseDecodingOutputs>(mOutputIdsDevice);
    decodeOutputs->outputIdsPtr = mIdsPtrHost;
    decodeOutputs->sequenceLength = mSeqLengthsDevice;
    decodeOutputs->finished = mFinishedDevice;
    decodeOutputs->outputLogProbs = mOutputLogProbsDevice;
    decodeOutputs->cumLogProbs = mCumLogProbsDevice;

    // TODO(nkorobov): check log probs and cum_log_probs
    return decodeOutputs;
}

template <typename T>
void BaseSamplingLayerTest<T>::batchCopy(int32_t step)
{
    auto const logitsHost = ITensor::wrap(mTestLogitsInit.data() + step * mVocabSize, TRTDataType<T>::value,
        ITensor::makeShape({1, mVocabSize}));
    for (int32_t bi = 0; bi < mBatchSize; ++bi)
    {
        auto logitsDeviceView = ITensor::slice(mDecodingWorkspace->getDeviceRuntimeLogits(), bi, 1);
        mBufferManager->copy(*logitsHost, *logitsDeviceView);
    }
}

template <typename T>
bool BaseSamplingLayerTest<T>::checkResult(int32_t* outputIds, std::vector<std::set<int32_t>>& expectedIds)
{
    assert(expectedIds.size() == mMaxSeqLen * mBatchBeam);
    int failures = 0;
    auto* const batchSlotsPtr = bufferCast<int32_t>(*mBatchSlots);
    for (int32_t i = 0; i < mMaxSeqLen * mBatchBeam; ++i)
    {
        int32_t s = i / mBatchBeam;
        int32_t b = i % mBatchBeam;
        auto const batchSlot = batchSlotsPtr[b];
        std::set<int32_t> expts = expectedIds.at(i);
        auto const outputId = outputIds[batchSlot * mMaxSeqLen + s];
        if (expts.count(outputId) == 0)
        {
            if (failures < 10)
            {
                std::stringstream ss;
                ss << " - Fail "
                   << " (step=" << s << ", batch=" << b << ") "
                   << "actual=" << outputId << ", expected";
                for (auto const& expt : expts)
                {
                    ss << " " << expt;
                }
                TLLM_LOG_DEBUG("%s", ss.str().c_str());
            }
            ++failures;
        }
    }
    TLLM_LOG_DEBUG(
        "check...%6s : failures: %d / %d", failures == 0 ? "....OK" : "FAILED", failures, mMaxSeqLen * mBatchBeam);
    return failures == 0;
}

// Runs the layer once per seed: setup() reinitializes all buffers and curand states, each decode
// step replays the reference logits via batchCopy() before forwardAsync(), and the generated ids
// are copied back to the host and validated against the expected id sets.
template <typename T>
void BaseSamplingLayerTest<T>::runTest(
    std::vector<std::set<int32_t>> expectedOutputIds, TestSamplingParams const& params, int32_t endId)
{
    initLayer(params);

    auto const decoderDomain
        = tensorrt_llm::layers::DecoderDomain(mMaxBatchSize, mBeamWidth, mVocabSize, mVocabSizePadded);
    mDecodingWorkspace = std::make_unique<tensorrt_llm::runtime::DecodingLayerWorkspace>(
        mBufferManager, decoderDomain, TRTDataType<T>::value, mSamplingLayer->getWorkspaceSize());
    mEndId = endId;
    for (uint64_t seed = 0; seed < mMaxSeed; ++seed)
    {
        setup(seed, params);

        int32_t step = mMaxInputLen;
        auto inputTensors = createInputTensors(step);
        auto outputTensors = createOutputTensors();

        for (step = mMaxInputLen; step < mMaxOutputLen; ++step)
        {
            // Reset by the test value since the sampling layer internally updates the logit buffer.
            batchCopy(step);
            inputTensors->step = step;
            mDecodingWorkspace->setDeviceBatchSlots(mBatchSlots);
            mSamplingLayer->forwardAsync(outputTensors, inputTensors, mDecodingWorkspace);
            mStream->synchronize();
        }

        auto const outputIdsHost
            = mBufferManager->copyFrom(*mOutputIdsDevice, tensorrt_llm::runtime::MemoryType::kCPU);

        mStream->synchronize();

        bool passed = checkResult(bufferCast<int32_t>(*outputIdsHost), expectedOutputIds);
        EXPECT_TRUE(passed) << "Output ids check failed at seed " << seed;
        if (!passed)
        {
            std::stringstream ss;
            ss << "Actual output ids:" << std::endl << *outputIdsHost;
            TLLM_LOG_DEBUG(ss.str());
        }
    }
}

template class BaseSamplingLayerTest<float>;
template class BaseSamplingLayerTest<half>;

} // namespace tensorrt_llm::tests::layers::sampling