/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gmock/gmock-matchers.h>
#include <gtest/gtest.h>

#include "tensorrt_llm/common/cudaUtils.h"
#include "tensorrt_llm/common/stringUtils.h"
#include "tensorrt_llm/runtime/memoryCounters.h"
#include "tensorrt_llm/runtime/tllmBuffers.h"

#include <limits>
#include <memory>
#include <sstream>
#include <type_traits>

using namespace tensorrt_llm::runtime;
namespace tc = tensorrt_llm::common;

class TllmBuffersTest : public ::testing::Test // NOLINT(cppcoreguidelines-pro-type-member-init)
{
protected:
    void SetUp() override
    {
        mDeviceCount = tc::getDeviceCount();

        if (mDeviceCount == 0)
            GTEST_SKIP();
    }

    void TearDown() override {}

    int mDeviceCount;
};

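// Checks that a default-constructed CudaStream owns a valid stream handle on a valid device,
// and that a non-owning lease constructed from an existing handle refers to the same stream.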
TEST_F(TllmBuffersTest, Stream)
{
    CudaStream stream{};
    EXPECT_NE(stream.get(), nullptr);
    auto ptr = std::make_shared<CudaStream>();
    EXPECT_NE(ptr->get(), nullptr);
    EXPECT_GE(ptr->getDevice(), 0);
    CudaStream lease{ptr->get(), ptr->getDevice(), false};
    EXPECT_EQ(lease.get(), ptr->get());
}

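// Verifies that CudaAllocator updates the global GPU memory counters on allocate/deallocate
// and that deallocating the same pointer twice throws.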
TEST_F(TllmBuffersTest, CudaAllocator)
{
    auto constexpr size = 1024;
    CudaAllocator allocator{};
    auto& counters = MemoryCounters::getInstance();
    EXPECT_EQ(counters.getGpu(), 0);
    auto ptr = allocator.allocate(size);
    EXPECT_NE(ptr, nullptr);
    EXPECT_EQ(counters.getGpu(), size);
    EXPECT_EQ(counters.getGpuDiff(), size);
    EXPECT_NO_THROW(allocator.deallocate(ptr, size));
    EXPECT_EQ(counters.getGpu(), 0);
    EXPECT_EQ(counters.getGpuDiff(), -size);
    EXPECT_EQ(allocator.getMemoryType(), MemoryType::kGPU);
    EXPECT_THROW(allocator.deallocate(ptr, size), std::runtime_error);
}

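// Same accounting checks as above, but for page-locked (pinned) host memory.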
TEST_F(TllmBuffersTest, PinnedAllocator)
{
    auto constexpr size = 1024;
    PinnedAllocator allocator{};
    auto& counters = MemoryCounters::getInstance();
    EXPECT_EQ(counters.getPinned(), 0);
    auto ptr = allocator.allocate(size);
    EXPECT_NE(ptr, nullptr);
    EXPECT_EQ(counters.getPinned(), size);
    EXPECT_EQ(counters.getPinnedDiff(), size);
    EXPECT_NO_THROW(allocator.deallocate(ptr, size));
    EXPECT_EQ(counters.getPinned(), 0);
    EXPECT_EQ(counters.getPinnedDiff(), -size);
    EXPECT_EQ(allocator.getMemoryType(), MemoryType::kPINNED);
    EXPECT_THROW(allocator.deallocate(ptr, size), std::runtime_error);
}

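// Same accounting checks for ordinary pageable host memory.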
TEST_F(TllmBuffersTest, HostAllocator)
{
    auto constexpr size = 1024;
    HostAllocator allocator{};
    auto& counters = MemoryCounters::getInstance();
    EXPECT_EQ(counters.getCpu(), 0);
    auto ptr = allocator.allocate(size);
    EXPECT_NE(ptr, nullptr);
    EXPECT_EQ(counters.getCpu(), size);
    EXPECT_EQ(counters.getCpuDiff(), size);
    EXPECT_NO_THROW(allocator.deallocate(ptr, size));
    EXPECT_EQ(counters.getCpu(), 0);
    EXPECT_EQ(counters.getCpuDiff(), -size);
    EXPECT_EQ(allocator.getMemoryType(), MemoryType::kCPU);
}

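// Exercises CudaAllocatorAsync, which allocates GPU memory on a specific CUDA stream
// (presumably stream-ordered allocation): counters are updated as for the synchronous
// allocator, and copies/moves of the allocator share the same stream.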
TEST_F(TllmBuffersTest, CudaAllocatorAsync)
{
    auto streamPtr = std::make_shared<CudaStream>();
    auto constexpr size = 1024;
    CudaAllocatorAsync allocator{streamPtr};
    auto& counters = MemoryCounters::getInstance();
    EXPECT_EQ(counters.getGpu(), 0);
    auto ptr = allocator.allocate(size);
    EXPECT_NE(ptr, nullptr);
    EXPECT_EQ(counters.getGpu(), size);
    EXPECT_EQ(counters.getGpuDiff(), size);
    EXPECT_NO_THROW(allocator.deallocate(ptr, size));
    EXPECT_EQ(counters.getGpu(), 0);
    EXPECT_EQ(counters.getGpuDiff(), -size);
    EXPECT_EQ(allocator.getMemoryType(), MemoryType::kGPU);
    streamPtr->synchronize();
    CudaAllocatorAsync allocatorCopy = allocator;
    EXPECT_EQ(allocatorCopy.getCudaStream(), streamPtr);
    CudaAllocatorAsync allocatorMove = std::move(allocatorCopy);
    EXPECT_EQ(allocatorMove.getCudaStream(), streamPtr);
    EXPECT_THROW(allocator.deallocate(ptr, size), std::runtime_error);
}

namespace
{
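// Shared checks for any IBuffer implementation: size/capacity bookkeeping across resize and
// release, plus wrapping the raw pointer as a non-owning IBuffer/ITensor view, which may
// shrink but must throw std::bad_alloc when asked to grow beyond the wrapped capacity.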
void testBuffer(IBuffer& buffer, std::int32_t typeSize)
{
    auto const size = buffer.getSize();
    EXPECT_NE(buffer.data(), nullptr);
    EXPECT_EQ(buffer.getSizeInBytes(), size * typeSize);
    EXPECT_EQ(buffer.getCapacity(), size);
    buffer.resize(size / 2);
    EXPECT_EQ(buffer.getSize(), size / 2);
    EXPECT_EQ(buffer.getCapacity(), size);
    buffer.resize(size * 2);
    EXPECT_EQ(buffer.getSize(), size * 2);
    EXPECT_EQ(buffer.getCapacity(), size * 2);
    buffer.release();
    EXPECT_EQ(buffer.getSize(), 0);
    EXPECT_EQ(buffer.data(), nullptr);
    buffer.resize(size / 2);
    EXPECT_EQ(buffer.getCapacity(), size / 2);
    auto bufferWrapped = IBuffer::wrap(buffer.data(), buffer.getDataType(), buffer.getSize(), buffer.getCapacity());
    EXPECT_EQ(bufferWrapped->data(), buffer.data());
    EXPECT_EQ(bufferWrapped->getSize(), buffer.getSize());
    EXPECT_EQ(bufferWrapped->getCapacity(), buffer.getCapacity());
    EXPECT_EQ(bufferWrapped->getDataType(), buffer.getDataType());
    EXPECT_EQ(bufferWrapped->getMemoryType(), buffer.getMemoryType());
    EXPECT_NO_THROW(bufferWrapped->resize(buffer.getCapacity() / 2));
    EXPECT_THROW(bufferWrapped->resize(buffer.getCapacity() * 2), std::bad_alloc);
    auto tensorWrapped = ITensor::wrap(buffer.data(), buffer.getDataType(),
        ITensor::makeShape({static_cast<SizeType>(buffer.getSize())}), buffer.getCapacity());
    EXPECT_EQ(tensorWrapped->getSize(), buffer.getSize());
    EXPECT_EQ(tensorWrapped->getCapacity(), buffer.getCapacity());
    EXPECT_EQ(tensorWrapped->getDataType(), buffer.getDataType());
    EXPECT_EQ(tensorWrapped->getMemoryType(), buffer.getMemoryType());
    EXPECT_NO_THROW(tensorWrapped->reshape(ITensor::makeShape({static_cast<SizeType>(buffer.getCapacity()) / 2})));
    EXPECT_THROW(
        tensorWrapped->reshape(ITensor::makeShape({static_cast<SizeType>(buffer.getCapacity()) * 2})), std::bad_alloc);
}
} // namespace

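// Runs the shared buffer checks against GPU memory allocated on a dedicated stream; the
// static_asserts document that DeviceBuffer is move-only.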
TEST_F(TllmBuffersTest, DeviceBuffer)
{
    auto streamPtr = std::make_shared<CudaStream>();
    auto constexpr size = 1024;
    CudaAllocatorAsync allocator{streamPtr};
    {
        DeviceBuffer buffer{size, nvinfer1::DataType::kFLOAT, allocator};
        testBuffer(buffer, sizeof(float));
    }
    streamPtr->synchronize();

    static_assert(!std::is_copy_constructible<DeviceBuffer>::value);
    static_assert(!std::is_copy_assignable<DeviceBuffer>::value);
}

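// As above for DeviceTensor: the element count must equal the volume of its shape, both for
// the initial {16, 8, 4} shape and after the resizing done inside testBuffer.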
TEST_F(TllmBuffersTest, DeviceTensor)
{
    auto streamPtr = std::make_shared<CudaStream>();
    nvinfer1::Dims constexpr dims{3, 16, 8, 4};
    CudaAllocatorAsync allocator{streamPtr};
    {
        DeviceTensor tensor{dims, nvinfer1::DataType::kFLOAT, allocator};
        EXPECT_EQ(tensor.getSize(), ITensor::volume(dims));
        testBuffer(tensor, sizeof(float));
        EXPECT_EQ(tensor.getSize(), ITensor::volume(tensor.getShape()));
    }
    streamPtr->synchronize();

    static_assert(!std::is_copy_constructible<DeviceTensor>::value);
    static_assert(!std::is_copy_assignable<DeviceTensor>::value);
}

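// Slicing a buffer yields a view that shares the owner's storage at an element offset; the
// slice may shrink but not grow, and slicing works through shared_ptr-to-const as well.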
TEST_F(TllmBuffersTest, BufferSlice)
{
    auto constexpr size = 1024;
    HostAllocator allocator{};
    auto constexpr dataType = nvinfer1::DataType::kFLOAT;
    auto buffer = std::make_shared<HostBuffer>(size, dataType, allocator);
    auto offset = size / 8;
    auto slice = IBuffer::slice(buffer, offset);
    auto const sizeSlice = size - offset;
    EXPECT_EQ(slice->getSize(), sizeSlice);
    EXPECT_EQ(slice->getCapacity(), sizeSlice);
    EXPECT_EQ(static_cast<std::uint8_t*>(slice->data()) - static_cast<std::uint8_t*>(buffer->data()),
        offset * BufferDataType(dataType).getSize());

    EXPECT_NO_THROW(slice->resize(sizeSlice));
    EXPECT_NO_THROW(slice->resize(sizeSlice / 2));
    EXPECT_THROW(slice->resize(sizeSlice * 2), std::runtime_error);
    EXPECT_NO_THROW(slice->release());
    EXPECT_EQ(slice->data(), nullptr);

    std::shared_ptr<HostBuffer const> constBuffer{buffer};
    auto constSlice = IBuffer::slice(constBuffer, offset);
    EXPECT_EQ(constSlice->getSize(), sizeSlice);
    auto uniqueSlice = IBuffer::slice(std::move(constSlice), 1);
    EXPECT_EQ(uniqueSlice->getSize(), sizeSlice - 1);
}

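// Tensor slicing cuts along the leading dimension: slicing {16, 8, 4} at offset 4 leaves
// {12, 8, 4}. Reshape/resize may keep or shrink the volume but must throw on growth, and
// releasing the slice leaves the owning tensor's storage intact.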
TEST_F(TllmBuffersTest, TensorSlice)
{
    auto dims = ITensor::makeShape({16, 8, 4});
    HostAllocator allocator{};
    auto constexpr dataType = nvinfer1::DataType::kFLOAT;
    auto tensor = std::make_shared<HostTensor>(dims, dataType, allocator);
    auto offset = dims.d[0] / 4;
    auto slice = ITensor::slice(tensor, offset);
    auto const sizeSlice = 3 * tensor->getSize() / 4;
    EXPECT_EQ(slice->getShape().d[0], dims.d[0] - offset);
    EXPECT_EQ(slice->getSize(), sizeSlice);
    EXPECT_EQ(slice->getCapacity(), sizeSlice);
    EXPECT_EQ(static_cast<std::uint8_t*>(slice->data()) - static_cast<std::uint8_t*>(tensor->data()),
        offset * ITensor::volume(dims) / dims.d[0] * BufferDataType(dataType).getSize());

    auto dimsNew = ITensor::makeShape({12, 32});
    EXPECT_EQ(ITensor::volume(dimsNew), sizeSlice);
    EXPECT_NO_THROW(slice->reshape(dimsNew));
    EXPECT_EQ(slice->getShape().d[1], dimsNew.d[1]);
    dimsNew.d[0] = 6;
    EXPECT_LT(ITensor::volume(dimsNew), sizeSlice);
    EXPECT_NO_THROW(slice->reshape(dimsNew));
    EXPECT_EQ(slice->getShape().d[0], dimsNew.d[0]);
    dimsNew.d[0] = 16;
    EXPECT_GT(ITensor::volume(dimsNew), sizeSlice);
    EXPECT_THROW(slice->reshape(dimsNew), std::runtime_error);

    EXPECT_NO_THROW(slice->resize(sizeSlice));
    EXPECT_NO_THROW(slice->resize(sizeSlice / 2));
    EXPECT_EQ(slice->getShape().d[0], sizeSlice / 2);
    EXPECT_THROW(slice->resize(sizeSlice * 2), std::runtime_error);
    EXPECT_NO_THROW(slice->release());
    EXPECT_EQ(slice->data(), nullptr);
    EXPECT_NE(tensor->data(), nullptr);

    std::shared_ptr<HostTensor const> constTensor{tensor};
    auto constSlice = ITensor::slice(constTensor, offset);
    EXPECT_EQ(constSlice->getShape().d[0], dims.d[0] - offset);
    auto uniqueSlice = ITensor::slice(std::move(constSlice), 1);
    EXPECT_EQ(uniqueSlice->getShape().d[0], dims.d[0] - offset - 1);
}

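// Squeezing removes a unit dimension: squeezing a dimension of extent > 1 throws, and
// releasing the squeezed view leaves the original tensor's data intact.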
TEST_F(TllmBuffersTest, TensorSqueeze)
{
    auto dims = ITensor::makeShape({16, 1, 4});
    HostAllocator allocator{};
    auto constexpr dataType = nvinfer1::DataType::kFLOAT;
    auto tensor = std::make_shared<HostTensor>(dims, dataType, allocator);

    auto squeezeDim = 0;
    EXPECT_THROW(tensor->squeeze(squeezeDim), std::runtime_error);
    squeezeDim = 1;
    auto squeezed = ITensor::view(tensor, ITensor::squeeze(dims, squeezeDim));

    EXPECT_EQ(squeezed->getSize(), tensor->getSize());
    EXPECT_EQ(squeezed->getShape().nbDims, tensor->getShape().nbDims - 1);
    EXPECT_EQ(squeezed->getShape().d[0], tensor->getShape().d[0]);
    EXPECT_EQ(squeezed->getShape().d[1], tensor->getShape().d[2]);

    EXPECT_NO_THROW(squeezed->release());
    EXPECT_EQ(squeezed->data(), nullptr);
    EXPECT_NE(tensor->data(), nullptr);
}

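// A view reinterprets the same storage under a different (here smaller) shape; releasing the
// view must not free the underlying tensor's memory.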
TEST_F(TllmBuffersTest, TensorView)
{
    auto const dims = ITensor::makeShape({16, 1, 4});
    HostAllocator allocator{};
    auto constexpr dataType = nvinfer1::DataType::kFLOAT;
    auto tensor = std::make_shared<HostTensor>(dims, dataType, allocator);

    auto const viewDims = ITensor::makeShape({16, 1, 2});

    auto view = ITensor::view(tensor, viewDims);
    EXPECT_EQ(view->getSize(), tensor->getSize() / 2);
    EXPECT_EQ(view->getShape().nbDims, tensor->getShape().nbDims);
    EXPECT_EQ(view->getShape().d[2], tensor->getShape().d[2] / 2);

    EXPECT_NO_THROW(view->release());
    EXPECT_EQ(view->data(), nullptr);
    EXPECT_NE(tensor->data(), nullptr);
}

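// Streaming a buffer with operator<< prints its shape and contents; checked here for an
// empty and a non-empty zero-filled float buffer.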
TEST_F(TllmBuffersTest, BufferOutput)
{
    auto streamPtr = std::make_shared<CudaStream>();
    CudaAllocatorAsync allocator{streamPtr};
    for (std::size_t size : {0, 16})
    {
        DeviceBuffer buffer{size, nvinfer1::DataType::kFLOAT, allocator};
        TLLM_CUDA_CHECK(cudaMemsetAsync(buffer.data(), 0, buffer.getSizeInBytes(), streamPtr->get()));
        streamPtr->synchronize();
        std::stringstream ss;
        ss << buffer;
        auto str = ss.str();
        EXPECT_THAT(str, ::testing::HasSubstr(std::string("shape: (") + std::to_string(size) + ")"));
        EXPECT_THAT(str, ::testing::HasSubstr(tc::vec2str(std::vector<int>(size, 0))));
    }
    streamPtr->synchronize();
}

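// The tensor printer is exercised across several supported element types; a zero-filled
// {16, 8, 4} tensor must report its shape and print zeros for the last innermost row.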
TEST_F(TllmBuffersTest, TensorOutput)
{
    auto streamPtr = std::make_shared<CudaStream>();
    nvinfer1::Dims constexpr dims{3, 16, 8, 4};
    CudaAllocatorAsync allocator{streamPtr};
    for (auto dataType :
        {nvinfer1::DataType::kFLOAT, nvinfer1::DataType::kHALF, nvinfer1::DataType::kBOOL, nvinfer1::DataType::kINT8,
            nvinfer1::DataType::kINT32, nvinfer1::DataType::kINT64, nvinfer1::DataType::kUINT8})
    {
        DeviceTensor tensor{dims, dataType, allocator};
        TLLM_CUDA_CHECK(cudaMemsetAsync(tensor.data(), 0, tensor.getSizeInBytes(), streamPtr->get()));
        streamPtr->synchronize();
        std::stringstream ss;
        ss << tensor;
        auto str = ss.str();
        EXPECT_THAT(str, ::testing::HasSubstr(std::string("shape: ") + ITensor::toString(dims)));
        EXPECT_THAT(str, ::testing::HasSubstr("i=15 j=7: (0, 0, 0, 0)"))
            << "dataType: " << static_cast<std::int32_t>(dataType);
    }
    streamPtr->synchronize();
}

namespace
{
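// Round-trips a single element type T through TRTDataType/BufferDataType and a HostBuffer:
// the static_asserts check the type-trait mapping in both directions, and the writes verify
// that the numeric limits of T survive storage when accessed via bufferCast<T>.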
template <typename T>
void testBufferType()
{
    auto constexpr size = 1024;
    HostAllocator allocator{};
    BufferDataType constexpr dataType{TRTDataType<T>::value};
    using limits = std::numeric_limits<T>;
    static_assert(dataType.isPointer() || dataType.isUnsigned() != limits::is_signed);
    static_assert(std::is_same_v<T,
        typename CppDataType<dataType.getDataType(), dataType.isUnsigned(), dataType.isPointer()>::type>);
    IBuffer::SharedPtr buffer{std::make_shared<HostBuffer>(size, dataType, allocator)};
    auto bufferPtr = bufferCast<T>(*buffer);
    auto constexpr max = limits::max();
    bufferPtr[0] = max;
    EXPECT_EQ(bufferPtr[0], max);
    auto constexpr min = limits::min();
    bufferPtr[size - 1] = min;
    EXPECT_EQ(bufferPtr[size - 1], min);
    EXPECT_EQ(buffer->data(size), bufferPtr + size);
}
} // namespace

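// Instantiates the type round-trip for each supported integral type and its pointer variant.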
TEST_F(TllmBuffersTest, ExtendedTypes)
{
    testBufferType<bool>();
    testBufferType<bool*>();
    testBufferType<std::int8_t>();
    testBufferType<std::int8_t*>();
    testBufferType<std::uint8_t>();
    testBufferType<std::uint8_t*>();
    testBufferType<std::int32_t>();
    testBufferType<std::int32_t*>();
    testBufferType<std::uint32_t>();
    testBufferType<std::uint32_t*>();
    testBufferType<std::int64_t>();
    testBufferType<std::int64_t*>();
    testBufferType<std::uint64_t>();
    testBufferType<std::uint64_t*>();
}

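// bytesToString formats signed byte counts with binary (1024-based) units, switching units at
// each power of 1024 and mirroring the behavior for negative values.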
TEST_F(TllmBuffersTest, BytesToString)
{
    auto constexpr precision = 2;
    EXPECT_EQ(MemoryCounters::bytesToString((1ul << 10) - 1, precision), "1023.00 B");
    EXPECT_EQ(MemoryCounters::bytesToString(1ul << 10, precision), "1.00 KB");
    EXPECT_EQ(MemoryCounters::bytesToString((1ul << 10) + (1ul << 9), precision), "1.50 KB");
    EXPECT_EQ(MemoryCounters::bytesToString((1ul << 20) - (1ul << 10), precision), "1023.00 KB");
    EXPECT_EQ(MemoryCounters::bytesToString(1ul << 20, precision), "1.00 MB");
    EXPECT_EQ(MemoryCounters::bytesToString((1ul << 20) + (1ul << 19), precision), "1.50 MB");
    EXPECT_EQ(MemoryCounters::bytesToString((1ul << 30) - (1ul << 20), precision), "1023.00 MB");
    EXPECT_EQ(MemoryCounters::bytesToString(1ul << 30, precision), "1.00 GB");
    EXPECT_EQ(MemoryCounters::bytesToString((1ul << 30) + (1ul << 29), precision), "1.50 GB");
    EXPECT_EQ(MemoryCounters::bytesToString((1ul << 40) - (1ul << 30), precision), "1023.00 GB");
    EXPECT_EQ(MemoryCounters::bytesToString(1ul << 40, precision), "1.00 TB");
    EXPECT_EQ(MemoryCounters::bytesToString((1ul << 40) + (1ul << 39), precision), "1.50 TB");

    EXPECT_EQ(MemoryCounters::bytesToString(-(1l << 10) + 1, precision), "-1023.00 B");
    EXPECT_EQ(MemoryCounters::bytesToString(-(1l << 10), precision), "-1.00 KB");
    EXPECT_EQ(MemoryCounters::bytesToString(-(1l << 10) - (1l << 9), precision), "-1.50 KB");
    EXPECT_EQ(MemoryCounters::bytesToString(-(1l << 20) + (1l << 10), precision), "-1023.00 KB");
    EXPECT_EQ(MemoryCounters::bytesToString(-(1l << 20), precision), "-1.00 MB");
    EXPECT_EQ(MemoryCounters::bytesToString(-(1l << 20) - (1l << 19), precision), "-1.50 MB");
    EXPECT_EQ(MemoryCounters::bytesToString(-(1l << 30) + (1l << 20), precision), "-1023.00 MB");
    EXPECT_EQ(MemoryCounters::bytesToString(-(1l << 30), precision), "-1.00 GB");
    EXPECT_EQ(MemoryCounters::bytesToString(-(1l << 30) - (1l << 29), precision), "-1.50 GB");
    EXPECT_EQ(MemoryCounters::bytesToString(-(1l << 40) + (1l << 30), precision), "-1023.00 GB");
    EXPECT_EQ(MemoryCounters::bytesToString(-(1l << 40), precision), "-1.00 TB");
    EXPECT_EQ(MemoryCounters::bytesToString(-(1l << 40) - (1l << 39), precision), "-1.50 TB");
}