// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Tests for the TransferBuffer class.
#include "gpu/command_buffer/client/transfer_buffer.h"
#include <stddef.h>
#include <stdint.h>
#include <memory>
#include "base/compiler_specific.h"
#include "base/memory/aligned_memory.h"
#include "gpu/command_buffer/client/client_test_helper.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
#include "gpu/command_buffer/common/command_buffer.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
using ::testing::_;
using ::testing::AtMost;
using ::testing::DoAll;
using ::testing::Invoke;
using ::testing::Return;
using ::testing::SetArgPointee;
using ::testing::StrictMock;
namespace gpu {
class TransferBufferTest : public testing::Test {
protected:
static const int32_t kNumCommandEntries = 400;
static const int32_t kCommandBufferSizeBytes =
kNumCommandEntries * sizeof(CommandBufferEntry);
static const uint32_t kStartingOffset = 64;
static const uint32_t kAlignment = 4;
static const uint32_t kTransferBufferSize = 256;
TransferBufferTest()
: transfer_buffer_id_(0) {
}
void SetUp() override;
void TearDown() override;
virtual void Initialize() {
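// The default, minimum, and maximum sizes are all kTransferBufferSize, so
// this fixture never grows or shrinks the buffer; kStartingOffset bytes are
// reserved, and the tests below expect kTransferBufferSize - kStartingOffset
// usable bytes from GetCurrentMaxAllocationWithoutRealloc().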
ASSERT_TRUE(transfer_buffer_->Initialize(
kTransferBufferSize, kStartingOffset, kTransferBufferSize,
kTransferBufferSize, kAlignment));
}
MockClientCommandBufferMockFlush* command_buffer() const {
return command_buffer_.get();
}
std::unique_ptr<MockClientCommandBufferMockFlush> command_buffer_;
std::unique_ptr<CommandBufferHelper> helper_;
std::unique_ptr<TransferBuffer> transfer_buffer_;
int32_t transfer_buffer_id_;
};
void TransferBufferTest::SetUp() {
command_buffer_ =
std::make_unique<StrictMock<MockClientCommandBufferMockFlush>>();
helper_ = std::make_unique<CommandBufferHelper>(command_buffer());
ASSERT_EQ(helper_->Initialize(kCommandBufferSizeBytes),
gpu::ContextResult::kSuccess);
transfer_buffer_id_ = command_buffer()->GetNextFreeTransferBufferId();
transfer_buffer_ = std::make_unique<TransferBuffer>(helper_.get());
}
void TransferBufferTest::TearDown() {
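// If the test left the transfer buffer allocated, resetting it below also
// destroys that buffer.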
if (transfer_buffer_->HaveBuffer()) {
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
}
// For the command buffer helper's own ring buffer, which is destroyed during
// teardown.
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), Flush(_)).Times(AtMost(1));
EXPECT_CALL(*command_buffer(), OrderingBarrier(_)).Times(AtMost(2));
transfer_buffer_.reset();
}
const int32_t TransferBufferTest::kNumCommandEntries;
const int32_t TransferBufferTest::kCommandBufferSizeBytes;
const uint32_t TransferBufferTest::kStartingOffset;
const uint32_t TransferBufferTest::kAlignment;
const uint32_t TransferBufferTest::kTransferBufferSize;
TEST_F(TransferBufferTest, Basic) {
Initialize();
EXPECT_TRUE(transfer_buffer_->HaveBuffer());
EXPECT_EQ(transfer_buffer_id_, transfer_buffer_->GetShmId());
EXPECT_EQ(
kTransferBufferSize - kStartingOffset,
transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
EXPECT_NE(base::UnguessableToken(), transfer_buffer_->shared_memory_guid());
}
TEST_F(TransferBufferTest, Free) {
Initialize();
EXPECT_TRUE(transfer_buffer_->HaveBuffer());
EXPECT_EQ(transfer_buffer_id_, transfer_buffer_->GetShmId());
EXPECT_NE(base::UnguessableToken(), transfer_buffer_->shared_memory_guid());
// Free buffer.
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
transfer_buffer_->Free();
// See it's freed.
EXPECT_FALSE(transfer_buffer_->HaveBuffer());
EXPECT_EQ(base::UnguessableToken(), transfer_buffer_->shared_memory_guid());
// See that it gets reallocated.
EXPECT_EQ(transfer_buffer_id_, transfer_buffer_->GetShmId());
EXPECT_TRUE(transfer_buffer_->HaveBuffer());
EXPECT_NE(base::UnguessableToken(), transfer_buffer_->shared_memory_guid());
// Free buffer.
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
transfer_buffer_->Free();
// See it's freed.
EXPECT_FALSE(transfer_buffer_->HaveBuffer());
EXPECT_EQ(base::UnguessableToken(), transfer_buffer_->shared_memory_guid());
// See that it gets reallocated.
EXPECT_TRUE(transfer_buffer_->AcquireResultBuffer() != nullptr);
transfer_buffer_->ReleaseResultBuffer();
EXPECT_TRUE(transfer_buffer_->HaveBuffer());
EXPECT_NE(base::UnguessableToken(), transfer_buffer_->shared_memory_guid());
// Free buffer.
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
transfer_buffer_->Free();
// See it's freed.
EXPECT_FALSE(transfer_buffer_->HaveBuffer());
EXPECT_EQ(base::UnguessableToken(), transfer_buffer_->shared_memory_guid());
// See that it gets reallocated.
base::span<uint8_t> data = transfer_buffer_->AllocUpTo(1);
EXPECT_FALSE(data.empty());
EXPECT_TRUE(transfer_buffer_->HaveBuffer());
EXPECT_NE(base::UnguessableToken(), transfer_buffer_->shared_memory_guid());
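// Record a token and the current put offset so we can check below that
// Free() did not wait for the GPU (no finish).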
int32_t token = helper_->InsertToken();
int32_t put_offset = helper_->GetPutOffsetForTest();
transfer_buffer_->FreePendingToken(data.data(), token);
// Free buffer. Should cause an ordering barrier.
EXPECT_CALL(*command_buffer(), OrderingBarrier(_)).Times(AtMost(1));
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
transfer_buffer_->Free();
// See it's freed.
EXPECT_FALSE(transfer_buffer_->HaveBuffer());
EXPECT_EQ(base::UnguessableToken(), transfer_buffer_->shared_memory_guid());
// Free should not have caused a finish.
EXPECT_LT(command_buffer_->GetState().get_offset, put_offset);
// See that it gets reallocated.
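// Calling GetShmId() is enough to trigger reallocation; its return value is
// ignored here.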
transfer_buffer_->GetShmId();
EXPECT_TRUE(transfer_buffer_->HaveBuffer());
EXPECT_NE(base::UnguessableToken(), transfer_buffer_->shared_memory_guid());
EXPECT_EQ(
kTransferBufferSize - kStartingOffset,
transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
// Test freeing twice.
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
transfer_buffer_->Free();
transfer_buffer_->Free();
}
TEST_F(TransferBufferTest, TooLargeAllocation) {
Initialize();
// Check that we can't allocate larger than the max size.
base::span<uint8_t> data = transfer_buffer_->Alloc(kTransferBufferSize + 1);
EXPECT_TRUE(data.empty());
// Check that if we try to allocate larger than the max, we get the max.
data = transfer_buffer_->AllocUpTo(kTransferBufferSize + 1);
EXPECT_EQ(kTransferBufferSize - kStartingOffset, data.size());
transfer_buffer_->FreePendingToken(data.data(), 1);
}
TEST_F(TransferBufferTest, MemoryAlignmentAfterZeroAllocation) {
Initialize();
base::span<uint8_t> data = transfer_buffer_->Alloc(0);
EXPECT_TRUE(base::IsAligned(data.data(), kAlignment));
transfer_buffer_->FreePendingToken(data.data(), helper_->InsertToken());
// Check that the pointer is aligned on the following allocation.
data = transfer_buffer_->Alloc(4);
EXPECT_TRUE(base::IsAligned(data.data(), kAlignment));
transfer_buffer_->FreePendingToken(data.data(), helper_->InsertToken());
}
TEST_F(TransferBufferTest, ScopedTransferBuffer) {
Initialize();
ScopedTransferBufferPtr scoped_transfer_buffer(1, helper_.get(), transfer_buffer_.get());
EXPECT_EQ(scoped_transfer_buffer.size(), 1u);
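// Only the single allocated byte belongs to the buffer; the addresses
// immediately before and after it do not.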
uint8_t* c = static_cast<uint8_t*>(scoped_transfer_buffer.address());
EXPECT_FALSE(scoped_transfer_buffer.BelongsToBuffer(UNSAFE_TODO(c - 1)));
EXPECT_TRUE(scoped_transfer_buffer.BelongsToBuffer(c));
EXPECT_FALSE(scoped_transfer_buffer.BelongsToBuffer(UNSAFE_TODO(c + 1)));
}
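// Command buffer mock whose CreateTransferBuffer can be intercepted, either
// to simulate an allocation failure or to delegate to the real implementation
// via RealCreateTransferBuffer().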
class MockClientCommandBufferCanFail : public MockClientCommandBufferMockFlush {
public:
MockClientCommandBufferCanFail() = default;
~MockClientCommandBufferCanFail() override = default;
MOCK_METHOD4(CreateTransferBuffer,
scoped_refptr<Buffer>(uint32_t size,
int32_t* id,
uint32_t alignment,
TransferBufferAllocationOption option));
scoped_refptr<gpu::Buffer> RealCreateTransferBuffer(
uint32_t size,
int32_t* id,
uint32_t alignment,
TransferBufferAllocationOption option) {
return MockClientCommandBufferMockFlush::CreateTransferBuffer(
size, id, alignment, option);
}
};
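// Fixture whose transfer buffer starts at kStartTransferBufferSize and may
// grow up to kMaxTransferBufferSize or shrink down to kMinTransferBufferSize.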
class TransferBufferExpandContractTest : public testing::Test {
protected:
static const int32_t kNumCommandEntries = 400;
static const int32_t kCommandBufferSizeBytes =
kNumCommandEntries * sizeof(CommandBufferEntry);
static const uint32_t kStartingOffset = 64;
static const uint32_t kAlignment = 4;
static const uint32_t kStartTransferBufferSize = 256;
static const uint32_t kMaxTransferBufferSize = 1024;
static const uint32_t kMinTransferBufferSize = 128;
TransferBufferExpandContractTest()
: transfer_buffer_id_(0) {
}
void SetUp() override;
void TearDown() override;
MockClientCommandBufferCanFail* command_buffer() const {
return command_buffer_.get();
}
std::unique_ptr<MockClientCommandBufferCanFail> command_buffer_;
std::unique_ptr<CommandBufferHelper> helper_;
std::unique_ptr<TransferBuffer> transfer_buffer_;
int32_t transfer_buffer_id_;
};
void TransferBufferExpandContractTest::SetUp() {
command_buffer_ =
std::make_unique<StrictMock<MockClientCommandBufferCanFail>>();
command_buffer_->SetTokenForSetGetBuffer(0);
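// The command buffer helper's ring buffer is allocated first.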
EXPECT_CALL(*command_buffer(),
CreateTransferBuffer(kCommandBufferSizeBytes, _, _, _))
.WillOnce(
Invoke(command_buffer(),
&MockClientCommandBufferCanFail::RealCreateTransferBuffer))
.RetiresOnSaturation();
helper_ = std::make_unique<CommandBufferHelper>(command_buffer());
ASSERT_EQ(helper_->Initialize(kCommandBufferSizeBytes),
gpu::ContextResult::kSuccess);
transfer_buffer_id_ = command_buffer()->GetNextFreeTransferBufferId();
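// Then the transfer buffer itself is allocated at its starting size.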
EXPECT_CALL(*command_buffer(),
CreateTransferBuffer(kStartTransferBufferSize, _, _, _))
.WillOnce(
Invoke(command_buffer(),
&MockClientCommandBufferCanFail::RealCreateTransferBuffer))
.RetiresOnSaturation();
transfer_buffer_ = std::make_unique<TransferBuffer>(helper_.get());
ASSERT_TRUE(transfer_buffer_->Initialize(
kStartTransferBufferSize, kStartingOffset, kMinTransferBufferSize,
kMaxTransferBufferSize, kAlignment));
}
void TransferBufferExpandContractTest::TearDown() {
if (transfer_buffer_->HaveBuffer()) {
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
}
// For the command buffer helper's own ring buffer, which is destroyed during
// teardown.
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), Flush(_)).Times(1).RetiresOnSaturation();
transfer_buffer_.reset();
}
const int32_t TransferBufferExpandContractTest::kNumCommandEntries;
const int32_t TransferBufferExpandContractTest::kCommandBufferSizeBytes;
const uint32_t TransferBufferExpandContractTest::kStartingOffset;
const uint32_t TransferBufferExpandContractTest::kAlignment;
const uint32_t TransferBufferExpandContractTest::kStartTransferBufferSize;
const uint32_t TransferBufferExpandContractTest::kMaxTransferBufferSize;
const uint32_t TransferBufferExpandContractTest::kMinTransferBufferSize;
TEST_F(TransferBufferExpandContractTest, ExpandWithSmallAllocations) {
int32_t token = helper_->InsertToken();
EXPECT_FALSE(helper_->HasTokenPassed(token));
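// Expects a reallocation to |size| bytes: the old buffer is destroyed, an
// ordering barrier is issued, and a new buffer is created.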
auto ExpectCreateTransferBuffer = [&](int size) {
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), CreateTransferBuffer(size, _, _, _))
.WillOnce(
Invoke(command_buffer(),
&MockClientCommandBufferCanFail::RealCreateTransferBuffer))
.RetiresOnSaturation();
};
// Check it starts at starting size.
EXPECT_EQ(
kStartTransferBufferSize - kStartingOffset,
transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
// Fill the free space.
base::span<uint8_t> data =
transfer_buffer_->AllocUpTo(transfer_buffer_->GetFreeSize());
transfer_buffer_->FreePendingToken(data.data(), token);
// Allocate one more byte to force expansion.
ExpectCreateTransferBuffer(kStartTransferBufferSize * 2);
data = transfer_buffer_->AllocUpTo(1);
EXPECT_EQ(1u, data.size());
transfer_buffer_->FreePendingToken(data.data(), token);
// Fill free space and expand again.
data = transfer_buffer_->AllocUpTo(transfer_buffer_->GetFreeSize());
transfer_buffer_->FreePendingToken(data.data(), token);
ExpectCreateTransferBuffer(kStartTransferBufferSize * 4);
data = transfer_buffer_->AllocUpTo(1);
EXPECT_EQ(1u, data.size());
transfer_buffer_->FreePendingToken(data.data(), token);
// Try to expand again; no expansion should occur because we are at max.
data = transfer_buffer_->AllocUpTo(transfer_buffer_->GetFreeSize());
transfer_buffer_->FreePendingToken(data.data(), token);
EXPECT_CALL(*command_buffer(), Flush(_)).Times(1).RetiresOnSaturation();
data = transfer_buffer_->AllocUpTo(1);
EXPECT_EQ(1u, data.size());
transfer_buffer_->FreePendingToken(data.data(), token);
EXPECT_EQ(kMaxTransferBufferSize - kStartingOffset,
transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
}
// Verify that expansion does not happen when there are blocks in use.
TEST_F(TransferBufferExpandContractTest, NoExpandWithInUseAllocation) {
EXPECT_CALL(*command_buffer(), Flush(_)).Times(1).RetiresOnSaturation();
int32_t token = helper_->InsertToken();
EXPECT_FALSE(helper_->HasTokenPassed(token));
// Check it starts at starting size.
EXPECT_EQ(kStartTransferBufferSize - kStartingOffset,
transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
// Fill the free space in two blocks.
uint32_t block_size_1 = transfer_buffer_->GetFreeSize() / 2;
uint32_t block_size_2 = transfer_buffer_->GetFreeSize() - block_size_1;
base::span<uint8_t> block1 = transfer_buffer_->AllocUpTo(block_size_1);
EXPECT_EQ(block_size_1, block1.size());
base::span<uint8_t> block2 = transfer_buffer_->AllocUpTo(block_size_2);
EXPECT_EQ(block_size_2, block2.size());
transfer_buffer_->FreePendingToken(block1.data(), token);
// Expansion is only attempted when GetFreeSize() is not enough for the
// allocation.
EXPECT_EQ(0u, transfer_buffer_->GetFreeSize());
// Allocate one more byte to try to force expansion; however, there are
// blocks in use, so this should not expand.
base::span<uint8_t> block3 = transfer_buffer_->AllocUpTo(1);
EXPECT_EQ(1u, block3.size());
transfer_buffer_->FreePendingToken(block3.data(), token);
transfer_buffer_->FreePendingToken(block2.data(), token);
// No reallocs should have occurred.
EXPECT_EQ(kStartTransferBufferSize - kStartingOffset,
transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
}
TEST_F(TransferBufferExpandContractTest, ExpandWithLargeAllocations) {
int32_t token = helper_->InsertToken();
EXPECT_FALSE(helper_->HasTokenPassed(token));
auto ExpectCreateTransferBuffer = [&](int size) {
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), CreateTransferBuffer(size, _, _, _))
.WillOnce(
Invoke(command_buffer(),
&MockClientCommandBufferCanFail::RealCreateTransferBuffer))
.RetiresOnSaturation();
};
// Check it starts at starting size.
EXPECT_EQ(kStartTransferBufferSize - kStartingOffset,
transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
// Allocate one byte more than the free space to force expansion.
ExpectCreateTransferBuffer(kStartTransferBufferSize * 2);
base::span<uint8_t> data =
transfer_buffer_->AllocUpTo(transfer_buffer_->GetFreeSize() + 1);
transfer_buffer_->FreePendingToken(data.data(), token);
// Expand again.
ExpectCreateTransferBuffer(kStartTransferBufferSize * 4);
uint32_t size_requested = transfer_buffer_->GetFreeSize() + 1;
data = transfer_buffer_->AllocUpTo(size_requested);
EXPECT_EQ(size_requested, data.size());
transfer_buffer_->FreePendingToken(data.data(), token);
// Try to expand again; no expansion should occur because we are at max.
EXPECT_CALL(*command_buffer(), Flush(_)).Times(1).RetiresOnSaturation();
size_requested =
transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc() + 1;
data = transfer_buffer_->AllocUpTo(size_requested);
EXPECT_LT(data.size(), size_requested);
transfer_buffer_->FreePendingToken(data.data(), token);
EXPECT_EQ(kMaxTransferBufferSize - kStartingOffset,
transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
}
TEST_F(TransferBufferExpandContractTest, ShrinkRingBuffer) {
int32_t token = helper_->InsertToken();
// For this test we want all allocations to be freed immediately.
command_buffer_->SetToken(token);
EXPECT_TRUE(helper_->HasTokenPassed(token));
auto ExpectCreateTransferBuffer = [&](int size) {
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), CreateTransferBuffer(size, _, _, _))
.WillOnce(
Invoke(command_buffer(),
&MockClientCommandBufferCanFail::RealCreateTransferBuffer))
.RetiresOnSaturation();
};
// Expand the ring buffer to the maximum size.
ExpectCreateTransferBuffer(kMaxTransferBufferSize);
base::span<uint8_t> data =
transfer_buffer_->Alloc(kMaxTransferBufferSize - kStartingOffset);
EXPECT_FALSE(data.empty());
transfer_buffer_->FreePendingToken(data.data(), token);
// We shouldn't shrink before we reach the allocation threshold.
for (uint32_t allocated = kMaxTransferBufferSize - kStartingOffset;
allocated < (kStartTransferBufferSize + kStartingOffset) *
(TransferBuffer::kShrinkThreshold);) {
data = transfer_buffer_->Alloc(kStartTransferBufferSize);
EXPECT_FALSE(data.empty());
transfer_buffer_->FreePendingToken(data.data(), token);
allocated += kStartTransferBufferSize;
}
// The next allocation should trip the threshold and shrink.
ExpectCreateTransferBuffer(kStartTransferBufferSize * 2);
data = transfer_buffer_->Alloc(1);
EXPECT_FALSE(data.empty());
transfer_buffer_->FreePendingToken(data.data(), token);
}
TEST_F(TransferBufferExpandContractTest, Contract) {
// Check it starts at starting size.
EXPECT_EQ(
kStartTransferBufferSize - kStartingOffset,
transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
// Free buffer.
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
transfer_buffer_->Free();
// See it's freed.
EXPECT_FALSE(transfer_buffer_->HaveBuffer());
// Try to allocate again; fail the first request so the allocation falls back
// to the minimum size.
EXPECT_CALL(*command_buffer(),
CreateTransferBuffer(kStartTransferBufferSize, _, _, _))
.WillOnce(
DoAll(SetArgPointee<1>(-1), Return(scoped_refptr<gpu::Buffer>())))
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(),
CreateTransferBuffer(kMinTransferBufferSize, _, _, _))
.WillOnce(
Invoke(command_buffer(),
&MockClientCommandBufferCanFail::RealCreateTransferBuffer))
.RetiresOnSaturation();
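// kSize1 is what fits in a default-size (256-byte) buffer; kSize2 is what
// fits in the minimum-size (128-byte) buffer that actually gets allocated.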
const uint32_t kSize1 = 256 - kStartingOffset;
const uint32_t kSize2 = 128 - kStartingOffset;
base::span<uint8_t> data = transfer_buffer_->AllocUpTo(kSize1);
EXPECT_EQ(kSize2, data.size());
EXPECT_EQ(kSize2, transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
transfer_buffer_->FreePendingToken(data.data(), 1);
// Free buffer.
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
transfer_buffer_->Free();
// See it's freed.
EXPECT_FALSE(transfer_buffer_->HaveBuffer());
// Try to allocate again; only the minimum size should be requested this time.
EXPECT_CALL(*command_buffer(),
CreateTransferBuffer(kMinTransferBufferSize, _, _, _))
.WillOnce(
Invoke(command_buffer(),
&MockClientCommandBufferCanFail::RealCreateTransferBuffer))
.RetiresOnSaturation();
data = transfer_buffer_->AllocUpTo(kSize1);
EXPECT_EQ(kSize2, data.size());
EXPECT_EQ(kSize2, transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
transfer_buffer_->FreePendingToken(data.data(), 1);
}
TEST_F(TransferBufferExpandContractTest, OutOfMemory) {
// Free buffer.
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
transfer_buffer_->Free();
// See it's freed.
EXPECT_FALSE(transfer_buffer_->HaveBuffer());
// Try to allocate again, failing every allocation attempt.
EXPECT_CALL(*command_buffer(), CreateTransferBuffer(_, _, _, _))
.WillOnce(
DoAll(SetArgPointee<1>(-1), Return(scoped_refptr<gpu::Buffer>())))
.WillOnce(
DoAll(SetArgPointee<1>(-1), Return(scoped_refptr<gpu::Buffer>())))
.WillOnce(
DoAll(SetArgPointee<1>(-1), Return(scoped_refptr<gpu::Buffer>())))
.RetiresOnSaturation();
const uint32_t kSize1 = 512 - kStartingOffset;
base::span<uint8_t> data = transfer_buffer_->AllocUpTo(kSize1);
ASSERT_TRUE(data.empty());
EXPECT_FALSE(transfer_buffer_->HaveBuffer());
}
TEST_F(TransferBufferExpandContractTest, ReallocsToDefault) {
// Free buffer.
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
transfer_buffer_->Free();
// See it's freed.
EXPECT_FALSE(transfer_buffer_->HaveBuffer());
// See that it gets reallocated.
EXPECT_CALL(*command_buffer(),
CreateTransferBuffer(kStartTransferBufferSize, _, _, _))
.WillOnce(
Invoke(command_buffer(),
&MockClientCommandBufferCanFail::RealCreateTransferBuffer))
.RetiresOnSaturation();
EXPECT_EQ(transfer_buffer_id_, transfer_buffer_->GetShmId());
EXPECT_TRUE(transfer_buffer_->HaveBuffer());
// Check it's the default size.
EXPECT_EQ(
kStartTransferBufferSize - kStartingOffset,
transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
}
TEST_F(TransferBufferExpandContractTest, Shrink) {
uint32_t alloc_size = transfer_buffer_->GetFreeSize();
EXPECT_EQ(kStartTransferBufferSize - kStartingOffset, alloc_size);
base::span<uint8_t> data = transfer_buffer_->AllocUpTo(alloc_size);
ASSERT_FALSE(data.empty());
EXPECT_EQ(alloc_size, data.size());
EXPECT_GT(alloc_size, 0u);
EXPECT_EQ(0u, transfer_buffer_->GetFreeSize());
// Shrink once.
const uint32_t shrink_size1 = 64;
EXPECT_LT(shrink_size1, alloc_size);
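// The new size is rounded up to kAlignment, so shrinking to
// (shrink_size1 - kAlignment + 1) bytes actually keeps shrink_size1 bytes.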
transfer_buffer_->ShrinkLastBlock(shrink_size1 - kAlignment + 1);
EXPECT_EQ(alloc_size - shrink_size1, transfer_buffer_->GetFreeSize());
// Shrink again.
const uint32_t shrink_size2 = 32;
EXPECT_LT(shrink_size2, shrink_size1);
transfer_buffer_->ShrinkLastBlock(shrink_size2);
EXPECT_EQ(alloc_size - shrink_size2, transfer_buffer_->GetFreeSize());
// Shrink to zero (minimum size is kAlignment).
transfer_buffer_->ShrinkLastBlock(0);
EXPECT_EQ(alloc_size - kAlignment, transfer_buffer_->GetFreeSize());
transfer_buffer_->FreePendingToken(data.data(), 1);
}
TEST_F(TransferBufferTest, MultipleAllocsAndFrees) {
// An arbitrary size that is already aligned, so no padding is needed.
constexpr uint32_t kArbitrarySize = 16;
Initialize();
uint32_t original_free_size = transfer_buffer_->GetFreeSize();
EXPECT_EQ(transfer_buffer_->GetSize(), original_free_size);
EXPECT_EQ(transfer_buffer_->GetFragmentedFreeSize(), original_free_size);
base::span<uint8_t> data1 = transfer_buffer_->Alloc(kArbitrarySize);
EXPECT_FALSE(data1.empty());
EXPECT_EQ(transfer_buffer_->GetSize(), original_free_size);
EXPECT_EQ(transfer_buffer_->GetFreeSize(),
original_free_size - kArbitrarySize);
EXPECT_EQ(transfer_buffer_->GetFragmentedFreeSize(),
original_free_size - kArbitrarySize);
base::span<uint8_t> data2 = transfer_buffer_->Alloc(kArbitrarySize);
EXPECT_FALSE(data2.empty());
EXPECT_EQ(transfer_buffer_->GetSize(), original_free_size);
EXPECT_EQ(transfer_buffer_->GetFreeSize(),
original_free_size - kArbitrarySize * 2);
EXPECT_EQ(transfer_buffer_->GetFragmentedFreeSize(),
original_free_size - kArbitrarySize * 2);
base::span<uint8_t> data3 = transfer_buffer_->Alloc(kArbitrarySize);
EXPECT_FALSE(data3.empty());
EXPECT_EQ(transfer_buffer_->GetSize(), original_free_size);
EXPECT_EQ(transfer_buffer_->GetFreeSize(),
original_free_size - kArbitrarySize * 3);
EXPECT_EQ(transfer_buffer_->GetFragmentedFreeSize(),
original_free_size - kArbitrarySize * 3);
// Generate tokens in order, but submit out of order.
auto token1 = helper_->InsertToken();
auto token2 = helper_->InsertToken();
auto token3 = helper_->InsertToken();
auto token4 = helper_->InsertToken();
// Freeing the final block here is not perceivable because it just becomes a
// hole.
transfer_buffer_->FreePendingToken(data3.data(), token3);
EXPECT_EQ(transfer_buffer_->GetSize(), original_free_size);
EXPECT_EQ(transfer_buffer_->GetFreeSize(),
original_free_size - kArbitrarySize * 3);
EXPECT_EQ(transfer_buffer_->GetFragmentedFreeSize(),
original_free_size - kArbitrarySize * 3);
// Freeing the first block here leaves the second block in use plus a hole
// after it, so two blocks are still perceived as not free. The free size (no
// waiting) has not changed because free_offset_ has not moved, but the
// fragmented free size gets bigger because in_use_offset_ has moved past the
// first block.
transfer_buffer_->FreePendingToken(data1.data(), token1);
EXPECT_EQ(transfer_buffer_->GetSize(), original_free_size);
EXPECT_EQ(transfer_buffer_->GetFreeSize(),
original_free_size - kArbitrarySize * 3);
EXPECT_EQ(transfer_buffer_->GetFragmentedFreeSize(),
original_free_size - kArbitrarySize * 2);
// Allocate a 4th block. This leaves the state as: Freed Used Freed Used
base::span<uint8_t> data4 = transfer_buffer_->Alloc(kArbitrarySize);
EXPECT_EQ(transfer_buffer_->GetSize(), original_free_size);
EXPECT_EQ(transfer_buffer_->GetFreeSize(),
original_free_size - kArbitrarySize * 4);
EXPECT_EQ(transfer_buffer_->GetFragmentedFreeSize(),
original_free_size - kArbitrarySize * 3);
// Freeing the second and fourth block makes everything free, so back to
// original size.
transfer_buffer_->FreePendingToken(data4.data(), token4);
transfer_buffer_->FreePendingToken(data2.data(), token2);
EXPECT_EQ(transfer_buffer_->GetSize(), original_free_size);
EXPECT_EQ(transfer_buffer_->GetFreeSize(), original_free_size);
EXPECT_EQ(transfer_buffer_->GetFragmentedFreeSize(), original_free_size);
}
#if defined(GTEST_HAS_DEATH_TEST) && DCHECK_IS_ON()
TEST_F(TransferBufferTest, ResizeDuringScopedResultPtr) {
Initialize();
ScopedResultPtr<int> ptr(transfer_buffer_.get());
// If an attempt is made to resize the transfer buffer while a result
// pointer exists, we should hit a CHECK. Allocate just enough to force a
// resize.
ASSERT_DEATH(transfer_buffer_->AllocUpTo(transfer_buffer_->GetFreeSize() + 1),
"outstanding_result_pointer_");
}
TEST_F(TransferBufferTest, AllocDuringScopedResultPtr) {
Initialize();
ScopedResultPtr<int> ptr(transfer_buffer_.get());
// If an attempt is made to allocate any amount in the transfer buffer while a
// result pointer exists, we should hit a DCHECK. A small allocation that does
// not force a resize is enough, unlike the test above.
ASSERT_DEATH(transfer_buffer_->AllocUpTo(1),
"outstanding_result_pointer_");
}
TEST_F(TransferBufferTest, TwoScopedResultPtrs) {
Initialize();
// Attempting to create two ScopedResultPtrs at the same time should DCHECK.
ScopedResultPtr<int> ptr(transfer_buffer_.get());
ASSERT_DEATH(ScopedResultPtr<int>(transfer_buffer_.get()),
"outstanding_result_pointer_");
}
#endif // defined(GTEST_HAS_DEATH_TEST) && DCHECK_IS_ON()
} // namespace gpu