blob: 938711c986dfa7fd1bd880840e5db009ad448163 [file] [log] [blame]
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "device/base/synchronization/one_writer_seqlock.h"
#include <stdlib.h>
#include <atomic>
#include "base/macros.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/threading/platform_thread.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace device {
// Basic test to make sure that basic operation works correctly.
// Payload copied through the seqlock by the writer and snapshotted by the
// readers. 32 * sizeof(uint32_t) = 128 bytes, so a single copy spans more
// than one cache line and a racy copy can be torn partway through.
struct TestData {
// Data copies larger than a cache line.
uint32_t buffer[32];
};
// Reader thread for the basic many-readers test: spins until the writer
// signals readiness via |ready_|, then repeatedly snapshots |data_| using the
// seqlock read protocol and checks each snapshot for internal consistency.
class BasicSeqLockTestThread : public base::PlatformThread::Delegate {
 public:
  BasicSeqLockTestThread() = default;

  // Must be called before the thread is started. |seqlock|, |data| and
  // |ready| must outlive this thread.
  void Init(OneWriterSeqLock* seqlock,
            TestData* data,
            std::atomic<int>* ready) {
    seqlock_ = seqlock;
    data_ = data;
    ready_ = ready;
  }

  void ThreadMain() override {
    // Wait until the writer has published at least one consistent snapshot.
    while (!*ready_) {
      base::PlatformThread::YieldCurrentThread();
    }
    for (unsigned i = 0; i < 1000; ++i) {
      TestData copy;
      base::subtle::Atomic32 version;
      // Standard seqlock read loop: redo the copy while a write raced it.
      do {
        version = seqlock_->ReadBegin();
        OneWriterSeqLock::AtomicReaderMemcpy(&copy, data_, sizeof(TestData));
      } while (seqlock_->ReadRetry(version));
      // The writer maintains buffer[j] == buffer[0] + buffer[j - 1]; a torn
      // snapshot would violate this invariant.
      for (unsigned j = 1; j < 32; ++j)
        EXPECT_EQ(copy.buffer[j], copy.buffer[0] + copy.buffer[j - 1]);
    }
    // Signal the writer that this reader has finished.
    --(*ready_);
  }

 private:
  // Default the raw pointers to nullptr so that a ThreadMain() run before
  // Init() crashes deterministically instead of dereferencing indeterminate
  // pointers (previously these members were left uninitialized).
  OneWriterSeqLock* seqlock_ = nullptr;
  TestData* data_ = nullptr;
  std::atomic<int>* ready_ = nullptr;

  DISALLOW_COPY_AND_ASSIGN(BasicSeqLockTestThread);
};
// Reader thread for the MaxRetries test: while the writer holds a write open,
// each bounded ReadBegin() must give up after the retry limit and return the
// odd (write-in-progress) sequence version.
class MaxRetriesSeqLockTestThread : public base::PlatformThread::Delegate {
 public:
  MaxRetriesSeqLockTestThread() = default;

  // Must be called before the thread is started. |seqlock| and |ready| must
  // outlive this thread.
  void Init(OneWriterSeqLock* seqlock, std::atomic<int>* ready) {
    seqlock_ = seqlock;
    ready_ = ready;
  }

  void ThreadMain() override {
    // Wait for the writer to enter its critical section.
    while (!*ready_) {
      base::PlatformThread::YieldCurrentThread();
    }
    for (unsigned i = 0; i < 10; ++i) {
      base::subtle::Atomic32 version;
      // With the write held open, a bounded read must bail out after at most
      // 100 retries; an odd version indicates the in-progress write.
      version = seqlock_->ReadBegin(100);
      EXPECT_NE(version & 1, 0);
    }
    // Signal the writer that this reader has finished.
    --(*ready_);
  }

 private:
  // Default the raw pointers to nullptr so that a ThreadMain() run before
  // Init() crashes deterministically instead of dereferencing indeterminate
  // pointers (previously these members were left uninitialized).
  OneWriterSeqLock* seqlock_ = nullptr;
  std::atomic<int>* ready_ = nullptr;

  DISALLOW_COPY_AND_ASSIGN(MaxRetriesSeqLockTestThread);
};
#if defined(OS_ANDROID)
#define MAYBE_ManyThreads FLAKY_ManyThreads
#else
#define MAYBE_ManyThreads ManyThreads
#endif

// One writer (this test's main thread) continuously publishes internally
// consistent snapshots of |data| while ten reader threads concurrently
// validate the snapshots they observe through the seqlock.
TEST(OneWriterSeqLockTest, MAYBE_ManyThreads) {
  OneWriterSeqLock seqlock;
  TestData data;
  std::atomic<int> ready(0);

  ANNOTATE_BENIGN_RACE_SIZED(&data, sizeof(data), "Racey reads are discarded");

  static const unsigned kNumReaderThreads = 10;
  BasicSeqLockTestThread threads[kNumReaderThreads];
  base::PlatformThreadHandle handles[kNumReaderThreads];

  for (uint32_t i = 0; i < kNumReaderThreads; ++i)
    threads[i].Init(&seqlock, &data, &ready);
  for (uint32_t i = 0; i < kNumReaderThreads; ++i)
    ASSERT_TRUE(base::PlatformThread::Create(0, &threads[i], &handles[i]));

  // The main thread is the writer, and the spawned are readers.
  uint32_t generation = 0;
  while (true) {
    // Build a snapshot obeying buffer[j] == buffer[0] + buffer[j - 1], the
    // invariant the readers verify.
    TestData next;
    next.buffer[0] = generation++;
    for (unsigned j = 1; j < 32; ++j)
      next.buffer[j] = next.buffer[0] + next.buffer[j - 1];

    seqlock.WriteBegin();
    OneWriterSeqLock::AtomicWriterMemcpy(&data, &next, sizeof(TestData));
    seqlock.WriteEnd();

    // Release the readers only after the first consistent snapshot exists.
    if (generation == 1)
      ready += kNumReaderThreads;
    // Each reader decrements |ready| when done; stop once all have finished.
    if (!ready)
      break;
  }

  for (unsigned i = 0; i < kNumReaderThreads; ++i)
    base::PlatformThread::Join(handles[i]);
}
// Verifies the bounded-read path: with a write held open for the entire test,
// every reader's ReadBegin(max_retries) must give up and return the odd
// (write-in-progress) sequence version rather than spin forever.
TEST(OneWriterSeqLockTest, MaxRetries) {
  OneWriterSeqLock seqlock;
  std::atomic<int> ready(0);

  static const unsigned kNumReaderThreads = 3;
  MaxRetriesSeqLockTestThread threads[kNumReaderThreads];
  base::PlatformThreadHandle handles[kNumReaderThreads];

  for (uint32_t i = 0; i < kNumReaderThreads; ++i)
    threads[i].Init(&seqlock, &ready);
  for (uint32_t i = 0; i < kNumReaderThreads; ++i)
    ASSERT_TRUE(base::PlatformThread::Create(0, &threads[i], &handles[i]));

  // The main thread is the writer, and the spawned are readers. Keep the
  // write open until every reader has observed it and finished.
  seqlock.WriteBegin();
  ready += kNumReaderThreads;
  while (ready)
    base::PlatformThread::YieldCurrentThread();
  seqlock.WriteEnd();

  for (unsigned i = 0; i < kNumReaderThreads; ++i)
    base::PlatformThread::Join(handles[i]);
}
} // namespace device