blob: 2120b6a2ceab0808d0924baf5953e780fb87f159 [file] [log] [blame]
/*
* Copyright (C) 2013 Google Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <atomic>
#include <memory>
#include <utility>
#include "base/atomic_ref_count.h"
#include "base/location.h"
#include "base/memory/ptr_util.h"
#include "base/synchronization/waitable_event.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/blink/public/platform/platform.h"
#include "third_party/blink/renderer/platform/cross_thread_functional.h"
#include "third_party/blink/renderer/platform/heap/address_cache.h"
#include "third_party/blink/renderer/platform/heap/handle.h"
#include "third_party/blink/renderer/platform/heap/heap.h"
#include "third_party/blink/renderer/platform/heap/heap_linked_stack.h"
#include "third_party/blink/renderer/platform/heap/heap_stats_collector.h"
#include "third_party/blink/renderer/platform/heap/heap_test_utilities.h"
#include "third_party/blink/renderer/platform/heap/marking_visitor.h"
#include "third_party/blink/renderer/platform/heap/self_keep_alive.h"
#include "third_party/blink/renderer/platform/heap/stack_frame_depth.h"
#include "third_party/blink/renderer/platform/heap/thread_state.h"
#include "third_party/blink/renderer/platform/heap/visitor.h"
#include "third_party/blink/renderer/platform/scheduler/public/post_cross_thread_task.h"
#include "third_party/blink/renderer/platform/scheduler/public/thread.h"
#include "third_party/blink/renderer/platform/testing/unit_test_helpers.h"
#include "third_party/blink/renderer/platform/wtf/hash_traits.h"
#include "third_party/blink/renderer/platform/wtf/linked_hash_set.h"
namespace blink {
namespace {
// Basic garbage-collected test object wrapping an int. The static
// destructor_calls_ counter lets tests assert how many instances were
// finalized by a GC.
class IntWrapper : public GarbageCollectedFinalized<IntWrapper> {
public:
static IntWrapper* Create(int x) {
return MakeGarbageCollected<IntWrapper>(x);
}
virtual ~IntWrapper() {
// Relaxed is sufficient: tests only read the total after threads have
// finished / GCs have completed.
destructor_calls_.fetch_add(1, std::memory_order_relaxed);
}
static std::atomic_int destructor_calls_;
void Trace(blink::Visitor* visitor) {}
int Value() const { return x_; }
bool operator==(const IntWrapper& other) const {
return other.Value() == Value();
}
unsigned GetHash() { return IntHash<int>::GetHash(x_); }
IntWrapper(int x) : x_(x) {}
private:
// No default construction: every wrapper must carry an explicit value.
IntWrapper() = delete;
int x_;
};
std::atomic_int IntWrapper::destructor_calls_{0};
struct IntWrapperHash {
static unsigned GetHash(const IntWrapper& key) {
return WTF::HashInt(static_cast<uint32_t>(key.Value()));
}
static bool Equal(const IntWrapper& a, const IntWrapper& b) { return a == b; }
};
// Compile-time checks: the IsTraceable<> trait must detect the Trace()
// method both on IntWrapper itself and on heap collections that contain it.
static_assert(WTF::IsTraceable<IntWrapper>::value,
"IsTraceable<> template failed to recognize trace method.");
static_assert(WTF::IsTraceable<HeapVector<IntWrapper>>::value,
"HeapVector<IntWrapper> must be traceable.");
static_assert(WTF::IsTraceable<HeapDeque<IntWrapper>>::value,
"HeapDeque<IntWrapper> must be traceable.");
static_assert(WTF::IsTraceable<HeapHashSet<IntWrapper, IntWrapperHash>>::value,
"HeapHashSet<IntWrapper> must be traceable.");
static_assert(WTF::IsTraceable<HeapHashMap<int, IntWrapper>>::value,
"HeapHashMap<int, IntWrapper> must be traceable.");
// Hash-table key whose move constructor deliberately degrades to a copy.
// Used to verify that hash tables remain correct for keys that do not
// actually move their state. hash_ == 0 marks the empty slot, hash_ == -1
// the deleted slot.
class KeyWithCopyingMoveConstructor final {
public:
struct Hash final {
STATIC_ONLY(Hash);
public:
static unsigned GetHash(const KeyWithCopyingMoveConstructor& key) {
return key.hash_;
}
static bool Equal(const KeyWithCopyingMoveConstructor& x,
const KeyWithCopyingMoveConstructor& y) {
return x.hash_ == y.hash_;
}
static constexpr bool safe_to_compare_to_empty_or_deleted = true;
};
KeyWithCopyingMoveConstructor() = default;
// Constructs the magic "deleted slot" value for the hash table.
KeyWithCopyingMoveConstructor(WTF::HashTableDeletedValueType) : hash_(-1) {}
~KeyWithCopyingMoveConstructor() = default;
KeyWithCopyingMoveConstructor(unsigned hash, const String& string)
: hash_(hash), string_(string) {
// 0 and -1 are reserved for the empty and deleted sentinel values.
DCHECK_NE(hash_, 0);
DCHECK_NE(hash_, -1);
}
KeyWithCopyingMoveConstructor(const KeyWithCopyingMoveConstructor&) = default;
// The move constructor delegates to the copy constructor intentionally:
// the source is left untouched, which is what the surrounding tests rely on.
KeyWithCopyingMoveConstructor(KeyWithCopyingMoveConstructor&& x)
: KeyWithCopyingMoveConstructor(x) {}
KeyWithCopyingMoveConstructor& operator=(
const KeyWithCopyingMoveConstructor&) = default;
bool operator==(const KeyWithCopyingMoveConstructor& x) const {
return hash_ == x.hash_;
}
bool IsHashTableDeletedValue() const { return hash_ == -1; }
private:
int hash_ = 0;
String string_;
};
// Size canary: Persistent<T> must not grow beyond four pointers. If this
// assert fires, a change to Persistent added unexpected per-handle overhead.
struct SameSizeAsPersistent {
void* pointer_[4];
};
static_assert(sizeof(Persistent<IntWrapper>) <= sizeof(SameSizeAsPersistent),
"Persistent handle should stay small");
// Hash-table key that records the ThreadState of the thread that created it,
// plus a discriminating number. The destructor verifies a marker is only
// destroyed on its creating thread or while holding one of the two sentinel
// states (empty = null thread, deleted = all-ones pointer).
class ThreadMarker {
 public:
  // Empty (zero) state: nullptr keeps the kEmptyValueIsZero trait valid.
  ThreadMarker() : creating_thread_(nullptr), num_(0) {}
  ThreadMarker(unsigned i)
      : creating_thread_(ThreadState::Current()), num_(i) {}
  // Constructs the magic "deleted slot" value used by the hash table.
  ThreadMarker(WTF::HashTableDeletedValueType deleted)
      : creating_thread_(reinterpret_cast<ThreadState*>(-1)), num_(0) {}
  ~ThreadMarker() {
    EXPECT_TRUE((creating_thread_ == ThreadState::Current()) ||
                (creating_thread_ == nullptr) ||
                (creating_thread_ == reinterpret_cast<ThreadState*>(-1)));
  }
  bool IsHashTableDeletedValue() const {
    return creating_thread_ == reinterpret_cast<ThreadState*>(-1);
  }
  bool operator==(const ThreadMarker& other) const {
    return other.creating_thread_ == creating_thread_ && other.num_ == num_;
  }
  ThreadState* creating_thread_;
  unsigned num_;
};
struct ThreadMarkerHash {
static unsigned GetHash(const ThreadMarker& key) {
return static_cast<unsigned>(
reinterpret_cast<uintptr_t>(key.creating_thread_) + key.num_);
}
static bool Equal(const ThreadMarker& a, const ThreadMarker& b) {
return a == b;
}
static const bool safe_to_compare_to_empty_or_deleted = false;
};
// A pair holding one strong and one weak reference to IntWrapper.
typedef std::pair<Member<IntWrapper>, WeakMember<IntWrapper>> StrongWeakPair;
// StrongWeakPair wrapper implementing the hash-table weak-handling protocol:
// the entry removes itself from the collection when its weak half dies.
struct PairWithWeakHandling : public StrongWeakPair {
DISALLOW_NEW();
public:
// Regular constructor.
PairWithWeakHandling(IntWrapper* one, IntWrapper* two)
: StrongWeakPair(one, two) {
DCHECK(one); // We use null first field to indicate empty slots in the hash
// table.
}
// The HashTable (via the HashTrait) calls this constructor with a
// placement new to mark slots in the hash table as being deleted. We will
// never call trace or the destructor on these slots. We mark ourselves
// deleted with a pointer to -1 in the first field.
PairWithWeakHandling(WTF::HashTableDeletedValueType)
: StrongWeakPair(WTF::kHashTableDeletedValue, nullptr) {}
// Used by the HashTable (via the HashTrait) to skip deleted slots in the
// table. Recognizes objects that were 'constructed' using the above
// constructor.
bool IsHashTableDeletedValue() const {
return first.IsHashTableDeletedValue();
}
// An entry is alive as long as its weak half still points at a live object.
bool IsAlive() { return ThreadHeap::IsHeapObjectAlive(second); }
// Since we don't allocate independent objects of this type, we don't need
// a regular trace method. Instead, we use a traceInCollection method. If
// the entry should be deleted from the collection we return true and don't
// trace the strong pointer.
template <typename VisitorDispatcher>
bool TraceInCollection(VisitorDispatcher visitor,
WTF::WeakHandlingFlag weakness) {
HashTraits<WeakMember<IntWrapper>>::TraceInCollection(visitor, second,
weakness);
// Dead weak half: signal the table to drop this entry, leaving the
// strong half untraced.
if (!ThreadHeap::IsHeapObjectAlive(second))
return true;
visitor->Trace(first);
return false;
}
// Incremental marking requires that these objects have a regular tracing
// method that is used for eagerly tracing through them in case they are
// in-place constructed in a container. In this case, we only care about
// strong fields.
void Trace(blink::Visitor* visitor) { visitor->Trace(first); }
};
// Hash traits adapter that turns any type with a TraceInCollection/IsAlive
// pair into a weakly-handled hash-table element.
template <typename T>
struct WeakHandlingHashTraits : WTF::SimpleClassHashTraits<T> {
// We want to treat the object as a weak object in the sense that it can
// disappear from hash sets and hash maps.
static const WTF::WeakHandlingFlag kWeakHandlingFlag = WTF::kWeakHandling;
// Normally whether or not an object needs tracing is inferred
// automatically from the presence of the trace method, but we don't
// necessarily have a trace method, and we may not need one because T
// can perhaps only be allocated inside collections, never as independent
// objects. Explicitly mark this as needing tracing and it will be traced
// in collections using the traceInCollection method, which it must have.
template <typename U = void>
struct IsTraceableInCollection {
static const bool value = true;
};
// The traceInCollection method traces differently depending on whether we
// are strongifying the trace operation. We strongify the trace operation
// when there are active iterators on the object. In this case all
// WeakMembers are marked like strong members so that elements do not
// suddenly disappear during iteration. Returns true if weak pointers to
// dead objects were found: In this case any strong pointers were not yet
// traced and the entry should be removed from the collection.
template <typename VisitorDispatcher>
static bool TraceInCollection(VisitorDispatcher visitor,
T& t,
WTF::WeakHandlingFlag weakness) {
return t.TraceInCollection(visitor, weakness);
}
// Delegates liveness to the element itself (its weak half).
static bool IsAlive(T& t) { return t.IsAlive(); }
};
} // namespace
} // namespace blink
namespace WTF {
// Forward declaration so the specializations below can be written without
// pulling in the primary template's definition.
template <typename T>
struct DefaultHash;
template <>
struct DefaultHash<blink::ThreadMarker> {
typedef blink::ThreadMarkerHash Hash;
};
// ThreadMarkerHash is the default hash for ThreadMarker
template <>
struct HashTraits<blink::ThreadMarker>
: GenericHashTraits<blink::ThreadMarker> {
// The default-constructed ThreadMarker is all-zero, so empty slots can be
// memset to zero.
static const bool kEmptyValueIsZero = true;
static void ConstructDeletedValue(blink::ThreadMarker& slot, bool) {
new (NotNull, &slot) blink::ThreadMarker(kHashTableDeletedValue);
}
static bool IsDeletedValue(const blink::ThreadMarker& slot) {
return slot.IsHashTableDeletedValue();
}
};
// The hash algorithm for our custom pair class is just the standard double
// hash for pairs. Note that this means you can't mutate either of the parts of
// the pair while they are in the hash table, as that would change their hash
// code and thus their preferred placement in the table.
template <>
struct DefaultHash<blink::PairWithWeakHandling> {
typedef PairHash<blink::Member<blink::IntWrapper>,
blink::WeakMember<blink::IntWrapper>>
Hash;
};
// Custom traits for the pair. These are weakness handling traits, which means
// PairWithWeakHandling must implement the traceInCollection method.
// In addition, these traits are concerned with the two magic values for the
// object, that represent empty and deleted slots in the hash table. The
// SimpleClassHashTraits allow empty slots in the table to be initialized with
// memset to zero, and we use -1 in the first part of the pair to represent
// deleted slots.
template <>
struct HashTraits<blink::PairWithWeakHandling>
: blink::WeakHandlingHashTraits<blink::PairWithWeakHandling> {
static const bool kHasIsEmptyValueFunction = true;
// A null strong half marks an empty slot (see the DCHECK in the regular
// constructor).
static bool IsEmptyValue(const blink::PairWithWeakHandling& value) {
return !value.first;
}
static void ConstructDeletedValue(blink::PairWithWeakHandling& slot, bool) {
new (NotNull, &slot) blink::PairWithWeakHandling(kHashTableDeletedValue);
}
static bool IsDeletedValue(const blink::PairWithWeakHandling& value) {
return value.IsHashTableDeletedValue();
}
};
// Traceability follows the underlying std::pair of (strong, weak) members.
template <>
struct IsTraceable<blink::PairWithWeakHandling> {
static const bool value = IsTraceable<blink::StrongWeakPair>::value;
};
// Wire up the nested Hash functor and default class traits for the
// copy-on-move key type.
template <>
struct DefaultHash<blink::KeyWithCopyingMoveConstructor> {
using Hash = blink::KeyWithCopyingMoveConstructor::Hash;
};
template <>
struct HashTraits<blink::KeyWithCopyingMoveConstructor>
: public SimpleClassHashTraits<blink::KeyWithCopyingMoveConstructor> {};
} // namespace WTF
namespace blink {
// RAII scope that asserts it is used on the owning thread and completes any
// outstanding lazy sweeping when it goes out of scope. The StackState
// argument is currently only part of the interface; this base does not use
// it beyond the thread check.
class TestGCCollectGarbageScope {
public:
explicit TestGCCollectGarbageScope(BlinkGC::StackState state) {
DCHECK(ThreadState::Current()->CheckThread());
}
~TestGCCollectGarbageScope() { ThreadState::Current()->CompleteSweep(); }
};
// RAII scope that brackets a full atomic GC pause: marking is started and the
// atomic pause prologue runs on construction; the mark-phase epilogue and
// atomic pause epilogue (with eager sweeping) run on destruction.
class TestGCScope : public TestGCCollectGarbageScope {
public:
explicit TestGCScope(BlinkGC::StackState state)
: TestGCCollectGarbageScope(state),
atomic_pause_scope_(ThreadState::Current()) {
// NOTE(review): marking is started with GCReason::kTesting while the
// prologue below is entered with kPreciseGC — looks inconsistent;
// confirm whether both should use the same reason.
ThreadState::Current()->Heap().stats_collector()->NotifyMarkingStarted(
BlinkGC::GCReason::kTesting);
ThreadState::Current()->AtomicPausePrologue(state, BlinkGC::kAtomicMarking,
BlinkGC::GCReason::kPreciseGC);
}
~TestGCScope() {
ThreadState::Current()->MarkPhaseEpilogue(BlinkGC::kAtomicMarking);
ThreadState::Current()->AtomicPauseEpilogue(BlinkGC::kAtomicMarking,
BlinkGC::kEagerSweeping);
}
private:
ThreadState::AtomicPauseScope atomic_pause_scope_;
};
// Minimal garbage-collected object with a payload and a vtable.
class SimpleObject : public GarbageCollected<SimpleObject> {
 public:
  static SimpleObject* Create() { return MakeGarbageCollected<SimpleObject>(); }
  SimpleObject() = default;
  void Trace(blink::Visitor* visitor) {}
  char GetPayload(int i) { return payload[i]; }
  // This virtual method is unused but it is here to make sure
  // that this object has a vtable. This object is used
  // as the super class for objects that also have garbage
  // collected mixins and having a virtual here makes sure
  // that adjustment is needed both for marking and for isAlive
  // checks.
  virtual void VirtualMethod() {}

 protected:
  // Zero-initialized so GetPayload() never reads indeterminate bytes even if
  // an instance is ever constructed outside the (zeroing) Oilpan heap.
  char payload[64] = {};
};
// Finalized GC base class counting destructor invocations; paired with
// HeapTestSubClass to test finalization through a base pointer.
class HeapTestSuperClass
: public GarbageCollectedFinalized<HeapTestSuperClass> {
public:
static HeapTestSuperClass* Create() {
return MakeGarbageCollected<HeapTestSuperClass>();
}
HeapTestSuperClass() = default;
virtual ~HeapTestSuperClass() { ++destructor_calls_; }
static int destructor_calls_;
void Trace(blink::Visitor* visitor) {}
};
int HeapTestSuperClass::destructor_calls_ = 0;
// Plain, non-GC base class. Its only purpose is to give HeapTestSubClass a
// second base so the GC'd base sits at a non-zero offset.
class HeapTestOtherSuperClass {
public:
int payload;
};
// Magic constant used to detect destructor calls on corrupted/stale objects.
static const size_t kClassMagic = 0xABCDDBCA;
// Multiply-inherited subclass: the GC'd base (HeapTestSuperClass) is the
// second base, so pointer adjustment is exercised during finalization.
class HeapTestSubClass : public HeapTestOtherSuperClass,
public HeapTestSuperClass {
public:
static HeapTestSubClass* Create() {
return MakeGarbageCollected<HeapTestSubClass>();
}
HeapTestSubClass() : magic_(kClassMagic) {}
~HeapTestSubClass() override {
// The magic field must be intact when the destructor runs.
EXPECT_EQ(kClassMagic, magic_);
++destructor_calls_;
}
static int destructor_calls_;
private:
const size_t magic_;
};
int HeapTestSubClass::destructor_calls_ = 0;
// Garbage-collected object holding a 1000-byte array, pre-filled with a
// repeating 0..127 ramp.
class HeapAllocatedArray : public GarbageCollected<HeapAllocatedArray> {
 public:
  HeapAllocatedArray() {
    for (int index = 0; index < kArraySize; ++index)
      array_[index] = index % 128;
  }
  int8_t at(size_t i) { return array_[i]; }
  void Trace(blink::Visitor* visitor) {}

 private:
  static const int kArraySize = 1000;
  int8_t array_[kArraySize];
};
// Reference-counted (non-GC) int wrapper, used to mix off-heap objects into
// heap collections. Counts destructor invocations like IntWrapper.
class OffHeapInt : public RefCounted<OffHeapInt> {
public:
static scoped_refptr<OffHeapInt> Create(int x) {
return base::AdoptRef(new OffHeapInt(x));
}
virtual ~OffHeapInt() { ++destructor_calls_; }
static int destructor_calls_;
int Value() const { return x_; }
bool operator==(const OffHeapInt& other) const {
return other.Value() == Value();
}
unsigned GetHash() { return IntHash<int>::GetHash(x_); }
void VoidFunction() {}
protected:
OffHeapInt(int x) : x_(x) {}
private:
// No default construction: every instance carries an explicit value.
OffHeapInt() = delete;
int x_;
};
int OffHeapInt::destructor_calls_ = 0;
// Base harness for multi-threaded GC tests: spawns kNumberOfThreads threads,
// runs the subclass's RunThread() on each with an attached ThreadState, and
// waits until all threads have detached before deleting itself.
class ThreadedTesterBase {
protected:
static void Test(ThreadedTesterBase* tester) {
std::unique_ptr<Thread> threads[kNumberOfThreads];
for (auto& thread : threads) {
thread = Platform::Current()->CreateThread(
ThreadCreationParams(WebThreadType::kTestThread)
.SetThreadNameForTest("blink gc testing thread"));
PostCrossThreadTask(
*thread->GetTaskRunner(), FROM_HERE,
CrossThreadBind(ThreadFunc, CrossThreadUnretained(tester)));
}
// Blocks until the last worker signals; only then is it safe to delete
// the tester the workers were given an unretained pointer to.
tester->done_.Wait();
delete tester;
}
virtual void RunThread() = 0;
protected:
static const int kNumberOfThreads = 10;
static const int kGcPerThread = 5;
static const int kNumberOfAllocations = 50;
virtual ~ThreadedTesterBase() = default;
// True once every thread has performed its quota of GCs.
inline bool Done() const {
return gc_count_.load(std::memory_order_acquire) >=
kNumberOfThreads * kGcPerThread;
}
std::atomic_int gc_count_{0};
private:
static void ThreadFunc(ThreadedTesterBase* tester) {
ThreadState::AttachCurrentThread();
tester->RunThread();
// Runs the termination GCs for this thread before the count-down.
ThreadState::DetachCurrentThread();
if (!tester->threads_to_finish_.Decrement())
tester->done_.Signal();
}
base::AtomicRefCount threads_to_finish_{kNumberOfThreads};
base::WaitableEvent done_;
};
// Needed to give this variable a definition (the initializer above is only a
// declaration), so that subclasses can use it.
const int ThreadedTesterBase::kNumberOfThreads;
// Stress test: each thread allocates IntWrappers, churns Persistent and
// CrossThreadPersistent handles, and runs precise GCs. After all threads
// detach, the destructor verifies every cross-thread persistent was cleared
// by the threads' termination GCs.
class ThreadedHeapTester : public ThreadedTesterBase {
public:
static void Test() { ThreadedTesterBase::Test(new ThreadedHeapTester); }
~ThreadedHeapTester() override {
// Verify that the threads cleared their CTPs when
// terminating, preventing access to a finalized heap.
for (auto& global_int_wrapper : cross_persistents_) {
DCHECK(global_int_wrapper.get());
EXPECT_FALSE(global_int_wrapper.get()->Get());
}
}
protected:
using GlobalIntWrapperPersistent = CrossThreadPersistent<IntWrapper>;
// Guards cross_persistents_, which is appended to from all worker threads.
Mutex mutex_;
Vector<std::unique_ptr<GlobalIntWrapperPersistent>> cross_persistents_;
std::unique_ptr<GlobalIntWrapperPersistent> CreateGlobalPersistent(
int value) {
return std::make_unique<GlobalIntWrapperPersistent>(
IntWrapper::Create(value));
}
void AddGlobalPersistent() {
MutexLocker lock(mutex_);
cross_persistents_.push_back(CreateGlobalPersistent(0x2a2a2a2a));
}
void RunThread() override {
// Add a cross-thread persistent from this thread; the test object
// verifies that it will have been cleared out after the threads
// have all detached, running their termination GCs while doing so.
AddGlobalPersistent();
int gc_count = 0;
while (!Done()) {
{
Persistent<IntWrapper> wrapper;
std::unique_ptr<GlobalIntWrapperPersistent> global_persistent =
CreateGlobalPersistent(0x0ed0cabb);
for (int i = 0; i < kNumberOfAllocations; i++) {
wrapper = IntWrapper::Create(0x0bbac0de);
if (!(i % 10)) {
global_persistent = CreateGlobalPersistent(0x0ed0cabb);
}
test::YieldCurrentThread();
}
if (gc_count < kGcPerThread) {
PreciselyCollectGarbage();
gc_count++;
gc_count_.fetch_add(1, std::memory_order_release);
}
// Taking snapshot shouldn't have any bad side effect.
// TODO(haraken): This snapshot GC causes crashes, so disable
// it at the moment. Fix the crash and enable it.
// ThreadHeap::collectGarbage(BlinkGC::NoHeapPointersOnStack,
// BlinkGC::TakeSnapshot, BlinkGC::ForcedGC);
PreciselyCollectGarbage();
// Both handles must have kept their objects alive across the GCs.
EXPECT_EQ(wrapper->Value(), 0x0bbac0de);
EXPECT_EQ((*global_persistent)->Value(), 0x0ed0cabb);
}
test::YieldCurrentThread();
}
}
};
// Stress test for weak maps across threads: each thread fills a heap hash
// map with weak values, GCs, and expects all unreferenced values to have
// been swept from the map.
class ThreadedWeaknessTester : public ThreadedTesterBase {
public:
static void Test() { ThreadedTesterBase::Test(new ThreadedWeaknessTester); }
private:
void RunThread() override {
int gc_count = 0;
while (!Done()) {
{
Persistent<HeapHashMap<ThreadMarker, WeakMember<IntWrapper>>> weak_map =
MakeGarbageCollected<
HeapHashMap<ThreadMarker, WeakMember<IntWrapper>>>();
for (int i = 0; i < kNumberOfAllocations; i++) {
weak_map->insert(static_cast<unsigned>(i), IntWrapper::Create(0));
test::YieldCurrentThread();
}
if (gc_count < kGcPerThread) {
PreciselyCollectGarbage();
gc_count++;
gc_count_.fetch_add(1, std::memory_order_release);
}
// Taking snapshot shouldn't have any bad side effect.
// TODO(haraken): This snapshot GC causes crashes, so disable
// it at the moment. Fix the crash and enable it.
// ThreadHeap::collectGarbage(BlinkGC::NoHeapPointersOnStack,
// BlinkGC::TakeSnapshot, BlinkGC::ForcedGC);
PreciselyCollectGarbage();
// Nothing held the IntWrappers strongly, so every weak entry must
// have been cleared and removed by the GCs above.
EXPECT_TRUE(weak_map->IsEmpty());
}
test::YieldCurrentThread();
}
}
};
// Builds an alternating chain of ref-counted and Persistent-holding GC
// objects on each thread, then relies on thread detach to drain the
// persistents one GC at a time.
class ThreadPersistentHeapTester : public ThreadedTesterBase {
public:
static void Test() {
ThreadedTesterBase::Test(new ThreadPersistentHeapTester);
}
protected:
class Local final : public GarbageCollected<Local> {
public:
Local() = default;
void Trace(blink::Visitor* visitor) {}
};
class PersistentChain;
// Ref-counted link that holds the next GC'd link via a Persistent<>.
class RefCountedChain : public RefCounted<RefCountedChain> {
public:
static RefCountedChain* Create(int count) {
return new RefCountedChain(count);
}
private:
explicit RefCountedChain(int count) {
if (count > 0) {
--count;
persistent_chain_ = PersistentChain::Create(count);
}
}
Persistent<PersistentChain> persistent_chain_;
};
// GC'd link that holds the next ref-counted link via a scoped_refptr<>.
class PersistentChain : public GarbageCollectedFinalized<PersistentChain> {
public:
static PersistentChain* Create(int count) {
return MakeGarbageCollected<PersistentChain>(count);
}
explicit PersistentChain(int count) {
ref_counted_chain_ = base::AdoptRef(RefCountedChain::Create(count));
}
void Trace(blink::Visitor* visitor) {}
private:
scoped_refptr<RefCountedChain> ref_counted_chain_;
};
void RunThread() override {
PersistentChain::Create(100);
// Upon thread detach, GCs will run until all persistents have been
// released. We verify that the draining of persistents proceeds
// as expected by dropping one Persistent<> per GC until there
// are none left.
}
};
// The accounting for memory includes the memory used by rounding up object
// sizes. This is done in a different way on 32 bit and 64 bit, so we have to
// have some slack in the tests.
//
// Asserts that |actual| lies within [expected, expected + slack].
template <typename T>
void CheckWithSlack(T expected, T actual, int slack) {
  EXPECT_LE(expected, actual);
  // static_cast instead of C-style casts; widening to intptr_t before adding
  // the slack keeps the comparison signed.
  EXPECT_GE(static_cast<intptr_t>(expected) + slack,
            static_cast<intptr_t>(actual));
}
// Garbage-collected object that simply counts how many times the GC visitor
// has traced it.
class TraceCounter : public GarbageCollectedFinalized<TraceCounter> {
 public:
  static TraceCounter* Create() { return MakeGarbageCollected<TraceCounter>(); }
  TraceCounter() = default;
  void Trace(blink::Visitor* visitor) { ++trace_count_; }
  int TraceCount() const { return trace_count_; }

 private:
  int trace_count_ = 0;
};
// Regression test: IsHeapObjectAlive must accept const-qualified pointers.
TEST(HeapTest, IsHeapObjectAliveForConstPointer) {
// See http://crbug.com/661363.
SimpleObject* object = SimpleObject::Create();
HeapObjectHeader* header = HeapObjectHeader::FromPayload(object);
// Mark the object directly so the liveness check below succeeds without
// running a GC.
header->Mark();
EXPECT_TRUE(ThreadHeap::IsHeapObjectAlive(object));
const SimpleObject* const_object = const_cast<const SimpleObject*>(object);
EXPECT_TRUE(ThreadHeap::IsHeapObjectAlive(const_object));
}
// GC'd object owning a TraceCounter through a Member<>; lets tests observe
// how often the member was traced.
class ClassWithMember : public GarbageCollected<ClassWithMember> {
public:
static ClassWithMember* Create() {
return MakeGarbageCollected<ClassWithMember>();
}
ClassWithMember() : trace_counter_(TraceCounter::Create()) {}
void Trace(blink::Visitor* visitor) {
visitor->Trace(trace_counter_);
}
int TraceCount() const { return trace_counter_->TraceCount(); }
private:
Member<TraceCounter> trace_counter_;
};
// Finalized GC object that counts destructor invocations.
class SimpleFinalizedObject
: public GarbageCollectedFinalized<SimpleFinalizedObject> {
public:
static SimpleFinalizedObject* Create() {
return MakeGarbageCollected<SimpleFinalizedObject>();
}
SimpleFinalizedObject() = default;
~SimpleFinalizedObject() { ++destructor_calls_; }
static int destructor_calls_;
void Trace(blink::Visitor* visitor) {}
};
int SimpleFinalizedObject::destructor_calls_ = 0;
class IntNode : public GarbageCollected<IntNode> {
public:
// IntNode is used to test typed heap allocation. Instead of
// redefining blink::Node to our test version, we keep it separate
// so as to avoid possible warnings about linker duplicates.
// Override operator new to allocate IntNode subtype objects onto
// the dedicated heap for blink::Node.
//
// TODO(haraken): untangling the heap unit tests from Blink would
// simplify and avoid running into this problem - http://crbug.com/425381
GC_PLUGIN_IGNORE("crbug.com/443854")
void* operator new(size_t size) {
// Allocate directly on the node arena instead of the default arena for
// this type.
ThreadState* state = ThreadState::Current();
const char* type_name = WTF_HEAP_PROFILER_TYPE_NAME(IntNode);
return state->Heap().AllocateOnArenaIndex(
state, size, BlinkGC::kNodeArenaIndex, GCInfoTrait<IntNode>::Index(),
type_name);
}
// Uses plain new (not MakeGarbageCollected) so the operator new above runs.
static IntNode* Create(int i) { return new IntNode(i); }
void Trace(blink::Visitor* visitor) {}
int Value() { return value_; }
private:
IntNode(int i) : value_(i) {}
int value_;
};
// Base test object tracking the number of live instances (live_) and using a
// magic value to detect double finalization.
class Bar : public GarbageCollectedFinalized<Bar> {
public:
static Bar* Create() { return MakeGarbageCollected<Bar>(); }
Bar() : magic_(kMagic) { live_++; }
void FinalizeGarbageCollectedObject() {
// EXPECT_TRUE (not EXPECT_EQ) avoids odr-using the in-class kMagic
// constant, which has no out-of-class definition.
EXPECT_TRUE(magic_ == kMagic);
magic_ = 0;
live_--;
}
bool HasBeenFinalized() const { return !magic_; }
virtual void Trace(blink::Visitor* visitor) {}
static unsigned live_;
protected:
static const int kMagic = 1337;
int magic_;
};
WILL_NOT_BE_EAGERLY_TRACED_CLASS(Bar);
unsigned Bar::live_ = 0;
// GC'd holder of a strong Member<Bar> that can be released explicitly; used
// together with a FinalizationObserver.
class Baz : public GarbageCollected<Baz> {
public:
static Baz* Create(Bar* bar) { return MakeGarbageCollected<Baz>(bar); }
explicit Baz(Bar* bar) : bar_(bar) {}
void Trace(blink::Visitor* visitor) { visitor->Trace(bar_); }
void Clear() { bar_.Release(); }
// willFinalize is called by FinalizationObserver.
void WillFinalize() { EXPECT_TRUE(!bar_->HasBeenFinalized()); }
private:
Member<Bar> bar_;
};
// Bar subclass holding a raw Bar* that is nevertheless kept alive via
// Trace(): the pointer is traced either as a Foo* or a Bar* depending on how
// it was constructed, exercising tracing through differently-typed edges.
class Foo : public Bar {
public:
static Foo* Create(Bar* bar) { return MakeGarbageCollected<Foo>(bar); }
static Foo* Create(Foo* foo) { return MakeGarbageCollected<Foo>(foo); }
Foo(Bar* bar) : Bar(), bar_(bar), points_to_foo_(false) {}
Foo(Foo* foo) : Bar(), bar_(foo), points_to_foo_(true) {}
void Trace(blink::Visitor* visitor) override {
// The static type used for the trace call depends on which constructor
// ran; the raw pointer is downcast back to Foo* when appropriate.
if (points_to_foo_)
visitor->Trace(static_cast<Foo*>(bar_));
else
visitor->Trace(bar_);
}
private:
Bar* bar_;
bool points_to_foo_;
};
WILL_NOT_BE_EAGERLY_TRACED_CLASS(Foo);
// Bar subclass owning a large fixed array of Members; exercises tracing of
// wide objects.
class Bars : public Bar {
public:
static Bars* Create() { return MakeGarbageCollected<Bars>(); }
Bars() : width_(0) {
for (unsigned i = 0; i < kWidth; i++) {
bars_[i] = Bar::Create();
// Tracks how many slots were populated; Trace only visits these.
width_++;
}
}
void Trace(blink::Visitor* visitor) override {
for (unsigned i = 0; i < width_; i++)
visitor->Trace(bars_[i]);
}
unsigned GetWidth() const { return width_; }
static const unsigned kWidth = 7500;
private:
unsigned width_;
Member<Bar> bars_[kWidth];
};
WILL_NOT_BE_EAGERLY_TRACED_CLASS(Bars);
// Allocates another GC object from within its own constructor, exercising
// allocation while a construction is in flight. The assignment (rather than
// init-list) form is kept deliberately: it goes through the Member write
// barrier.
class ConstructorAllocation : public GarbageCollected<ConstructorAllocation> {
public:
static ConstructorAllocation* Create() {
return MakeGarbageCollected<ConstructorAllocation>();
}
ConstructorAllocation() { int_wrapper_ = IntWrapper::Create(42); }
void Trace(blink::Visitor* visitor) { visitor->Trace(int_wrapper_); }
private:
Member<IntWrapper> int_wrapper_;
};
// 1MB-payload object: large enough to land on Oilpan's large-object arena.
// Counts destructor invocations.
class LargeHeapObject : public GarbageCollectedFinalized<LargeHeapObject> {
public:
LargeHeapObject() { int_wrapper_ = IntWrapper::Create(23); }
~LargeHeapObject() { destructor_calls_++; }
static LargeHeapObject* Create() {
return MakeGarbageCollected<LargeHeapObject>();
}
char Get(size_t i) { return data_[i]; }
void Set(size_t i, char c) { data_[i] = c; }
size_t length() { return kLength; }
void Trace(blink::Visitor* visitor) { visitor->Trace(int_wrapper_); }
static int destructor_calls_;
private:
static const size_t kLength = 1024 * 1024;
Member<IntWrapper> int_wrapper_;
char data_[kLength];
};
int LargeHeapObject::destructor_calls_ = 0;
// This test class served a more important role while Blink
// was transitioned over to using Oilpan. That required classes
// that were hybrid, both ref-counted and on the Oilpan heap
// (the RefCountedGarbageCollected<> class providing just that.)
//
// There's no current need for having a ref-counted veneer on
// top of a GCed class, but we preserve it here to exercise the
// implementation technique that it used -- keeping an internal
// "keep alive" persistent reference that is set & cleared across
// ref-counting operations.
//
class RefCountedAndGarbageCollected
: public GarbageCollectedFinalized<RefCountedAndGarbageCollected> {
public:
static RefCountedAndGarbageCollected* Create() {
return MakeGarbageCollected<RefCountedAndGarbageCollected>();
}
RefCountedAndGarbageCollected() : ref_count_(0) {}
~RefCountedAndGarbageCollected() { ++destructor_calls_; }
void AddRef() {
// First reference: pin the object via the self-keep-alive persistent so
// the GC cannot reclaim it while externally referenced.
if (UNLIKELY(!ref_count_)) {
#if DCHECK_IS_ON()
DCHECK(ThreadState::Current()->Heap().FindPageFromAddress(
reinterpret_cast<Address>(this)));
#endif
keep_alive_ = this;
}
++ref_count_;
}
void Release() {
DCHECK_GT(ref_count_, 0);
// Last reference dropped: unpin and let the next GC collect the object.
if (!--ref_count_)
keep_alive_.Clear();
}
void Trace(blink::Visitor* visitor) {}
static int destructor_calls_;
private:
int ref_count_;
SelfKeepAlive<RefCountedAndGarbageCollected> keep_alive_;
};
int RefCountedAndGarbageCollected::destructor_calls_ = 0;
// Same hybrid ref-counted/GC'd technique as RefCountedAndGarbageCollected,
// but with an extra non-GC base class so the GC'd part sits at a non-zero
// offset, and with Ref/Deref naming instead of AddRef/Release.
class RefCountedAndGarbageCollected2
: public HeapTestOtherSuperClass,
public GarbageCollectedFinalized<RefCountedAndGarbageCollected2> {
public:
static RefCountedAndGarbageCollected2* Create() {
return MakeGarbageCollected<RefCountedAndGarbageCollected2>();
}
RefCountedAndGarbageCollected2() : ref_count_(0) {}
~RefCountedAndGarbageCollected2() { ++destructor_calls_; }
void Ref() {
// First reference: pin via the self-keep-alive persistent.
if (UNLIKELY(!ref_count_)) {
#if DCHECK_IS_ON()
DCHECK(ThreadState::Current()->Heap().FindPageFromAddress(
reinterpret_cast<Address>(this)));
#endif
keep_alive_ = this;
}
++ref_count_;
}
void Deref() {
DCHECK_GT(ref_count_, 0);
// Last reference dropped: unpin for the next GC.
if (!--ref_count_)
keep_alive_.Clear();
}
void Trace(blink::Visitor* visitor) {}
static int destructor_calls_;
private:
int ref_count_;
SelfKeepAlive<RefCountedAndGarbageCollected2> keep_alive_;
};
int RefCountedAndGarbageCollected2::destructor_calls_ = 0;
// Bar subclass with one strong Member and one manually-managed weak
// reference, cleared via a registered weak-members callback rather than
// WeakMember<>.
class Weak : public Bar {
public:
static Weak* Create(Bar* strong, Bar* weak) {
return MakeGarbageCollected<Weak>(strong, weak);
}
Weak(Bar* strong_bar, Bar* weak_bar)
: Bar(), strong_bar_(strong_bar), weak_bar_(weak_bar) {}
void Trace(blink::Visitor* visitor) override {
visitor->Trace(strong_bar_);
// Register ZapWeakMembers to run at the end of marking; weak_bar_ is
// deliberately NOT traced here.
visitor->template RegisterWeakMembers<Weak, &Weak::ZapWeakMembers>(this);
}
// Invoked by the GC after marking: clears the weak pointer if its target
// did not survive.
void ZapWeakMembers(Visitor* visitor) {
if (!ThreadHeap::IsHeapObjectAlive(weak_bar_))
weak_bar_ = nullptr;
}
bool StrongIsThere() { return !!strong_bar_; }
bool WeakIsThere() { return !!weak_bar_; }
private:
Member<Bar> strong_bar_;
// Raw pointer by design: kept weak through the callback above.
Bar* weak_bar_;
};
WILL_NOT_BE_EAGERLY_TRACED_CLASS(Weak);
// Same strong/weak shape as Weak, but using the WeakMember<> abstraction so
// the GC clears the weak reference automatically.
class WithWeakMember : public Bar {
public:
static WithWeakMember* Create(Bar* strong, Bar* weak) {
return MakeGarbageCollected<WithWeakMember>(strong, weak);
}
WithWeakMember(Bar* strong_bar, Bar* weak_bar)
: Bar(), strong_bar_(strong_bar), weak_bar_(weak_bar) {}
void Trace(blink::Visitor* visitor) override {
visitor->Trace(strong_bar_);
visitor->Trace(weak_bar_);
}
bool StrongIsThere() { return !!strong_bar_; }
bool WeakIsThere() { return !!weak_bar_; }
private:
Member<Bar> strong_bar_;
WeakMember<Bar> weak_bar_;
};
WILL_NOT_BE_EAGERLY_TRACED_CLASS(WithWeakMember);
// Object with a pre-finalizer that runs before its destructor and may still
// touch other heap objects (bar_), verifying pre-finalizer ordering
// guarantees.
class Observable : public GarbageCollectedFinalized<Observable> {
USING_PRE_FINALIZER(Observable, WillFinalize);
public:
static Observable* Create(Bar* bar) {
return MakeGarbageCollected<Observable>(bar);
}
explicit Observable(Bar* bar) : bar_(bar), was_destructed_(false) {}
~Observable() { was_destructed_ = true; }
void Trace(blink::Visitor* visitor) { visitor->Trace(bar_); }
// willFinalize is called by FinalizationObserver. willFinalize can touch
// other on-heap objects.
void WillFinalize() {
// Pre-finalizers must run before the destructor, and referenced objects
// must not have been finalized yet.
EXPECT_FALSE(was_destructed_);
EXPECT_FALSE(bar_->HasBeenFinalized());
will_finalize_was_called_ = true;
}
static bool will_finalize_was_called_;
private:
Member<Bar> bar_;
bool was_destructed_;
};
bool Observable::will_finalize_was_called_ = false;
// Verifies that a pre-finalizer (Dispose) runs strictly before the
// destructor of its object.
class ObservableWithPreFinalizer
    : public GarbageCollectedFinalized<ObservableWithPreFinalizer> {
  USING_PRE_FINALIZER(ObservableWithPreFinalizer, Dispose);

 public:
  static ObservableWithPreFinalizer* Create() {
    return MakeGarbageCollected<ObservableWithPreFinalizer>();
  }

  ObservableWithPreFinalizer() : was_destructed_(false) {}
  ~ObservableWithPreFinalizer() { was_destructed_ = true; }

  void Trace(blink::Visitor* visitor) {}

  // Pre-finalizer; must observe the object not-yet-destructed.
  void Dispose() {
    EXPECT_FALSE(was_destructed_);
    dispose_was_called_ = true;
  }

  static bool dispose_was_called_;

 protected:
  bool was_destructed_;
};

bool ObservableWithPreFinalizer::dispose_was_called_ = false;
// Record which pre-finalizers of the PreFinalizer* hierarchy below have run;
// the Dispose() implementations assert the order: subclass, then mixin, then
// base.
bool g_dispose_was_called_for_pre_finalizer_base = false;
bool g_dispose_was_called_for_pre_finalizer_mixin = false;
bool g_dispose_was_called_for_pre_finalizer_sub_class = false;
// Base of a diamond-free hierarchy used to test pre-finalizer ordering.
// Its Dispose() must run last (after the subclass's and the mixin's).
class PreFinalizerBase : public GarbageCollectedFinalized<PreFinalizerBase> {
  USING_PRE_FINALIZER(PreFinalizerBase, Dispose);

 public:
  static PreFinalizerBase* Create() {
    return MakeGarbageCollected<PreFinalizerBase>();
  }

  PreFinalizerBase() : was_destructed_(false) {}
  virtual ~PreFinalizerBase() { was_destructed_ = true; }
  virtual void Trace(blink::Visitor* visitor) {}

  // Asserts that subclass and mixin pre-finalizers already ran, and that
  // the destructor has not.
  void Dispose() {
    EXPECT_FALSE(g_dispose_was_called_for_pre_finalizer_base);
    EXPECT_TRUE(g_dispose_was_called_for_pre_finalizer_sub_class);
    EXPECT_TRUE(g_dispose_was_called_for_pre_finalizer_mixin);
    EXPECT_FALSE(was_destructed_);
    g_dispose_was_called_for_pre_finalizer_base = true;
  }

 protected:
  bool was_destructed_;
};
// GC mixin with its own pre-finalizer; must run after the subclass's but
// before the base's.
class PreFinalizerMixin : public GarbageCollectedMixin {
  USING_PRE_FINALIZER(PreFinalizerMixin, Dispose);

 public:
  ~PreFinalizerMixin() { was_destructed_ = true; }
  void Trace(blink::Visitor* visitor) override {}

  void Dispose() {
    EXPECT_FALSE(g_dispose_was_called_for_pre_finalizer_base);
    EXPECT_TRUE(g_dispose_was_called_for_pre_finalizer_sub_class);
    EXPECT_FALSE(g_dispose_was_called_for_pre_finalizer_mixin);
    EXPECT_FALSE(was_destructed_);
    g_dispose_was_called_for_pre_finalizer_mixin = true;
  }

 protected:
  PreFinalizerMixin() : was_destructed_(false) {}
  bool was_destructed_;
};
// Most-derived class; its pre-finalizer must run first of the three.
// Note: each class level declares its own was_destructed_, intentionally
// shadowing the base's, so every Dispose() checks its own flag.
class PreFinalizerSubClass : public PreFinalizerBase, public PreFinalizerMixin {
  USING_GARBAGE_COLLECTED_MIXIN(PreFinalizerSubClass);
  USING_PRE_FINALIZER(PreFinalizerSubClass, Dispose);

 public:
  static PreFinalizerSubClass* Create() {
    return MakeGarbageCollected<PreFinalizerSubClass>();
  }

  PreFinalizerSubClass() : was_destructed_(false) {}
  ~PreFinalizerSubClass() override { was_destructed_ = true; }
  void Trace(blink::Visitor* visitor) override {}

  void Dispose() {
    EXPECT_FALSE(g_dispose_was_called_for_pre_finalizer_base);
    EXPECT_FALSE(g_dispose_was_called_for_pre_finalizer_sub_class);
    EXPECT_FALSE(g_dispose_was_called_for_pre_finalizer_mixin);
    EXPECT_FALSE(was_destructed_);
    g_dispose_was_called_for_pre_finalizer_sub_class = true;
  }

 protected:
  bool was_destructed_;
};
// Weakly observes a heap object of type T. When the observed object is found
// dead during weak processing, calls T::WillFinalize() on it (before it is
// swept) and records that the callback fired.
template <typename T>
class FinalizationObserver : public GarbageCollected<FinalizationObserver<T>> {
 public:
  static FinalizationObserver* Create(T* data) {
    return MakeGarbageCollected<FinalizationObserver>(data);
  }

  // explicit: single-argument constructors must not act as implicit
  // conversions (consistent with the other constructors in this file).
  explicit FinalizationObserver(T* data)
      : data_(data), did_call_will_finalize_(false) {}

  // True once the observed object died and WillFinalize() was invoked.
  bool DidCallWillFinalize() const { return did_call_will_finalize_; }

  void Trace(blink::Visitor* visitor) {
    // data_ is processed only by the custom weak callback below; it is not
    // traced strongly, so it does not keep the observed object alive.
    visitor->template RegisterWeakMembers<
        FinalizationObserver<T>, &FinalizationObserver<T>::ZapWeakMembers>(
        this);
  }

  // Weak callback: if data_ was not marked in this GC, notify it and drop
  // the reference.
  void ZapWeakMembers(Visitor* visitor) {
    if (data_ && !ThreadHeap::IsHeapObjectAlive(data_)) {
      data_->WillFinalize();
      data_ = nullptr;
      did_call_will_finalize_ = true;
    }
  }

 private:
  WeakMember<T> data_;
  bool did_call_will_finalize_;
};
// Variant of FinalizationObserver: observers live in a process-wide
// HeapHashMap keyed by WeakMember<Observable>. When the weak key dies, the
// map entry (and with it the unique_ptr-owned observer) is removed, and the
// observer's destructor notifies the target.
class FinalizationObserverWithHashMap {
 public:
  typedef HeapHashMap<WeakMember<Observable>,
                      std::unique_ptr<FinalizationObserverWithHashMap>>
      ObserverMap;

  explicit FinalizationObserverWithHashMap(Observable& target)
      : target_(target) {}

  // Runs when the weak map entry is removed; the target is dead but not yet
  // swept, so touching it here is legal.
  ~FinalizationObserverWithHashMap() {
    target_.WillFinalize();
    did_call_will_finalize_ = true;
  }

  // Idempotently registers an observer for |target| and returns the map.
  static ObserverMap& Observe(Observable& target) {
    ObserverMap& map = Observers();
    ObserverMap::AddResult result = map.insert(&target, nullptr);
    if (result.is_new_entry) {
      result.stored_value->value =
          std::make_unique<FinalizationObserverWithHashMap>(target);
    } else {
      DCHECK(result.stored_value->value);
    }
    return map;
  }

  // Tears down the static Persistent so tests leave no roots behind.
  static void ClearObservers() {
    delete observer_map_;
    observer_map_ = nullptr;
  }

  static bool did_call_will_finalize_;

 private:
  // Lazily creates the map; held via a heap-allocated Persistent so its
  // lifetime is controlled explicitly by ClearObservers().
  static ObserverMap& Observers() {
    if (!observer_map_) {
      observer_map_ =
          new Persistent<ObserverMap>(MakeGarbageCollected<ObserverMap>());
    }
    return **observer_map_;
  }

  Observable& target_;
  static Persistent<ObserverMap>* observer_map_;
};

bool FinalizationObserverWithHashMap::did_call_will_finalize_ = false;
Persistent<FinalizationObserverWithHashMap::ObserverMap>*
    FinalizationObserverWithHashMap::observer_map_;
class SuperClass;
// Holds a weak back-pointer to the SuperClass that owns it; used to verify
// that WeakMember is cleared when the owner dies.
class PointsBack : public GarbageCollectedFinalized<PointsBack> {
 public:
  static PointsBack* Create() { return MakeGarbageCollected<PointsBack>(); }

  PointsBack() : back_pointer_(nullptr) { ++alive_count_; }
  ~PointsBack() { --alive_count_; }

  void SetBackPointer(SuperClass* back_pointer) {
    back_pointer_ = back_pointer;
  }

  SuperClass* BackPointer() const { return back_pointer_; }

  void Trace(blink::Visitor* visitor) { visitor->Trace(back_pointer_); }

  // Live-instance counter maintained by ctor/dtor.
  static int alive_count_;

 private:
  WeakMember<SuperClass> back_pointer_;
};

int PointsBack::alive_count_ = 0;
// Base class in the SuperClass/SubClass hierarchy; wires itself into the
// PointsBack it is given so weak back-pointer clearing can be observed.
class SuperClass : public GarbageCollectedFinalized<SuperClass> {
 public:
  static SuperClass* Create(PointsBack* points_back) {
    return MakeGarbageCollected<SuperClass>(points_back);
  }

  explicit SuperClass(PointsBack* points_back) : points_back_(points_back) {
    points_back_->SetBackPointer(this);
    ++alive_count_;
  }
  virtual ~SuperClass() { --alive_count_; }

  // Triggers a conservative GC while |target| is reachable from the stack,
  // then checks it survived with its back-pointer intact.
  void DoStuff(SuperClass* target,
               PointsBack* points_back,
               int super_class_count) {
    ConservativelyCollectGarbage();
    EXPECT_EQ(points_back, target->GetPointsBack());
    EXPECT_EQ(super_class_count, SuperClass::alive_count_);
  }

  virtual void Trace(blink::Visitor* visitor) { visitor->Trace(points_back_); }

  PointsBack* GetPointsBack() const { return points_back_.Get(); }

  // Live-instance counter maintained by ctor/dtor.
  static int alive_count_;

 private:
  Member<PointsBack> points_back_;
};

int SuperClass::alive_count_ = 0;
// Trivial counted payload owned by SubClass.
class SubData : public GarbageCollectedFinalized<SubData> {
 public:
  SubData() { ++alive_count_; }
  ~SubData() { --alive_count_; }
  void Trace(blink::Visitor* visitor) {}
  static int alive_count_;
};

int SubData::alive_count_ = 0;
// Derived class with an extra traced member; verifies that derived tracing
// chains to the base class.
class SubClass : public SuperClass {
 public:
  static SubClass* Create(PointsBack* points_back) {
    return MakeGarbageCollected<SubClass>(points_back);
  }

  explicit SubClass(PointsBack* points_back)
      : SuperClass(points_back), data_(MakeGarbageCollected<SubData>()) {
    ++alive_count_;
  }
  ~SubClass() override { --alive_count_; }

  void Trace(blink::Visitor* visitor) override {
    visitor->Trace(data_);
    // Chain to the base so points_back_ is traced too.
    SuperClass::Trace(visitor);
  }

  static int alive_count_;

 private:
  Member<SubData> data_;
};

int SubClass::alive_count_ = 0;
// Minimal GC mixin. padding_ only exists to give the mixin a nonzero size
// (so mixin-pointer adjustment is exercised); GetPayload reads it without
// initialization — NOTE(review): presumably only used for address/size
// checks, not value checks; confirm callers never rely on the contents.
class Mixin : public GarbageCollectedMixin {
 public:
  void Trace(blink::Visitor* visitor) override {}

  virtual char GetPayload(int i) { return padding_[i]; }

 protected:
  int padding_[8];
};
// Concrete class combining a GarbageCollected base (SimpleObject) with a
// GC mixin; counts how many times it is traced.
class UseMixin : public SimpleObject, public Mixin {
  USING_GARBAGE_COLLECTED_MIXIN(UseMixin)
 public:
  static UseMixin* Create() { return MakeGarbageCollected<UseMixin>(); }

  UseMixin() {
    // Verify that WTF::IsGarbageCollectedType<> works as expected for mixins.
    static_assert(WTF::IsGarbageCollectedType<UseMixin>::value,
                  "IsGarbageCollectedType<> sanity check failed for GC mixin.");
    trace_count_ = 0;
  }

  static int trace_count_;

  void Trace(blink::Visitor* visitor) override {
    SimpleObject::Trace(visitor);
    Mixin::Trace(visitor);
    ++trace_count_;
  }
};

int UseMixin::trace_count_ = 0;
// Inline (DISALLOW_NEW) element type with a Trace method, for HeapVector
// tests.
class VectorObject {
  DISALLOW_NEW();

 public:
  VectorObject() { value_ = SimpleFinalizedObject::Create(); }
  void Trace(blink::Visitor* visitor) { visitor->Trace(value_); }

 private:
  Member<SimpleFinalizedObject> value_;
};
// Element type whose Trace comes from the base class only.
class VectorObjectInheritedTrace : public VectorObject {};
// Element type holding a Member but deliberately lacking a Trace method,
// for tests of untraced vector parts.
class VectorObjectNoTrace {
  DISALLOW_NEW();

 public:
  VectorObjectNoTrace() { value_ = SimpleFinalizedObject::Create(); }

 private:
  Member<SimpleFinalizedObject> value_;
};
// Element type for TerminatedArray tests: wraps an IntWrapper payload plus
// the is-last flag the terminated-array protocol requires.
// NOTE(review): the converting constructor is intentionally non-explicit —
// TerminatedArray construction may rely on implicit conversion; confirm
// before tightening.
class TerminatedArrayItem {
  DISALLOW_NEW();

 public:
  TerminatedArrayItem(IntWrapper* payload)
      : payload_(payload), is_last_(false) {}

  void Trace(blink::Visitor* visitor) { visitor->Trace(payload_); }

  bool IsLastInArray() const { return is_last_; }
  void SetLastInArray(bool value) { is_last_ = value; }
  IntWrapper* Payload() const { return payload_; }

 private:
  Member<IntWrapper> payload_;
  bool is_last_;
};
} // namespace blink
WTF_ALLOW_MOVE_INIT_AND_COMPARE_WITH_MEM_FUNCTIONS(blink::TerminatedArrayItem);
WTF_ALLOW_MOVE_INIT_AND_COMPARE_WITH_MEM_FUNCTIONS(blink::VectorObject);
WTF_ALLOW_MOVE_INIT_AND_COMPARE_WITH_MEM_FUNCTIONS(
blink::VectorObjectInheritedTrace);
WTF_ALLOW_MOVE_INIT_AND_COMPARE_WITH_MEM_FUNCTIONS(blink::VectorObjectNoTrace);
namespace blink {
// Fixed-size (1 KiB payload) finalized object used to force measurable heap
// growth; counts destructor invocations.
class OneKiloByteObject : public GarbageCollectedFinalized<OneKiloByteObject> {
 public:
  ~OneKiloByteObject() { destructor_calls_++; }
  char* Data() { return data_; }
  void Trace(blink::Visitor* visitor) {}
  static int destructor_calls_;

 private:
  static const size_t kLength = 1024;
  char data_[kLength];
};

int OneKiloByteObject::destructor_calls_ = 0;
// GC object whose allocation size is chosen at runtime: raw heap storage is
// obtained from ThreadHeap::Allocate and the object placement-new'ed into it.
class DynamicallySizedObject : public GarbageCollected<DynamicallySizedObject> {
 public:
  static DynamicallySizedObject* Create(size_t size) {
    void* slot = ThreadHeap::Allocate<DynamicallySizedObject>(size);
    return new (slot) DynamicallySizedObject();
  }

  // Placement new only; regular new is not available for GC'd types.
  void* operator new(std::size_t, void* location) { return location; }

  // Reads the i-th byte of the object's own storage.
  uint8_t Get(int i) { return *(reinterpret_cast<uint8_t*>(this) + i); }

  void Trace(blink::Visitor* visitor) {}

 private:
  DynamicallySizedObject() = default;
};
// Object whose *destructor* performs many heap allocations, to verify that
// allocation during sweeping/finalization is permitted.
class FinalizationAllocator
    : public GarbageCollectedFinalized<FinalizationAllocator> {
 public:
  FinalizationAllocator(Persistent<IntWrapper>* wrapper) : wrapper_(wrapper) {}

  ~FinalizationAllocator() {
    for (int i = 0; i < 10; ++i)
      *wrapper_ = IntWrapper::Create(42);
    for (int i = 0; i < 512; ++i)
      MakeGarbageCollected<OneKiloByteObject>();
    for (int i = 0; i < 32; ++i)
      LargeHeapObject::Create();
  }

  void Trace(blink::Visitor* visitor) {}

 private:
  // Off-heap persistent owned by the test; written to from the destructor.
  Persistent<IntWrapper>* wrapper_;
};
// Same as FinalizationAllocator, but allocates from the *pre-finalizer*
// instead of the destructor.
class PreFinalizationAllocator
    : public GarbageCollectedFinalized<PreFinalizationAllocator> {
  USING_PRE_FINALIZER(PreFinalizationAllocator, Dispose);

 public:
  PreFinalizationAllocator(Persistent<IntWrapper>* wrapper)
      : wrapper_(wrapper) {}

  void Dispose() {
    for (int i = 0; i < 10; ++i)
      *wrapper_ = IntWrapper::Create(42);
    for (int i = 0; i < 512; ++i)
      MakeGarbageCollected<OneKiloByteObject>();
    for (int i = 0; i < 32; ++i)
      LargeHeapObject::Create();
  }

  void Trace(blink::Visitor* visitor) {}

 private:
  Persistent<IntWrapper>* wrapper_;
};
// Verifies that heap-collection backings may not *shrink* while a
// pre-finalizer runs (shrinking could free a backing the sweeper still
// knows about), although fully releasing a backing via clear() is allowed.
class PreFinalizerBackingShrinkForbidden
    : public GarbageCollectedFinalized<PreFinalizerBackingShrinkForbidden> {
  USING_PRE_FINALIZER(PreFinalizerBackingShrinkForbidden, Dispose);

 public:
  PreFinalizerBackingShrinkForbidden() {
    for (int i = 0; i < 32; ++i) {
      vector_.push_back(MakeGarbageCollected<IntWrapper>(i));
    }
    EXPECT_LT(31ul, vector_.capacity());

    for (int i = 0; i < 32; ++i) {
      map_.insert(i + 1, MakeGarbageCollected<IntWrapper>(i + 1));
    }
    EXPECT_LT(31ul, map_.Capacity());
  }

  void Dispose() {
    // Remove all elements except one so that vector_ will try to shrink.
    for (int i = 1; i < 32; ++i) {
      vector_.pop_back();
    }
    // Check that vector_ hasn't shrunk.
    EXPECT_LT(31ul, vector_.capacity());
    // Just releasing the backing is allowed.
    vector_.clear();
    EXPECT_EQ(0ul, vector_.capacity());

    // Remove elements so that map_ will try to shrink.
    for (int i = 0; i < 32; ++i) {
      map_.erase(i + 1);
    }
    // Check that map_ hasn't shrunk.
    EXPECT_LT(31ul, map_.Capacity());
    // Just releasing the backing is allowed.
    map_.clear();
    EXPECT_EQ(0ul, map_.Capacity());
  }

  void Trace(blink::Visitor* visitor) {
    visitor->Trace(vector_);
    visitor->Trace(map_);
  }

 private:
  HeapVector<Member<IntWrapper>> vector_;
  HeapHashMap<int, Member<IntWrapper>> map_;
};
// Drives the class above: the object is immediately garbage, so the precise
// GC runs its pre-finalizer (all assertions live in Dispose()).
TEST(HeapTest, PreFinalizerBackingShrinkForbidden) {
  MakeGarbageCollected<PreFinalizerBackingShrinkForbidden>();
  PreciselyCollectGarbage();
}
// Verifies that *expanding* a HeapVector backing inside a pre-finalizer is
// fatal (allocation of GC backings is forbidden during sweeping).
class PreFinalizerVectorBackingExpandForbidden
    : public GarbageCollectedFinalized<
          PreFinalizerVectorBackingExpandForbidden> {
  USING_PRE_FINALIZER(PreFinalizerVectorBackingExpandForbidden, Dispose);

 public:
  PreFinalizerVectorBackingExpandForbidden() {
    vector_.push_back(MakeGarbageCollected<IntWrapper>(1));
  }

  // Expansion must crash; EXPECT_DEATH runs Test() in a child process.
  void Dispose() { EXPECT_DEATH(Test(), ""); }

  void Test() {
    // vector_'s backing will need to expand.
    for (int i = 0; i < 32; ++i) {
      vector_.push_back(nullptr);
    }
  }

  void Trace(blink::Visitor* visitor) { visitor->Trace(vector_); }

 private:
  HeapVector<Member<IntWrapper>> vector_;
};
// Drives the class above; the death check itself is inside Dispose().
TEST(HeapDeathTest, PreFinalizerVectorBackingExpandForbidden) {
  MakeGarbageCollected<PreFinalizerVectorBackingExpandForbidden>();
  PreciselyCollectGarbage();
}
// HeapHashMap analogue of the test above: expanding a hash-table backing
// inside a pre-finalizer must be fatal.
class PreFinalizerHashTableBackingExpandForbidden
    : public GarbageCollectedFinalized<
          PreFinalizerHashTableBackingExpandForbidden> {
  USING_PRE_FINALIZER(PreFinalizerHashTableBackingExpandForbidden, Dispose);

 public:
  PreFinalizerHashTableBackingExpandForbidden() {
    map_.insert(123, MakeGarbageCollected<IntWrapper>(123));
  }

  void Dispose() { EXPECT_DEATH(Test(), ""); }

  void Test() {
    // map_'s backing will need to expand.
    for (int i = 1; i < 32; ++i) {
      map_.insert(i, nullptr);
    }
  }

  void Trace(blink::Visitor* visitor) { visitor->Trace(map_); }

 private:
  HeapHashMap<int, Member<IntWrapper>> map_;
};
// Drives the class above; the death check itself is inside Dispose().
TEST(HeapDeathTest, PreFinalizerHashTableBackingExpandForbidden) {
  MakeGarbageCollected<PreFinalizerHashTableBackingExpandForbidden>();
  PreciselyCollectGarbage();
}
// A mixin-bearing object big enough (64 KiB payload) to require large-object
// allocation, which is not supported for mixins.
class LargeMixin : public GarbageCollected<LargeMixin>, public Mixin {
  USING_GARBAGE_COLLECTED_MIXIN(LargeMixin);

 private:
  char data[65536];
};
// Allocating a large (>=64 KiB) object with a GC mixin must crash.
TEST(HeapDeathTest, LargeGarbageCollectedMixin) {
  EXPECT_DEATH(MakeGarbageCollected<LargeMixin>(), "");
}
// End-to-end lifetime test over the PointsBack/SuperClass/SubClass
// hierarchy: objects stay alive while rooted by Persistents, weak
// back-pointers are cleared when owners die, and alive-counts drop to zero
// once all roots are released. The GC/EXPECT ordering is load-bearing.
TEST(HeapTest, Transition) {
  {
    RefCountedAndGarbageCollected::destructor_calls_ = 0;
    Persistent<RefCountedAndGarbageCollected> ref_counted =
        RefCountedAndGarbageCollected::Create();
    PreciselyCollectGarbage();
    EXPECT_EQ(0, RefCountedAndGarbageCollected::destructor_calls_);
  }
  // Root gone; the next GC may finalize the object.
  PreciselyCollectGarbage();
  EXPECT_EQ(1, RefCountedAndGarbageCollected::destructor_calls_);
  RefCountedAndGarbageCollected::destructor_calls_ = 0;

  Persistent<PointsBack> points_back1 = PointsBack::Create();
  Persistent<PointsBack> points_back2 = PointsBack::Create();
  Persistent<SuperClass> super_class = SuperClass::Create(points_back1);
  Persistent<SubClass> sub_class = SubClass::Create(points_back2);
  EXPECT_EQ(2, PointsBack::alive_count_);
  EXPECT_EQ(2, SuperClass::alive_count_);
  EXPECT_EQ(1, SubClass::alive_count_);
  EXPECT_EQ(1, SubData::alive_count_);

  PreciselyCollectGarbage();
  EXPECT_EQ(0, RefCountedAndGarbageCollected::destructor_calls_);
  EXPECT_EQ(2, PointsBack::alive_count_);
  EXPECT_EQ(2, SuperClass::alive_count_);
  EXPECT_EQ(1, SubClass::alive_count_);
  EXPECT_EQ(1, SubData::alive_count_);

  // Release() hands the raw pointer to DoStuff, which GCs conservatively
  // while the object is only stack-reachable.
  super_class->DoStuff(super_class.Release(), points_back1.Get(), 2);
  PreciselyCollectGarbage();
  EXPECT_EQ(2, PointsBack::alive_count_);
  EXPECT_EQ(1, SuperClass::alive_count_);
  EXPECT_EQ(1, SubClass::alive_count_);
  EXPECT_EQ(1, SubData::alive_count_);
  // The owner died, so the weak back-pointer must be cleared.
  EXPECT_EQ(nullptr, points_back1->BackPointer());

  points_back1.Release();
  PreciselyCollectGarbage();
  EXPECT_EQ(1, PointsBack::alive_count_);
  EXPECT_EQ(1, SuperClass::alive_count_);
  EXPECT_EQ(1, SubClass::alive_count_);
  EXPECT_EQ(1, SubData::alive_count_);

  sub_class->DoStuff(sub_class.Release(), points_back2.Get(), 1);
  PreciselyCollectGarbage();
  EXPECT_EQ(1, PointsBack::alive_count_);
  EXPECT_EQ(0, SuperClass::alive_count_);
  EXPECT_EQ(0, SubClass::alive_count_);
  EXPECT_EQ(0, SubData::alive_count_);
  EXPECT_EQ(nullptr, points_back2->BackPointer());

  points_back2.Release();
  PreciselyCollectGarbage();
  EXPECT_EQ(0, PointsBack::alive_count_);
  EXPECT_EQ(0, SuperClass::alive_count_);
  EXPECT_EQ(0, SubClass::alive_count_);
  EXPECT_EQ(0, SubData::alive_count_);

  // Both persistents were released above, so both are now null.
  EXPECT_TRUE(super_class == sub_class);
}
// Multi-threaded heap scenario; logic lives in ThreadedHeapTester
// (defined elsewhere in this file).
TEST(HeapTest, Threading) {
  ThreadedHeapTester::Test();
}
// Cross-thread weak-reference scenario; logic lives in
// ThreadedWeaknessTester (defined elsewhere in this file).
TEST(HeapTest, ThreadedWeakness) {
  ThreadedWeaknessTester::Test();
}
// Per-thread Persistent handling; logic lives in ThreadPersistentHeapTester
// (defined elsewhere in this file).
TEST(HeapTest, ThreadPersistent) {
  ThreadPersistentHeapTester::Test();
}
// Exercises raw allocation, payload-size accounting (with per-allocation
// header "slack"), conservative GC survival, persistent-rooted objects, and
// ThreadHeap::Reallocate semantics. The running total/slack bookkeeping is
// order-critical; do not reorder statements.
TEST(HeapTest, BasicFunctionality) {
  ThreadHeap& heap = ThreadState::Current()->Heap();
  ClearOutOldGarbage();
  size_t initial_object_payload_size = heap.ObjectPayloadSizeForTesting();
  {
    wtf_size_t slack = 0;

    // When the test starts there may already have been leaked some memory
    // on the heap, so we establish a base line.
    size_t base_level = initial_object_payload_size;
    bool test_pages_allocated = !base_level;
    if (test_pages_allocated)
      EXPECT_EQ(0ul, heap.stats_collector()->allocated_space_bytes());

    // This allocates objects on the general heap which should add a page of
    // memory.
    DynamicallySizedObject* alloc32 = DynamicallySizedObject::Create(32);
    slack += 4;
    memset(alloc32, 40, 32);
    DynamicallySizedObject* alloc64 = DynamicallySizedObject::Create(64);
    slack += 4;
    memset(alloc64, 27, 64);

    size_t total = 96;

    CheckWithSlack(base_level + total, heap.ObjectPayloadSizeForTesting(),
                   slack);
    if (test_pages_allocated) {
      EXPECT_EQ(kBlinkPageSize * 2,
                heap.stats_collector()->allocated_space_bytes());
    }

    EXPECT_EQ(alloc32->Get(0), 40);
    EXPECT_EQ(alloc32->Get(31), 40);
    EXPECT_EQ(alloc64->Get(0), 27);
    EXPECT_EQ(alloc64->Get(63), 27);

    // alloc32/alloc64 are stack pointers, so a conservative GC must keep
    // both objects (and their byte patterns) alive.
    ConservativelyCollectGarbage();

    EXPECT_EQ(alloc32->Get(0), 40);
    EXPECT_EQ(alloc32->Get(31), 40);
    EXPECT_EQ(alloc64->Get(0), 27);
    EXPECT_EQ(alloc64->Get(63), 27);
  }

  ClearOutOldGarbage();
  size_t total = 0;
  wtf_size_t slack = 0;
  size_t base_level = heap.ObjectPayloadSizeForTesting();
  bool test_pages_allocated = !base_level;
  if (test_pages_allocated)
    EXPECT_EQ(0ul, heap.stats_collector()->allocated_space_bytes());

  size_t big = 1008;
  Persistent<DynamicallySizedObject> big_area =
      DynamicallySizedObject::Create(big);
  total += big;
  slack += 4;

  size_t persistent_count = 0;
  const size_t kNumPersistents = 100000;
  Persistent<DynamicallySizedObject>* persistents[kNumPersistents];

  // Root 1000 objects of growing size via heap-allocated Persistents.
  for (int i = 0; i < 1000; i++) {
    size_t size = 128 + i * 8;
    total += size;
    persistents[persistent_count++] = new Persistent<DynamicallySizedObject>(
        DynamicallySizedObject::Create(size));
    slack += 4;
    CheckWithSlack(base_level + total, heap.ObjectPayloadSizeForTesting(),
                   slack);
    if (test_pages_allocated) {
      EXPECT_EQ(0ul, heap.stats_collector()->allocated_space_bytes() &
                         (kBlinkPageSize - 1));
    }
  }

  {
    DynamicallySizedObject* alloc32b(DynamicallySizedObject::Create(32));
    slack += 4;
    memset(alloc32b, 40, 32);
    DynamicallySizedObject* alloc64b(DynamicallySizedObject::Create(64));
    slack += 4;
    memset(alloc64b, 27, 64);
    EXPECT_TRUE(alloc32b != alloc64b);

    total += 96;
    CheckWithSlack(base_level + total, heap.ObjectPayloadSizeForTesting(),
                   slack);
    if (test_pages_allocated) {
      EXPECT_EQ(0ul, heap.stats_collector()->allocated_space_bytes() &
                         (kBlinkPageSize - 1));
    }
  }

  // The two unrooted objects above are reclaimed here.
  ClearOutOldGarbage();
  total -= 96;
  slack -= 8;
  if (test_pages_allocated) {
    EXPECT_EQ(0ul, heap.stats_collector()->allocated_space_bytes() &
                       (kBlinkPageSize - 1));
  }

  // Clear the persistent, so that the big area will be garbage collected.
  big_area.Release();
  ClearOutOldGarbage();

  total -= big;
  slack -= 4;
  CheckWithSlack(base_level + total, heap.ObjectPayloadSizeForTesting(), slack);
  if (test_pages_allocated) {
    EXPECT_EQ(0ul, heap.stats_collector()->allocated_space_bytes() &
                       (kBlinkPageSize - 1));
  }

  CheckWithSlack(base_level + total, heap.ObjectPayloadSizeForTesting(), slack);
  if (test_pages_allocated) {
    EXPECT_EQ(0ul, heap.stats_collector()->allocated_space_bytes() &
                       (kBlinkPageSize - 1));
  }

  for (size_t i = 0; i < persistent_count; i++) {
    delete persistents[i];
    persistents[i] = nullptr;
  }

  // Reallocate must preserve existing content when growing...
  uint8_t* address = reinterpret_cast<uint8_t*>(
      ThreadHeap::Allocate<DynamicallySizedObject>(100));
  for (int i = 0; i < 100; i++)
    address[i] = i;
  address = reinterpret_cast<uint8_t*>(
      ThreadHeap::Reallocate<DynamicallySizedObject>(address, 100000));
  for (int i = 0; i < 100; i++)
    EXPECT_EQ(address[i], i);
  // ...and when shrinking (up to the new size).
  address = reinterpret_cast<uint8_t*>(
      ThreadHeap::Reallocate<DynamicallySizedObject>(address, 50));
  for (int i = 0; i < 50; i++)
    EXPECT_EQ(address[i], i);
  // This should be equivalent to free(address).
  EXPECT_EQ(reinterpret_cast<uintptr_t>(
                ThreadHeap::Reallocate<DynamicallySizedObject>(address, 0)),
            0ul);
  // This should be equivalent to malloc(0).
  EXPECT_EQ(reinterpret_cast<uintptr_t>(
                ThreadHeap::Reallocate<DynamicallySizedObject>(nullptr, 0)),
            0ul);
}
// A single allocation must grow the heap payload by at least the object's
// size, and the object's contents must be readable afterwards.
TEST(HeapTest, SimpleAllocation) {
  ThreadHeap& heap = ThreadState::Current()->Heap();
  ClearOutOldGarbage();
  EXPECT_EQ(0ul, heap.ObjectPayloadSizeForTesting());

  // Allocate an object in the heap.
  HeapAllocatedArray* array = MakeGarbageCollected<HeapAllocatedArray>();
  // EXPECT_GE prints both operands on failure, unlike EXPECT_TRUE on the
  // comparison result.
  EXPECT_GE(heap.ObjectPayloadSizeForTesting(), sizeof(HeapAllocatedArray));

  // Sanity check of the contents in the heap.
  EXPECT_EQ(0, array->at(0));
  EXPECT_EQ(42, array->at(42));
  EXPECT_EQ(0, array->at(128));
  EXPECT_EQ(999 % 128, array->at(999));
}
// A Persistent<> root must keep its target alive and cause it to be traced
// on every garbage collection.
TEST(HeapTest, SimplePersistent) {
  Persistent<TraceCounter> counter = TraceCounter::Create();
  EXPECT_EQ(0, counter->TraceCount());

  PreciselyCollectGarbage();
  const int count_after_first_gc = counter->TraceCount();
  EXPECT_LT(0, count_after_first_gc);

  Persistent<ClassWithMember> holder = ClassWithMember::Create();
  EXPECT_EQ(0, holder->TraceCount());

  PreciselyCollectGarbage();
  EXPECT_LT(0, holder->TraceCount());
  // The first object was traced again during the second collection.
  EXPECT_LT(count_after_first_gc, counter->TraceCount());
}
// An object is not finalized while rooted by a Persistent; once the
// Persistent goes out of scope, the next GC runs its destructor exactly once.
TEST(HeapTest, SimpleFinalization) {
  ClearOutOldGarbage();
  {
    SimpleFinalizedObject::destructor_calls_ = 0;
    Persistent<SimpleFinalizedObject> finalized =
        SimpleFinalizedObject::Create();
    EXPECT_EQ(0, SimpleFinalizedObject::destructor_calls_);
    PreciselyCollectGarbage();
    EXPECT_EQ(0, SimpleFinalizedObject::destructor_calls_);
  }

  PreciselyCollectGarbage();
  EXPECT_EQ(1, SimpleFinalizedObject::destructor_calls_);
}
#if DCHECK_IS_ON() || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
// In diagnostic builds, freed memory is quarantined for one GC cycle before
// it can be handed out again; after two GCs the freed slot must be reusable.
TEST(HeapTest, FreelistReuse) {
  ClearOutOldGarbage();

  for (int i = 0; i < 100; i++)
    MakeGarbageCollected<IntWrapper>(i);
  IntWrapper* p1 = MakeGarbageCollected<IntWrapper>(100);
  PreciselyCollectGarbage();
  // In non-production builds, we delay reusing freed memory for at least
  // one GC cycle.
  for (int i = 0; i < 100; i++) {
    IntWrapper* p2 = MakeGarbageCollected<IntWrapper>(i);
    EXPECT_NE(p1, p2);
  }

  PreciselyCollectGarbage();
  PreciselyCollectGarbage();
  // Now the freed memory in the first GC should be reused.
  bool reused_memory_found = false;
  for (int i = 0; i < 10000; i++) {
    IntWrapper* p2 = MakeGarbageCollected<IntWrapper>(i);
    if (p1 == p2) {
      reused_memory_found = true;
      break;
    }
  }
  EXPECT_TRUE(reused_memory_found);
}
#endif
// With lazy sweeping, dead objects are finalized incrementally as their
// pages are swept on demand by subsequent allocations, not at GC time.
TEST(HeapTest, LazySweepingPages) {
  ClearOutOldGarbage();

  SimpleFinalizedObject::destructor_calls_ = 0;
  EXPECT_EQ(0, SimpleFinalizedObject::destructor_calls_);
  for (int i = 0; i < 1000; i++)
    SimpleFinalizedObject::Create();
  ThreadState::Current()->CollectGarbage(
      BlinkGC::kNoHeapPointersOnStack, BlinkGC::kAtomicMarking,
      BlinkGC::kLazySweeping, BlinkGC::GCReason::kForcedGC);
  // Nothing swept yet, so no destructors have run.
  EXPECT_EQ(0, SimpleFinalizedObject::destructor_calls_);
  // Allocating sweeps the pages containing the first 1000 dead objects.
  for (int i = 0; i < 10000; i++)
    SimpleFinalizedObject::Create();
  EXPECT_EQ(1000, SimpleFinalizedObject::destructor_calls_);
  // A precise GC completes sweeping, finalizing the remaining 10000.
  PreciselyCollectGarbage();
  EXPECT_EQ(11000, SimpleFinalizedObject::destructor_calls_);
}
// Same as LazySweepingPages, but for large-object pages: each large
// allocation lazily sweeps (at most) one dead large-object page.
TEST(HeapTest, LazySweepingLargeObjectPages) {
  ClearOutOldGarbage();

  // Create free lists that can be reused for IntWrappers created in
  // LargeHeapObject::create().
  Persistent<IntWrapper> p1 = MakeGarbageCollected<IntWrapper>(1);
  for (int i = 0; i < 100; i++) {
    MakeGarbageCollected<IntWrapper>(i);
  }
  Persistent<IntWrapper> p2 = MakeGarbageCollected<IntWrapper>(2);
  PreciselyCollectGarbage();
  PreciselyCollectGarbage();

  LargeHeapObject::destructor_calls_ = 0;
  EXPECT_EQ(0, LargeHeapObject::destructor_calls_);
  for (int i = 0; i < 10; i++)
    LargeHeapObject::Create();
  ThreadState::Current()->CollectGarbage(
      BlinkGC::kNoHeapPointersOnStack, BlinkGC::kAtomicMarking,
      BlinkGC::kLazySweeping, BlinkGC::GCReason::kForcedGC);
  EXPECT_EQ(0, LargeHeapObject::destructor_calls_);
  // Each new large allocation sweeps exactly one dead large object.
  for (int i = 0; i < 10; i++) {
    LargeHeapObject::Create();
    EXPECT_EQ(i + 1, LargeHeapObject::destructor_calls_);
  }
  // All dead pages already swept; further allocation finalizes nothing.
  LargeHeapObject::Create();
  LargeHeapObject::Create();
  EXPECT_EQ(10, LargeHeapObject::destructor_calls_);
  ThreadState::Current()->CollectGarbage(
      BlinkGC::kNoHeapPointersOnStack, BlinkGC::kAtomicMarking,
      BlinkGC::kLazySweeping, BlinkGC::GCReason::kForcedGC);
  EXPECT_EQ(10, LargeHeapObject::destructor_calls_);
  PreciselyCollectGarbage();
  EXPECT_EQ(22, LargeHeapObject::destructor_calls_);
}
// Base class marked EAGERLY_FINALIZE(): instances are finalized during the
// GC itself rather than by lazy sweeping.
class SimpleFinalizedEagerObjectBase
    : public GarbageCollectedFinalized<SimpleFinalizedEagerObjectBase> {
 public:
  virtual ~SimpleFinalizedEagerObjectBase() = default;
  void Trace(blink::Visitor* visitor) {}

  EAGERLY_FINALIZE();

 protected:
  SimpleFinalizedEagerObjectBase() = default;
};
// Concrete eagerly-finalized object; counts destructor invocations.
class SimpleFinalizedEagerObject : public SimpleFinalizedEagerObjectBase {
 public:
  static SimpleFinalizedEagerObject* Create() {
    return MakeGarbageCollected<SimpleFinalizedEagerObject>();
  }

  SimpleFinalizedEagerObject() = default;
  ~SimpleFinalizedEagerObject() override { ++destructor_calls_; }

  static int destructor_calls_;
};
// Empty template carrying EAGERLY_FINALIZE(); checks that the eager-finalize
// marker is honored when inherited from a template instantiation.
template <typename T>
class ParameterizedButEmpty {
 public:
  EAGERLY_FINALIZE();
};
// Eagerly finalized via the inherited template marker above; counts
// destructor invocations.
class SimpleFinalizedObjectInstanceOfTemplate final
    : public GarbageCollectedFinalized<SimpleFinalizedObjectInstanceOfTemplate>,
      public ParameterizedButEmpty<SimpleFinalizedObjectInstanceOfTemplate> {
 public:
  static SimpleFinalizedObjectInstanceOfTemplate* Create() {
    return MakeGarbageCollected<SimpleFinalizedObjectInstanceOfTemplate>();
  }
  SimpleFinalizedObjectInstanceOfTemplate() = default;
  ~SimpleFinalizedObjectInstanceOfTemplate() { ++destructor_calls_; }

  void Trace(blink::Visitor* visitor) {}

  static int destructor_calls_;
};

int SimpleFinalizedEagerObject::destructor_calls_ = 0;
int SimpleFinalizedObjectInstanceOfTemplate::destructor_calls_ = 0;
// Under lazy sweeping, only objects marked EAGERLY_FINALIZE() (directly or
// via an inherited marker) are finalized at GC time; ordinary finalized
// objects wait for their pages to be swept.
TEST(HeapTest, EagerlySweepingPages) {
  ClearOutOldGarbage();

  SimpleFinalizedObject::destructor_calls_ = 0;
  SimpleFinalizedEagerObject::destructor_calls_ = 0;
  SimpleFinalizedObjectInstanceOfTemplate::destructor_calls_ = 0;
  EXPECT_EQ(0, SimpleFinalizedObject::destructor_calls_);
  EXPECT_EQ(0, SimpleFinalizedEagerObject::destructor_calls_);
  for (int i = 0; i < 1000; i++)
    SimpleFinalizedObject::Create();
  for (int i = 0; i < 100; i++)
    SimpleFinalizedEagerObject::Create();
  for (int i = 0; i < 100; i++)
    SimpleFinalizedObjectInstanceOfTemplate::Create();
  ThreadState::Current()->CollectGarbage(
      BlinkGC::kNoHeapPointersOnStack, BlinkGC::kAtomicMarking,
      BlinkGC::kLazySweeping, BlinkGC::GCReason::kForcedGC);
  EXPECT_EQ(0, SimpleFinalizedObject::destructor_calls_);
  EXPECT_EQ(100, SimpleFinalizedEagerObject::destructor_calls_);
  EXPECT_EQ(100, SimpleFinalizedObjectInstanceOfTemplate::destructor_calls_);
}
// Unreferenced objects are finalized exactly once: the first GC runs all
// destructors, a second GC must not run them again.
TEST(HeapTest, Finalization) {
  {
    HeapTestSubClass::destructor_calls_ = 0;
    HeapTestSuperClass::destructor_calls_ = 0;
    HeapTestSubClass* t1 = HeapTestSubClass::Create();
    HeapTestSubClass* t2 = HeapTestSubClass::Create();
    HeapTestSuperClass* t3 = HeapTestSuperClass::Create();
    // FIXME(oilpan): Ignore unused variables.
    (void)t1;
    (void)t2;
    (void)t3;
  }
  // Nothing is marked so the GC should free everything and call
  // the finalizer on all three objects.
  PreciselyCollectGarbage();
  EXPECT_EQ(2, HeapTestSubClass::destructor_calls_);
  // 3 = the two subclass instances (whose dtors chain up) + the superclass.
  EXPECT_EQ(3, HeapTestSuperClass::destructor_calls_);
  // Destructors not called again when GCing again.
  PreciselyCollectGarbage();
  EXPECT_EQ(2, HeapTestSubClass::destructor_calls_);
  EXPECT_EQ(3, HeapTestSuperClass::destructor_calls_);
}
// Objects allocated in a typed arena (IntNode) must not share pages with
// general-heap objects.
TEST(HeapTest, TypedArenaSanity) {
  // We use TraceCounter for allocating an object on the general heap.
  Persistent<TraceCounter> general_heap_object = TraceCounter::Create();
  Persistent<IntNode> typed_heap_object = IntNode::Create(0);
  EXPECT_NE(PageFromObject(general_heap_object.Get()),
            PageFromObject(typed_heap_object.Get()));
}
// NoAllocationScope must flip the thread's allocation-allowed bit for
// exactly its own lifetime.
TEST(HeapTest, NoAllocation) {
  ThreadState* thread_state = ThreadState::Current();
  EXPECT_TRUE(thread_state->IsAllocationAllowed());
  {
    // Allocation is forbidden while the scope object is alive.
    ThreadState::NoAllocationScope no_allocation_scope(thread_state);
    EXPECT_FALSE(thread_state->IsAllocationAllowed());
  }
  // Leaving the scope restores the previous state.
  EXPECT_TRUE(thread_state->IsAllocationAllowed());
}
// Member<> edges inside Persistent-rooted Baz objects keep Bars alive; a Bar
// dies once its owning edge is cleared, and all die when the roots go away.
TEST(HeapTest, Members) {
  ClearOutOldGarbage();
  Bar::live_ = 0;
  {
    Persistent<Baz> h1;
    Persistent<Baz> h2;
    {
      h1 = Baz::Create(Bar::Create());
      PreciselyCollectGarbage();
      EXPECT_EQ(1u, Bar::live_);
      h2 = Baz::Create(Bar::Create());
      PreciselyCollectGarbage();
      EXPECT_EQ(2u, Bar::live_);
    }
    PreciselyCollectGarbage();
    EXPECT_EQ(2u, Bar::live_);
    // Clearing h1's member edge makes its Bar unreachable.
    h1->Clear();
    PreciselyCollectGarbage();
    EXPECT_EQ(1u, Bar::live_);
  }
  PreciselyCollectGarbage();
  EXPECT_EQ(0u, Bar::live_);
}
// Basic marking: a stack-reachable Foo keeps its Bar alive across a
// conservative GC; a precise GC reclaims it once it is unreachable.
TEST(HeapTest, MarkTest) {
  ClearOutOldGarbage();
  {
    Bar::live_ = 0;
    Persistent<Bar> bar = Bar::Create();
#if DCHECK_IS_ON()
    DCHECK(ThreadState::Current()->Heap().FindPageFromAddress(bar));
#endif
    EXPECT_EQ(1u, Bar::live_);
    {
      Foo* foo = Foo::Create(bar);
#if DCHECK_IS_ON()
      DCHECK(ThreadState::Current()->Heap().FindPageFromAddress(foo));
#endif
      EXPECT_EQ(2u, Bar::live_);
      EXPECT_TRUE(reinterpret_cast<Address>(foo) !=
                  reinterpret_cast<Address>(bar.Get()));
      ConservativelyCollectGarbage();
      EXPECT_TRUE(foo != bar);  // To make sure foo is kept alive.
      EXPECT_EQ(2u, Bar::live_);
    }
    PreciselyCollectGarbage();
    EXPECT_EQ(1u, Bar::live_);
  }
  PreciselyCollectGarbage();
  EXPECT_EQ(0u, Bar::live_);
}
// Marking must survive a 100000-deep Foo chain (i.e. not overflow the
// marking machinery); a conservative GC keeps the whole chain alive via the
// on-stack head pointer.
TEST(HeapTest, DeepTest) {
  ClearOutOldGarbage();
  const unsigned kDepth = 100000;
  Bar::live_ = 0;
  {
    Bar* bar = Bar::Create();
#if DCHECK_IS_ON()
    DCHECK(ThreadState::Current()->Heap().FindPageFromAddress(bar));
#endif
    Foo* foo = Foo::Create(bar);
#if DCHECK_IS_ON()
    DCHECK(ThreadState::Current()->Heap().FindPageFromAddress(foo));
#endif
    EXPECT_EQ(2u, Bar::live_);
    // Build a singly-linked chain of Foos of length kDepth.
    for (unsigned i = 0; i < kDepth; i++) {
      Foo* foo2 = Foo::Create(foo);
      foo = foo2;
#if DCHECK_IS_ON()
      DCHECK(ThreadState::Current()->Heap().FindPageFromAddress(foo));
#endif
    }
    EXPECT_EQ(kDepth + 2, Bar::live_);
    ConservativelyCollectGarbage();
    EXPECT_TRUE(foo != bar);  // To make sure foo and bar are kept alive.
    EXPECT_EQ(kDepth + 2, Bar::live_);
  }
  PreciselyCollectGarbage();
  EXPECT_EQ(0u, Bar::live_);
}
// Marking must handle a wide fan-out: one Bars object holding kWidth Bar
// children, all kept alive through the on-stack pointer.
TEST(HeapTest, WideTest) {
  ClearOutOldGarbage();
  Bar::live_ = 0;
  {
    Bars* bars = Bars::Create();
    unsigned width = Bars::kWidth;
    EXPECT_EQ(width + 1, Bar::live_);
    ConservativelyCollectGarbage();
    EXPECT_EQ(width + 1, Bar::live_);
    // Use bars here to make sure that it will be on the stack
    // for the conservative stack scan to find.
    EXPECT_EQ(width, bars->GetWidth());
  }
  EXPECT_EQ(Bars::kWidth + 1, Bar::live_);
  PreciselyCollectGarbage();
  EXPECT_EQ(0u, Bar::live_);
}
// HeapHashMap with Member keys/values using identity hashing: entries keep
// their objects alive, backing stores are reclaimed after GC, and payload
// accounting tracks insertions. Comparison EXPECTs use EXPECT_GT/GE/LE so
// failures print both operand values (EXPECT_TRUE on a comparison does not).
TEST(HeapTest, HashMapOfMembers) {
  ClearOutOldGarbage();
  ThreadHeap& heap = ThreadState::Current()->Heap();
  IntWrapper::destructor_calls_ = 0;
  size_t initial_object_payload_size = heap.ObjectPayloadSizeForTesting();
  {
    typedef HeapHashMap<Member<IntWrapper>, Member<IntWrapper>,
                        DefaultHash<Member<IntWrapper>>::Hash,
                        HashTraits<Member<IntWrapper>>,
                        HashTraits<Member<IntWrapper>>>
        HeapObjectIdentityMap;

    Persistent<HeapObjectIdentityMap> map =
        MakeGarbageCollected<HeapObjectIdentityMap>();

    map->clear();
    size_t after_set_was_created = heap.ObjectPayloadSizeForTesting();
    EXPECT_GT(after_set_was_created, initial_object_payload_size);

    PreciselyCollectGarbage();
    size_t after_gc = heap.ObjectPayloadSizeForTesting();
    EXPECT_EQ(after_gc, after_set_was_created);

    // If the additions below cause garbage collections, these
    // pointers should be found by conservative stack scanning.
    IntWrapper* one(IntWrapper::Create(1));
    IntWrapper* another_one(IntWrapper::Create(1));

    map->insert(one, one);

    size_t after_one_add = heap.ObjectPayloadSizeForTesting();
    EXPECT_GT(after_one_add, after_gc);

    HeapObjectIdentityMap::iterator it(map->begin());
    HeapObjectIdentityMap::iterator it2(map->begin());
    ++it;
    ++it2;

    map->insert(another_one, one);

    // The addition above can cause an allocation of a new
    // backing store. We therefore garbage collect before
    // taking the heap stats in order to get rid of the old
    // backing store. We make sure to not use conservative
    // stack scanning as that could find a pointer to the
    // old backing.
    PreciselyCollectGarbage();
    size_t after_add_and_gc = heap.ObjectPayloadSizeForTesting();
    EXPECT_GE(after_add_and_gc, after_one_add);

    EXPECT_EQ(map->size(), 2u);  // Two different wrappings of '1' are distinct.

    PreciselyCollectGarbage();
    EXPECT_TRUE(map->Contains(one));
    EXPECT_TRUE(map->Contains(another_one));

    IntWrapper* gotten(map->at(one));
    EXPECT_EQ(gotten->Value(), one->Value());
    EXPECT_EQ(gotten, one);

    size_t after_gc2 = heap.ObjectPayloadSizeForTesting();
    EXPECT_EQ(after_gc2, after_add_and_gc);

    IntWrapper* dozen = nullptr;

    for (int i = 1; i < 1000; i++) {  // 999 iterations.
      IntWrapper* i_wrapper(IntWrapper::Create(i));
      IntWrapper* i_squared(IntWrapper::Create(i * i));
      map->insert(i_wrapper, i_squared);
      if (i == 12)
        dozen = i_wrapper;
    }
    size_t after_adding1000 = heap.ObjectPayloadSizeForTesting();
    EXPECT_GT(after_adding1000, after_gc2);

    IntWrapper* gross(map->at(dozen));
    EXPECT_EQ(gross->Value(), 144);

    // This should clear out any junk backings created by all the adds.
    PreciselyCollectGarbage();
    size_t after_gc3 = heap.ObjectPayloadSizeForTesting();
    EXPECT_LE(after_gc3, after_adding1000);
  }

  PreciselyCollectGarbage();
  // The objects 'one', 'another_one', and the 999 other pairs.
  EXPECT_EQ(IntWrapper::destructor_calls_, 2000);
  size_t after_gc4 = heap.ObjectPayloadSizeForTesting();
  EXPECT_EQ(after_gc4, initial_object_payload_size);
}
// Checks that a ConstructorAllocation object (which, per its name,
// presumably allocates during construction -- see its definition elsewhere
// in this file) leaves no residual heap payload once collected.
TEST(HeapTest, NestedAllocation) {
  ThreadHeap& heap = ThreadState::Current()->Heap();
  ClearOutOldGarbage();
  size_t initial_object_payload_size = heap.ObjectPayloadSizeForTesting();
  {
    // Scoped Persistent: the object becomes garbage at scope exit.
    Persistent<ConstructorAllocation> constructor_allocation =
        ConstructorAllocation::Create();
  }
  ClearOutOldGarbage();
  // Payload must return exactly to its pre-allocation level.
  size_t after_free = heap.ObjectPayloadSizeForTesting();
  EXPECT_TRUE(initial_object_payload_size == after_free);
}
// Allocates LargeHeapObject instances (which live on large-object pages)
// and verifies payload/allocated-space accounting and destructor counts as
// objects are created, superseded, and collected.
TEST(HeapTest, LargeHeapObjects) {
  ThreadHeap& heap = ThreadState::Current()->Heap();
  ClearOutOldGarbage();
  size_t initial_object_payload_size = heap.ObjectPayloadSizeForTesting();
  size_t initial_allocated_space =
      heap.stats_collector()->allocated_space_bytes();
  IntWrapper::destructor_calls_ = 0;
  LargeHeapObject::destructor_calls_ = 0;
  {
    int slack =
        8;  // LargeHeapObject points to an IntWrapper that is also allocated.
    Persistent<LargeHeapObject> object = LargeHeapObject::Create();
#if DCHECK_IS_ON()
    // Both the first and the last byte of the object must be attributable
    // to a heap page.
    DCHECK(ThreadState::Current()->Heap().FindPageFromAddress(object));
    DCHECK(ThreadState::Current()->Heap().FindPageFromAddress(
        reinterpret_cast<char*>(object.Get()) + sizeof(LargeHeapObject) - 1));
#endif
    ClearOutOldGarbage();
    size_t after_allocation = heap.stats_collector()->allocated_space_bytes();
    {
      // Touch the first and last payload bytes to check the whole payload
      // is addressable.
      object->Set(0, 'a');
      EXPECT_EQ('a', object->Get(0));
      object->Set(object->length() - 1, 'b');
      EXPECT_EQ('b', object->Get(object->length() - 1));
      size_t expected_large_heap_object_payload_size =
          ThreadHeap::AllocationSizeFromSize(sizeof(LargeHeapObject)) -
          sizeof(HeapObjectHeader);
      size_t expected_object_payload_size =
          expected_large_heap_object_payload_size + sizeof(IntWrapper);
      size_t actual_object_payload_size =
          heap.ObjectPayloadSizeForTesting() - initial_object_payload_size;
      CheckWithSlack(expected_object_payload_size, actual_object_payload_size,
                     slack);
      // There is probably space for the IntWrapper in a heap page without
      // allocating extra pages. However, the IntWrapper allocation might cause
      // the addition of a heap page.
      size_t large_object_allocation_size =
          sizeof(LargeObjectPage) + expected_large_heap_object_payload_size;
      size_t allocated_space_lower_bound =
          initial_allocated_space + large_object_allocation_size;
      size_t allocated_space_upper_bound =
          allocated_space_lower_bound + slack + kBlinkPageSize;
      EXPECT_LE(allocated_space_lower_bound, after_allocation);
      EXPECT_LE(after_allocation, allocated_space_upper_bound);
      EXPECT_EQ(0, IntWrapper::destructor_calls_);
      EXPECT_EQ(0, LargeHeapObject::destructor_calls_);
      // Reassign the Persistent ten times; each assignment turns the
      // previously held object into garbage.
      for (int i = 0; i < 10; i++)
        object = LargeHeapObject::Create();
    }
    ClearOutOldGarbage();
    // The ten superseded objects were reclaimed; allocated space is back at
    // the single-object level, and exactly ten of each destructor ran.
    EXPECT_EQ(after_allocation,
              heap.stats_collector()->allocated_space_bytes());
    EXPECT_EQ(10, IntWrapper::destructor_calls_);
    EXPECT_EQ(10, LargeHeapObject::destructor_calls_);
  }
  ClearOutOldGarbage();
  // The last object (held by the now-destroyed Persistent) is gone too:
  // 11 destructions of each type, and all accounting back to the baseline.
  EXPECT_TRUE(initial_object_payload_size ==
              heap.ObjectPayloadSizeForTesting());
  EXPECT_EQ(initial_allocated_space,
            heap.stats_collector()->allocated_space_bytes());
  EXPECT_EQ(11, IntWrapper::destructor_calls_);
  EXPECT_EQ(11, LargeHeapObject::destructor_calls_);
  PreciselyCollectGarbage();
}
// This test often fails on Android (https://crbug.com/843032).
// We run out of memory on Android devices because ReserveCapacityForSize
// actually allocates a much larger backing than specified (in this case 400MB).
#if defined(OS_ANDROID)
#define MAYBE_LargeHashMap DISABLED_LargeHashMap
#else
#define MAYBE_LargeHashMap LargeHashMap
#endif
// Regression test for crbug.com/597953: reserving a hash-table backing
// store larger than kMaxHeapObjectSize must succeed. Disabled on Android
// (see the MAYBE_ macro above) where the backing over-allocation exhausts
// memory.
TEST(HeapTest, MAYBE_LargeHashMap) {
  ClearOutOldGarbage();
  // Try to allocate a HashTable larger than kMaxHeapObjectSize
  // (crbug.com/597953).
  wtf_size_t size = kMaxHeapObjectSize /
                    sizeof(HeapHashMap<int, Member<IntWrapper>>::ValueType);
  Persistent<HeapHashMap<int, Member<IntWrapper>>> map =
      MakeGarbageCollected<HeapHashMap<int, Member<IntWrapper>>>();
  map->ReserveCapacityForSize(size);
  // The reserved capacity must cover at least the requested element count.
  EXPECT_LE(size, map->Capacity());
}
// Companion to MAYBE_LargeHashMap (crbug.com/597953): constructing a
// HeapVector whose backing exceeds kMaxHeapObjectSize must succeed.
TEST(HeapTest, LargeVector) {
  ClearOutOldGarbage();
  // Try to allocate a HeapVectors larger than kMaxHeapObjectSize
  // (crbug.com/597953).
  wtf_size_t size = kMaxHeapObjectSize / sizeof(int);
  Persistent<HeapVector<int>> vector =
      MakeGarbageCollected<HeapVector<int>>(size);
  EXPECT_LE(size, vector->capacity());
}
// Pair element types used by the collection tests below, covering the
// combinations of strong (Member), weak (WeakMember) and untraced (int)
// pair halves.
typedef std::pair<Member<IntWrapper>, int> PairWrappedUnwrapped;
typedef std::pair<int, Member<IntWrapper>> PairUnwrappedWrapped;
typedef std::pair<WeakMember<IntWrapper>, Member<IntWrapper>> PairWeakStrong;
typedef std::pair<Member<IntWrapper>, WeakMember<IntWrapper>> PairStrongWeak;
typedef std::pair<WeakMember<IntWrapper>, int> PairWeakUnwrapped;
typedef std::pair<int, WeakMember<IntWrapper>> PairUnwrappedWeak;
// Garbage-collected aggregate bundling one of each heap collection flavor
// (map, sets, vectors and deques of traced and pair element types) for the
// collection tests. Trace() visits every member so each collection's
// backing store is reachable during marking.
class Container : public GarbageCollected<Container> {
 public:
  static Container* Create() { return MakeGarbageCollected<Container>(); }
  HeapHashMap<Member<IntWrapper>, Member<IntWrapper>> map;
  HeapHashSet<Member<IntWrapper>> set;
  HeapHashSet<Member<IntWrapper>> set2;
  HeapHashCountedSet<Member<IntWrapper>> set3;
  HeapVector<Member<IntWrapper>, 2> vector;
  HeapVector<PairWrappedUnwrapped, 2> vector_wu;
  HeapVector<PairUnwrappedWrapped, 2> vector_uw;
  HeapDeque<Member<IntWrapper>, 0> deque;
  HeapDeque<PairWrappedUnwrapped, 0> deque_wu;
  HeapDeque<PairUnwrappedWrapped, 0> deque_uw;
  // Traces all ten collections; keep this list in sync with the fields.
  void Trace(blink::Visitor* visitor) {
    visitor->Trace(map);
    visitor->Trace(set);
    visitor->Trace(set2);
    visitor->Trace(set3);
    visitor->Trace(vector);
    visitor->Trace(vector_wu);
    visitor->Trace(vector_uw);
    visitor->Trace(deque);
    visitor->Trace(deque_wu);
    visitor->Trace(deque_uw);
  }
};
// Simple collection element type holding a Member, so collections storing
// it must invoke its Trace() to keep the wrapped IntWrapper alive.
struct NeedsTracingTrait {
  explicit NeedsTracingTrait(IntWrapper* wrapper) : wrapper_(wrapper) {}
  void Trace(blink::Visitor* visitor) { visitor->Trace(wrapper_); }
  Member<IntWrapper> wrapper_;
};
// HeapVector's fill constructor must set every one of the requested slots
// to the supplied value.
TEST(HeapTest, HeapVectorFilledWithValue) {
  IntWrapper* filler = IntWrapper::Create(1);
  HeapVector<Member<IntWrapper>> filled(10, filler);
  EXPECT_EQ(10u, filled.size());
  for (const Member<IntWrapper>& slot : filled)
    EXPECT_EQ(filler, slot);
}
// Exercises HeapVector with inline capacity 2 across conservative GCs:
// growth past the inline buffer, Shrink(), and swap() between vectors in
// the three inline/external buffer combinations.
TEST(HeapTest, HeapVectorWithInlineCapacity) {
  IntWrapper* one = IntWrapper::Create(1);
  IntWrapper* two = IntWrapper::Create(2);
  IntWrapper* three = IntWrapper::Create(3);
  IntWrapper* four = IntWrapper::Create(4);
  IntWrapper* five = IntWrapper::Create(5);
  IntWrapper* six = IntWrapper::Create(6);
  {
    // Elements survive GC both while in the inline buffer (<= 2 elements)
    // and after spilling to an external backing (> 2 elements).
    HeapVector<Member<IntWrapper>, 2> vector;
    vector.push_back(one);
    vector.push_back(two);
    ConservativelyCollectGarbage();
    EXPECT_TRUE(vector.Contains(one));
    EXPECT_TRUE(vector.Contains(two));
    vector.push_back(three);
    vector.push_back(four);
    ConservativelyCollectGarbage();
    EXPECT_TRUE(vector.Contains(one));
    EXPECT_TRUE(vector.Contains(two));
    EXPECT_TRUE(vector.Contains(three));
    EXPECT_TRUE(vector.Contains(four));
    // Shrinking drops trailing elements from the vector's point of view.
    vector.Shrink(1);
    ConservativelyCollectGarbage();
    EXPECT_TRUE(vector.Contains(one));
    EXPECT_FALSE(vector.Contains(two));
    EXPECT_FALSE(vector.Contains(three));
    EXPECT_FALSE(vector.Contains(four));
  }
  {
    // swap() between two vectors still using their inline buffers.
    HeapVector<Member<IntWrapper>, 2> vector1;
    HeapVector<Member<IntWrapper>, 2> vector2;
    vector1.push_back(one);
    vector2.push_back(two);
    vector1.swap(vector2);
    ConservativelyCollectGarbage();
    EXPECT_TRUE(vector1.Contains(two));
    EXPECT_TRUE(vector2.Contains(one));
  }
  {
    // swap() between an inline-buffer vector (2 elements) and one that has
    // spilled to an external backing (4 elements).
    HeapVector<Member<IntWrapper>, 2> vector1;
    HeapVector<Member<IntWrapper>, 2> vector2;
    vector1.push_back(one);
    vector1.push_back(two);
    vector2.push_back(three);
    vector2.push_back(four);
    vector2.push_back(five);
    vector2.push_back(six);
    vector1.swap(vector2);
    ConservativelyCollectGarbage();
    EXPECT_TRUE(vector1.Contains(three));
    EXPECT_TRUE(vector1.Contains(four));
    EXPECT_TRUE(vector1.Contains(five));
    EXPECT_TRUE(vector1.Contains(six));
    EXPECT_TRUE(vector2.Contains(one));
    EXPECT_TRUE(vector2.Contains(two));
  }
}
// Checks that Shrink() + ShrinkToFit() actually reduces a HeapVector's
// backing capacity, both when the backing can be resized in place and when
// a neighboring allocation forces a fresh, smaller backing.
// NOTE(review): this test depends on heap layout (see the "Assumes"
// comments below); statement order must not be changed.
TEST(HeapTest, HeapVectorShrinkCapacity) {
  ClearOutOldGarbage();
  HeapVector<Member<IntWrapper>> vector1;
  HeapVector<Member<IntWrapper>> vector2;
  vector1.ReserveCapacity(96);
  EXPECT_LE(96u, vector1.capacity());
  vector1.Grow(vector1.capacity());
  // Assumes none was allocated just after a vector backing of vector1.
  vector1.Shrink(56);
  vector1.ShrinkToFit();
  EXPECT_GT(96u, vector1.capacity());
  vector2.ReserveCapacity(20);
  // Assumes another vector backing was allocated just after the vector
  // backing of vector1.
  vector1.Shrink(10);
  vector1.ShrinkToFit();
  EXPECT_GT(56u, vector1.capacity());
  // Growing again must provide at least the requested capacity.
  vector1.Grow(192);
  EXPECT_LE(192u, vector1.capacity());
}
// Checks Shrink()/ShrinkToFit() behavior for a HeapVector with a large
// inline capacity: the external backing shrinks, falling below the inline
// capacity switches back to the inline buffer, and the inline buffer's
// capacity itself can never shrink.
TEST(HeapTest, HeapVectorShrinkInlineCapacity) {
  ClearOutOldGarbage();
  const size_t kInlineCapacity = 64;
  HeapVector<Member<IntWrapper>, kInlineCapacity> vector1;
  // Grow past the inline buffer into an external backing.
  vector1.ReserveCapacity(128);
  EXPECT_LE(128u, vector1.capacity());
  vector1.Grow(vector1.capacity());
  // Shrink the external buffer.
  vector1.Shrink(90);
  vector1.ShrinkToFit();
  EXPECT_GT(128u, vector1.capacity());
// TODO(sof): if the ASan support for 'contiguous containers' is enabled,
// Vector inline buffers are disabled; that constraint should be attempted
// removed, but until that time, disable testing handling of capacities
// of inline buffers.
#if !defined(ANNOTATE_CONTIGUOUS_CONTAINER)
  // Shrinking switches the buffer from the external one to the inline one.
  vector1.Shrink(kInlineCapacity - 1);
  vector1.ShrinkToFit();
  EXPECT_EQ(kInlineCapacity, vector1.capacity());
  // Try to shrink the inline buffer.
  vector1.Shrink(1);
  vector1.ShrinkToFit();
  // The inline buffer's capacity is fixed; shrinking is a no-op.
  EXPECT_EQ(kInlineCapacity, vector1.capacity());
#endif
}
// Allocates a stack-rooted HeapVector whose backing store ends exactly at
// the boundary of a LargeObjectPage, then runs a conservative GC -- a
// boundary case for page/address attribution during stack scanning.
TEST(HeapTest, HeapVectorOnStackLargeObjectPageSized) {
  ClearOutOldGarbage();
  // Try to allocate a vector of a size that will end exactly where the
  // LargeObjectPage ends.
  using Container = HeapVector<Member<IntWrapper>>;
  Container vector;
  // Element count chosen so that the backing payload fills the large-object
  // page to its last byte (threshold + guard page, minus the page header
  // and the object header).
  wtf_size_t size =
      (kLargeObjectSizeThreshold + kBlinkGuardPageSize -
       static_cast<wtf_size_t>(LargeObjectPage::PageHeaderSize()) -
       sizeof(HeapObjectHeader)) /
      sizeof(Container::ValueType);
  vector.ReserveCapacity(size);
  for (unsigned i = 0; i < size; ++i)
    vector.push_back(IntWrapper::Create(i));
  // |vector| is on the stack, so conservative scanning must find it.
  ConservativelyCollectGarbage();
}
// Returns true if |deque| contains an element comparing equal to |u|.
// Linear scan; test helper used to check deque contents regardless of
// element position.
template <typename T, wtf_size_t inlineCapacity, typename U>
bool DequeContains(HeapDeque<T, inlineCapacity>& deque, U u) {
  // Use the standard algorithm instead of a hand-rolled iterator loop;
  // <algorithm> is already included at the top of this file.
  return std::find(deque.begin(), deque.end(), u) != deque.end();
}
TEST