blob: 62e5ef531351e65189600f8df82ee9064359ff8f [file] [log] [blame]
/*
* Copyright (C) 2013 Google Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "platform/ThreadSafeFunctional.h"
#include "platform/heap/Handle.h"
#include "platform/heap/Heap.h"
#include "platform/heap/HeapLinkedStack.h"
#include "platform/heap/HeapTerminatedArrayBuilder.h"
#include "platform/heap/SafePoint.h"
#include "platform/heap/SelfKeepAlive.h"
#include "platform/heap/ThreadState.h"
#include "platform/heap/Visitor.h"
#include "platform/testing/UnitTestHelpers.h"
#include "public/platform/Platform.h"
#include "public/platform/WebTaskRunner.h"
#include "public/platform/WebTraceLocation.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "wtf/HashTraits.h"
#include "wtf/LinkedHashSet.h"
#include "wtf/PtrUtil.h"
#include <memory>
namespace blink {
// Triggers a precise, forced GC with sweeping: the stack is declared free of
// heap pointers, so unreferenced objects are reclaimed deterministically.
static void preciselyCollectGarbage()
{
ThreadHeap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, BlinkGC::ForcedGC);
}
// Triggers a forced GC that scans the stack conservatively: any stack word
// that looks like a heap pointer keeps the corresponding object alive.
static void conservativelyCollectGarbage()
{
ThreadHeap::collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithSweep, BlinkGC::ForcedGC);
}
// Simple garbage-collected integer box used throughout the heap tests.
// Counts destructor runs in s_destructorCalls so tests can observe when
// instances are finalized.
class IntWrapper : public GarbageCollectedFinalized<IntWrapper> {
public:
    static IntWrapper* create(int x)
    {
        return new IntWrapper(x);
    }

    virtual ~IntWrapper()
    {
        ++s_destructorCalls;
    }

    static int s_destructorCalls;

    DEFINE_INLINE_TRACE() { }

    int value() const { return m_x; }
    bool operator==(const IntWrapper& other) const { return other.value() == value(); }

    // const-qualified (was non-const): hashing must not mutate the object and
    // callers may only hold a const reference.
    unsigned hash() const { return IntHash<int>::hash(m_x); }

    // Implicit conversion from int is intentional; tests rely on it.
    IntWrapper(int x) : m_x(x) { }

private:
    IntWrapper(); // Deliberately unimplemented: a value is always required.
    int m_x;
};
static_assert(WTF::IsTraceable<IntWrapper>::value, "IsTraceable<> template failed to recognize trace method.");
// Layout guard: Persistent<T> must stay within four pointers in size.
struct SameSizeAsPersistent {
void* m_pointer[4];
};
static_assert(sizeof(Persistent<IntWrapper>) <= sizeof(SameSizeAsPersistent), "Persistent handle should stay small");
// Hash key that remembers the ThreadState of the thread that created it.
// Sentinel thread pointers: 0 marks an empty hash-table slot, -1 a deleted
// one. The destructor verifies (via EXPECT) that non-sentinel markers are
// destroyed on their creating thread.
class ThreadMarker {
public:
ThreadMarker() : m_creatingThread(reinterpret_cast<ThreadState*>(0)), m_num(0) { }
// Implicit on purpose: tests insert plain unsigned values as keys.
ThreadMarker(unsigned i) : m_creatingThread(ThreadState::current()), m_num(i) { }
ThreadMarker(WTF::HashTableDeletedValueType deleted) : m_creatingThread(reinterpret_cast<ThreadState*>(-1)), m_num(0) { }
~ThreadMarker()
{
EXPECT_TRUE((m_creatingThread == ThreadState::current())
|| (m_creatingThread == reinterpret_cast<ThreadState*>(0))
|| (m_creatingThread == reinterpret_cast<ThreadState*>(-1)));
}
bool isHashTableDeletedValue() const { return m_creatingThread == reinterpret_cast<ThreadState*>(-1); }
bool operator==(const ThreadMarker& other) const { return other.m_creatingThread == m_creatingThread && other.m_num == m_num; }
ThreadState* m_creatingThread;
unsigned m_num;
};
// Hash functor for ThreadMarker keys: folds the creating thread's address
// together with the numeric payload.
struct ThreadMarkerHash {
    static unsigned hash(const ThreadMarker& key)
    {
        const uintptr_t threadBits = reinterpret_cast<uintptr_t>(key.m_creatingThread);
        return static_cast<unsigned>(threadBits + key.m_num);
    }

    static bool equal(const ThreadMarker& a, const ThreadMarker& b) { return a == b; }

    // Empty/deleted slots use sentinel thread pointers, so comparing a real
    // key against them is not meaningful.
    static const bool safeToCompareToEmptyOrDeleted = false;
};
// A strong/weak pointer pair used to exercise weak processing of compound
// hash-table entries: the first field keeps its target alive, the second
// does not.
typedef std::pair<Member<IntWrapper>, WeakMember<IntWrapper>> StrongWeakPair;
struct PairWithWeakHandling : public StrongWeakPair {
DISALLOW_NEW_EXCEPT_PLACEMENT_NEW();
public:
// Regular constructor.
PairWithWeakHandling(IntWrapper* one, IntWrapper* two)
: StrongWeakPair(one, two)
{
ASSERT(one); // We use null first field to indicate empty slots in the hash table.
}
// The HashTable (via the HashTrait) calls this constructor with a
// placement new to mark slots in the hash table as being deleted. We will
// never call trace or the destructor on these slots. We mark ourselves deleted
// with a pointer to -1 in the first field.
PairWithWeakHandling(WTF::HashTableDeletedValueType)
: StrongWeakPair(reinterpret_cast<IntWrapper*>(-1), nullptr)
{
}
// Used by the HashTable (via the HashTrait) to skip deleted slots in the
// table. Recognizes objects that were 'constructed' using the above
// constructor.
bool isHashTableDeletedValue() const { return first == reinterpret_cast<IntWrapper*>(-1); }
// Since we don't allocate independent objects of this type, we don't need
// a regular trace method. Instead, we use a traceInCollection method. If
// the entry should be deleted from the collection we return true and don't
// trace the strong pointer.
template<typename VisitorDispatcher>
bool traceInCollection(VisitorDispatcher visitor, WTF::ShouldWeakPointersBeMarkedStrongly strongify)
{
// Process (or strongify) the weak half first; if its target is dead the
// whole entry is removed and the strong half must not be traced.
visitor->traceInCollection(second, strongify);
if (!ThreadHeap::isHeapObjectAlive(second))
return true;
// FIXME: traceInCollection is also called from WeakProcessing to check if the entry is dead.
// The below if avoids calling trace in that case by only calling trace when |first| is not yet marked.
if (!ThreadHeap::isHeapObjectAlive(first))
visitor->trace(first);
return false;
}
};
// Hash traits that delegate weak processing of collection entries to the
// element type's traceInCollection method (see PairWithWeakHandling above).
template<typename T> struct WeakHandlingHashTraits : WTF::SimpleClassHashTraits<T> {
// We want to treat the object as a weak object in the sense that it can
// disappear from hash sets and hash maps.
static const WTF::WeakHandlingFlag weakHandlingFlag = WTF::WeakHandlingInCollections;
// Normally whether or not an object needs tracing is inferred
// automatically from the presence of the trace method, but we don't
// necessarily have a trace method, and we may not need one because T
// can perhaps only be allocated inside collections, never as independent
// objects. Explicitly mark this as needing tracing and it will be traced
// in collections using the traceInCollection method, which it must have.
template<typename U = void> struct IsTraceableInCollection {
static const bool value = true;
};
// The traceInCollection method traces differently depending on whether we
// are strongifying the trace operation. We strongify the trace operation
// when there are active iterators on the object. In this case all
// WeakMembers are marked like strong members so that elements do not
// suddenly disappear during iteration. Returns true if weak pointers to
// dead objects were found: In this case any strong pointers were not yet
// traced and the entry should be removed from the collection.
template<typename VisitorDispatcher>
static bool traceInCollection(VisitorDispatcher visitor, T& t, WTF::ShouldWeakPointersBeMarkedStrongly strongify)
{
return t.traceInCollection(visitor, strongify);
}
};
} // namespace blink
// Trait specializations that plug the test types above into WTF's hash
// tables: default hashers and the empty/deleted slot protocols.
namespace WTF {
template<typename T> struct DefaultHash;
template<> struct DefaultHash<blink::ThreadMarker> {
typedef blink::ThreadMarkerHash Hash;
};
// ThreadMarkerHash is the default hash for ThreadMarker
template<> struct HashTraits<blink::ThreadMarker> : GenericHashTraits<blink::ThreadMarker> {
// An all-zero ThreadMarker (null thread, num 0) represents an empty slot.
static const bool emptyValueIsZero = true;
static void constructDeletedValue(blink::ThreadMarker& slot, bool) { new (NotNull, &slot) blink::ThreadMarker(HashTableDeletedValue); }
static bool isDeletedValue(const blink::ThreadMarker& slot) { return slot.isHashTableDeletedValue(); }
};
// The hash algorithm for our custom pair class is just the standard double
// hash for pairs. Note that this means you can't mutate either of the parts of
// the pair while they are in the hash table, as that would change their hash
// code and thus their preferred placement in the table.
template<> struct DefaultHash<blink::PairWithWeakHandling> {
typedef PairHash<blink::Member<blink::IntWrapper>, blink::WeakMember<blink::IntWrapper>> Hash;
};
// Custom traits for the pair. These are weakness handling traits, which means
// PairWithWeakHandling must implement the traceInCollection method.
// In addition, these traits are concerned with the two magic values for the
// object, that represent empty and deleted slots in the hash table. The
// SimpleClassHashTraits allow empty slots in the table to be initialzed with
// memset to zero, and we use -1 in the first part of the pair to represent
// deleted slots.
template<> struct HashTraits<blink::PairWithWeakHandling> : blink::WeakHandlingHashTraits<blink::PairWithWeakHandling> {
static const bool hasIsEmptyValueFunction = true;
static bool isEmptyValue(const blink::PairWithWeakHandling& value) { return !value.first; }
static void constructDeletedValue(blink::PairWithWeakHandling& slot, bool) { new (NotNull, &slot) blink::PairWithWeakHandling(HashTableDeletedValue); }
static bool isDeletedValue(const blink::PairWithWeakHandling& value) { return value.isHashTableDeletedValue(); }
};
// Traceability of the pair follows that of the underlying std::pair type.
template<>
struct IsTraceable<blink::PairWithWeakHandling> {
static const bool value = IsTraceable<blink::StrongWeakPair>::value;
};
} // namespace WTF
namespace blink {
// RAII scope that parks all heap threads and runs the pre-GC phase on
// construction, and the post-GC phase plus thread resume on destruction.
// Callers must check allThreadsParked() before relying on GC state.
class TestGCScope {
public:
explicit TestGCScope(BlinkGC::StackState state)
: m_state(ThreadState::current())
, m_safePointScope(state)
, m_parkedAllThreads(false)
{
ASSERT(m_state->checkThread());
// park() can fail; only run preGC when every thread actually stopped.
if (LIKELY(m_state->heap().park())) {
m_state->heap().preGC();
m_parkedAllThreads = true;
}
}
bool allThreadsParked() { return m_parkedAllThreads; }
~TestGCScope()
{
// Only cleanup if we parked all threads in which case the GC happened
// and we need to resume the other threads.
if (LIKELY(m_parkedAllThreads)) {
m_state->heap().postGC(BlinkGC::GCWithSweep);
m_state->heap().resume();
}
}
private:
ThreadState* m_state;
SafePointScope m_safePointScope;
bool m_parkedAllThreads; // False if we fail to park all threads
};
// Generates type-specific Visitor overrides for the counting test visitor.
// NOTE(review): this macro is currently not expanded anywhere before its
// #undef below, but keep it correct: the previous definition of
// ensureMarked(const Type*) called itself with a const Type* argument, which
// would recurse infinitely if ever instantiated. Delegate explicitly to the
// const void* overload instead.
#define DEFINE_VISITOR_METHODS(Type) \
    void mark(const Type* object, TraceCallback callback) override \
    { \
        if (object) \
            m_count++; \
    } \
    bool isMarked(const Type*) override { return false; } \
    bool ensureMarked(const Type* objectPointer) override \
    { \
        return ensureMarked(static_cast<const void*>(objectPointer)); \
    }
// Test visitor that counts how many objects are reported to it; weak
// registration callbacks are deliberately no-ops.
class CountingVisitor : public Visitor {
public:
explicit CountingVisitor(ThreadState* state)
: Visitor(state, Visitor::ThreadLocalMarking)
, m_count(0)
{
}
void mark(const void* object, TraceCallback) override
{
if (object)
m_count++;
}
void markHeader(HeapObjectHeader* header, TraceCallback callback) override
{
ASSERT(header->payload());
m_count++;
}
void registerDelayedMarkNoTracing(const void*) override { }
void registerWeakMembers(const void*, const void*, WeakCallback) override { }
void registerWeakTable(const void*, EphemeronCallback, EphemeronCallback) override { }
#if ENABLE(ASSERT)
bool weakTableRegistered(const void*) override { return false; }
#endif
void registerWeakCellWithCallback(void**, WeakCallback) override { }
// Marks the object's header (without tracing) and reports whether this
// call was the one that marked it.
bool ensureMarked(const void* objectPointer) override
{
if (!objectPointer || HeapObjectHeader::fromPayload(objectPointer)->isMarked())
return false;
markNoTracing(objectPointer);
return true;
}
size_t count() { return m_count; }
void reset() { m_count = 0; }
private:
StackFrameDepthScope m_scope;
size_t m_count;
};
#undef DEFINE_VISITOR_METHODS
// Minimal GCed object with a 64-byte payload and a vtable (see comment on
// virtualMethod below for why the vtable matters).
class SimpleObject : public GarbageCollected<SimpleObject> {
public:
static SimpleObject* create() { return new SimpleObject(); }
DEFINE_INLINE_TRACE() { }
char getPayload(int i) { return payload[i]; }
// This virtual method is unused but it is here to make sure
// that this object has a vtable. This object is used
// as the super class for objects that also have garbage
// collected mixins and having a virtual here makes sure
// that adjustment is needed both for marking and for isAlive
// checks.
virtual void virtualMethod() { }
protected:
SimpleObject() { }
char payload[64];
};
// Finalized GC base class; counts destructor runs so tests can verify
// finalization through a base-class pointer.
class HeapTestSuperClass : public GarbageCollectedFinalized<HeapTestSuperClass> {
public:
static HeapTestSuperClass* create()
{
return new HeapTestSuperClass();
}
virtual ~HeapTestSuperClass()
{
++s_destructorCalls;
}
static int s_destructorCalls;
DEFINE_INLINE_TRACE() { }
protected:
HeapTestSuperClass() { }
};
int HeapTestSuperClass::s_destructorCalls = 0;
// Non-GC mixin base, used to give HeapTestSubClass a non-zero base-class
// offset so finalization must adjust the this-pointer correctly.
class HeapTestOtherSuperClass {
public:
int payload;
};
// Sentinel value checked in ~HeapTestSubClass to detect corruption or a
// destructor call on the wrong subobject.
static const size_t classMagic = 0xABCDDBCA;
class HeapTestSubClass : public HeapTestOtherSuperClass, public HeapTestSuperClass {
public:
static HeapTestSubClass* create()
{
return new HeapTestSubClass();
}
~HeapTestSubClass() override
{
EXPECT_EQ(classMagic, m_magic);
++s_destructorCalls;
}
static int s_destructorCalls;
private:
HeapTestSubClass() : m_magic(classMagic) { }
const size_t m_magic;
};
int HeapTestSubClass::s_destructorCalls = 0;
// GCed object holding a 1000-byte inline array with a deterministic fill
// pattern (i mod 128), so tests can check the payload survives GC intact.
class HeapAllocatedArray : public GarbageCollected<HeapAllocatedArray> {
public:
HeapAllocatedArray()
{
for (int i = 0; i < s_arraySize; ++i) {
m_array[i] = i % 128;
}
}
int8_t at(size_t i) { return m_array[i]; }
DEFINE_INLINE_TRACE() { }
private:
static const int s_arraySize = 1000;
int8_t m_array[s_arraySize];
};
// Do several GCs to make sure that later GCs don't free up old memory from
// previously run tests in this process.
static void clearOutOldGarbage()
{
ThreadHeap& heap = ThreadState::current()->heap();
while (true) {
size_t used = heap.objectPayloadSizeForTesting();
preciselyCollectGarbage();
if (heap.objectPayloadSizeForTesting() >= used)
break;
}
}
// Off-heap, ref-counted integer box: the RefPtr-managed counterpart to
// IntWrapper, used to mix non-GC values into heap collections. Counts
// destructor runs so tests can observe when the heap drops its references.
class OffHeapInt : public RefCounted<OffHeapInt> {
public:
    static RefPtr<OffHeapInt> create(int x)
    {
        return adoptRef(new OffHeapInt(x));
    }

    virtual ~OffHeapInt()
    {
        ++s_destructorCalls;
    }

    static int s_destructorCalls;

    int value() const { return m_x; }
    bool operator==(const OffHeapInt& other) const { return other.value() == value(); }

    // const-qualified (was non-const): hashing must not mutate the object and
    // callers may only hold a const reference.
    unsigned hash() const { return IntHash<int>::hash(m_x); }

    void voidFunction() { }

protected:
    OffHeapInt(int x) : m_x(x) { }

private:
    OffHeapInt(); // Deliberately unimplemented: a value is always required.
    int m_x;
};

int IntWrapper::s_destructorCalls = 0;
int OffHeapInt::s_destructorCalls = 0;
class ThreadedTesterBase {
protected:
static void test(ThreadedTesterBase* tester)
{
Vector<std::unique_ptr<WebThread>, numberOfThreads> m_threads;
for (int i = 0; i < numberOfThreads; i++) {
m_threads.append(wrapUnique(Platform::current()->createThread("blink gc testing thread")));
m_threads.last()->getWebTaskRunner()->postTask(BLINK_FROM_HERE, threadSafeBind(threadFunc, AllowCrossThreadAccess(tester)));
}
while (tester->m_threadsToFinish) {
SafePointScope scope(BlinkGC::NoHeapPointersOnStack);
testing::yieldCurrentThread();
}
delete tester;
}
virtual void runThread() = 0;
protected:
static const int numberOfThreads = 10;
static const int gcPerThread = 5;
static const int numberOfAllocations = 50;
ThreadedTesterBase() : m_gcCount(0), m_threadsToFinish(numberOfThreads)
{
}
virtual ~ThreadedTesterBase()
{
}
inline bool done() const { return m_gcCount >= numberOfThreads * gcPerThread; }
volatile int m_gcCount;
volatile int m_threadsToFinish;
private:
static void threadFunc(void* data)
{
reinterpret_cast<ThreadedTesterBase*>(data)->runThread();
}
};
// Needed to give this variable a definition (the initializer above is only a
// declaration), so that subclasses can use it.
const int ThreadedTesterBase::numberOfThreads;
// Stress test: each worker thread churns allocations, Persistents and
// CrossThreadPersistents while forcing GCs, then detaches. The destructor
// verifies the termination GCs cleared every cross-thread persistent.
class ThreadedHeapTester : public ThreadedTesterBase {
public:
static void test()
{
ThreadedTesterBase::test(new ThreadedHeapTester);
}
~ThreadedHeapTester() override
{
// Verify that the threads cleared their CTPs when
// terminating, preventing access to a finalized heap.
for (auto& globalIntWrapper : m_crossPersistents) {
ASSERT(globalIntWrapper.get());
EXPECT_FALSE(globalIntWrapper.get()->get());
}
}
protected:
using GlobalIntWrapperPersistent = CrossThreadPersistent<IntWrapper>;
// Guards m_crossPersistents, which is appended to from all worker threads.
Mutex m_mutex;
Vector<std::unique_ptr<GlobalIntWrapperPersistent>> m_crossPersistents;
std::unique_ptr<GlobalIntWrapperPersistent> createGlobalPersistent(int value)
{
return wrapUnique(new GlobalIntWrapperPersistent(IntWrapper::create(value)));
}
void addGlobalPersistent()
{
MutexLocker lock(m_mutex);
m_crossPersistents.append(createGlobalPersistent(0x2a2a2a2a));
}
void runThread() override
{
ThreadState::attachCurrentThread(false);
// Add a cross-thread persistent from this thread; the test object
// verifies that it will have been cleared out after the threads
// have all detached, running their termination GCs while doing so.
addGlobalPersistent();
int gcCount = 0;
while (!done()) {
ThreadState::current()->safePoint(BlinkGC::NoHeapPointersOnStack);
{
Persistent<IntWrapper> wrapper;
std::unique_ptr<GlobalIntWrapperPersistent> globalPersistent = createGlobalPersistent(0x0ed0cabb);
for (int i = 0; i < numberOfAllocations; i++) {
wrapper = IntWrapper::create(0x0bbac0de);
if (!(i % 10)) {
globalPersistent = createGlobalPersistent(0x0ed0cabb);
}
SafePointScope scope(BlinkGC::NoHeapPointersOnStack);
testing::yieldCurrentThread();
}
if (gcCount < gcPerThread) {
preciselyCollectGarbage();
gcCount++;
atomicIncrement(&m_gcCount);
}
// Taking snapshot shouldn't have any bad side effect.
// TODO(haraken): This snapshot GC causes crashes, so disable
// it at the moment. Fix the crash and enable it.
// ThreadHeap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::TakeSnapshot, BlinkGC::ForcedGC);
preciselyCollectGarbage();
// The persistents must have kept their targets alive through the GCs.
EXPECT_EQ(wrapper->value(), 0x0bbac0de);
EXPECT_EQ((*globalPersistent)->value(), 0x0ed0cabb);
}
SafePointScope scope(BlinkGC::NoHeapPointersOnStack);
testing::yieldCurrentThread();
}
ThreadState::detachCurrentThread();
atomicDecrement(&m_threadsToFinish);
}
};
// Stress test for weak-map processing across threads: after a forced precise
// GC the unreferenced IntWrapper values must have been swept, leaving both
// weak maps empty.
class ThreadedWeaknessTester : public ThreadedTesterBase {
public:
static void test()
{
ThreadedTesterBase::test(new ThreadedWeaknessTester);
}
private:
void runThread() override
{
ThreadState::attachCurrentThread(false);
int gcCount = 0;
while (!done()) {
ThreadState::current()->safePoint(BlinkGC::NoHeapPointersOnStack);
{
// Same map contents held two ways: via a Persistent handle and via a
// PersistentHeap... collection on the stack.
Persistent<HeapHashMap<ThreadMarker, WeakMember<IntWrapper>>> weakMap = new HeapHashMap<ThreadMarker, WeakMember<IntWrapper>>;
PersistentHeapHashMap<ThreadMarker, WeakMember<IntWrapper>> weakMap2;
for (int i = 0; i < numberOfAllocations; i++) {
weakMap->add(static_cast<unsigned>(i), IntWrapper::create(0));
weakMap2.add(static_cast<unsigned>(i), IntWrapper::create(0));
SafePointScope scope(BlinkGC::NoHeapPointersOnStack);
testing::yieldCurrentThread();
}
if (gcCount < gcPerThread) {
preciselyCollectGarbage();
gcCount++;
atomicIncrement(&m_gcCount);
}
// Taking snapshot shouldn't have any bad side effect.
// TODO(haraken): This snapshot GC causes crashes, so disable
// it at the moment. Fix the crash and enable it.
// ThreadHeap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::TakeSnapshot, BlinkGC::ForcedGC);
preciselyCollectGarbage();
// Nothing kept the values alive, so weak processing must have
// emptied both maps.
EXPECT_TRUE(weakMap->isEmpty());
EXPECT_TRUE(weakMap2.isEmpty());
}
SafePointScope scope(BlinkGC::NoHeapPointersOnStack);
testing::yieldCurrentThread();
}
ThreadState::detachCurrentThread();
atomicDecrement(&m_threadsToFinish);
}
};
// Verifies that detaching a thread drains a chain of Persistent<>s that
// alternates with RefPtr links: termination GCs must release one
// Persistent<> at a time until none remain.
class ThreadPersistentHeapTester : public ThreadedTesterBase {
public:
static void test()
{
ThreadedTesterBase::test(new ThreadPersistentHeapTester);
}
protected:
class Local final : public GarbageCollected<Local> {
public:
Local() { }
DEFINE_INLINE_TRACE() { }
};
class PersistentChain;
// Off-heap link: holds the next GCed link alive via a Persistent<>.
class RefCountedChain : public RefCounted<RefCountedChain> {
public:
static RefCountedChain* create(int count)
{
return new RefCountedChain(count);
}
private:
explicit RefCountedChain(int count)
{
if (count > 0) {
--count;
m_persistentChain = PersistentChain::create(count);
}
}
Persistent<PersistentChain> m_persistentChain;
};
// On-heap link: holds the next off-heap link alive via a RefPtr.
class PersistentChain : public GarbageCollectedFinalized<PersistentChain> {
public:
static PersistentChain* create(int count)
{
return new PersistentChain(count);
}
DEFINE_INLINE_TRACE() { }
private:
explicit PersistentChain(int count)
{
m_refCountedChain = adoptRef(RefCountedChain::create(count));
}
RefPtr<RefCountedChain> m_refCountedChain;
};
void runThread() override
{
ThreadState::attachCurrentThread(false);
PersistentChain::create(100);
// Upon thread detach, GCs will run until all persistents have been
// released. We verify that the draining of persistents proceeds
// as expected by dropping one Persistent<> per GC until there
// are none left.
ThreadState::detachCurrentThread();
atomicDecrement(&m_threadsToFinish);
}
};
// The accounting for memory includes the memory used by rounding up object
// sizes. This is done in a different way on 32 bit and 64 bit, so we have to
// have some slack in the tests.
//
// Asserts expected <= actual <= expected + slack. The upper bound is
// computed in intptr_t; the C-style casts are kept deliberately so that T
// may be either an integral or a pointer-like type (TODO confirm callers).
template<typename T>
void CheckWithSlack(T expected, T actual, int slack)
{
EXPECT_LE(expected, actual);
EXPECT_GE((intptr_t)expected + slack, (intptr_t)actual);
}
// GCed object that counts how many times its trace method has been invoked,
// letting tests observe marking activity.
class TraceCounter : public GarbageCollectedFinalized<TraceCounter> {
public:
static TraceCounter* create()
{
return new TraceCounter();
}
DEFINE_INLINE_TRACE() { m_traceCount++; }
int traceCount() { return m_traceCount; }
private:
TraceCounter()
: m_traceCount(0)
{
}
int m_traceCount;
};
// GCed object whose trace method asserts marking order: on its first trace
// the member must not yet be marked alive; on later traces it must be.
class ClassWithMember : public GarbageCollected<ClassWithMember> {
public:
static ClassWithMember* create()
{
return new ClassWithMember();
}
DEFINE_INLINE_TRACE()
{
EXPECT_TRUE(ThreadHeap::isHeapObjectAlive(this));
if (!traceCount())
EXPECT_FALSE(ThreadHeap::isHeapObjectAlive(m_traceCounter));
else
EXPECT_TRUE(ThreadHeap::isHeapObjectAlive(m_traceCounter));
visitor->trace(m_traceCounter);
}
int traceCount() { return m_traceCounter->traceCount(); }
private:
ClassWithMember()
: m_traceCounter(TraceCounter::create())
{ }
Member<TraceCounter> m_traceCounter;
};
// Minimal finalized GC object; s_destructorCalls lets tests count how many
// instances have been swept.
class SimpleFinalizedObject : public GarbageCollectedFinalized<SimpleFinalizedObject> {
public:
static SimpleFinalizedObject* create()
{
return new SimpleFinalizedObject();
}
~SimpleFinalizedObject()
{
++s_destructorCalls;
}
static int s_destructorCalls;
DEFINE_INLINE_TRACE() { }
private:
SimpleFinalizedObject() { }
};
int SimpleFinalizedObject::s_destructorCalls = 0;
// Test stand-in for blink::Node used to exercise typed-heap allocation: its
// operator new places instances on the dedicated Node arena.
class IntNode : public GarbageCollected<IntNode> {
public:
// IntNode is used to test typed heap allocation. Instead of
// redefining blink::Node to our test version, we keep it separate
// so as to avoid possible warnings about linker duplicates.
// Override operator new to allocate IntNode subtype objects onto
// the dedicated heap for blink::Node.
//
// TODO(haraken): untangling the heap unit tests from Blink would
// simplify and avoid running into this problem - http://crbug.com/425381
GC_PLUGIN_IGNORE("crbug.com/443854")
void* operator new(size_t size)
{
ThreadState* state = ThreadState::current();
const char* typeName = WTF_HEAP_PROFILER_TYPE_NAME(IntNode);
return ThreadHeap::allocateOnArenaIndex(state, size, BlinkGC::NodeArenaIndex, GCInfoTrait<IntNode>::index(), typeName);
}
static IntNode* create(int i)
{
return new IntNode(i);
}
DEFINE_INLINE_TRACE() { }
int value() { return m_value; }
private:
IntNode(int i) : m_value(i) { }
int m_value;
};
// GCed object with a custom finalizer (finalizeGarbageCollectedObject) that
// zaps a magic value and maintains a global live-instance count in s_live.
class Bar : public GarbageCollectedFinalized<Bar> {
public:
static Bar* create()
{
return new Bar();
}
void finalizeGarbageCollectedObject()
{
// The magic value must still be intact: finalizing twice or finalizing
// a corrupted object would trip this.
EXPECT_TRUE(m_magic == magic);
m_magic = 0;
s_live--;
}
bool hasBeenFinalized() const { return !m_magic; }
DEFINE_INLINE_VIRTUAL_TRACE() { }
static unsigned s_live;
protected:
static const int magic = 1337;
int m_magic;
Bar()
: m_magic(magic)
{
s_live++;
}
};
WILL_NOT_BE_EAGERLY_TRACED_CLASS(Bar);
unsigned Bar::s_live = 0;
// GCed holder of a strong Member<Bar>; used with FinalizationObserver, whose
// notification calls willFinalize before the Bar is finalized.
class Baz : public GarbageCollected<Baz> {
public:
static Baz* create(Bar* bar)
{
return new Baz(bar);
}
DEFINE_INLINE_TRACE()
{
visitor->trace(m_bar);
}
void clear() { m_bar.release(); }
// willFinalize is called by FinalizationObserver.
void willFinalize()
{
EXPECT_TRUE(!m_bar->hasBeenFinalized());
}
private:
explicit Baz(Bar* bar)
: m_bar(bar)
{
}
Member<Bar> m_bar;
};
// Bar subclass holding a raw Bar* that it marks manually in trace; the
// m_pointsToFoo flag records which static type the pointer was stored as, so
// the visitor is handed the correctly-typed pointer.
class Foo : public Bar {
public:
static Foo* create(Bar* bar)
{
return new Foo(bar);
}
static Foo* create(Foo* foo)
{
return new Foo(foo);
}
DEFINE_INLINE_VIRTUAL_TRACE()
{
if (m_pointsToFoo)
visitor->mark(static_cast<Foo*>(m_bar));
else
visitor->mark(m_bar);
}
private:
Foo(Bar* bar)
: Bar()
, m_bar(bar)
, m_pointsToFoo(false)
{
}
Foo(Foo* foo)
: Bar()
, m_bar(foo)
, m_pointsToFoo(true)
{
}
Bar* m_bar;
bool m_pointsToFoo;
};
WILL_NOT_BE_EAGERLY_TRACED_CLASS(Foo);
// Bar subclass owning a large (7500-entry) inline array of Member<Bar>;
// m_width tracks how many slots were successfully populated and bounds the
// trace loop.
class Bars : public Bar {
public:
static Bars* create()
{
return new Bars();
}
DEFINE_INLINE_VIRTUAL_TRACE()
{
for (unsigned i = 0; i < m_width; i++)
visitor->trace(m_bars[i]);
}
unsigned getWidth() const
{
return m_width;
}
static const unsigned width = 7500;
private:
// m_width is incremented as each slot is filled, so a GC occurring during
// construction only traces the slots initialized so far.
Bars() : m_width(0)
{
for (unsigned i = 0; i < width; i++) {
m_bars[i] = Bar::create();
m_width++;
}
}
unsigned m_width;
Member<Bar> m_bars[width];
};
WILL_NOT_BE_EAGERLY_TRACED_CLASS(Bars);
// GCed object that performs a heap allocation inside its own constructor,
// exercising allocation while the outer object is still under construction.
class ConstructorAllocation : public GarbageCollected<ConstructorAllocation> {
public:
static ConstructorAllocation* create() { return new ConstructorAllocation(); }
DEFINE_INLINE_TRACE() { visitor->trace(m_intWrapper); }
private:
ConstructorAllocation()
{
m_intWrapper = IntWrapper::create(42);
}
Member<IntWrapper> m_intWrapper;
};
// GCed object with a 1MB inline payload, used to exercise the large-object
// allocation path; destructor runs are counted in s_destructorCalls.
class LargeHeapObject : public GarbageCollectedFinalized<LargeHeapObject> {
public:
~LargeHeapObject()
{
s_destructorCalls++;
}
static LargeHeapObject* create() { return new LargeHeapObject(); }
char get(size_t i) { return m_data[i]; }
void set(size_t i, char c) { m_data[i] = c; }
size_t length() { return s_length; }
DEFINE_INLINE_TRACE()
{
visitor->trace(m_intWrapper);
}
static int s_destructorCalls;
private:
static const size_t s_length = 1024 * 1024;
LargeHeapObject()
{
m_intWrapper = IntWrapper::create(23);
}
Member<IntWrapper> m_intWrapper;
char m_data[s_length];
};
int LargeHeapObject::s_destructorCalls = 0;
// This test class served a more important role while Blink
// was transitioned over to using Oilpan. That required classes
// that were hybrid, both ref-counted and on the Oilpan heap
// (the RefCountedGarbageCollected<> class providing just that.)
//
// There's no current need for having a ref-counted veneer on
// top of a GCed class, but we preserve it here to exercise the
// implementation technique that it used -- keeping an internal
// "keep alive" persistent reference that is set & cleared across
// ref-counting operations.
//
class RefCountedAndGarbageCollected : public GarbageCollectedFinalized<RefCountedAndGarbageCollected> {
public:
static RefCountedAndGarbageCollected* create()
{
return new RefCountedAndGarbageCollected;
}
~RefCountedAndGarbageCollected()
{
++s_destructorCalls;
}
// First ref pins the object with a self-referential persistent.
void ref()
{
if (UNLIKELY(!m_refCount)) {
ASSERT(ThreadState::current()->findPageFromAddress(reinterpret_cast<Address>(this)));
m_keepAlive = this;
}
++m_refCount;
}
// Last deref releases the pin, letting GC reclaim the object.
void deref()
{
ASSERT(m_refCount > 0);
if (!--m_refCount)
m_keepAlive.clear();
}
DEFINE_INLINE_TRACE() { }
static int s_destructorCalls;
private:
RefCountedAndGarbageCollected()
: m_refCount(0)
{
}
int m_refCount;
SelfKeepAlive<RefCountedAndGarbageCollected> m_keepAlive;
};
int RefCountedAndGarbageCollected::s_destructorCalls = 0;
// Same hybrid ref-count/GC pattern as RefCountedAndGarbageCollected, but with
// an extra non-GC base so the GC header lives at a non-zero offset.
class RefCountedAndGarbageCollected2 : public HeapTestOtherSuperClass, public GarbageCollectedFinalized<RefCountedAndGarbageCollected2> {
public:
static RefCountedAndGarbageCollected2* create()
{
return new RefCountedAndGarbageCollected2;
}
~RefCountedAndGarbageCollected2()
{
++s_destructorCalls;
}
// First ref pins the object with a self-referential persistent.
void ref()
{
if (UNLIKELY(!m_refCount)) {
ASSERT(ThreadState::current()->findPageFromAddress(reinterpret_cast<Address>(this)));
m_keepAlive = this;
}
++m_refCount;
}
// Last deref releases the pin, letting GC reclaim the object.
void deref()
{
ASSERT(m_refCount > 0);
if (!--m_refCount)
m_keepAlive.clear();
}
DEFINE_INLINE_TRACE() { }
static int s_destructorCalls;
private:
RefCountedAndGarbageCollected2()
: m_refCount(0)
{
}
int m_refCount;
SelfKeepAlive<RefCountedAndGarbageCollected2> m_keepAlive;
};
int RefCountedAndGarbageCollected2::s_destructorCalls = 0;
// Generates a typed mark() override that forwards to the one-argument
// mark(const void*) of the visitor below. NOTE(review): this macro appears
// unused before its #undef; comments must stay outside the macro because a
// line-continuation backslash must be the last character on its line.
#define DEFINE_VISITOR_METHODS(Type) \
void mark(const Type* object, TraceCallback callback) override \
{ \
mark(object); \
} \
// Counting visitor that additionally checks the set of marked pointers: the
// first m_expectedCount marks must come from the expected-object array, and
// any further marks must not.
class RefCountedGarbageCollectedVisitor : public CountingVisitor {
public:
RefCountedGarbageCollectedVisitor(ThreadState* state, int expected, void** objects)
: CountingVisitor(state)
, m_count(0)
, m_expectedCount(expected)
, m_expectedObjects(objects)
{
}
void mark(const void* ptr) { markNoTrace(ptr); }
virtual void markNoTrace(const void* ptr)
{
if (!ptr)
return;
if (m_count < m_expectedCount)
EXPECT_TRUE(expectedObject(ptr));
else
EXPECT_FALSE(expectedObject(ptr));
m_count++;
}
void mark(const void* ptr, TraceCallback) override
{
mark(ptr);
}
void markHeader(HeapObjectHeader* header, TraceCallback callback) override
{
mark(header->payload());
}
bool validate() { return m_count >= m_expectedCount; }
void reset() { m_count = 0; }
private:
// Linear scan is fine: the expected set is tiny in these tests.
bool expectedObject(const void* ptr)
{
for (int i = 0; i < m_expectedCount; i++) {
if (m_expectedObjects[i] == ptr)
return true;
}
return false;
}
int m_count;
int m_expectedCount;
void** m_expectedObjects;
};
#undef DEFINE_VISITOR_METHODS
// Bar subclass demonstrating manual weak processing: the weak pointer is a
// raw Bar* that is zapped by a registered weak callback instead of being a
// WeakMember (contrast with WithWeakMember below).
class Weak : public Bar {
public:
static Weak* create(Bar* strong, Bar* weak)
{
return new Weak(strong, weak);
}
DEFINE_INLINE_VIRTUAL_TRACE()
{
visitor->trace(m_strongBar);
visitor->template registerWeakMembers<Weak, &Weak::zapWeakMembers>(this);
}
// Weak callback: clears the raw pointer if its target died in this GC.
void zapWeakMembers(Visitor* visitor)
{
if (!ThreadHeap::isHeapObjectAlive(m_weakBar))
m_weakBar = 0;
}
bool strongIsThere() { return !!m_strongBar; }
bool weakIsThere() { return !!m_weakBar; }
private:
Weak(Bar* strongBar, Bar* weakBar)
: Bar()
, m_strongBar(strongBar)
, m_weakBar(weakBar)
{
}
Member<Bar> m_strongBar;
Bar* m_weakBar;
};
WILL_NOT_BE_EAGERLY_TRACED_CLASS(Weak);
// Same shape as Weak above, but using the declarative WeakMember<> type so
// weak clearing is handled by tracing the member rather than a callback.
class WithWeakMember : public Bar {
public:
static WithWeakMember* create(Bar* strong, Bar* weak)
{
return new WithWeakMember(strong, weak);
}
DEFINE_INLINE_VIRTUAL_TRACE()
{
visitor->trace(m_strongBar);
visitor->trace(m_weakBar);
}
bool strongIsThere() { return !!m_strongBar; }
bool weakIsThere() { return !!m_weakBar; }
private:
WithWeakMember(Bar* strongBar, Bar* weakBar)
: Bar()
, m_strongBar(strongBar)
, m_weakBar(weakBar)
{
}
Member<Bar> m_strongBar;
WeakMember<Bar> m_weakBar;
};
WILL_NOT_BE_EAGERLY_TRACED_CLASS(WithWeakMember);
// Finalized object whose pre-finalizer (willFinalize) runs before its own
// destructor and before its Bar member is finalized, and may touch heap
// objects. NOTE(review): unlike ObservableWithPreFinalizer below, the
// constructor does not call registerPreFinalizer; presumably registration
// happens at the use site or via FinalizationObserver — confirm at callers.
class Observable : public GarbageCollectedFinalized<Observable> {
USING_PRE_FINALIZER(Observable, willFinalize);
public:
static Observable* create(Bar* bar) { return new Observable(bar); }
~Observable() { m_wasDestructed = true; }
DEFINE_INLINE_TRACE() { visitor->trace(m_bar); }
// willFinalize is called by FinalizationObserver. willFinalize can touch
// other on-heap objects.
void willFinalize()
{
EXPECT_FALSE(m_wasDestructed);
EXPECT_FALSE(m_bar->hasBeenFinalized());
s_willFinalizeWasCalled = true;
}
static bool s_willFinalizeWasCalled;
private:
explicit Observable(Bar* bar)
: m_bar(bar)
, m_wasDestructed(false)
{
}
Member<Bar> m_bar;
bool m_wasDestructed;
};
bool Observable::s_willFinalizeWasCalled = false;
// Object that registers a pre-finalizer (dispose) in its constructor and
// unregisters it when dispose runs; dispose must see the object before its
// destructor has executed.
class ObservableWithPreFinalizer : public GarbageCollectedFinalized<ObservableWithPreFinalizer> {
USING_PRE_FINALIZER(ObservableWithPreFinalizer, dispose);
public:
static ObservableWithPreFinalizer* create() { return new ObservableWithPreFinalizer(); }
~ObservableWithPreFinalizer() { m_wasDestructed = true; }
DEFINE_INLINE_TRACE() { }
void dispose()
{
ThreadState::current()->unregisterPreFinalizer(this);
EXPECT_FALSE(m_wasDestructed);
s_disposeWasCalled = true;
}
static bool s_disposeWasCalled;
protected:
ObservableWithPreFinalizer()
: m_wasDestructed(false)
{
ThreadState::current()->registerPreFinalizer(this);
}
bool m_wasDestructed;
};
bool ObservableWithPreFinalizer::s_disposeWasCalled = false;
// Flags recording which of the PreFinalizer* classes below have had their
// pre-finalizer (dispose) run. The assertions in each dispose() check the
// expected invocation order: subclass, then mixin, then base.
bool s_disposeWasCalledForPreFinalizerBase = false;
bool s_disposeWasCalledForPreFinalizerMixin = false;
bool s_disposeWasCalledForPreFinalizerSubClass = false;
// Base class of a three-way pre-finalizer hierarchy (base / mixin /
// subclass). Its dispose() asserts that it runs last, after both the
// subclass's and the mixin's pre-finalizers.
class PreFinalizerBase : public GarbageCollectedFinalized<PreFinalizerBase> {
USING_PRE_FINALIZER(PreFinalizerBase, dispose);
public:
static PreFinalizerBase* create() { return new PreFinalizerBase(); }
virtual ~PreFinalizerBase() { m_wasDestructed = true; }
DEFINE_INLINE_VIRTUAL_TRACE() { }
void dispose()
{
EXPECT_FALSE(s_disposeWasCalledForPreFinalizerBase);
EXPECT_TRUE(s_disposeWasCalledForPreFinalizerSubClass);
EXPECT_TRUE(s_disposeWasCalledForPreFinalizerMixin);
EXPECT_FALSE(m_wasDestructed);
s_disposeWasCalledForPreFinalizerBase = true;
}
protected:
PreFinalizerBase()
: m_wasDestructed(false)
{
ThreadState::current()->registerPreFinalizer(this);
}
bool m_wasDestructed;
};
// Mixin participant of the pre-finalizer ordering test. Its dispose()
// asserts that it runs after the subclass's but before the base's.
class PreFinalizerMixin : public GarbageCollectedMixin {
USING_PRE_FINALIZER(PreFinalizerMixin, dispose);
public:
~PreFinalizerMixin() { m_wasDestructed = true; }
DEFINE_INLINE_VIRTUAL_TRACE() { }
void dispose()
{
EXPECT_FALSE(s_disposeWasCalledForPreFinalizerBase);
EXPECT_TRUE(s_disposeWasCalledForPreFinalizerSubClass);
EXPECT_FALSE(s_disposeWasCalledForPreFinalizerMixin);
EXPECT_FALSE(m_wasDestructed);
s_disposeWasCalledForPreFinalizerMixin = true;
}
protected:
PreFinalizerMixin()
: m_wasDestructed(false)
{
ThreadState::current()->registerPreFinalizer(this);
}
bool m_wasDestructed;
};
// Most-derived participant of the pre-finalizer ordering test. Its
// dispose() asserts that it runs first, before the mixin's and the base's.
class PreFinalizerSubClass : public PreFinalizerBase, public PreFinalizerMixin {
USING_GARBAGE_COLLECTED_MIXIN(PreFinalizerSubClass);
USING_PRE_FINALIZER(PreFinalizerSubClass, dispose);
public:
static PreFinalizerSubClass* create() { return new PreFinalizerSubClass(); }
~PreFinalizerSubClass() { m_wasDestructed = true; }
DEFINE_INLINE_VIRTUAL_TRACE() { }
void dispose()
{
EXPECT_FALSE(s_disposeWasCalledForPreFinalizerBase);
EXPECT_FALSE(s_disposeWasCalledForPreFinalizerSubClass);
EXPECT_FALSE(s_disposeWasCalledForPreFinalizerMixin);
EXPECT_FALSE(m_wasDestructed);
s_disposeWasCalledForPreFinalizerSubClass = true;
}
protected:
PreFinalizerSubClass()
: m_wasDestructed(false)
{
// Registers a third pre-finalizer on top of the ones registered by
// the PreFinalizerBase and PreFinalizerMixin constructors.
ThreadState::current()->registerPreFinalizer(this);
}
// NOTE: shadows m_wasDestructed in both PreFinalizerBase and
// PreFinalizerMixin; each dispose() checks its own copy.
bool m_wasDestructed;
};
// Observes a garbage-collected object of type T through a WeakMember and,
// via a registered weak callback (zapWeakMembers), calls T::willFinalize()
// when the observed object is found dead during marking — i.e. before the
// object's destructor runs.
template <typename T> class FinalizationObserver : public GarbageCollected<FinalizationObserver<T>> {
public:
static FinalizationObserver* create(T* data) { return new FinalizationObserver(data); }
bool didCallWillFinalize() const { return m_didCallWillFinalize; }
DEFINE_INLINE_TRACE()
{
// Register a custom weak callback instead of tracing m_data strongly.
visitor->template registerWeakMembers<FinalizationObserver<T>, &FinalizationObserver<T>::zapWeakMembers>(this);
}
// Weak callback: if the observed object did not survive the GC, notify it
// and clear the weak reference. The visitor parameter is unused here.
void zapWeakMembers(Visitor* visitor)
{
if (m_data && !ThreadHeap::isHeapObjectAlive(m_data)) {
m_data->willFinalize();
m_data = nullptr;
m_didCallWillFinalize = true;
}
}
private:
// explicit: a single-argument constructor must not act as an implicit
// T* -> FinalizationObserver<T> conversion (Google C++ style).
explicit FinalizationObserver(T* data)
: m_data(data)
, m_didCallWillFinalize(false)
{
}
WeakMember<T> m_data;
bool m_didCallWillFinalize;
};
// Off-heap observer variant: a HeapHashMap keyed by WeakMember<Observable>
// owns one FinalizationObserverWithHashMap per observed object. When the
// Observable dies, the weak map entry is removed, the unique_ptr destroys
// the observer, and its destructor notifies the target.
class FinalizationObserverWithHashMap {
public:
typedef HeapHashMap<WeakMember<Observable>, std::unique_ptr<FinalizationObserverWithHashMap>> ObserverMap;
explicit FinalizationObserverWithHashMap(Observable& target) : m_target(target) { }
~FinalizationObserverWithHashMap()
{
// Runs when the weakly-keyed map entry is dropped; the target is
// being swept but has not been destructed yet (see willFinalize).
m_target.willFinalize();
s_didCallWillFinalize = true;
}
// Registers (at most one) observer for |target| and returns the map.
static ObserverMap& observe(Observable& target)
{
ObserverMap& map = observers();
ObserverMap::AddResult result = map.add(&target, nullptr);
if (result.isNewEntry)
result.storedValue->value = wrapUnique(new FinalizationObserverWithHashMap(target));
else
ASSERT(result.storedValue->value);
return map;
}
// Drops the persistent handle so the map itself can be collected.
static void clearObservers()
{
delete s_observerMap;
s_observerMap = nullptr;
}
static bool s_didCallWillFinalize;
private:
// Lazily creates the map; held via a heap-allocated Persistent so the
// map stays alive across GCs until clearObservers() is called.
static ObserverMap& observers()
{
if (!s_observerMap)
s_observerMap = new Persistent<ObserverMap>(new ObserverMap());
return **s_observerMap;
}
Observable& m_target;
static Persistent<ObserverMap>* s_observerMap;
};
bool FinalizationObserverWithHashMap::s_didCallWillFinalize = false;
Persistent<FinalizationObserverWithHashMap::ObserverMap>* FinalizationObserverWithHashMap::s_observerMap;
class SuperClass;
// Holds a weak back-pointer to a SuperClass; counts live instances so tests
// can assert on construction/destruction balance.
class PointsBack : public GarbageCollectedFinalized<PointsBack> {
public:
static PointsBack* create()
{
return new PointsBack;
}
~PointsBack()
{
--s_aliveCount;
}
void setBackPointer(SuperClass* backPointer)
{
m_backPointer = backPointer;
}
SuperClass* backPointer() const { return m_backPointer; }
DEFINE_INLINE_TRACE()
{
visitor->trace(m_backPointer);
}
static int s_aliveCount;
private:
PointsBack() : m_backPointer(nullptr)
{
++s_aliveCount;
}
// Weak: cleared by the GC when the SuperClass dies.
WeakMember<SuperClass> m_backPointer;
};
int PointsBack::s_aliveCount = 0;
// Polymorphic base used by the Transition test; wires itself into the
// PointsBack object it is given and counts live instances.
class SuperClass : public GarbageCollectedFinalized<SuperClass> {
public:
static SuperClass* create(PointsBack* pointsBack)
{
return new SuperClass(pointsBack);
}
virtual ~SuperClass()
{
--s_aliveCount;
}
// Runs a conservative GC while |target| (and this) are reachable from the
// stack, then checks that both survived.
void doStuff(SuperClass* target, PointsBack* pointsBack, int superClassCount)
{
conservativelyCollectGarbage();
EXPECT_EQ(pointsBack, target->getPointsBack());
EXPECT_EQ(superClassCount, SuperClass::s_aliveCount);
}
DEFINE_INLINE_VIRTUAL_TRACE()
{
visitor->trace(m_pointsBack);
}
PointsBack* getPointsBack() const { return m_pointsBack.get(); }
static int s_aliveCount;
protected:
explicit SuperClass(PointsBack* pointsBack)
: m_pointsBack(pointsBack)
{
// Establish the weak back-edge PointsBack -> this.
m_pointsBack->setBackPointer(this);
++s_aliveCount;
}
private:
Member<PointsBack> m_pointsBack;
};
int SuperClass::s_aliveCount = 0;
// Trivial finalized object owned by SubClass; only tracks its live count.
class SubData : public GarbageCollectedFinalized<SubData> {
public:
SubData() { ++s_aliveCount; }
~SubData() { --s_aliveCount; }
DEFINE_INLINE_TRACE() { }
static int s_aliveCount;
};
int SubData::s_aliveCount = 0;
// Derived class used by the Transition test; additionally owns a SubData
// and keeps its own live-instance count alongside SuperClass's.
class SubClass : public SuperClass {
public:
static SubClass* create(PointsBack* pointsBack)
{
return new SubClass(pointsBack);
}
~SubClass() override
{
--s_aliveCount;
}
DEFINE_INLINE_VIRTUAL_TRACE()
{
visitor->trace(m_data);
// Chain to the base class so m_pointsBack is traced as well.
SuperClass::trace(visitor);
}
static int s_aliveCount;
private:
explicit SubClass(PointsBack* pointsBack)
: SuperClass(pointsBack)
, m_data(new SubData)
{
++s_aliveCount;
}
Member<SubData> m_data;
};
int SubClass::s_aliveCount = 0;
// GC mixin with padding, so a UseMixin instance's Mixin subobject sits at a
// non-zero offset from the object base (interior-pointer handling).
class Mixin : public GarbageCollectedMixin {
public:
DEFINE_INLINE_VIRTUAL_TRACE() { }
virtual char getPayload(int i) { return m_padding[i]; }
protected:
int m_padding[8];
};
// Multiply-inherits a GC object (SimpleObject) and a GC mixin; counts trace
// calls so tests can verify the object was traced.
class UseMixin : public SimpleObject, public Mixin {
USING_GARBAGE_COLLECTED_MIXIN(UseMixin)
public:
static UseMixin* create()
{
return new UseMixin();
}
static int s_traceCount;
DEFINE_INLINE_VIRTUAL_TRACE()
{
// Trace both bases; neither dominates the other in this diamond-free
// multiple-inheritance setup.
SimpleObject::trace(visitor);
Mixin::trace(visitor);
++s_traceCount;
}
private:
UseMixin()
{
// Verify that WTF::IsGarbageCollectedType<> works as expected for mixins.
static_assert(WTF::IsGarbageCollectedType<UseMixin>::value, "IsGarbageCollectedType<> sanity check failed for GC mixin.");
s_traceCount = 0;
}
};
int UseMixin::s_traceCount = 0;
// Inline-allocated (non-heap) part object with a traced Member, for use as
// a HeapVector element type.
class VectorObject {
DISALLOW_NEW_EXCEPT_PLACEMENT_NEW();
public:
VectorObject()
{
m_value = SimpleFinalizedObject::create();
}
DEFINE_INLINE_TRACE()
{
visitor->trace(m_value);
}
private:
Member<SimpleFinalizedObject> m_value;
};
// Variant whose trace() is inherited rather than declared on the class
// itself.
class VectorObjectInheritedTrace : public VectorObject { };
// Like VectorObject but deliberately without a trace() method; used to test
// vector element types that are not traced.
class VectorObjectNoTrace {
DISALLOW_NEW_EXCEPT_PLACEMENT_NEW();
public:
VectorObjectNoTrace()
{
m_value = SimpleFinalizedObject::create();
}
private:
Member<SimpleFinalizedObject> m_value;
};
// Element type for HeapTerminatedArray tests: carries a traced payload plus
// the sentinel flag that marks the final element of the array.
class TerminatedArrayItem {
DISALLOW_NEW_EXCEPT_PLACEMENT_NEW();
public:
// NOTE(review): intentionally non-explicit, presumably so an IntWrapper*
// converts implicitly when building terminated arrays — confirm callers.
TerminatedArrayItem(IntWrapper* payload) : m_payload(payload), m_isLast(false) { }
DEFINE_INLINE_TRACE() { visitor->trace(m_payload); }
bool isLastInArray() const { return m_isLast; }
void setLastInArray(bool value) { m_isLast = value; }
IntWrapper* payload() const { return m_payload; }
private:
Member<IntWrapper> m_payload;
bool m_isLast;
};
} // namespace blink
// These trait annotations are applied outside the blink namespace; they
// allow WTF collections to move/compare these types with mem functions.
WTF_ALLOW_MOVE_INIT_AND_COMPARE_WITH_MEM_FUNCTIONS(blink::VectorObject);
WTF_ALLOW_MOVE_INIT_AND_COMPARE_WITH_MEM_FUNCTIONS(blink::VectorObjectInheritedTrace);
WTF_ALLOW_MOVE_INIT_AND_COMPARE_WITH_MEM_FUNCTIONS(blink::VectorObjectNoTrace);
namespace blink {
// Finalized object with a 1KB payload; allocated in bulk by tests that need
// to fill heap pages quickly, counting destructor invocations.
class OneKiloByteObject : public GarbageCollectedFinalized<OneKiloByteObject> {
public:
~OneKiloByteObject() { s_destructorCalls++; }
char* data() { return m_data; }
DEFINE_INLINE_TRACE() { }
static int s_destructorCalls;
private:
static const size_t s_length = 1024;
char m_data[s_length];
};
int OneKiloByteObject::s_destructorCalls = 0;
// GC object whose allocation size is chosen at runtime: raw heap storage is
// obtained via ThreadHeap::allocate and the object is placement-new'ed into
// it. Lets tests allocate payloads of arbitrary byte sizes.
class DynamicallySizedObject : public GarbageCollected<DynamicallySizedObject> {
public:
static DynamicallySizedObject* create(size_t size)
{
void* slot = ThreadHeap::allocate<DynamicallySizedObject>(size);
return new (slot) DynamicallySizedObject();
}
// Placement new into the pre-allocated GC slot.
void* operator new(std::size_t, void* location)
{
return location;
}
// Reads the i-th byte of the object's storage (for poison-pattern checks).
uint8_t get(int i)
{
return *(reinterpret_cast<uint8_t*>(this) + i);
}
DEFINE_INLINE_TRACE() { }
private:
DynamicallySizedObject() { }
};
// Object whose destructor itself allocates heavily on the GC heap (normal
// objects and large objects); used to test allocation during sweeping.
class FinalizationAllocator : public GarbageCollectedFinalized<FinalizationAllocator> {
public:
FinalizationAllocator(Persistent<IntWrapper>* wrapper)
: m_wrapper(wrapper)
{
}
~FinalizationAllocator()
{
for (int i = 0; i < 10; ++i)
*m_wrapper = IntWrapper::create(42);
// 512 KB of normal-page objects plus 32 large objects.
for (int i = 0; i < 512; ++i)
new OneKiloByteObject();
for (int i = 0; i < 32; ++i)
LargeHeapObject::create();
}
DEFINE_INLINE_TRACE() { }
private:
Persistent<IntWrapper>* m_wrapper;
};
// Like FinalizationAllocator, but the heavy allocation happens in the
// pre-finalizer (dispose) instead of the destructor.
class PreFinalizationAllocator : public GarbageCollectedFinalized<PreFinalizationAllocator> {
USING_PRE_FINALIZER(PreFinalizationAllocator, dispose);
public:
PreFinalizationAllocator(Persistent<IntWrapper>* wrapper)
: m_wrapper(wrapper)
{
ThreadState::current()->registerPreFinalizer(this);
}
void dispose()
{
for (int i = 0; i < 10; ++i)
*m_wrapper = IntWrapper::create(42);
// 512 KB of normal-page objects plus 32 large objects.
for (int i = 0; i < 512; ++i)
new OneKiloByteObject();
for (int i = 0; i < 32; ++i)
LargeHeapObject::create();
}
DEFINE_INLINE_TRACE() { }
private:
Persistent<IntWrapper>* m_wrapper;
};
// Walks the PointsBack / SuperClass / SubClass object graph through a
// sequence of GCs, releasing persistents one at a time and checking the
// live-instance counters and weak back-pointers after each collection.
// Statement order and scoping matter: releases are paired with precise GCs.
TEST(HeapTest, Transition)
{
{
RefCountedAndGarbageCollected::s_destructorCalls = 0;
Persistent<RefCountedAndGarbageCollected> refCounted = RefCountedAndGarbageCollected::create();
preciselyCollectGarbage();
// Still held by the persistent above, so not finalized yet.
EXPECT_EQ(0, RefCountedAndGarbageCollected::s_destructorCalls);
}
preciselyCollectGarbage();
EXPECT_EQ(1, RefCountedAndGarbageCollected::s_destructorCalls);
RefCountedAndGarbageCollected::s_destructorCalls = 0;
Persistent<PointsBack> pointsBack1 = PointsBack::create();
Persistent<PointsBack> pointsBack2 = PointsBack::create();
Persistent<SuperClass> superClass = SuperClass::create(pointsBack1);
Persistent<SubClass> subClass = SubClass::create(pointsBack2);
EXPECT_EQ(2, PointsBack::s_aliveCount);
EXPECT_EQ(2, SuperClass::s_aliveCount);
EXPECT_EQ(1, SubClass::s_aliveCount);
EXPECT_EQ(1, SubData::s_aliveCount);
preciselyCollectGarbage();
EXPECT_EQ(0, RefCountedAndGarbageCollected::s_destructorCalls);
EXPECT_EQ(2, PointsBack::s_aliveCount);
EXPECT_EQ(2, SuperClass::s_aliveCount);
EXPECT_EQ(1, SubClass::s_aliveCount);
EXPECT_EQ(1, SubData::s_aliveCount);
// release() clears the persistent; the raw pointer passed to doStuff keeps
// the object reachable from the stack for the conservative GC inside.
superClass->doStuff(superClass.release(), pointsBack1.get(), 2);
preciselyCollectGarbage();
EXPECT_EQ(2, PointsBack::s_aliveCount);
EXPECT_EQ(1, SuperClass::s_aliveCount);
EXPECT_EQ(1, SubClass::s_aliveCount);
EXPECT_EQ(1, SubData::s_aliveCount);
// The weak back-pointer must have been cleared when SuperClass died.
EXPECT_EQ(0, pointsBack1->backPointer());
pointsBack1.release();
preciselyCollectGarbage();
EXPECT_EQ(1, PointsBack::s_aliveCount);
EXPECT_EQ(1, SuperClass::s_aliveCount);
EXPECT_EQ(1, SubClass::s_aliveCount);
EXPECT_EQ(1, SubData::s_aliveCount);
subClass->doStuff(subClass.release(), pointsBack2.get(), 1);
preciselyCollectGarbage();
EXPECT_EQ(1, PointsBack::s_aliveCount);
EXPECT_EQ(0, SuperClass::s_aliveCount);
EXPECT_EQ(0, SubClass::s_aliveCount);
EXPECT_EQ(0, SubData::s_aliveCount);
EXPECT_EQ(0, pointsBack2->backPointer());
pointsBack2.release();
preciselyCollectGarbage();
EXPECT_EQ(0, PointsBack::s_aliveCount);
EXPECT_EQ(0, SuperClass::s_aliveCount);
EXPECT_EQ(0, SubClass::s_aliveCount);
EXPECT_EQ(0, SubData::s_aliveCount);
// Both persistents were released above, so both are now null.
EXPECT_TRUE(superClass == subClass);
}
// Delegates to the multi-threaded heap test harness defined elsewhere in
// this file.
TEST(HeapTest, Threading)
{
ThreadedHeapTester::test();
}
// Delegates to the cross-thread weak-reference test harness.
TEST(HeapTest, ThreadedWeakness)
{
ThreadedWeaknessTester::test();
}
// Delegates to the per-thread persistent-handle test harness.
TEST(HeapTest, ThreadPersistent)
{
ThreadPersistentHeapTester::test();
}
// Smoke test of the allocator: payload-size accounting (with per-object
// header "slack"), page-aligned space accounting, survival across a
// conservative GC, persistent-rooted allocations, and reallocate semantics.
// The exact allocation order and stack placement are load-bearing here.
TEST(HeapTest, BasicFunctionality)
{
ThreadHeap& heap = ThreadState::current()->heap();
clearOutOldGarbage();
size_t initialObjectPayloadSize = heap.objectPayloadSizeForTesting();
{
size_t slack = 0;
// When the test starts there may already have been leaked some memory
// on the heap, so we establish a base line.
size_t baseLevel = initialObjectPayloadSize;
bool testPagesAllocated = !baseLevel;
if (testPagesAllocated)
EXPECT_EQ(heap.heapStats().allocatedSpace(), 0ul);
// This allocates objects on the general heap which should add a page of memory.
DynamicallySizedObject* alloc32 = DynamicallySizedObject::create(32);
slack += 4;
memset(alloc32, 40, 32);
DynamicallySizedObject* alloc64 = DynamicallySizedObject::create(64);
slack += 4;
memset(alloc64, 27, 64);
size_t total = 96;
CheckWithSlack(baseLevel + total, heap.objectPayloadSizeForTesting(), slack);
if (testPagesAllocated)
EXPECT_EQ(heap.heapStats().allocatedSpace(), blinkPageSize * 2);
EXPECT_EQ(alloc32->get(0), 40);
EXPECT_EQ(alloc32->get(31), 40);
EXPECT_EQ(alloc64->get(0), 27);
EXPECT_EQ(alloc64->get(63), 27);
// The raw pointers above are on the stack, so a conservative GC must
// keep both objects (and their contents) alive.
conservativelyCollectGarbage();
EXPECT_EQ(alloc32->get(0), 40);
EXPECT_EQ(alloc32->get(31), 40);
EXPECT_EQ(alloc64->get(0), 27);
EXPECT_EQ(alloc64->get(63), 27);
}
clearOutOldGarbage();
size_t total = 0;
size_t slack = 0;
size_t baseLevel = heap.objectPayloadSizeForTesting();
bool testPagesAllocated = !baseLevel;
if (testPagesAllocated)
EXPECT_EQ(heap.heapStats().allocatedSpace(), 0ul);
size_t big = 1008;
Persistent<DynamicallySizedObject> bigArea = DynamicallySizedObject::create(big);
total += big;
slack += 4;
size_t persistentCount = 0;
const size_t numPersistents = 100000;
Persistent<DynamicallySizedObject>* persistents[numPersistents];
// Root 1000 growing allocations via heap-allocated persistents so none
// can be collected while we check the accounting after each one.
for (int i = 0; i < 1000; i++) {
size_t size = 128 + i * 8;
total += size;
persistents[persistentCount++] = new Persistent<DynamicallySizedObject>(DynamicallySizedObject::create(size));
slack += 4;
CheckWithSlack(baseLevel + total, heap.objectPayloadSizeForTesting(), slack);
if (testPagesAllocated)
EXPECT_EQ(0ul, heap.heapStats().allocatedSpace() & (blinkPageSize - 1));
}
{
DynamicallySizedObject* alloc32b(DynamicallySizedObject::create(32));
slack += 4;
memset(alloc32b, 40, 32);
DynamicallySizedObject* alloc64b(DynamicallySizedObject::create(64));
slack += 4;
memset(alloc64b, 27, 64);
EXPECT_TRUE(alloc32b != alloc64b);
total += 96;
CheckWithSlack(baseLevel + total, heap.objectPayloadSizeForTesting(), slack);
if (testPagesAllocated)
EXPECT_EQ(0ul, heap.heapStats().allocatedSpace() & (blinkPageSize - 1));
}
clearOutOldGarbage();
// The two unrooted objects from the inner scope are gone now.
total -= 96;
slack -= 8;
if (testPagesAllocated)
EXPECT_EQ(0ul, heap.heapStats().allocatedSpace() & (blinkPageSize - 1));
// Clear the persistent, so that the big area will be garbage collected.
bigArea.release();
clearOutOldGarbage();
total -= big;
slack -= 4;
CheckWithSlack(baseLevel + total, heap.objectPayloadSizeForTesting(), slack);
if (testPagesAllocated)
EXPECT_EQ(0ul, heap.heapStats().allocatedSpace() & (blinkPageSize - 1));
CheckWithSlack(baseLevel + total, heap.objectPayloadSizeForTesting(), slack);
if (testPagesAllocated)
EXPECT_EQ(0ul, heap.heapStats().allocatedSpace() & (blinkPageSize - 1));
for (size_t i = 0; i < persistentCount; i++) {
delete persistents[i];
persistents[i] = 0;
}
// Exercise reallocate: grow (contents preserved), shrink (prefix
// preserved), then free via size 0.
uint8_t* address = reinterpret_cast<uint8_t*>(ThreadHeap::allocate<DynamicallySizedObject>(100));
for (int i = 0; i < 100; i++)
address[i] = i;
address = reinterpret_cast<uint8_t*>(ThreadHeap::reallocate<DynamicallySizedObject>(address, 100000));
for (int i = 0; i < 100; i++)
EXPECT_EQ(address[i], i);
address = reinterpret_cast<uint8_t*>(ThreadHeap::reallocate<DynamicallySizedObject>(address, 50));
for (int i = 0; i < 50; i++)
EXPECT_EQ(address[i], i);
// This should be equivalent to free(address).
EXPECT_EQ(reinterpret_cast<uintptr_t>(ThreadHeap::reallocate<DynamicallySizedObject>(address, 0)), 0ul);
// This should be equivalent to malloc(0).
EXPECT_EQ(reinterpret_cast<uintptr_t>(ThreadHeap::reallocate<DynamicallySizedObject>(0, 0)), 0ul);
}
// Allocates one object on an empty heap and checks both the payload-size
// accounting and the object's constructor-written contents.
TEST(HeapTest, SimpleAllocation)
{
ThreadHeap& heap = ThreadState::current()->heap();
clearOutOldGarbage();
EXPECT_EQ(0ul, heap.objectPayloadSizeForTesting());
// Allocate an object in the heap.
HeapAllocatedArray* array = new HeapAllocatedArray();
EXPECT_TRUE(heap.objectPayloadSizeForTesting() >= sizeof(HeapAllocatedArray));
// Sanity check of the contents in the heap.
EXPECT_EQ(0, array->at(0));
EXPECT_EQ(42, array->at(42));
EXPECT_EQ(0, array->at(128));
EXPECT_EQ(999 % 128, array->at(999));
}
// Persistent-rooted objects must be traced on every GC: trace counts grow
// by one per collection, including through a Member edge (ClassWithMember).
TEST(HeapTest, SimplePersistent)
{
Persistent<TraceCounter> traceCounter = TraceCounter::create();
EXPECT_EQ(0, traceCounter->traceCount());
preciselyCollectGarbage();
EXPECT_EQ(1, traceCounter->traceCount());
Persistent<ClassWithMember> classWithMember = ClassWithMember::create();
EXPECT_EQ(0, classWithMember->traceCount());
preciselyCollectGarbage();
EXPECT_EQ(1, classWithMember->traceCount());
// traceCounter has now been traced by two GCs in total.
EXPECT_EQ(2, traceCounter->traceCount());
}
// A finalized object's destructor must run exactly once, and only after the
// persistent keeping it alive has gone out of scope.
TEST(HeapTest, SimpleFinalization)
{
{
Persistent<SimpleFinalizedObject> finalized = SimpleFinalizedObject::create();
EXPECT_EQ(0, SimpleFinalizedObject::s_destructorCalls);
preciselyCollectGarbage();
// Still rooted, so not finalized.
EXPECT_EQ(0, SimpleFinalizedObject::s_destructorCalls);
}
preciselyCollectGarbage();
EXPECT_EQ(1, SimpleFinalizedObject::s_destructorCalls);
}
// Only meaningful in builds where freed memory is deliberately quarantined
// for a GC cycle before reuse (debug/sanitizer builds).
#if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
TEST(HeapTest, FreelistReuse)
{
clearOutOldGarbage();
for (int i = 0; i < 100; i++)
new IntWrapper(i);
IntWrapper* p1 = new IntWrapper(100);
preciselyCollectGarbage();
// In non-production builds, we delay reusing freed memory for at least
// one GC cycle.
for (int i = 0; i < 100; i++) {
IntWrapper* p2 = new IntWrapper(i);
EXPECT_NE(p1, p2);
}
preciselyCollectGarbage();
preciselyCollectGarbage();
// Now the freed memory in the first GC should be reused.
bool reusedMemoryFound = false;
for (int i = 0; i < 10000; i++) {
IntWrapper* p2 = new IntWrapper(i);
if (p1 == p2) {
reusedMemoryFound = true;
break;
}
}
EXPECT_TRUE(reusedMemoryFound);
}
#endif
// With GCWithoutSweep, finalization is deferred: destructors only run as
// pages are lazily swept by later allocations, or on a full precise GC.
TEST(HeapTest, LazySweepingPages)
{
clearOutOldGarbage();
SimpleFinalizedObject::s_destructorCalls = 0;
EXPECT_EQ(0, SimpleFinalizedObject::s_destructorCalls);
for (int i = 0; i < 1000; i++)
SimpleFinalizedObject::create();
ThreadHeap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::ForcedGC);
// Nothing swept yet, so no destructors have run.
EXPECT_EQ(0, SimpleFinalizedObject::s_destructorCalls);
// Allocating triggers lazy sweeping of the dead pages.
for (int i = 0; i < 10000; i++)
SimpleFinalizedObject::create();
EXPECT_EQ(1000, SimpleFinalizedObject::s_destructorCalls);
preciselyCollectGarbage();
EXPECT_EQ(11000, SimpleFinalizedObject::s_destructorCalls);
}
// Same lazy-sweeping behavior as above, but for the large-object arena:
// each new large allocation sweeps (at most) one dead large-object page.
TEST(HeapTest, LazySweepingLargeObjectPages)
{
clearOutOldGarbage();
// Create free lists that can be reused for IntWrappers created in
// LargeHeapObject::create().
Persistent<IntWrapper> p1 = new IntWrapper(1);
for (int i = 0; i < 100; i++) {
new IntWrapper(i);
}
Persistent<IntWrapper> p2 = new IntWrapper(2);
preciselyCollectGarbage();
preciselyCollectGarbage();
LargeHeapObject::s_destructorCalls = 0;
EXPECT_EQ(0, LargeHeapObject::s_destructorCalls);
for (int i = 0; i < 10; i++)
LargeHeapObject::create();
ThreadHeap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::ForcedGC);
EXPECT_EQ(0, LargeHeapObject::s_destructorCalls);
// Each new large object lazily sweeps one dead one.
for (int i = 0; i < 10; i++) {
LargeHeapObject::create();
EXPECT_EQ(i + 1, LargeHeapObject::s_destructorCalls);
}
// All 10 dead objects already swept; further allocation sweeps nothing.
LargeHeapObject::create();
LargeHeapObject::create();
EXPECT_EQ(10, LargeHeapObject::s_destructorCalls);
ThreadHeap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::ForcedGC);
EXPECT_EQ(10, LargeHeapObject::s_destructorCalls);
preciselyCollectGarbage();
EXPECT_EQ(22, LargeHeapObject::s_destructorCalls);
}
// Base class annotated with EAGERLY_FINALIZE(): instances (and subclasses)
// are finalized during the GC itself rather than by lazy sweeping.
class SimpleFinalizedEagerObjectBase : public GarbageCollectedFinalized<SimpleFinalizedEagerObjectBase> {
public:
virtual ~SimpleFinalizedEagerObjectBase() { }
DEFINE_INLINE_TRACE() { }
EAGERLY_FINALIZE();
protected:
SimpleFinalizedEagerObjectBase() { }
};
// Concrete eagerly-finalized object; counts destructor invocations so the
// EagerlySweepingPages test can distinguish eager from lazy sweeping.
class SimpleFinalizedEagerObject : public SimpleFinalizedEagerObjectBase {
public:
static SimpleFinalizedEagerObject* create()
{
return new SimpleFinalizedEagerObject();
}
~SimpleFinalizedEagerObject() override
{
++s_destructorCalls;
}
static int s_destructorCalls;
private:
SimpleFinalizedEagerObject() { }
};
// Empty class template carrying EAGERLY_FINALIZE(); verifies that the
// annotation also takes effect when inherited from a template instantiation.
template<typename T>
class ParameterizedButEmpty {
public:
EAGERLY_FINALIZE();
};
// Gets its EAGERLY_FINALIZE() via the ParameterizedButEmpty base; counts
// destructor invocations for the EagerlySweepingPages test.
class SimpleFinalizedObjectInstanceOfTemplate final : public GarbageCollectedFinalized<SimpleFinalizedObjectInstanceOfTemplate>, public ParameterizedButEmpty<SimpleFinalizedObjectInstanceOfTemplate> {
public:
static SimpleFinalizedObjectInstanceOfTemplate* create()
{
return new SimpleFinalizedObjectInstanceOfTemplate();
}
~SimpleFinalizedObjectInstanceOfTemplate()
{
++s_destructorCalls;
}
DEFINE_INLINE_TRACE() { }
static int s_destructorCalls;
private:
SimpleFinalizedObjectInstanceOfTemplate() { }
};
int SimpleFinalizedEagerObject::s_destructorCalls = 0;
int SimpleFinalizedObjectInstanceOfTemplate::s_destructorCalls = 0;
// After a GC without sweeping, eagerly-finalized objects must already be
// destructed while ordinary finalized objects are still pending lazy sweep.
TEST(HeapTest, EagerlySweepingPages)
{
clearOutOldGarbage();
SimpleFinalizedObject::s_destructorCalls = 0;
SimpleFinalizedEagerObject::s_destructorCalls = 0;
SimpleFinalizedObjectInstanceOfTemplate::s_destructorCalls = 0;
EXPECT_EQ(0, SimpleFinalizedObject::s_destructorCalls);
EXPECT_EQ(0, SimpleFinalizedEagerObject::s_destructorCalls);
for (int i = 0; i < 1000; i++)
SimpleFinalizedObject::create();
for (int i = 0; i < 100; i++)
SimpleFinalizedEagerObject::create();
for (int i = 0; i < 100; i++)
SimpleFinalizedObjectInstanceOfTemplate::create();
ThreadHeap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::ForcedGC);
// Lazy objects: not yet swept. Eager objects: finalized during the GC.
EXPECT_EQ(0, SimpleFinalizedObject::s_destructorCalls);
EXPECT_EQ(100, SimpleFinalizedEagerObject::s_destructorCalls);
EXPECT_EQ(100, SimpleFinalizedObjectInstanceOfTemplate::s_destructorCalls);
}
// Destructors of a finalized class hierarchy run exactly once per object
// (subclass destruction also runs the superclass destructor), and a second
// GC must not re-run them.
TEST(HeapTest, Finalization)
{
{
HeapTestSubClass* t1 = HeapTestSubClass::create();
HeapTestSubClass* t2 = HeapTestSubClass::create();
HeapTestSuperClass* t3 = HeapTestSuperClass::create();
// FIXME(oilpan): Ignore unused variables.
(void)t1;
(void)t2;
(void)t3;
}
// Nothing is marked so the GC should free everything and call
// the finalizer on all three objects.
preciselyCollectGarbage();
EXPECT_EQ(2, HeapTestSubClass::s_destructorCalls);
// 3 = two subclass destructions chaining to the superclass, plus t3.
EXPECT_EQ(3, HeapTestSuperClass::s_destructorCalls);
// Destructors not called again when GCing again.
preciselyCollectGarbage();
EXPECT_EQ(2, HeapTestSubClass::s_destructorCalls);
EXPECT_EQ(3, HeapTestSuperClass::s_destructorCalls);
}
// Objects allocated in a typed arena must not share pages with objects from
// the general heap.
TEST(HeapTest, TypedArenaSanity)
{
// We use TraceCounter for allocating an object on the general heap.
Persistent<TraceCounter> generalHeapObject = TraceCounter::create();
Persistent<IntNode> typedHeapObject = IntNode::create(0);
EXPECT_NE(pageFromObject(generalHeapObject.get()),
pageFromObject(typedHeapObject.get()));
}
// NoAllocationScope must forbid heap allocation for exactly its lifetime.
TEST(HeapTest, NoAllocation)
{
ThreadState* state = ThreadState::current();
EXPECT_TRUE(state->isAllocationAllowed());
{
// Disallow allocation
ThreadState::NoAllocationScope noAllocationScope(state);
EXPECT_FALSE(state->isAllocationAllowed());
}
// Allowed again once the scope has ended.
EXPECT_TRUE(state->isAllocationAllowed());
}
// Member edges from persistent-rooted Baz objects keep Bars alive; clearing
// a Member or dropping the persistent lets the referent be collected.
TEST(HeapTest, Members)
{
Bar::s_live = 0;
{
Persistent<Baz> h1;
Persistent<Baz> h2;
{
h1 = Baz::create(Bar::create());
preciselyCollectGarbage();
EXPECT_EQ(1u, Bar::s_live);
h2 = Baz::create(Bar::create());
preciselyCollectGarbage();
EXPECT_EQ(2u, Bar::s_live);
}
preciselyCollectGarbage();
EXPECT_EQ(2u, Bar::s_live);
// Severing the Member edge makes the first Bar collectable.
h1->clear();
preciselyCollectGarbage();
EXPECT_EQ(1u, Bar::s_live);
}
preciselyCollectGarbage();
EXPECT_EQ(0u, Bar::s_live);
}
// Marking through a stack-held Foo keeps its Bar alive under conservative
// GC; once the Foo goes out of scope a precise GC reclaims it.
TEST(HeapTest, MarkTest)
{
{
Bar::s_live = 0;
Persistent<Bar> bar = Bar::create();
ASSERT(ThreadState::current()->findPageFromAddress(bar));
EXPECT_EQ(1u, Bar::s_live);
{
Foo* foo = Foo::create(bar);
ASSERT(ThreadState::current()->findPageFromAddress(foo));
EXPECT_EQ(2u, Bar::s_live);
EXPECT_TRUE(reinterpret_cast<Address>(foo) != reinterpret_cast<Address>(bar.get()));
conservativelyCollectGarbage();
EXPECT_TRUE(foo != bar); // To make sure foo is kept alive.
EXPECT_EQ(2u, Bar::s_live);
}
preciselyCollectGarbage();
EXPECT_EQ(1u, Bar::s_live);
}
preciselyCollectGarbage();
EXPECT_EQ(0u, Bar::s_live);
}
// Builds a 100,000-deep chain of Foo objects and verifies marking can walk
// it (i.e. the marker does not rely on bounded recursion depth).
TEST(HeapTest, DeepTest)
{
const unsigned depth = 100000;
Bar::s_live = 0;
{
Bar* bar = Bar::create();
ASSERT(ThreadState::current()->findPageFromAddress(bar));
Foo* foo = Foo::create(bar);
ASSERT(ThreadState::current()->findPageFromAddress(foo));
EXPECT_EQ(2u, Bar::s_live);
// Each new Foo points at the previous one, forming a linear chain
// rooted only at the stack-held |foo|.
for (unsigned i = 0; i < depth; i++) {
Foo* foo2 = Foo::create(foo);
foo = foo2;
ASSERT(ThreadState::current()->findPageFromAddress(foo));
}
EXPECT_EQ(depth + 2, Bar::s_live);
conservativelyCollectGarbage();
EXPECT_TRUE(foo != bar); // To make sure foo and bar are kept alive.
EXPECT_EQ(depth + 2, Bar::s_live);
}
preciselyCollectGarbage();
EXPECT_EQ(0u, Bar::s_live);
}
// Builds a wide object (Bars holds many Bar members) and verifies all of
// them survive a conservative GC while the container is stack-reachable.
TEST(HeapTest, WideTest)
{
Bar::s_live = 0;
{
Bars* bars = Bars::create();
unsigned width = Bars::width;
EXPECT_EQ(width + 1, Bar::s_live);
conservativelyCollectGarbage();
EXPECT_EQ(width + 1, Bar::s_live);
// Use bars here to make sure that it will be on the stack
// for the conservative stack scan to find.
EXPECT_EQ(width, bars->getWidth());
}
// No GC has run since the scope ended, so the count is unchanged here.
EXPECT_EQ(Bars::width + 1, Bar::s_live);
preciselyCollectGarbage();
EXPECT_EQ(0u, Bar::s_live);
}
// Exercises a HeapHashMap with Member keys and values: identity hashing of
// distinct wrappers, survival of entries across GCs, backing-store
// reallocation on growth, and full reclamation (including destructor
// counts) once the map's persistent goes away.
TEST(HeapTest, HashMapOfMembers)
{
ThreadHeap& heap = ThreadState::current()->heap();
IntWrapper::s_destructorCalls = 0;
clearOutOldGarbage();
size_t initialObjectPayloadSize = heap.objectPayloadSizeForTesting();
{
typedef HeapHashMap<
Member<IntWrapper>,
Member<IntWrapper>,
DefaultHash<Member<IntWrapper>>::Hash,
HashTraits<Member<IntWrapper>>,
HashTraits<Member<IntWrapper>>> HeapObjectIdentityMap;
Persistent<HeapObjectIdentityMap> map = new HeapObjectIdentityMap();
map->clear();
size_t afterSetWasCreated = heap.objectPayloadSizeForTesting();
EXPECT_TRUE(afterSetWasCreated > initialObjectPayloadSize);
preciselyCollectGarbage();
size_t afterGC = heap.objectPayloadSizeForTesting();
EXPECT_EQ(afterGC, afterSetWasCreated);
// If the additions below cause garbage collections, these
// pointers should be found by conservative stack scanning.
IntWrapper* one(IntWrapper::create(1));
IntWrapper* anotherOne(IntWrapper::create(1));
map->add(one, one);
size_t afterOneAdd = heap.objectPayloadSizeForTesting();
EXPECT_TRUE(afterOneAdd > afterGC);
HeapObjectIdentityMap::iterator it(map->begin());
HeapObjectIdentityMap::iterator it2(map->begin());
++it;
++it2;
map->add(anotherOne, one);
// The addition above can cause an allocation of a new
// backing store. We therefore garbage collect before
// taking the heap stats in order to get rid of the old
// backing store. We make sure to not use conservative
// stack scanning as that could find a pointer to the
// old backing.
preciselyCollectGarbage();
size_t afterAddAndGC = heap.objectPayloadSizeForTesting();
EXPECT_TRUE(afterAddAndGC >= afterOneAdd);
EXPECT_EQ(map->size(), 2u); // Two different wrappings of '1' are distinct.
preciselyCollectGarbage();
EXPECT_TRUE(map->contains(one));
EXPECT_TRUE(map->contains(anotherOne));
IntWrapper* gotten(map->get(one));
EXPECT_EQ(gotten->value(), one->value());
EXPECT_EQ(gotten, one);
size_t afterGC2 = heap.objectPayloadSizeForTesting();
EXPECT_EQ(afterGC2, afterAddAndGC);
IntWrapper* dozen = 0;
for (int i = 1; i < 1000; i++) { // 999 iterations.
IntWrapper* iWrapper(IntWrapper::create(i));
IntWrapper* iSquared(IntWrapper::create(i * i));
map->add(iWrapper, iSquared);
if (i == 12)
dozen = iWrapper;
}
size_t afterAdding1000 = heap.objectPayloadSizeForTesting();
EXPECT_TRUE(afterAdding1000 > afterGC2);
IntWrapper* gross(map->get(dozen));
EXPECT_EQ(gross->value(), 144);
// This should clear out any junk backings created by all the adds.
preciselyCollectGarbage();
size_t afterGC3 = heap.objectPayloadSizeForTesting();
EXPECT_TRUE(afterGC3 <= afterAdding1000);
}
preciselyCollectGarbage();
// The objects 'one', anotherOne, and the 999 other pairs.
EXPECT_EQ(IntWrapper::s_destructorCalls, 2000);
size_t afterGC4 = heap.objectPayloadSizeForTesting();
EXPECT_EQ(afterGC4, initialObjectPayloadSize);
}
// ConstructorAllocation allocates from within its own constructor; the
// heap accounting must return to baseline after everything is collected.
TEST(HeapTest, NestedAllocation)
{
ThreadHeap& heap = ThreadState::current()->heap();
clearOutOldGarbage();
size_t initialObjectPayloadSize = heap.objectPayloadSizeForTesting();
{
// Only created for its allocation side effects; dropped at scope end.
Persistent<ConstructorAllocation> constructorAllocation = ConstructorAllocation::create();
}
clearOutOldGarbage();
size_t afterFree = heap.objectPayloadSizeForTesting();
EXPECT_TRUE(initialObjectPayloadSize == afterFree);
}
// Exercises the large-object arena: payload/space accounting for a large
// allocation, read/write access across the whole payload, reuse of the
// large-object page when reassigning the persistent, and full reclamation.
TEST(HeapTest, LargeHeapObjects)
{
ThreadHeap& heap = ThreadState::current()->heap();
clearOutOldGarbage();
size_t initialObjectPayloadSize = heap.objectPayloadSizeForTesting();
size_t initialAllocatedSpace = heap.heapStats().allocatedSpace();
IntWrapper::s_destructorCalls = 0;
LargeHeapObject::s_destructorCalls = 0;
{
int slack = 8; // LargeHeapObject points to an IntWrapper that is also allocated.
Persistent<LargeHeapObject> object = LargeHeapObject::create();
// Both the first and the last byte of the object must be findable.
ASSERT(ThreadState::current()->findPageFromAddress(object));
ASSERT(ThreadState::current()->findPageFromAddress(reinterpret_cast<char*>(object.get()) + sizeof(LargeHeapObject) - 1));
clearOutOldGarbage();
size_t afterAllocation = heap.heapStats().allocatedSpace();
{
object->set(0, 'a');
EXPECT_EQ('a', object->get(0));
object->set(object->length() - 1, 'b');
EXPECT_EQ('b', object->get(object->length() - 1));
size_t expectedLargeHeapObjectPayloadSize = ThreadHeap::allocationSizeFromSize(sizeof(LargeHeapObject));
size_t expectedObjectPayloadSize = expectedLargeHeapObjectPayloadSize + sizeof(IntWrapper);
size_t actualObjectPayloadSize = heap.objectPayloadSizeForTesting() - initialObjectPayloadSize;
CheckWithSlack(expectedObjectPayloadSize, actualObjectPayloadSize, slack);
// There is probably space for the IntWrapper in a heap page without
// allocating extra pages. However, the IntWrapper allocation might cause
// the addition of a heap page.
size_t largeObjectAllocationSize = sizeof(LargeObjectPage) + expectedLargeHeapObjectPayloadSize;
size_t allocatedSpaceLowerBound = initialAllocatedSpace + largeObjectAllocationSize;
size_t allocatedSpaceUpperBound = allocatedSpaceLowerBound + slack + blinkPageSize;
EXPECT_LE(allocatedSpaceLowerBound, afterAllocation);
EXPECT_LE(afterAllocation, allocatedSpaceUpperBound);
EXPECT_EQ(0, IntWrapper::s_destructorCalls);
EXPECT_EQ(0, LargeHeapObject::s_destructorCalls);
// Each reassignment drops the previous object, which can be
// reclaimed and its space reused for the next one.
for (int i = 0; i < 10; i++)
object = LargeHeapObject::create();
}
clearOutOldGarbage();
EXPECT_TRUE(heap.heapStats().allocatedSpace() == afterAllocation);
EXPECT_EQ(10, IntWrapper::s_destructorCalls);
EXPECT_EQ(10, LargeHeapObject::s_destructorCalls);
}
clearOutOldGarbage();
EXPECT_TRUE(initialObjectPayloadSize == heap.objectPayloadSizeForTesting());
EXPECT_TRUE(initialAllocatedSpace == heap.heapStats().allocatedSpace());
EXPECT_EQ(11, IntWrapper::s_destructorCalls);
EXPECT_EQ(11, LargeHeapObject::s_destructorCalls);
preciselyCollectGarbage();
}
// Reserving a ~128MB backing store for a HeapHashMap must succeed and yield
// at least the requested capacity.
TEST(HeapTest, LargeHashMap)
{
clearOutOldGarbage();
size_t size = (1 << 27) / sizeof(int);
Persistent<HeapHashMap<int, Member<IntWrapper>>> map = new HeapHashMap<int, Member<IntWrapper>>();
map->reserveCapacityForSize(size);
EXPECT_LE(size, map->capacity());
}
// Constructing a ~128MB HeapVector must succeed and yield at least the
// requested capacity.
TEST(HeapTest, LargeVector)
{
clearOutOldGarbage();
size_t size = (1 << 27) / sizeof(int);
Persistent<HeapVector<int>> vector = new HeapVector<int>(size);
EXPECT_LE(size, vector->capacity());
}
// Shorthands for the pair combinations used by the collection tests below:
// traced/untraced and strong/weak halves in both orders.
typedef std::pair<Member<IntWrapper>, int> PairWrappedUnwrapped;
typedef std::pair<int, Member<IntWrapper>> PairUnwrappedWrapped;
typedef std::pair<WeakMember<IntWrapper>, Member<IntWrapper>> PairWeakStrong;
typedef std::pair<Member<IntWrapper>, WeakMember<IntWrapper>> PairStrongWeak;
typedef std::pair<WeakMember<IntWrapper>, int> PairWeakUnwrapped;
typedef std::pair<int, WeakMember<IntWrapper>> PairUnwrappedWeak;
// Garbage-collected object holding one instance of each heap collection
// flavor (map, sets, vectors, deques — with and without pair element types).
// Used by tests to verify that tracing a single object reaches the backing
// stores and elements of every collection kind.
class Container : public GarbageCollected<Container> {
public:
    static Container* create() { return new Container(); }
    HeapHashMap<Member<IntWrapper>, Member<IntWrapper>> map;
    HeapHashSet<Member<IntWrapper>> set;
    HeapHashSet<Member<IntWrapper>> set2;
    HeapHashCountedSet<Member<IntWrapper>> set3;
    HeapVector<Member<IntWrapper>, 2> vector;
    HeapVector<PairWrappedUnwrapped, 2> vectorWU;
    HeapVector<PairUnwrappedWrapped, 2> vectorUW;
    HeapDeque<Member<IntWrapper>, 0> deque;
    HeapDeque<PairWrappedUnwrapped, 0> dequeWU;
    HeapDeque<PairUnwrappedWrapped, 0> dequeUW;
    // Traces every member collection so their contents stay alive as long
    // as the Container itself is reachable.
    DEFINE_INLINE_TRACE()
    {
        visitor->trace(map);
        visitor->trace(set);
        visitor->trace(set2);
        visitor->trace(set3);
        visitor->trace(vector);
        visitor->trace(vectorWU);
        visitor->trace(vectorUW);
        visitor->trace(deque);
        visitor->trace(dequeWU);
        visitor->trace(dequeUW);
    }
};
// Minimal struct wrapping a traced Member field; used as a collection
// element type whose trait machinery must report that it needs tracing.
struct NeedsTracingTrait {
    explicit NeedsTracingTrait(IntWrapper* wrapper) : m_wrapper(wrapper) { }
    DEFINE_INLINE_TRACE() { visitor->trace(m_wrapper); }
    Member<IntWrapper> m_wrapper;
};
// Verifies the fill constructor: every slot of the vector holds the same
// wrapper that was passed in.
TEST(HeapTest, HeapVectorFilledWithValue)
{
    IntWrapper* filler = IntWrapper::create(1);
    HeapVector<Member<IntWrapper>> members(10, filler);
    EXPECT_EQ(10u, members.size());
    for (const auto& entry : members)
        EXPECT_EQ(filler, entry);
}
// Exercises HeapVector with inline capacity 2: growing past the inline
// buffer, shrinking back, and swapping vectors, each followed by a
// conservative GC that must keep reachable elements alive.
TEST(HeapTest, HeapVectorWithInlineCapacity)
{
    // Raw pointers on the stack: the conservative stack scan must keep
    // these wrappers alive across the collections below.
    IntWrapper* one = IntWrapper::create(1);
    IntWrapper* two = IntWrapper::create(2);
    IntWrapper* three = IntWrapper::create(3);
    IntWrapper* four = IntWrapper::create(4);
    IntWrapper* five = IntWrapper::create(5);
    IntWrapper* six = IntWrapper::create(6);
    {
        // Fill the inline buffer, spill to an out-of-line backing, then
        // shrink back down; contents must survive each GC.
        HeapVector<Member<IntWrapper>, 2> members;
        members.append(one);
        members.append(two);
        conservativelyCollectGarbage();
        EXPECT_TRUE(members.contains(one));
        EXPECT_TRUE(members.contains(two));

        members.append(three);
        members.append(four);
        conservativelyCollectGarbage();
        EXPECT_TRUE(members.contains(one));
        EXPECT_TRUE(members.contains(two));
        EXPECT_TRUE(members.contains(three));
        EXPECT_TRUE(members.contains(four));

        members.shrink(1);
        conservativelyCollectGarbage();
        EXPECT_TRUE(members.contains(one));
        EXPECT_FALSE(members.contains(two));
        EXPECT_FALSE(members.contains(three));
        EXPECT_FALSE(members.contains(four));
    }
    {
        // Swap two vectors that both fit in their inline buffers.
        HeapVector<Member<IntWrapper>, 2> first;
        HeapVector<Member<IntWrapper>, 2> second;
        first.append(one);
        second.append(two);
        first.swap(second);
        conservativelyCollectGarbage();
        EXPECT_TRUE(first.contains(two));
        EXPECT_TRUE(second.contains(one));
    }
    {
        // Swap an inline-backed vector with one that has spilled to an
        // out-of-line backing store.
        HeapVector<Member<IntWrapper>, 2> first;
        HeapVector<Member<IntWrapper>, 2> second;
        first.append(one);
        first.append(two);
        second.append(three);
        second.append(four);
        second.append(five);
        second.append(six);
        first.swap(second);
        conservativelyCollectGarbage();
        EXPECT_TRUE(first.contains(three));
        EXPECT_TRUE(first.contains(four));
        EXPECT_TRUE(first.contains(five));
        EXPECT_TRUE(first.contains(six));
        EXPECT_TRUE(second.contains(one));
        EXPECT_TRUE(second.contains(two));
    }
}
// Verifies that shrinkToFit() actually releases backing-store capacity.
// NOTE(review): this test is sensitive to exact heap layout — it relies on
// what gets allocated immediately after vector1's backing (see the inline
// comments) — so the statement order must not be changed.
TEST(HeapTest, HeapVectorShrinkCapacity)
{
    clearOutOldGarbage();
    HeapVector<Member<IntWrapper>> vector1;
    HeapVector<Member<IntWrapper>> vector2;
    vector1.reserveCapacity(96);
    EXPECT_LE(96u, vector1.capacity());
    vector1.grow(vector1.capacity());

    // Assumes none was allocated just after a vector backing of vector1.
    vector1.shrink(56);
    vector1.shrinkToFit();
    EXPECT_GT(96u, vector1.capacity());

    // vector2's reservation is intentional: it places another allocation
    // right after vector1's backing, so vector1 cannot shrink in place.
    vector2.reserveCapacity(20);
    // Assumes another vector backing was allocated just after the vector
    // backing of vector1.
    vector1.shrink(10);
    vector1.shrinkToFit();
    EXPECT_GT(56u, vector1.capacity());

    // Growing again must work after the shrinks.
    vector1.grow(192);
    EXPECT_LE(192u, vector1.capacity());
}
// Verifies shrinking behavior for a vector with a large inline capacity:
// shrinking the external backing, switching back to the inline buffer, and
// that the inline buffer itself never shrinks below its fixed capacity.
// NOTE(review): sensitive to buffer-placement behavior; keep statement order.
TEST(HeapTest, HeapVectorShrinkInlineCapacity)
{
    clearOutOldGarbage();
    const size_t inlineCapacity = 64;
    HeapVector<Member<IntWrapper>, inlineCapacity> vector1;
    vector1.reserveCapacity(128);
    EXPECT_LE(128u, vector1.capacity());
    vector1.grow(vector1.capacity());

    // Shrink the external buffer.
    vector1.shrink(90);
    vector1.shrinkToFit();
    EXPECT_GT(128u, vector1.capacity());

    // TODO(sof): if the ASan support for 'contiguous containers' is enabled,
    // Vector inline buffers are disabled; that constraint should eventually
    // be removed, but until that time, disable testing handling of
    // capacities of inline buffers.
#if !defined(ANNOTATE_CONTIGUOUS_CONTAINER)
    // Shrinking switches the buffer from the external one to the inline one.
    vector1.shrink(inlineCapacity - 1);
    vector1.shrinkToFit();
    EXPECT_EQ(inlineCapacity, vector1.capacity());

    // Try to shrink the inline buffer; capacity must stay at the inline size.
    vector1.shrink(1);
    vector1.shrinkToFit();
    EXPECT_EQ(inlineCapacity, vector1.capacity());
#endif
}
// Returns true if |deque| contains an element equal to |u|.
// Linear scan; HeapDeque has no find/contains of its own for these tests.
template<typename T, size_t inlineCapacity, typename U>
bool dequeContains(HeapDeque<T, inlineCapacity>& deque, U u)
{
    // Range-based for replaces the manual iterator-typedef loop; behavior
    // is identical (first match wins).
    for (const T& element : deque) {
        if (element == u)
            return true;
    }
    return false;
}
TEST(HeapTest, HeapCollectionTypes)
{
IntWrapper::s_destructorCalls = 0;
typedef HeapHashMap<Member<IntWrapper>, Member<IntWrapper>> MemberMember;
typedef HeapHashMap<Member<IntWrapper>, int> MemberPrimitive;
typedef HeapHashMap<int, Member<IntWrapper>> PrimitiveMember;
typedef HeapHashSet<Member<IntWrapper>> MemberSet;
typedef HeapHashCountedSet<Member<IntWrapper>> MemberCountedSet;
typedef HeapVector<Member<IntWrapper>, 2> MemberVector;
typedef HeapDeque<Member<IntWrapper>, 0> MemberDeque;
typedef HeapVector<PairWrappedUnwrapped, 2> VectorWU;
typedef HeapVector<PairUnwrappedWrapped, 2> VectorUW;
typedef HeapDeque<PairWrappedUnwrapped, 0> DequeWU;
typedef HeapDeque<PairUnwrappedWrapped, 0> DequeUW;
Persistent<MemberMember> memberMember = new MemberMember();
Persistent<MemberMember> memberMember2 = new MemberMember();
Persistent<MemberMember> memberMember3 = new MemberMember();
Persistent<MemberPrimitive> memberPrimitive = new MemberPrimitive();
Persistent<PrimitiveMember> primitiveMember = new PrimitiveMember();
Persistent<MemberSet> set = new MemberSet();
Persistent<MemberSet> set2 = new MemberSet();
Persistent<MemberCountedSet> set3 = new MemberCountedSet();
Persistent<MemberVector> vector = new MemberVector();
Persistent<MemberVector> vector2 = new MemberVector();
Persistent<VectorWU> vectorWU = new VectorWU();
Persistent<VectorWU> vectorWU2 = new VectorWU();
Persistent<VectorUW> vectorUW = new VectorUW();
Persistent<VectorUW> vectorUW2 = new VectorUW();
Persistent<MemberDeque> deque = new MemberDeque();
Persistent<MemberDeque> deque2 = new MemberDeque();
Persistent<DequeWU> dequeWU = new DequeWU();
Persistent<DequeWU> dequeWU2 = new DequeWU();
Persistent<DequeUW> dequeUW = new DequeUW();
Persistent<DequeUW> dequeUW2 = new DequeUW();
Persistent<Container> container = Container::create();
clearOutOldGarbage();
{
Persistent<IntWrapper> one(IntWrapper::create(1));
Persistent<IntWrapper> two(IntWrapper::create(2));
Persistent<IntWrapper> oneB(IntWrapper::create(1));
Persistent<IntWrapper> twoB(IntWrapper::create(2));
Persistent<IntWrapper> oneC(IntWrapper::create(1));
Persistent<IntWrapper> oneD(IntWrapper::create(1));
Persistent<IntWrapper> oneE(IntWrapper::create(1));
Persistent<IntWrapper> oneF(IntWrapper::create(1));
{
IntWrapper* threeB(IntWrapper::create(3));
IntWrapper* threeC(IntWrapper::create(3));
IntWrapper* threeD(IntWrapper::create(3));
IntWrapper* threeE(IntW