// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/mark-compact.h"
#include <unordered_map>
#include <unordered_set>
#include "src/base/logging.h"
#include "src/base/optional.h"
#include "src/base/utils/random-number-generator.h"
#include "src/codegen/compilation-cache.h"
#include "src/common/globals.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/execution.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-utils-inl.h"
#include "src/execution/isolate-utils.h"
#include "src/execution/vm-state-inl.h"
#include "src/handles/global-handles.h"
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/concurrent-allocator.h"
#include "src/heap/evacuation-allocator-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/index-generator.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/large-spaces.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/marking-barrier.h"
#include "src/heap/marking-visitor-inl.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-measurement-inl.h"
#include "src/heap/memory-measurement.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/parallel-work-item.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/read-only-spaces.h"
#include "src/heap/safepoint.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/sweeper.h"
#include "src/heap/weak-object-worklists.h"
#include "src/ic/stub-cache.h"
#include "src/init/v8.h"
#include "src/logging/tracing-flags.h"
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/foreign.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/instance-type.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-objects-inl.h"
#include "src/objects/maybe-object.h"
#include "src/objects/objects.h"
#include "src/objects/slots-inl.h"
#include "src/objects/smi.h"
#include "src/objects/transitions-inl.h"
#include "src/objects/visitors.h"
#include "src/snapshot/shared-heap-serializer.h"
#include "src/tasks/cancelable-task.h"
#include "src/tracing/tracing-category-observer.h"
#include "src/utils/utils-inl.h"
namespace v8 {
namespace internal {
const char* Marking::kWhiteBitPattern = "00";
const char* Marking::kBlackBitPattern = "11";
const char* Marking::kGreyBitPattern = "10";
const char* Marking::kImpossibleBitPattern = "01";
// The following has to hold in order for {MarkingState::MarkBitFrom} to not
// produce invalid {kImpossibleBitPattern} in the marking bitmap by overlapping.
STATIC_ASSERT(Heap::kMinObjectSizeInTaggedWords >= 2);
// =============================================================================
// Verifiers
// =============================================================================
#ifdef VERIFY_HEAP
namespace {
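// Base class for heap marking verifiers: walks the roots and the bodies of
// live objects and checks, via the virtual hooks below, that every strongly
// reachable object is marked in the concrete marking state a subclass
// provides.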
class MarkingVerifier : public ObjectVisitorWithCageBases, public RootVisitor {
public:
virtual void Run() = 0;
protected:
explicit MarkingVerifier(Heap* heap)
: ObjectVisitorWithCageBases(heap), heap_(heap) {}
virtual ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
const MemoryChunk* chunk) = 0;
virtual void VerifyMap(Map map) = 0;
virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
virtual void VerifyCodePointer(CodeObjectSlot slot) = 0;
virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
virtual bool IsMarked(HeapObject object) = 0;
virtual bool IsBlackOrGrey(HeapObject object) = 0;
void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) override {
VerifyPointers(start, end);
}
void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) override {
VerifyPointers(start, end);
}
void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
VerifyCodePointer(slot);
}
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override {
VerifyRootPointers(start, end);
}
void VisitMapPointer(HeapObject object) override {
VerifyMap(object.map(cage_base()));
}
void VerifyRoots();
void VerifyMarkingOnPage(const Page* page, Address start, Address end);
void VerifyMarking(NewSpace* new_space);
void VerifyMarking(PagedSpace* paged_space);
void VerifyMarking(LargeObjectSpace* lo_space);
Heap* heap_;
};
void MarkingVerifier::VerifyRoots() {
heap_->IterateRootsIncludingClients(this,
base::EnumSet<SkipRoot>{SkipRoot::kWeak});
}
void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
Address end) {
Address next_object_must_be_here_or_later = start;
for (auto object_and_size :
LiveObjectRange<kAllLiveObjects>(page, bitmap(page))) {
HeapObject object = object_and_size.first;
size_t size = object_and_size.second;
Address current = object.address();
if (current < start) continue;
if (current >= end) break;
CHECK(IsMarked(object));
CHECK(current >= next_object_must_be_here_or_later);
object.Iterate(cage_base(), this);
next_object_must_be_here_or_later = current + size;
// The object is either part of a black area (black allocation) or a
// regular black object.
CHECK(bitmap(page)->AllBitsSetInRange(
page->AddressToMarkbitIndex(current),
page->AddressToMarkbitIndex(next_object_must_be_here_or_later)) ||
bitmap(page)->AllBitsClearInRange(
page->AddressToMarkbitIndex(current + kTaggedSize * 2),
page->AddressToMarkbitIndex(next_object_must_be_here_or_later)));
current = next_object_must_be_here_or_later;
}
}
void MarkingVerifier::VerifyMarking(NewSpace* space) {
if (!space) return;
Address end = space->top();
// The bottom position is at the start of its page. This allows us to use
// page->area_start() as the start of the range on all pages.
CHECK_EQ(space->first_allocatable_address(),
space->first_page()->area_start());
PageRange range(space->first_allocatable_address(), end);
for (auto it = range.begin(); it != range.end();) {
Page* page = *(it++);
Address limit = it != range.end() ? page->area_end() : end;
CHECK(limit == end || !page->Contains(end));
VerifyMarkingOnPage(page, page->area_start(), limit);
}
}
void MarkingVerifier::VerifyMarking(PagedSpace* space) {
for (Page* p : *space) {
VerifyMarkingOnPage(p, p->area_start(), p->area_end());
}
}
void MarkingVerifier::VerifyMarking(LargeObjectSpace* lo_space) {
if (!lo_space) return;
LargeObjectSpaceObjectIterator it(lo_space);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
if (IsBlackOrGrey(obj)) {
obj.Iterate(cage_base(), this);
}
}
}
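// Marking verifier for the full (major) mark-compact collector, backed by the
// collector's non-atomic marking state. Covers the new, old, code, and map
// spaces as well as the corresponding large-object spaces.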
class FullMarkingVerifier : public MarkingVerifier {
public:
explicit FullMarkingVerifier(Heap* heap)
: MarkingVerifier(heap),
marking_state_(
heap->mark_compact_collector()->non_atomic_marking_state()) {}
void Run() override {
VerifyRoots();
VerifyMarking(heap_->new_space());
VerifyMarking(heap_->new_lo_space());
VerifyMarking(heap_->old_space());
VerifyMarking(heap_->code_space());
if (heap_->map_space()) VerifyMarking(heap_->map_space());
VerifyMarking(heap_->lo_space());
VerifyMarking(heap_->code_lo_space());
}
protected:
ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
const MemoryChunk* chunk) override {
return marking_state_->bitmap(chunk);
}
bool IsMarked(HeapObject object) override {
return marking_state_->IsBlack(object);
}
bool IsBlackOrGrey(HeapObject object) override {
return marking_state_->IsBlackOrGrey(object);
}
void VerifyMap(Map map) override { VerifyHeapObjectImpl(map); }
void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
VerifyPointersImpl(start, end);
}
void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
VerifyPointersImpl(start, end);
}
void VerifyCodePointer(CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
Object maybe_code = slot.load(code_cage_base());
HeapObject code;
// The slot might contain a Smi during CodeDataContainer creation, so skip it.
if (maybe_code.GetHeapObject(&code)) {
VerifyHeapObjectImpl(code);
}
}
void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
VerifyPointersImpl(start, end);
}
void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
VerifyHeapObjectImpl(target);
}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
HeapObject target_object = rinfo->target_object(cage_base());
if (!host.IsWeakObject(target_object)) {
VerifyHeapObjectImpl(target_object);
}
}
private:
V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
if (heap_->IsShared() !=
BasicMemoryChunk::FromHeapObject(heap_object)->InSharedHeap())
return;
if (heap_->ShouldBeInSharedOldSpace(heap_object)) {
CHECK(heap_->SharedHeapContains(heap_object));
}
CHECK(marking_state_->IsBlackOrGrey(heap_object));
}
template <typename TSlot>
V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end) {
for (TSlot slot = start; slot < end; ++slot) {
typename TSlot::TObject object = slot.load(cage_base());
HeapObject heap_object;
if (object.GetHeapObjectIfStrong(&heap_object)) {
VerifyHeapObjectImpl(heap_object);
}
}
}
MarkCompactCollector::NonAtomicMarkingState* marking_state_;
};
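// Base class for evacuation verifiers: iterates all non-free objects in the
// relevant spaces after evacuation and dispatches their slots to the Verify*
// hooks implemented by subclasses.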
class EvacuationVerifier : public ObjectVisitorWithCageBases,
public RootVisitor {
public:
virtual void Run() = 0;
void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) override {
VerifyPointers(start, end);
}
void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) override {
VerifyPointers(start, end);
}
void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
VerifyCodePointer(slot);
}
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override {
VerifyRootPointers(start, end);
}
void VisitMapPointer(HeapObject object) override {
VerifyMap(object.map(cage_base()));
}
protected:
explicit EvacuationVerifier(Heap* heap)
: ObjectVisitorWithCageBases(heap), heap_(heap) {}
inline Heap* heap() { return heap_; }
virtual void VerifyMap(Map map) = 0;
virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
virtual void VerifyCodePointer(CodeObjectSlot slot) = 0;
virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
void VerifyRoots();
void VerifyEvacuationOnPage(Address start, Address end);
void VerifyEvacuation(NewSpace* new_space);
void VerifyEvacuation(PagedSpace* paged_space);
Heap* heap_;
};
void EvacuationVerifier::VerifyRoots() {
heap_->IterateRootsIncludingClients(this,
base::EnumSet<SkipRoot>{SkipRoot::kWeak});
}
void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
Address current = start;
while (current < end) {
HeapObject object = HeapObject::FromAddress(current);
if (!object.IsFreeSpaceOrFiller(cage_base())) {
object.Iterate(cage_base(), this);
}
current += object.Size(cage_base());
}
}
void EvacuationVerifier::VerifyEvacuation(NewSpace* space) {
if (!space) return;
PageRange range(space->first_allocatable_address(), space->top());
for (auto it = range.begin(); it != range.end();) {
Page* page = *(it++);
Address current = page->area_start();
Address limit = it != range.end() ? page->area_end() : space->top();
CHECK(limit == space->top() || !page->Contains(space->top()));
VerifyEvacuationOnPage(current, limit);
}
}
void EvacuationVerifier::VerifyEvacuation(PagedSpace* space) {
for (Page* p : *space) {
if (p->IsEvacuationCandidate()) continue;
if (p->Contains(space->top())) {
CodePageMemoryModificationScope memory_modification_scope(p);
heap_->CreateFillerObjectAt(
space->top(), static_cast<int>(space->limit() - space->top()),
ClearRecordedSlots::kNo);
}
VerifyEvacuationOnPage(p->area_start(), p->area_end());
}
}
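// Evacuation verifier for the full collector: checks that no strong slot
// still points to an evacuation candidate and that young-generation
// references point into the to-space.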
class FullEvacuationVerifier : public EvacuationVerifier {
public:
explicit FullEvacuationVerifier(Heap* heap) : EvacuationVerifier(heap) {}
void Run() override {
VerifyRoots();
VerifyEvacuation(heap_->new_space());
VerifyEvacuation(heap_->old_space());
VerifyEvacuation(heap_->code_space());
if (heap_->map_space()) VerifyEvacuation(heap_->map_space());
}
protected:
V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
if (heap_->IsShared() !=
BasicMemoryChunk::FromHeapObject(heap_object)->InSharedHeap())
return;
CHECK_IMPLIES(Heap::InYoungGeneration(heap_object),
Heap::InToPage(heap_object));
CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(heap_object));
}
template <typename TSlot>
void VerifyPointersImpl(TSlot start, TSlot end) {
for (TSlot current = start; current < end; ++current) {
typename TSlot::TObject object = current.load(cage_base());
HeapObject heap_object;
if (object.GetHeapObjectIfStrong(&heap_object)) {
VerifyHeapObjectImpl(heap_object);
}
}
}
void VerifyMap(Map map) override { VerifyHeapObjectImpl(map); }
void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
VerifyPointersImpl(start, end);
}
void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
VerifyPointersImpl(start, end);
}
void VerifyCodePointer(CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
Object maybe_code = slot.load(code_cage_base());
HeapObject code;
// The slot might contain a Smi during CodeDataContainer creation, so skip it.
if (maybe_code.GetHeapObject(&code)) {
VerifyHeapObjectImpl(code);
}
}
void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
VerifyHeapObjectImpl(target);
}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
VerifyHeapObjectImpl(rinfo->target_object(cage_base()));
}
void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
VerifyPointersImpl(start, end);
}
};
} // namespace
#endif // VERIFY_HEAP
// =============================================================================
// MarkCompactCollectorBase, MinorMarkCompactCollector, MarkCompactCollector
// =============================================================================
namespace {
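// Number of cores available for parallel GC work, including the main thread.
// Cached on first use; the platform's worker thread count is expected to stay
// constant afterwards.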
int NumberOfAvailableCores() {
static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
// This number of cores should be greater than zero and never change.
DCHECK_GE(num_cores, 1);
DCHECK_EQ(num_cores, V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1);
return num_cores;
}
} // namespace
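// Limits compaction to a single task when the heap cannot afford to expand
// the old generation by one page per task, optimizing for memory usage near
// the heap limit.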
int MarkCompactCollectorBase::NumberOfParallelCompactionTasks() {
int tasks = FLAG_parallel_compaction ? NumberOfAvailableCores() : 1;
if (!heap_->CanPromoteYoungAndExpandOldGeneration(
static_cast<size_t>(tasks * Page::kPageSize))) {
// Optimize for memory usage near the heap limit.
tasks = 1;
}
return tasks;
}
MarkCompactCollector::MarkCompactCollector(Heap* heap)
: MarkCompactCollectorBase(heap),
#ifdef DEBUG
state_(IDLE),
#endif
is_shared_heap_(heap->IsShared()),
marking_state_(heap->isolate()),
non_atomic_marking_state_(heap->isolate()),
sweeper_(new Sweeper(heap, non_atomic_marking_state())) {
}
MarkCompactCollector::~MarkCompactCollector() { delete sweeper_; }
void MarkCompactCollector::SetUp() {
DCHECK_EQ(0, strcmp(Marking::kWhiteBitPattern, "00"));
DCHECK_EQ(0, strcmp(Marking::kBlackBitPattern, "11"));
DCHECK_EQ(0, strcmp(Marking::kGreyBitPattern, "10"));
DCHECK_EQ(0, strcmp(Marking::kImpossibleBitPattern, "01"));
}
void MarkCompactCollector::TearDown() {
AbortCompaction();
if (heap()->incremental_marking()->IsMarking()) {
local_marking_worklists()->Publish();
heap()->marking_barrier()->Publish();
// Marking barriers of LocalHeaps will be published in their destructors.
marking_worklists()->Clear();
local_weak_objects()->Publish();
weak_objects()->Clear();
}
sweeper()->TearDown();
}
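// Returns true if |map| is a Map object, following a forwarding address in
// case the map itself has already been evacuated.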
// static
bool MarkCompactCollector::IsMapOrForwardedMap(Map map) {
MapWord map_word = map.map_word(kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
return map_word.ToForwardingAddress().IsMap();
} else {
return map_word.ToMap().IsMap();
}
}
void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
DCHECK(!p->NeverEvacuate());
if (FLAG_trace_evacuation_candidates) {
PrintIsolate(
isolate(),
"Evacuation candidate: Free bytes: %6zu. Free Lists length: %4d.\n",
p->area_size() - p->allocated_bytes(), p->FreeListsLength());
}
p->MarkEvacuationCandidate();
evacuation_candidates_.push_back(p);
}
static void TraceFragmentation(PagedSpace* space) {
int number_of_pages = space->CountTotalPages();
intptr_t reserved = (number_of_pages * space->AreaSize());
intptr_t free = reserved - space->SizeOfObjects();
PrintF("[%s]: %d pages, %d (%.1f%%) free\n", space->name(), number_of_pages,
static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
}
bool MarkCompactCollector::StartCompaction(StartCompactionMode mode) {
DCHECK(!compacting_);
DCHECK(evacuation_candidates_.empty());
// Bailouts for completely disabled compaction.
if (!FLAG_compact ||
(mode == StartCompactionMode::kAtomic && !heap()->IsGCWithoutStack() &&
!FLAG_compact_with_stack) ||
(FLAG_gc_experiment_less_compaction && !heap_->ShouldReduceMemory())) {
return false;
}
CollectEvacuationCandidates(heap()->old_space());
if (heap()->map_space() && FLAG_compact_maps) {
CollectEvacuationCandidates(heap()->map_space());
}
if (FLAG_compact_code_space &&
(heap()->IsGCWithoutStack() || FLAG_compact_code_space_with_stack)) {
CollectEvacuationCandidates(heap()->code_space());
} else if (FLAG_trace_fragmentation) {
TraceFragmentation(heap()->code_space());
}
if (FLAG_trace_fragmentation && heap()->map_space()) {
TraceFragmentation(heap()->map_space());
}
compacting_ = !evacuation_candidates_.empty();
return compacting_;
}
void MarkCompactCollector::StartMarking() {
std::vector<Address> contexts =
heap()->memory_measurement()->StartProcessing();
if (FLAG_stress_per_context_marking_worklist) {
contexts.clear();
HandleScope handle_scope(heap()->isolate());
for (auto context : heap()->FindAllNativeContexts()) {
contexts.push_back(context->ptr());
}
}
code_flush_mode_ = Heap::GetCodeFlushMode(isolate());
marking_worklists()->CreateContextWorklists(contexts);
auto* cpp_heap = CppHeap::From(heap_->cpp_heap());
local_marking_worklists_ = std::make_unique<MarkingWorklists::Local>(
marking_worklists(),
cpp_heap ? cpp_heap->CreateCppMarkingStateForMutatorThread()
: MarkingWorklists::Local::kNoCppMarkingState);
local_weak_objects_ = std::make_unique<WeakObjects::Local>(weak_objects());
marking_visitor_ = std::make_unique<MarkingVisitor>(
marking_state(), local_marking_worklists(), local_weak_objects_.get(),
heap_, epoch(), code_flush_mode(),
heap_->local_embedder_heap_tracer()->InUse(),
heap_->ShouldCurrentGCKeepAgesUnchanged());
// Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
VerifyMarkbitsAreClean();
}
#endif
}
void MarkCompactCollector::CollectGarbage() {
// Make sure that Prepare() has been called. The individual steps below will
// update the state as they proceed.
DCHECK(state_ == PREPARE_GC);
MarkLiveObjects();
ClearNonLiveReferences();
VerifyMarking();
heap()->memory_measurement()->FinishProcessing(native_context_stats_);
RecordObjectStats();
StartSweepSpaces();
Evacuate();
Finish();
}
#ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreDirty(ReadOnlySpace* space) {
ReadOnlyHeapObjectIterator iterator(space);
for (HeapObject object = iterator.Next(); !object.is_null();
object = iterator.Next()) {
CHECK(non_atomic_marking_state()->IsBlack(object));
}
}
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
for (Page* p : *space) {
CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
}
}
void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
if (!space) return;
for (Page* p : PageRange(space->first_allocatable_address(), space->top())) {
CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
}
}
void MarkCompactCollector::VerifyMarkbitsAreClean(LargeObjectSpace* space) {
if (!space) return;
LargeObjectSpaceObjectIterator it(space);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
CHECK(non_atomic_marking_state()->IsWhite(obj));
CHECK_EQ(0, non_atomic_marking_state()->live_bytes(
MemoryChunk::FromHeapObject(obj)));
}
}
void MarkCompactCollector::VerifyMarkbitsAreClean() {
VerifyMarkbitsAreClean(heap_->old_space());
VerifyMarkbitsAreClean(heap_->code_space());
if (heap_->map_space()) {
VerifyMarkbitsAreClean(heap_->map_space());
}
VerifyMarkbitsAreClean(heap_->new_space());
// Read-only space should always be black since we never collect any objects
// in it or linked from it.
VerifyMarkbitsAreDirty(heap_->read_only_space());
VerifyMarkbitsAreClean(heap_->lo_space());
VerifyMarkbitsAreClean(heap_->code_lo_space());
VerifyMarkbitsAreClean(heap_->new_lo_space());
}
#endif // VERIFY_HEAP
void MarkCompactCollector::FinishSweepingIfOutOfWork() {
if (sweeper()->sweeping_in_progress() && FLAG_concurrent_sweeping &&
!sweeper()->AreSweeperTasksRunning()) {
// At this point we know that all concurrent sweeping tasks have run
// out of work and quit: all pages are swept. The main thread still needs
// to complete sweeping though.
EnsureSweepingCompleted(SweepingForcedFinalizationMode::kV8Only);
}
if (heap()->cpp_heap()) {
// Ensure that sweeping is also completed for the C++ managed heap, if one
// exists and it's out of work.
CppHeap::From(heap()->cpp_heap())->FinishSweepingIfOutOfWork();
}
}
void MarkCompactCollector::EnsureSweepingCompleted(
SweepingForcedFinalizationMode mode) {
if (sweeper()->sweeping_in_progress()) {
TRACE_GC_EPOCH(heap()->tracer(), GCTracer::Scope::MC_COMPLETE_SWEEPING,
ThreadKind::kMain);
sweeper()->EnsureCompleted();
heap()->old_space()->RefillFreeList();
heap()->code_space()->RefillFreeList();
if (heap()->map_space()) {
heap()->map_space()->RefillFreeList();
heap()->map_space()->SortFreeList();
}
heap()->tracer()->NotifySweepingCompleted();
#ifdef VERIFY_HEAP
if (FLAG_verify_heap && !evacuation()) {
FullEvacuationVerifier verifier(heap());
verifier.Run();
}
#endif
}
if (mode == SweepingForcedFinalizationMode::kUnifiedHeap &&
heap()->cpp_heap()) {
// Ensure that sweeping is also completed for the C++ managed heap, if one
// exists.
CppHeap::From(heap()->cpp_heap())->FinishSweepingIfRunning();
}
}
void MarkCompactCollector::EnsurePageIsSwept(Page* page) {
sweeper()->EnsurePageIsSwept(page);
}
void MarkCompactCollector::DrainSweepingWorklistForSpace(
AllocationSpace space) {
if (!sweeper()->sweeping_in_progress()) return;
sweeper()->DrainSweepingWorklistForSpace(space);
}
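// Computes the fragmentation threshold and the byte budget used when
// selecting evacuation candidates, depending on whether the GC should reduce
// memory and on the observed compaction speed.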
void MarkCompactCollector::ComputeEvacuationHeuristics(
size_t area_size, int* target_fragmentation_percent,
size_t* max_evacuated_bytes) {
// For the memory-reducing and optimize-for-memory modes we directly define
// both constants.
const int kTargetFragmentationPercentForReduceMemory = 20;
const size_t kMaxEvacuatedBytesForReduceMemory = 12 * MB;
const int kTargetFragmentationPercentForOptimizeMemory = 20;
const size_t kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
// For regular mode (which is latency critical) we define less aggressive
// defaults to start and switch to a trace-based (using compaction speed)
// approach as soon as we have enough samples.
const int kTargetFragmentationPercent = 70;
const size_t kMaxEvacuatedBytes = 4 * MB;
// Time to take for a single area (=payload of page). Used as soon as there
// exist enough compaction speed samples.
const float kTargetMsPerArea = .5;
if (heap()->ShouldReduceMemory()) {
*target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
*max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
} else if (heap()->ShouldOptimizeForMemoryUsage()) {
*target_fragmentation_percent =
kTargetFragmentationPercentForOptimizeMemory;
*max_evacuated_bytes = kMaxEvacuatedBytesForOptimizeMemory;
} else {
const double estimated_compaction_speed =
heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
if (estimated_compaction_speed != 0) {
// Estimate the target fragmentation based on traced compaction speed
// and a goal for a single page.
const double estimated_ms_per_area =
1 + area_size / estimated_compaction_speed;
*target_fragmentation_percent = static_cast<int>(
100 - 100 * kTargetMsPerArea / estimated_ms_per_area);
if (*target_fragmentation_percent <
kTargetFragmentationPercentForReduceMemory) {
*target_fragmentation_percent =
kTargetFragmentationPercentForReduceMemory;
}
} else {
*target_fragmentation_percent = kTargetFragmentationPercent;
}
*max_evacuated_bytes = kMaxEvacuatedBytes;
}
}
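// Selects evacuation candidate pages for |space|. In the standard path, pages
// are chosen by their free bytes under a global evacuation budget; the
// various stress and manual-selection flags override that choice.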
void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE ||
space->identity() == MAP_SPACE);
int number_of_pages = space->CountTotalPages();
size_t area_size = space->AreaSize();
const bool in_standard_path =
!(FLAG_manual_evacuation_candidates_selection ||
FLAG_stress_compaction_random || FLAG_stress_compaction ||
FLAG_compact_on_every_full_gc);
// Those variables will only be initialized if |in_standard_path|, and are not
// used otherwise.
size_t max_evacuated_bytes;
int target_fragmentation_percent;
size_t free_bytes_threshold;
if (in_standard_path) {
// We use two conditions to decide whether a page qualifies as an evacuation
// candidate, or not:
// * Target fragmentation: How fragmented a page is, i.e., the ratio between
// live bytes and the capacity of the page (= area).
// * Evacuation quota: A global quota determining how much bytes should be
// compacted.
ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
&max_evacuated_bytes);
free_bytes_threshold = target_fragmentation_percent * (area_size / 100);
}
// Pairs of (live_bytes_in_page, page).
using LiveBytesPagePair = std::pair<size_t, Page*>;
std::vector<LiveBytesPagePair> pages;
pages.reserve(number_of_pages);
DCHECK(!sweeping_in_progress());
Page* owner_of_linear_allocation_area =
space->top() == space->limit()
? nullptr
: Page::FromAllocationAreaAddress(space->top());
for (Page* p : *space) {
if (p->NeverEvacuate() || (p == owner_of_linear_allocation_area) ||
!p->CanAllocate())
continue;
if (p->IsPinned()) {
DCHECK(
!p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING));
continue;
}
// Invariant: Evacuation candidates are only created when marking is
// started. This means that sweeping has finished. Furthermore, at the end
// of a GC all evacuation candidates are cleared and their slot buffers are
// released.
CHECK(!p->IsEvacuationCandidate());
CHECK_NULL(p->slot_set<OLD_TO_OLD>());
CHECK_NULL(p->typed_slot_set<OLD_TO_OLD>());
CHECK(p->SweepingDone());
DCHECK(p->area_size() == area_size);
if (in_standard_path) {
// Only pages with more than |free_bytes_threshold| free bytes are
// considered for evacuation.
if (area_size - p->allocated_bytes() >= free_bytes_threshold) {
pages.push_back(std::make_pair(p->allocated_bytes(), p));
}
} else {
pages.push_back(std::make_pair(p->allocated_bytes(), p));
}
// Unpin pages for the next GC
if (p->IsFlagSet(MemoryChunk::PINNED)) {
p->ClearFlag(MemoryChunk::PINNED);
}
}
int candidate_count = 0;
size_t total_live_bytes = 0;
const bool reduce_memory = heap()->ShouldReduceMemory();
if (FLAG_manual_evacuation_candidates_selection) {
for (size_t i = 0; i < pages.size(); i++) {
Page* p = pages[i].second;
if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
candidate_count++;
total_live_bytes += pages[i].first;
p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
AddEvacuationCandidate(p);
}
}
} else if (FLAG_stress_compaction_random) {
double fraction = isolate()->fuzzer_rng()->NextDouble();
size_t pages_to_mark_count =
static_cast<size_t>(fraction * (pages.size() + 1));
for (uint64_t i : isolate()->fuzzer_rng()->NextSample(
pages.size(), pages_to_mark_count)) {
candidate_count++;
total_live_bytes += pages[i].first;
AddEvacuationCandidate(pages[i].second);
}
} else if (FLAG_stress_compaction) {
for (size_t i = 0; i < pages.size(); i++) {
Page* p = pages[i].second;
if (i % 2 == 0) {
candidate_count++;
total_live_bytes += pages[i].first;
AddEvacuationCandidate(p);
}
}
} else {
// The following approach determines the pages that should be evacuated.
//
// Sort pages from the most free to the least free, then select
// the first n pages for evacuation such that:
// - the total size of evacuated objects does not exceed the specified
// limit.
// - fragmentation of (n+1)-th page does not exceed the specified limit.
std::sort(pages.begin(), pages.end(),
[](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
return a.first < b.first;
});
for (size_t i = 0; i < pages.size(); i++) {
size_t live_bytes = pages[i].first;
DCHECK_GE(area_size, live_bytes);
if (FLAG_compact_on_every_full_gc ||
((total_live_bytes + live_bytes) <= max_evacuated_bytes)) {
candidate_count++;
total_live_bytes += live_bytes;
}
if (FLAG_trace_fragmentation_verbose) {
PrintIsolate(isolate(),
"compaction-selection-page: space=%s free_bytes_page=%zu "
"fragmentation_limit_kb=%zu "
"fragmentation_limit_percent=%d sum_compaction_kb=%zu "
"compaction_limit_kb=%zu\n",
space->name(), (area_size - live_bytes) / KB,
free_bytes_threshold / KB, target_fragmentation_percent,
total_live_bytes / KB, max_evacuated_bytes / KB);
}
}
// How many pages we will allocate for the evacuated objects in the worst
// case: ceil(total_live_bytes / area_size).
int estimated_new_pages =
static_cast<int>((total_live_bytes + area_size - 1) / area_size);
DCHECK_LE(estimated_new_pages, candidate_count);
int estimated_released_pages = candidate_count - estimated_new_pages;
// Avoid (compact -> expand) cycles.
if ((estimated_released_pages == 0) && !FLAG_compact_on_every_full_gc) {
candidate_count = 0;
}
for (int i = 0; i < candidate_count; i++) {
AddEvacuationCandidate(pages[i].second);
}
}
if (FLAG_trace_fragmentation) {
PrintIsolate(isolate(),
"compaction-selection: space=%s reduce_memory=%d pages=%d "
"total_live_bytes=%zu\n",
space->name(), reduce_memory, candidate_count,
total_live_bytes / KB);
}
}
void MarkCompactCollector::AbortCompaction() {
if (compacting_) {
RememberedSet<OLD_TO_OLD>::ClearAll(heap());
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
RememberedSet<OLD_TO_CODE>::ClearAll(heap());
}
for (Page* p : evacuation_candidates_) {
p->ClearEvacuationCandidate();
}
compacting_ = false;
evacuation_candidates_.clear();
}
DCHECK(evacuation_candidates_.empty());
}
void MarkCompactCollector::Prepare() {
was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
#ifdef DEBUG
DCHECK(state_ == IDLE);
state_ = PREPARE_GC;
#endif
DCHECK(!sweeping_in_progress());
if (!was_marked_incrementally_) {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE);
auto embedder_flags = heap_->flags_for_embedder_tracer();
// PrepareForTrace should be called before visitor initialization in
// StartMarking.
heap_->local_embedder_heap_tracer()->PrepareForTrace(embedder_flags);
heap_->local_embedder_heap_tracer()->TracePrologue(embedder_flags);
}
StartCompaction(StartCompactionMode::kAtomic);
StartMarking();
}
heap_->FreeLinearAllocationAreas();
PagedSpaceIterator spaces(heap());
for (PagedSpace* space = spaces.Next(); space != nullptr;
space = spaces.Next()) {
space->PrepareForMarkCompact();
}
// All objects are guaranteed to be initialized in the atomic pause.
if (heap()->new_lo_space()) {
heap()->new_lo_space()->ResetPendingObject();
}
if (heap()->new_space()) {
DCHECK_EQ(heap()->new_space()->top(),
heap()->new_space()->original_top_acquire());
}
}
void MarkCompactCollector::FinishConcurrentMarking() {
// FinishConcurrentMarking is called for both concurrent and parallel
// marking. It is safe to call this function when tasks are already finished.
if (FLAG_parallel_marking || FLAG_concurrent_marking) {
heap()->concurrent_marking()->Join();
heap()->concurrent_marking()->FlushMemoryChunkData(
non_atomic_marking_state());
heap()->concurrent_marking()->FlushNativeContexts(&native_context_stats_);
}
}
void MarkCompactCollector::VerifyMarking() {
CHECK(local_marking_worklists()->IsEmpty());
DCHECK(heap_->incremental_marking()->IsStopped());
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
FullMarkingVerifier verifier(heap());
verifier.Run();
}
#endif
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
heap()->old_space()->VerifyLiveBytes();
if (heap()->map_space()) heap()->map_space()->VerifyLiveBytes();
heap()->code_space()->VerifyLiveBytes();
}
#endif
}
void MarkCompactCollector::Finish() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
SweepArrayBufferExtensions();
#ifdef DEBUG
heap()->VerifyCountersBeforeConcurrentSweeping();
#endif
marking_visitor_.reset();
local_marking_worklists_.reset();
marking_worklists_.ReleaseContextWorklists();
native_context_stats_.Clear();
CHECK(weak_objects_.current_ephemerons.IsEmpty());
CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
local_weak_objects_->next_ephemerons_local.Publish();
local_weak_objects_.reset();
weak_objects_.next_ephemerons.Clear();
sweeper()->StartSweeperTasks();
sweeper()->StartIterabilityTasks();
// Clear the marking state of live large objects.
heap_->lo_space()->ClearMarkingStateOfLiveObjects();
heap_->code_lo_space()->ClearMarkingStateOfLiveObjects();
#ifdef DEBUG
DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
state_ = IDLE;
#endif
heap_->isolate()->inner_pointer_to_code_cache()->Flush();
// The stub caches are not traversed during GC; clear them to force
// their lazy re-initialization. This must be done after the
// GC, because it relies on the new address of certain old space
// objects (empty string, illegal builtin).
isolate()->load_stub_cache()->Clear();
isolate()->store_stub_cache()->Clear();
if (have_code_to_deoptimize_) {
// Some code objects were marked for deoptimization during the GC.
Deoptimizer::DeoptimizeMarkedCode(isolate());
have_code_to_deoptimize_ = false;
}
}
void MarkCompactCollector::SweepArrayBufferExtensions() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH_SWEEP_ARRAY_BUFFERS);
heap_->array_buffer_sweeper()->RequestSweep(
ArrayBufferSweeper::SweepingType::kFull);
}
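// Root visitor for full GCs: marks strong root references, skipping
// references that cross the shared/client heap boundary, and additionally
// keeps the deoptimization literals of currently executing Code objects
// alive.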
class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
public:
explicit RootMarkingVisitor(MarkCompactCollector* collector)
: collector_(collector), is_shared_heap_(collector->is_shared_heap()) {}
void VisitRootPointer(Root root, const char* description,
FullObjectSlot p) final {
DCHECK(!MapWord::IsPacked(p.Relaxed_Load().ptr()));
MarkObjectByPointer(root, p);
}
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) final {
for (FullObjectSlot p = start; p < end; ++p) {
MarkObjectByPointer(root, p);
}
}
void VisitRunningCode(FullObjectSlot p) final {
Code code = Code::cast(*p);
// If Code is currently executing, then we must not remove its
// deoptimization literals, which it might need in order to successfully
// deoptimize.
//
// Must match behavior in RootsReferencesExtractor::VisitRunningCode, so
// that heap snapshots accurately describe the roots.
if (code.kind() != CodeKind::BASELINE) {
DeoptimizationData deopt_data =
DeoptimizationData::cast(code.deoptimization_data());
if (deopt_data.length() > 0) {
DeoptimizationLiteralArray literals = deopt_data.LiteralArray();
int literals_length = literals.length();
for (int i = 0; i < literals_length; ++i) {
MaybeObject maybe_literal = literals.Get(i);
HeapObject heap_literal;
if (maybe_literal.GetHeapObject(&heap_literal)) {
MarkObjectByPointer(Root::kStackRoots,
FullObjectSlot(&heap_literal));
}
}
}
}
// And then mark the Code itself.
VisitRootPointer(Root::kStackRoots, nullptr, p);
}
private:
V8_INLINE void MarkObjectByPointer(Root root, FullObjectSlot p) {
Object object = *p;
if (!object.IsHeapObject()) return;
HeapObject heap_object = HeapObject::cast(object);
BasicMemoryChunk* target_page =
BasicMemoryChunk::FromHeapObject(heap_object);
if (is_shared_heap_ != target_page->InSharedHeap()) return;
collector_->MarkRootObject(root, heap_object);
}
MarkCompactCollector* const collector_;
const bool is_shared_heap_;
};
// This visitor is used to visit the body of special objects held alive by
// other roots.
//
// It is currently used for
// - Code held alive by the top optimized frame. This code cannot be
// deoptimized and thus has to be kept alive in an isolated way, i.e., it
// should not keep alive other code objects reachable through the weak list,
// but it should keep alive its embedded pointers (which would otherwise be
// dropped).
// - Prefix of the string table.
class MarkCompactCollector::CustomRootBodyMarkingVisitor final
: public ObjectVisitorWithCageBases {
public:
explicit CustomRootBodyMarkingVisitor(MarkCompactCollector* collector)
: ObjectVisitorWithCageBases(collector->isolate()),
collector_(collector) {}
void VisitPointer(HeapObject host, ObjectSlot p) final {
MarkObject(host, p.load(cage_base()));
}
void VisitMapPointer(HeapObject host) final {
MarkObject(host, host.map(cage_base()));
}
void VisitPointers(HeapObject host, ObjectSlot start, ObjectSlot end) final {
for (ObjectSlot p = start; p < end; ++p) {
// The map slot should be handled in VisitMapPointer.
DCHECK_NE(host.map_slot(), p);
DCHECK(!HasWeakHeapObjectTag(p.load(cage_base())));
MarkObject(host, p.load(cage_base()));
}
}
void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
MarkObject(host, slot.load(code_cage_base()));
}
void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) final {
// At the moment, custom roots cannot contain weak pointers.
UNREACHABLE();
}
void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
MarkObject(host, target);
}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
MarkObject(host, rinfo->target_object(cage_base()));
}
private:
V8_INLINE void MarkObject(HeapObject host, Object object) {
if (!object.IsHeapObject()) return;
HeapObject heap_object = HeapObject::cast(object);
// We use this visitor in both client and shared GCs. The client GC should
// not mark objects in the shared heap. In shared GCs we are marking each
// client's top stack frame, so it is actually legal to encounter references
// into the client heap here in a shared GC. We need to bail out in these
// cases as well.
if (collector_->is_shared_heap() != heap_object.InSharedHeap()) return;
collector_->MarkObject(host, heap_object);
}
MarkCompactCollector* const collector_;
};
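// Visits objects in a client heap and records their references into the
// shared heap: the slot is added to the OLD_TO_SHARED remembered set and the
// referenced shared object is marked as a client-heap root.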
class MarkCompactCollector::SharedHeapObjectVisitor final
: public ObjectVisitorWithCageBases {
public:
explicit SharedHeapObjectVisitor(MarkCompactCollector* collector)
: ObjectVisitorWithCageBases(collector->isolate()),
collector_(collector) {}
void VisitPointer(HeapObject host, ObjectSlot p) final {
MarkObject(host, p, p.load(cage_base()));
}
void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
MaybeObject object = p.load(cage_base());
HeapObject heap_object;
if (object.GetHeapObject(&heap_object))
MarkObject(host, ObjectSlot(p), heap_object);
}
void VisitMapPointer(HeapObject host) final {
MarkObject(host, host.map_slot(), host.map(cage_base()));
}
void VisitPointers(HeapObject host, ObjectSlot start, ObjectSlot end) final {
for (ObjectSlot p = start; p < end; ++p) {
// The map slot should be handled in VisitMapPointer.
DCHECK_NE(host.map_slot(), p);
DCHECK(!HasWeakHeapObjectTag(p.load(cage_base())));
MarkObject(host, p, p.load(cage_base()));
}
}
void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
MarkObject(host, ObjectSlot(slot.address()), slot.load(code_cage_base()));
}
void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) final {
for (MaybeObjectSlot p = start; p < end; ++p) {
// The map slot should be handled in VisitMapPointer.
DCHECK_NE(host.map_slot(), ObjectSlot(p));
VisitPointer(host, p);
}
}
void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
RecordRelocSlot(host, rinfo, target);
}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
HeapObject target = rinfo->target_object(cage_base());
RecordRelocSlot(host, rinfo, target);
}
private:
V8_INLINE void MarkObject(HeapObject host, ObjectSlot slot, Object object) {
DCHECK(!host.InSharedHeap());
if (!object.IsHeapObject()) return;
HeapObject heap_object = HeapObject::cast(object);
if (!heap_object.InSharedHeap()) return;
RememberedSet<OLD_TO_SHARED>::Insert<AccessMode::NON_ATOMIC>(
MemoryChunk::FromHeapObject(host), slot.address());
collector_->MarkRootObject(Root::kClientHeap, heap_object);
}
V8_INLINE void RecordRelocSlot(Code host, RelocInfo* rinfo,
HeapObject target) {
if (ShouldRecordRelocSlot(host, rinfo, target)) {
RecordRelocSlotInfo info = ProcessRelocInfo(host, rinfo, target);
RememberedSet<OLD_TO_SHARED>::InsertTyped(info.memory_chunk,
info.slot_type, info.offset);
}
}
V8_INLINE bool ShouldRecordRelocSlot(Code host, RelocInfo* rinfo,
HeapObject target) {
return BasicMemoryChunk::FromHeapObject(target)->InSharedHeap();
}
MarkCompactCollector* const collector_;
};
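// Removes dead entries from the internalized string table: unmarked strings
// are replaced with the deleted-element sentinel, and the number of removed
// entries is reported via PointersRemoved().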
class InternalizedStringTableCleaner : public RootVisitor {
public:
explicit InternalizedStringTableCleaner(Heap* heap)
: heap_(heap), pointers_removed_(0) {}
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override {
UNREACHABLE();
}
void VisitRootPointers(Root root, const char* description,
OffHeapObjectSlot start,
OffHeapObjectSlot end) override {
DCHECK_EQ(root, Root::kStringTable);
// Visit all HeapObject pointers in [start, end).
MarkCompactCollector::NonAtomicMarkingState* marking_state =
heap_->mark_compact_collector()->non_atomic_marking_state();
Isolate* isolate = heap_->isolate();
for (OffHeapObjectSlot p = start; p < end; ++p) {
Object o = p.load(isolate);
if (o.IsHeapObject()) {
HeapObject heap_object = HeapObject::cast(o);
DCHECK(!Heap::InYoungGeneration(heap_object));
if (marking_state->IsWhite(heap_object)) {
pointers_removed_++;
// Set the entry to the deleted-element sentinel (as deleted).
p.store(StringTable::deleted_element());
}
}
}
}
int PointersRemoved() { return pointers_removed_; }
private:
Heap* heap_;
int pointers_removed_;
};
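// Removes dead entries from the external string table: unmarked external
// strings are finalized and their entries are replaced with the hole value.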
class ExternalStringTableCleaner : public RootVisitor {
public:
explicit ExternalStringTableCleaner(Heap* heap) : heap_(heap) {}
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override {
// Visit all HeapObject pointers in [start, end).
MarkCompactCollector::NonAtomicMarkingState* marking_state =
heap_->mark_compact_collector()->non_atomic_marking_state();
Object the_hole = ReadOnlyRoots(heap_).the_hole_value();
for (FullObjectSlot p = start; p < end; ++p) {
Object o = *p;
if (o.IsHeapObject()) {
HeapObject heap_object = HeapObject::cast(o);
if (marking_state->IsWhite(heap_object)) {
if (o.IsExternalString()) {
heap_->FinalizeExternalString(String::cast(o));
} else {
// The original external string may have been internalized.
DCHECK(o.IsThinString());
}
// Set the entry to the_hole_value (as deleted).
p.store(the_hole);
}
}
}
}
private:
Heap* heap_;
};
// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
// are retained.
class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
public:
explicit MarkCompactWeakObjectRetainer(
MarkCompactCollector::NonAtomicMarkingState* marking_state)
: marking_state_(marking_state) {}
Object RetainAs(Object object) override {
HeapObject heap_object = HeapObject::cast(object);
DCHECK(!marking_state_->IsGrey(heap_object));
if (marking_state_->IsBlack(heap_object)) {
return object;
} else if (object.IsAllocationSite() &&
!(AllocationSite::cast(object).IsZombie())) {
// "dead" AllocationSites need to live long enough for a traversal of new
// space. These sites get a one-time reprieve.
Object nested = object;
while (nested.IsAllocationSite()) {
AllocationSite current_site = AllocationSite::cast(nested);
// MarkZombie will override the nested_site; read it first before
// marking.
nested = current_site.nested_site();
current_site.MarkZombie();
marking_state_->WhiteToBlack(current_site);
}
return object;
} else {
return Object();
}
}
private:
MarkCompactCollector::NonAtomicMarkingState* marking_state_;
};
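// Revisits the body of a migrated (evacuated) object and re-records its
// outgoing references in the appropriate remembered sets: old-to-new for
// young targets, old-to-old or old-to-code for targets on evacuation
// candidates, and the ephemeron remembered set for young ephemeron keys.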
class RecordMigratedSlotVisitor : public ObjectVisitorWithCageBases {
public:
explicit RecordMigratedSlotVisitor(
MarkCompactCollector* collector,
EphemeronRememberedSet* ephemeron_remembered_set)
: ObjectVisitorWithCageBases(collector->isolate()),
collector_(collector),
ephemeron_remembered_set_(ephemeron_remembered_set) {}
inline void VisitPointer(HeapObject host, ObjectSlot p) final {
DCHECK(!HasWeakHeapObjectTag(p.load(cage_base())));
RecordMigratedSlot(host, MaybeObject::FromObject(p.load(cage_base())),
p.address());
}
inline void VisitMapPointer(HeapObject host) final {
VisitPointer(host, host.map_slot());
}
inline void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
DCHECK(!MapWord::IsPacked(p.Relaxed_Load(cage_base()).ptr()));
RecordMigratedSlot(host, p.load(cage_base()), p.address());
}
inline void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) final {
while (start < end) {
VisitPointer(host, start);
++start;
}
}
inline void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) final {
while (start < end) {
VisitPointer(host, start);
++start;
}
}
inline void VisitCodePointer(HeapObject host, CodeObjectSlot slot) final {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
// This code is similar to the implementation of VisitPointer() modulo the
// new kind of slot.
DCHECK(!HasWeakHeapObjectTag(slot.load(code_cage_base())));
Object code = slot.load(code_cage_base());
RecordMigratedSlot(host, MaybeObject::FromObject(code), slot.address());
}
inline void VisitEphemeron(HeapObject host, int index, ObjectSlot key,
ObjectSlot value) override {
DCHECK(host.IsEphemeronHashTable());
DCHECK(!Heap::InYoungGeneration(host));
VisitPointer(host, value);
if (ephemeron_remembered_set_ && Heap::InYoungGeneration(*key)) {
auto table = EphemeronHashTable::unchecked_cast(host);
auto insert_result =
ephemeron_remembered_set_->insert({table, std::unordered_set<int>()});
insert_result.first->second.insert(index);
} else {
VisitPointer(host, key);
}
}
inline void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
DCHECK_EQ(host, rinfo->host());
DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
// The target is always in old space, so we don't have to record the slot
// in the old-to-new remembered set.
DCHECK(!Heap::InYoungGeneration(target));
collector_->RecordRelocSlot(host, rinfo, target);
}
inline void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
DCHECK_EQ(host, rinfo->host());
DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
HeapObject object = rinfo->target_object(cage_base());
GenerationalBarrierForCode(host, rinfo, object);
collector_->RecordRelocSlot(host, rinfo, object);
}
// Entries that are skipped for recording.
inline void VisitExternalReference(Code host, RelocInfo* rinfo) final {}
inline void VisitExternalReference(Foreign host, Address* p) final {}
inline void VisitRuntimeEntry(Code host, RelocInfo* rinfo) final {}
inline void VisitInternalReference(Code host, RelocInfo* rinfo) final {}
virtual void MarkArrayBufferExtensionPromoted(HeapObject object) {}
protected:
inline virtual void RecordMigratedSlot(HeapObject host, MaybeObject value,
Address slot) {
if (value->IsStrongOrWeak()) {
BasicMemoryChunk* p = BasicMemoryChunk::FromAddress(value.ptr());
if (p->InYoungGeneration()) {
DCHECK_IMPLIES(
p->IsToPage(),
p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) || p->IsLargePage());
MemoryChunk* chunk = MemoryChunk::FromHeapObject(host);
DCHECK(chunk->SweepingDone());
DCHECK_NULL(chunk->sweeping_slot_set<AccessMode::NON_ATOMIC>());
RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
} else if (p->IsEvacuationCandidate()) {
if (V8_EXTERNAL_CODE_SPACE_BOOL &&
p->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
RememberedSet<OLD_TO_CODE>::Insert<AccessMode::NON_ATOMIC>(
MemoryChunk::FromHeapObject(host), slot);
} else {
RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
MemoryChunk::FromHeapObject(host), slot);
}
}
}
}
MarkCompactCollector* collector_;
EphemeronRememberedSet* ephemeron_remembered_set_;
};
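// Observer interface notified for every object migrated during evacuation;
// see ProfilingMigrationObserver below for the profiler integration.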
class MigrationObserver {
public:
explicit MigrationObserver(Heap* heap) : heap_(heap) {}
virtual ~MigrationObserver() = default;
virtual void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
int size) = 0;
protected:
Heap* heap_;
};
class ProfilingMigrationObserver final : public MigrationObserver {
public:
explicit ProfilingMigrationObserver(Heap* heap) : MigrationObserver(heap) {}
inline void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
int size) final {
if (dest == CODE_SPACE || (dest == OLD_SPACE && dst.IsBytecodeArray())) {
PROFILE(heap_->isolate(),
CodeMoveEvent(AbstractCode::cast(src), AbstractCode::cast(dst)));
}
heap_->OnMoveEvent(dst, src, size);
}
};
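// Abstract visitor over live heap objects used by the evacuation phase;
// Visit() returns false if the object could not be processed.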
class HeapObjectVisitor {
public:
virtual ~HeapObjectVisitor() = default;
virtual bool Visit(HeapObject object, int size) = 0;
};
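// Shared evacuation logic: allocates space in the target space (or in the
// shared heap for in-place-internalizable strings), copies the object,
// revisits it with the record visitor, installs a forwarding map word in the
// old copy, and notifies registered migration observers.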
class EvacuateVisitorBase : public HeapObjectVisitor {
public:
void AddObserver(MigrationObserver* observer) {
migration_function_ = RawMigrateObject<MigrationMode::kObserved>;
observers_.push_back(observer);
}
protected:
enum MigrationMode { kFast, kObserved };
PtrComprCageBase cage_base() {
#if V8_COMPRESS_POINTERS
return PtrComprCageBase{heap_->isolate()};
#else
return PtrComprCageBase{};
#endif // V8_COMPRESS_POINTERS
}
using MigrateFunction = void (*)(EvacuateVisitorBase* base, HeapObject dst,
HeapObject src, int size,
AllocationSpace dest);
template <MigrationMode mode>
static void RawMigrateObject(EvacuateVisitorBase* base, HeapObject dst,
HeapObject src, int size, AllocationSpace dest) {
Address dst_addr = dst.address();
Address src_addr = src.address();
PtrComprCageBase cage_base = base->cage_base();
DCHECK(base->heap_->AllowedToBeMigrated(src.map(cage_base), src, dest));
DCHECK_NE(dest, LO_SPACE);
DCHECK_NE(dest, CODE_LO_SPACE);
if (dest == OLD_SPACE) {
DCHECK_OBJECT_SIZE(size);
DCHECK(IsAligned(size, kTaggedSize));
base->heap_->CopyBlock(dst_addr, src_addr, size);
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
// In case the object's map gets relocated during GC we load the old map
// here. This is fine since they store the same content.
dst.IterateFast(dst.map(cage_base), size, base->record_visitor_);
if (V8_UNLIKELY(FLAG_minor_mc)) {
base->record_visitor_->MarkArrayBufferExtensionPromoted(dst);
}
} else if (dest == MAP_SPACE) {
DCHECK_OBJECT_SIZE(size);
DCHECK(IsAligned(size, kTaggedSize));
base->heap_->CopyBlock(dst_addr, src_addr, size);
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
dst.IterateFast(dst.map(cage_base), size, base->record_visitor_);
} else if (dest == CODE_SPACE) {
DCHECK_CODEOBJECT_SIZE(size, base->heap_->code_space());
base->heap_->CopyBlock(dst_addr, src_addr, size);
Code code = Code::cast(dst);
code.Relocate(dst_addr - src_addr);
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
// In case the object's map gets relocated during GC we load the old map
// here. This is fine since they store the same content.
dst.IterateFast(dst.map(cage_base), size, base->record_visitor_);
} else {
DCHECK_OBJECT_SIZE(size);
DCHECK(dest == NEW_SPACE);
base->heap_->CopyBlock(dst_addr, src_addr, size);
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
}
src.set_map_word(MapWord::FromForwardingAddress(dst), kRelaxedStore);
}
EvacuateVisitorBase(Heap* heap, EvacuationAllocator* local_allocator,
ConcurrentAllocator* shared_old_allocator,
RecordMigratedSlotVisitor* record_visitor)
: heap_(heap),
local_allocator_(local_allocator),
shared_old_allocator_(shared_old_allocator),
record_visitor_(record_visitor),
shared_string_table_(shared_old_allocator != nullptr) {
migration_function_ = RawMigrateObject<MigrationMode::kFast>;
}
inline bool TryEvacuateObject(AllocationSpace target_space, HeapObject object,
int size, HeapObject* target_object) {
#ifdef DEBUG
if (FLAG_stress_compaction && AbortCompactionForTesting(object))
return false;
#endif // DEBUG
Map map = object.map(cage_base());
AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
AllocationResult allocation;
if (ShouldPromoteIntoSharedHeap(map)) {
DCHECK_EQ(target_space, OLD_SPACE);
DCHECK(Heap::InYoungGeneration(object));
DCHECK_NOT_NULL(shared_old_allocator_);
allocation = shared_old_allocator_->AllocateRaw(size, alignment,
AllocationOrigin::kGC);
} else {
allocation = local_allocator_->Allocate(target_space, size,
AllocationOrigin::kGC, alignment);
}
if (allocation.To(target_object)) {
MigrateObject(*target_object, object, size, target_space);
if (target_space == CODE_SPACE)
MemoryChunk::FromHeapObject(*target_object)
->GetCodeObjectRegistry()
->RegisterNewlyAllocatedCodeObject((*target_object).address());
return true;
}
return false;
}
inline bool ShouldPromoteIntoSharedHeap(Map map) {
if (shared_string_table_) {
return String::IsInPlaceInternalizableExcludingExternal(
map.instance_type());
}
return false;
}
inline void ExecuteMigrationObservers(AllocationSpace dest, HeapObject src,
HeapObject dst, int size) {
for (MigrationObserver* obs : observers_) {
obs->Move(dest, src, dst, size);
}
}
inline void MigrateObject(HeapObject dst, HeapObject src, int size,
AllocationSpace dest) {
migration_function_(this, dst, src, size, dest);
}
#ifdef DEBUG
bool AbortCompactionForTesting(HeapObject object) {
if (FLAG_stress_compaction) {
const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
kPageAlignmentMask & ~kObjectAlignmentMask;
if ((object.ptr() & kPageAlignmentMask) == mask) {
Page* page = Page::FromHeapObject(object);
if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
} else {
page->SetFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
return true;
}
}
}
return false;
}
#endif // DEBUG
Heap* heap_;
EvacuationAllocator* local_allocator_;
ConcurrentAllocator* shared_old_allocator_;
RecordMigratedSlotVisitor* record_visitor_;
std::vector<MigrationObserver*> observers_;
MigrateFunction migration_function_;
bool shared_string_table_ = false;
};
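// Evacuates live new-space objects: either promotes them into old space
// (always, or when the promotion heuristic decides so) or copies them within
// the semispaces, collecting pretenuring feedback along the way.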
class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
public:
explicit EvacuateNewSpaceVisitor(
Heap* heap, EvacuationAllocator* local_allocator,
ConcurrentAllocator* shared_old_allocator,
RecordMigratedSlotVisitor* record_visitor,
Heap::PretenuringFeedbackMap* local_pretenuring_feedback,
AlwaysPromoteYoung always_promote_young)
: EvacuateVisitorBase(heap, local_allocator, shared_old_allocator,
record_visitor),
buffer_(LocalAllocationBuffer::InvalidBuffer()),
promoted_size_(0),
semispace_copied_size_(0),
local_pretenuring_feedback_(local_pretenuring_feedback),
is_incremental_marking_(heap->incremental_marking()->IsMarking()),
always_promote_young_(always_promote_young) {}
inline bool Visit(HeapObject object, int size) override {
if (TryEvacuateWithoutCopy(object)) return true;
HeapObject target_object;
if (always_promote_young_ == AlwaysPromoteYoung::kYes) {
heap_->UpdateAllocationSite(object.map(), object,
local_pretenuring_feedback_);
if (!TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
heap_->FatalProcessOutOfMemory(
"MarkCompactCollector: young object promotion failed");
}
promoted_size_ += size;
return true;
}
if (heap_->ShouldBePromoted(object.address()) &&
TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
promoted_size_ += size;
return true;
}
heap_->UpdateAllocationSite(object.map(), object,
local_pretenuring_feedback_);
HeapObject target;
AllocationSpace space = AllocateTargetObject(object, size, &target);
MigrateObject(HeapObject::cast(target), object, size, space);
semispace_copied_size_ += size;
return true;
}
intptr_t promoted_size() { return promoted_size_; }
intptr_t semispace_copied_size() { return semispace_copied_size_; }
private:
inline bool TryEvacuateWithoutCopy(HeapObject object) {
if (is_incremental_marking_) return false;
Map map = object.map();
// Some objects can be evacuated without creating a copy.
if (map.visitor_id() == kVisitThinString) {
HeapObject actual = ThinString::cast(object).unchecked_actual();
if (MarkCompactCollector::IsOnEvacuationCandidate(actual)) return false;
object.set_map_word(MapWord::FromForwardingAddress(actual),
kRelaxedStore);
return true;
}
// TODO(mlippautz): Handle ConsString.
return false;
}
inline AllocationSpace AllocateTargetObject(HeapObject old_object, int size,
HeapObject* target_object) {
AllocationAlignment alignment =
HeapObject::RequiredAlignment(old_object.map());
AllocationSpace space_allocated_in = NEW_SPACE;
AllocationResult allocation = local_allocator_->Allocate(
NEW_SPACE, size, AllocationOrigin::kGC, alignment);
if (allocation.IsFailure()) {
allocation = AllocateInOldSpace(size, alignment);
space_allocated_in = OLD_SPACE;
}
bool ok = allocation.To(target_object);
DCHECK(ok);
USE(ok);
return space_allocated_in;
}
inline AllocationResult AllocateInOldSpace(int size_in_bytes,
AllocationAlignment alignment) {
AllocationResult allocation = local_allocator_->Allocate(
OLD_SPACE, size_in_bytes, AllocationOrigin::kGC, alignment);
if (allocation.IsFailure()) {
heap_->FatalProcessOutOfMemory(
"MarkCompactCollector: semi-space copy, fallback in old gen");
}
return allocation;
}
LocalAllocationBuffer buffer_;
intptr_t promoted_size_;
intptr_t semispace_copied_size_;
Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
bool is_incremental_marking_;
AlwaysPromoteYoung always_promote_young_;
};
template <PageEvacuationMode mode>
class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
public:
explicit EvacuateNewSpacePageVisitor(
Heap* heap, RecordMigratedSlotVisitor* record_visitor,
Heap::PretenuringFeedbackMap* local_pretenuring_feedback)
: heap_(heap),
record_visitor_(record_visitor),
moved_bytes_(0),
local_pretenuring_feedback_(local_pretenuring_feedback) {}
static void Move(Page* page) {
switch (mode) {
case NEW_TO_NEW:
page->heap()->new_space()->MovePageFromSpaceToSpace(page);
page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
break;
case NEW_TO_OLD: {
page->heap()->new_space()->from_space().RemovePage(page);
Page* new_page = Page::ConvertNewToOld(page);
DCHECK(!new_page->InYoungGeneration());
new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
break;
}
}
}
inline bool Visit(HeapObject object, int size) override {
if (mode == NEW_TO_NEW) {
heap_->UpdateAllocationSite(object.map(), object,
local_pretenuring_feedback_);
} else if (mode == NEW_TO_OLD) {
DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(object));
PtrComprCageBase cage_base = GetPtrComprCageBase(object);
object.IterateFast(cage_base, record_visitor_);
if (V8_UNLIKELY(FLAG_minor_mc)) {
record_visitor_->MarkArrayBufferExtensionPromoted(object);
}
}
return true;
}
intptr_t moved_bytes() { return moved_bytes_; }
void account_moved_bytes(intptr_t bytes) { moved_bytes_ += bytes; }
private:
Heap* heap_;
RecordMigratedSlotVisitor* record_visitor_;
intptr_t moved_bytes_;
Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
};
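// Evacuates live objects from evacuation candidate pages; each object is
// reallocated in the same space that owns its current page.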
class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
public:
EvacuateOldSpaceVisitor(Heap* heap, EvacuationAllocator* local_allocator,
ConcurrentAllocator* shared_old_allocator,
RecordMigratedSlotVisitor* record_visitor)
: EvacuateVisitorBase(heap, local_allocator, shared_old_allocator,
record_visitor) {}
inline bool Visit(HeapObject object, int size) override {
HeapObject target_object;
if (TryEvacuateObject(Page::FromHeapObject(object)->owner_identity(),
object, size, &target_object)) {
DCHECK(object.map_word(heap_->isolate(), kRelaxedLoad)
.IsForwardingAddress());
return true;
}
return false;
}
};
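// Records the outgoing slots of live objects without moving them; typically
// used for pages whose evacuation had to be aborted.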
class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
public:
explicit EvacuateRecordOnlyVisitor(Heap* heap)
: heap_(heap)
#ifdef V8_COMPRESS_POINTERS
,
cage_base_(heap->isolate())
#endif // V8_COMPRESS_POINTERS
{
}
// The pointer compression cage base value used for decompression of all
// tagged values except references to Code objects.
V8_INLINE PtrComprCageBase cage_base() const {
#ifdef V8_COMPRESS_POINTERS
return cage_base_;
#else
return PtrComprCageBase{};
#endif // V8_COMPRESS_POINTERS
}
inline bool Visit(HeapObject object, int size) override {
RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector(),
&heap_->ephemeron_remembered_set_);
Map map = object.map(cage_base());
// Instead of calling object.IterateBodyFast(cage_base(), &visitor) here
// we can shortcut and use the precomputed size value passed to the visitor.
DCHECK_EQ(object.SizeFromMap(map), size);
object.IterateBodyFast(map, size, &visitor);
return true;
}
private:
Heap* heap_;
#ifdef V8_COMPRESS_POINTERS
const PtrComprCageBase cage_base_;
#endif // V8_COMPRESS_POINTERS
};
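// Callback used while processing weak global handles: an object is treated
// as unmarked (dead) if it is still white at this point in marking.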
bool MarkCompactCollector::IsUnmarkedHeapObject(Heap* heap, FullObjectSlot p) {
Object o = *p;
if (!o.IsHeapObject()) return false;
HeapObject heap_object = HeapObject::cast(o);
return heap->mark_compact_collector()->non_atomic_marking_state()->IsWhite(
heap_object);
}
void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor,
ObjectVisitor* custom_root_body_visitor) {
// Mark the heap roots including global variables, stack variables,
// etc., and all objects reachable from them.
heap()->IterateRootsIncludingClients(
root_visitor, base::EnumSet<SkipRoot>{SkipRoot::kWeak});
// Custom marking for top optimized frame.
ProcessTopOptimizedFrame(custom_root_body_visitor, isolate());
if (isolate()->is_shared()) {
isolate()->global_safepoint()->IterateClientIsolates(
[this, custom_root_body_visitor](Isolate* client) {
ProcessTopOptimizedFrame(custom_root_body_visitor, client);
});
}
}
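// In the shared isolate, references into the shared heap may originate from
// any client isolate's heap. Such references are not reachable via regular
// roots, so all client heaps are iterated here and pointers into the shared
// heap are marked via SharedHeapObjectVisitor.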
void MarkCompactCollector::MarkObjectsFromClientHeaps() {
if (!isolate()->is_shared()) return;
SharedHeapObjectVisitor visitor(this);
isolate()->global_safepoint()->IterateClientIsolates(
[&visitor](Isolate* client) {
Heap* heap = client->heap();
HeapObjectIterator iterator(heap, HeapObjectIterator::kNoFiltering);
PtrComprCageBase cage_base(client);
for (HeapObject obj = iterator.Next(); !obj.is_null();
obj = iterator.Next()) {
obj.IterateFast(cage_base, &visitor);
}
});
}
void MarkCompactCollector::VisitObject(HeapObject obj) {
marking_visitor_->Visit(obj.map(), obj);
}
void MarkCompactCollector::RevisitObject(HeapObject obj) {
DCHECK(marking_state()->IsBlack(obj));
DCHECK_IMPLIES(MemoryChunk::FromHeapObject(obj)->ProgressBar().IsEnabled(),
0u == MemoryChunk::FromHeapObject(obj)->ProgressBar().Value());
MarkingVisitor::RevisitScope revisit(marking_visitor_.get());
marking_visitor_->Visit(obj.map(marking_visitor_->cage_base()), obj);
}
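// Ephemeron semantics: an ephemeron's value is kept alive only if both the
// ephemeron's key and the holding WeakMap are live. This routine iterates
// marking until no more ephemeron values become reachable, giving up after
// FLAG_ephemeron_fixpoint_iterations rounds so that the caller can fall
// back to the linear algorithm.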
bool MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
int iterations = 0;
int max_iterations = FLAG_ephemeron_fixpoint_iterations;
bool another_ephemeron_iteration_main_thread;
do {
PerformWrapperTracing();
if (iterations >= max_iterations) {
// Give up fixpoint iteration and switch to linear algorithm.
return false;
}
// Move ephemerons from next_ephemerons into current_ephemerons to
// drain them in this iteration.
DCHECK(
local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
weak_objects_.current_ephemerons.Swap(&weak_objects_.next_ephemerons);
heap()->concurrent_marking()->set_another_ephemeron_iteration(false);
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
if (FLAG_parallel_marking) {
heap_->concurrent_marking()->RescheduleJobIfNeeded(
TaskPriority::kUserBlocking);
}
another_ephemeron_iteration_main_thread = ProcessEphemerons();
FinishConcurrentMarking();
}
CHECK(
local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
CHECK(local_weak_objects()
->discovered_ephemerons_local.IsLocalAndGlobalEmpty());
++iterations;
} while (another_ephemeron_iteration_main_thread ||
heap()->concurrent_marking()->another_ephemeron_iteration() ||
!local_marking_worklists()->IsEmpty() ||
!local_marking_worklists()->IsWrapperEmpty() ||
!heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
CHECK(local_marking_worklists()->IsEmpty());
CHECK(local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
CHECK(local_weak_objects()
->discovered_ephemerons_local.IsLocalAndGlobalEmpty());
return true;
}
bool MarkCompactCollector::ProcessEphemerons() {
Ephemeron ephemeron;
bool another_ephemeron_iteration = false;
// Drain current_ephemerons and push ephemerons where key and value are still
// unreachable into next_ephemerons.
while (local_weak_objects()->current_ephemerons_local.Pop(&ephemeron)) {
if (ProcessEphemeron(ephemeron.key, ephemeron.value)) {
another_ephemeron_iteration = true;
}
}
// Drain marking worklist and push discovered ephemerons into
// discovered_ephemerons.
size_t objects_processed;
std::tie(std::ignore, objects_processed) = ProcessMarkingWorklist(0);
// As soon as a single object was processed and potentially marked another
// object, we need another iteration. Otherwise we might fail to apply
// ephemeron semantics to it.
if (objects_processed > 0) another_ephemeron_iteration = true;
// Drain discovered_ephemerons (filled while draining the marking worklist
// above) and push ephemerons whose key and value are still unreachable into
// next_ephemerons.
while (local_weak_objects()->discovered_ephemerons_local.Pop(&ephemeron)) {
if (ProcessEphemeron(ephemeron.key, ephemeron.value)) {
another_ephemeron_iteration = true;
}
}
// Flush local ephemerons for main task to global pool.
local_weak_objects()->ephemeron_hash_tables_local.Publish();
local_weak_objects()->next_ephemerons_local.Publish();
return another_ephemeron_iteration;
}
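// Linear fallback for ephemeron processing: unresolved ephemerons are
// collected into a key->values multimap so that, once a key becomes marked,
// its values can be marked directly instead of re-scanning every ephemeron
// in each iteration.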
void MarkCompactCollector::ProcessEphemeronsLinear() {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_LINEAR);
CHECK(heap()->concurrent_marking()->IsStopped());
std::unordered_multimap<HeapObject, HeapObject, Object::Hasher> key_to_values;
Ephemeron ephemeron;
DCHECK(
local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
weak_objects_.current_ephemerons.Swap(&weak_objects_.next_ephemerons);
while (local_weak_objects()->current_ephemerons_local.Pop(&ephemeron)) {
ProcessEphemeron(ephemeron.key, ephemeron.value);
if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
key_to_values.insert(std::make_pair(ephemeron.key, ephemeron.value));
}
}
ephemeron_marking_.newly_discovered_limit = key_to_values.size();
bool work_to_do = true;
while (work_to_do) {
PerformWrapperTracing();
ResetNewlyDiscovered();
ephemeron_marking_.newly_discovered_limit = key_to_values.size();
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
// Drain marking worklist and push all discovered objects into
// newly_discovered.
ProcessMarkingWorklist<
MarkCompactCollector::MarkingWorklistProcessingMode::
kTrackNewlyDiscoveredObjects>(0);
}
while (local_weak_objects()->discovered_ephemerons_local.Pop(&ephemeron)) {
ProcessEphemeron(ephemeron.key, ephemeron.value);
if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
key_to_values.insert(std::make_pair(ephemeron.key, ephemeron.value));
}
}
if (ephemeron_marking_.newly_discovered_overflowed) {
// If newly_discovered overflowed, just visit all ephemerons in
// next_ephemerons.
local_weak_objects()->next_ephemerons_local.Publish();
weak_objects_.next_ephemerons.Iterate([&](Ephemeron ephemeron) {
if (non_atomic_marking_state()->IsBlackOrGrey(ephemeron.key) &&
non_atomic_marking_state()->WhiteToGrey(ephemeron.value)) {
local_marking_worklists()->Push(ephemeron.value);
}
});
} else {
// This is the good case: newly_discovered stores all discovered
// objects. Now use key_to_values to see if discovered objects keep more
// objects alive due to ephemeron semantics.
for (HeapObject object : ephemeron_marking_.newly_discovered) {
auto range = key_to_values.equal_range(object);
for (auto it = range.first; it != range.second; ++it) {
HeapObject value = it->second;
MarkObject(object, value);
}
}
}
// Do NOT drain marking worklist here, otherwise the current checks
// for work_to_do are not sufficient for determining if another iteration
// is necessary.
work_to_do = !local_marking_worklists()->IsEmpty() ||
!local_marking_worklists()->IsWrapperEmpty() ||
!heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
CHECK(local_weak_objects()
->discovered_ephemerons_local.IsLocalAndGlobalEmpty());
}
ResetNewlyDiscovered();
ephemeron_marking_.newly_discovered.shrink_to_fit();
CHECK(local_marking_worklists()->IsEmpty());
CHECK(weak_objects_.current_ephemerons.IsEmpty());
CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
// Flush local ephemerons for main task to global pool.
local_weak_objects()->ephemeron_hash_tables_local.Publish();
local_weak_objects()->next_ephemerons_local.Publish();
}
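// Hands wrapper objects discovered during marking over to the embedder heap
// tracer (e.g. the Blink/Oilpan heap) and lets it trace until its part of
// the object graph is fully marked.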
void MarkCompactCollector::PerformWrapperTracing() {
if (heap_->local_embedder_heap_tracer()->InUse()) {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_TRACING);
if (local_marking_worklists()->PublishWrapper()) {
DCHECK(local_marking_worklists()->IsWrapperEmpty());
} else {
// Cannot directly publish wrapper objects.
LocalEmbedderHeapTracer::ProcessingScope scope(
heap_->local_embedder_heap_tracer());
HeapObject object;
while (local_marking_worklists()->PopWrapper(&object)) {
scope.TracePossibleWrapper(JSObject::cast(object));
}
}
heap_->local_embedder_heap_tracer()->Trace(
std::numeric_limits<double>::infinity());
}
}
void MarkCompactCollector::DrainMarkingWorklist() { ProcessMarkingWorklist(0); }
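// Pops objects from the marking worklists and visits them until either the
// worklists are empty or |bytes_to_process| visited bytes are reached
// (0 means no limit). Returns the number of bytes and objects processed.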
template <MarkCompactCollector::MarkingWorklistProcessingMode mode>
std::pair<size_t, size_t> MarkCompactCollector::ProcessMarkingWorklist(
size_t bytes_to_process) {
HeapObject object;
size_t bytes_processed = 0;
size_t objects_processed = 0;
bool is_per_context_mode = local_marking_worklists()->IsPerContextMode();
Isolate* isolate = heap()->isolate();
PtrComprCageBase cage_base(isolate);
while (local_marking_worklists()->Pop(&object) ||
local_marking_worklists()->PopOnHold(&object)) {
// Left trimming may result in grey or black filler objects on the marking
// worklist. Ignore these objects.
if (object.IsFreeSpaceOrFiller(cage_base)) {
// Due to copying mark bits and the fact that grey and black have their
// first bit set, one word fillers are always black.
DCHECK_IMPLIES(object.map(cage_base) ==
ReadOnlyRoots(isolate).one_pointer_filler_map(),
marking_state()->IsBlack(object));
// Other fillers may be black or grey depending on the color of the object
// that was trimmed.
DCHECK_IMPLIES(object.map(cage_base) !=
ReadOnlyRoots(isolate).one_pointer_filler_map(),
marking_state()->IsBlackOrGrey(object));
continue;
}
DCHECK(object.IsHeapObject());
DCHECK(heap()->Contains(object));
DCHECK(!(marking_state()->IsWhite(object)));
if (mode == MarkCompactCollector::MarkingWorklistProcessingMode::
kTrackNewlyDiscoveredObjects) {
AddNewlyDiscovered(object);
}
Map map = object.map(cage_base);
if (is_per_context_mode) {
Address context;
if (native_context_inferrer_.Infer(isolate, map, object, &context)) {
local_marking_worklists()->SwitchToContext(context);
}
}
size_t visited_size = marking_visitor_->Visit(map, object);
if (is_per_context_mode) {
native_context_stats_.IncrementSize(local_marking_worklists()->Context(),
map, object, visited_size);
}
bytes_processed += visited_size;
objects_processed++;
if (bytes_to_process && bytes_processed >= bytes_to_process) {
break;
}
}
return std::make_pair(bytes_processed, objects_processed);
}
// Generate definitions for use in other files.
template std::pair<size_t, size_t> MarkCompactCollector::ProcessMarkingWorklist<
MarkCompactCollector::MarkingWorklistProcessingMode::kDefault>(
size_t bytes_to_process);
template std::pair<size_t, size_t> MarkCompactCollector::ProcessMarkingWorklist<
MarkCompactCollector::MarkingWorklistProcessingMode::
kTrackNewlyDiscoveredObjects>(size_t bytes_to_process);
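// Processes a single ephemeron: if the key is already marked, the value is
// marked as well (returning true if it was newly marked). If both key and
// value are unmarked, the ephemeron is deferred to next_ephemerons for a
// later iteration.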
bool MarkCompactCollector::ProcessEphemeron(HeapObject key, HeapObject value) {
if (marking_state()->IsBlackOrGrey(key)) {
if (marking_state()->WhiteToGrey(value)) {
local_marking_worklists()->Push(value);
return true;
}
} else if (marking_state()->IsWhite(value)) {
local_weak_objects()->next_ephemerons_local.Push(Ephemeron{key, value});
}
return false;
}
void MarkCompactCollector::ProcessEphemeronMarking() {
DCHECK(local_marking_worklists()->IsEmpty());
// Incremental marking might leave ephemerons in the main task's local
// buffer; flush them into the global pool.
local_weak_objects()->next_ephemerons_local.Publish();
if (!ProcessEphemeronsUntilFixpoint()) {
// Fixpoint iteration needed too many iterations and was cancelled. Use the
// guaranteed linear algorithm.
ProcessEphemeronsLinear();
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Ephemeron ephemeron;
DCHECK(
local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
weak_objects_.current_ephemerons.Swap(&weak_objects_.next_ephemerons);
while (local_weak_objects()->current_ephemerons_local.Pop(&ephemeron)) {
CHECK(!ProcessEphemeron(ephemeron.key, ephemeron.value));
}
}
#endif
CHECK(local_marking_worklists()->IsEmpty());
CHECK(heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
}
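// The code of the topmost optimized frame cannot be deoptimized at its
// current pc, so objects embedded in it must be kept alive; its body is
// therefore visited here with the custom root body visitor.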
void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor,
Isolate* isolate) {
for (StackFrameIterator it(isolate, isolate->thread_local_top()); !it.done();
it.Advance()) {
if (it.frame()->is_unoptimized()) return;
if (it.frame()->type() == StackFrame::OPTIMIZED) {
Code code = it.frame()->LookupCode();
if (!code.CanDeoptAt(isolate, it.frame()->pc())) {
PtrComprCageBase cage_base(isolate);
Code::BodyDescriptor::IterateBody(code.map(cage_base), code, visitor);
}
return;
}
}
}
void MarkCompactCollector::RecordObjectStats() {
if (V8_LIKELY(!TracingFlags::is_gc_stats_enabled())) return;
// Cannot run during bootstrapping due to incomplete objects.
if (isolate()->bootstrapper()->IsActive()) return;
heap()->CreateObjectStats();
ObjectStatsCollector collector(heap(), heap()->live_object_stats_.get(),
heap()->dead_object_stats_.get());
collector.Collect();
if (V8_UNLIKELY(TracingFlags::gc_stats.load(std::memory_order_relaxed) &
v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
std::stringstream live, dead;
heap()->live_object_stats_->Dump(live);
heap()->dead_object_stats_->Dump(dead);
TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
"V8.GC_Objects_Stats", TRACE_EVENT_SCOPE_THREAD,
"live", TRACE_STR_COPY(live.str().c_str()), "dead",
TRACE_STR_COPY(dead.str().c_str()));
}
if (FLAG_trace_gc_object_stats) {
heap()->live_object_stats_->PrintJSON("live");
heap()->dead_object_stats_->PrintJSON("dead");
}
heap()->live_object_stats_->CheckpointObjectStats();
heap()->dead_object_stats_->ClearObjectStats();
}
void MarkCompactCollector::MarkLiveObjects() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
// The recursive GC marker detects when it is nearing stack overflow,
// and switches to a different marking system. JS interrupts interfere
// with the C stack limit check.
PostponeInterruptsScope postpone(isolate());
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
IncrementalMarking* incremental_marking = heap_->incremental_marking();
if (was_marked_incrementally_) {
incremental_marking->Finalize();
MarkingBarrier::PublishAll(heap());
} else {
CHECK(incremental_marking->IsStopped());
}
}
#ifdef DEBUG
DCHECK(state_ == PREPARE_GC);
state_ = MARK_LIVE_OBJECTS;
#endif
heap_->local_embedder_heap_tracer()->EnterFinalPause();
RootMarkingVisitor root_visitor(this);
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
CustomRootBodyMarkingVisitor custom_root_body_visitor(this);
MarkRoots(&root_visitor, &custom_root_body_visitor);
}
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_CLIENT_HEAPS);
MarkObjectsFromClientHeaps();
}
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN);
if (FLAG_parallel_marking) {
heap_->concurrent_marking()->RescheduleJobIfNeeded(
TaskPriority::kUserBlocking);
}
DrainMarkingWorklist();
FinishConcurrentMarking();
DrainMarkingWorklist();
}
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
DCHECK(local_marking_worklists()->IsEmpty());
// Mark objects reachable through the embedder heap. This phase is
// opportunistic as it may not discover graphs that are only reachable
// through ephemerons.
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_EMBEDDER_TRACING_CLOSURE);
do {
// PerformWrapperTracing() also empties the work items collected by
// concurrent markers. As a result this call needs to happen at least
// once.
PerformWrapperTracing();
DrainMarkingWorklist();
} while (!heap_->local_embedder_heap_tracer()->IsRemoteTracingDone() ||
!local_marking_worklists()->IsWrapperEmpty());
DCHECK(local_marking_worklists()->IsWrapperEmpty());
DCHECK(local_marking_worklists()->IsEmpty());
}
// The objects reachable from the roots are marked, yet unreachable objects
// are unmarked. Mark objects reachable due to embedder heap tracing or
// harmony weak maps.
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON);
ProcessEphemeronMarking();
DCHECK(local_marking_worklists()->IsEmpty());
}
// The objects reachable from the roots, weak maps, and embedder heap
// tracing are marked. Objects pointed to only by weak global handles cannot
// be immediately reclaimed. Instead, we have to mark them as pending and
// mark objects reachable from them.
//
// First we identify nonlive weak handles and mark them as pending
// destruction.
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES);
heap()->isolate()->global_handles()->IterateWeakRootsIdentifyFinalizers(
&IsUnmarkedHeapObject);
DrainMarkingWorklist();
}
// Process finalizers, effectively keeping them alive until the next
// garbage collection.
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS);
heap()->isolate()->global_handles()->IterateWeakRootsForFinalizers(
&root_visitor);
DrainMarkingWorklist();
}
// Repeat ephemeron processing from the newly marked objects.
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
ProcessEphemeronMarking();
DCHECK(local_marking_worklists()->IsWrapperEmpty());
DCHECK(local_marking_worklists()->IsEmpty());
}
// We depend on IterateWeakRootsForPhantomHandles being called before
// ProcessOldCodeCandidates in order to identify flushed bytecode in the
// CPU profiler.
{
heap()->isolate()->global_handles()->IterateWeakRootsForPhantomHandles(
&IsUnmarkedHeapObject);
}
}
if (was_marked_incrementally_) {
MarkingBarrier::DeactivateAll(heap());
GlobalHandles::DisableMarkingBarrier(heap()->isolate());
}
epoch_++;
}
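// Clearing phase: with marking complete, references to unmarked objects
// (string table entries, flushable code, weak lists, map transitions, weak
// references and collections, JS weak refs) are cleared or redirected
// before evacuation starts.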
void MarkCompactCollector::ClearNonLiveReferences() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
if (isolate()->OwnsStringTable()) {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE);
// Prune the string table removing all strings only pointed to by the
// string table. Cannot use string_table() here because the string
// table is marked.
StringTable* string_table = isolate()->string_table();
InternalizedStringTableCleaner internalized_visitor(heap());
string_table->DropOldData();
string_table->IterateElements(&internalized_visitor);
string_table->NotifyElementsRemoved(internalized_visitor.PointersRemoved());
}
ExternalStringTableCleaner external_visitor(heap());
heap()->external_string_table_.IterateAll(&external_visitor);
heap()->external_string_table_.CleanUpAll();
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_FLUSHABLE_BYTECODE);
// ProcessFlushedBaselineCandidates should be called after clearing bytecode
// so that any bytecode that needs flushing has been flushed and we can
// correctly set the code object on the JSFunction.
ProcessOldCodeCandidates();
ProcessFlushedBaselineCandidates();
}
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_FLUSHED_JS_FUNCTIONS);
ClearFlushedJsFunctions();
}
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
// Process the weak references.
MarkCompactWeakObjectRetainer mark_compact_object_retainer(
non_atomic_marking_state());
heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
}
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
// ClearFullMapTransitions must be called before weak references are
// cleared.
ClearFullMapTransitions();
}
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
ClearWeakReferences();
ClearWeakCollections();
ClearJSWeakRefs();
}
PROFILE(heap()->isolate(), WeakCodeClearEvent());
MarkDependentCodeForDeoptimization();
#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_SWEEP_EXTERNAL_POINTER_TABLE);
isolate()->external_pointer_table().Sweep(isolate());
}
#endif // V8_SANDBOXED_EXTERNAL_POINTERS
DCHECK(weak_objects_.transition_arrays.IsEmpty());
DCHECK(weak_objects_.weak_references.IsEmpty());
DCHECK(weak_objects_.weak_objects_in_code.IsEmpty());
DCHECK(weak_objects_.js_weak_refs.IsEmpty());
DCHECK(weak_objects_.weak_cells.IsEmpty());
DCHECK(weak_objects_.code_flushing_candidates.IsEmpty());
DCHECK(weak_objects_.baseline_flushing_candidates.IsEmpty());
DCHECK(weak_objects_.flushed_js_functions.IsEmpty());
}
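// Code objects that weakly embed references to objects that died during
// this GC are marked for deoptimization, and their dead embedded references
// are cleared.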
void MarkCompactCollector::MarkDependentCodeForDeoptimization() {
std::pair<HeapObject, Code> weak_object_in_code;
while (local_weak_objects()->weak_objects_in_code_local.Pop(
&weak_object_in_code)) {
HeapObject object = weak_object_in_code.first;
Code code = weak_object_in_code.second;
if (!non_atomic_marking_state()->IsBlackOrGrey(object) &&
!code.embedded_objects_cleared()) {
if (!code.marked_for_deoptimization()) {
code.SetMarkedForDeoptimization("weak objects");
have_code_to_deoptimize_ = true;
}
code.ClearEmbeddedObjects(heap_);
DCHECK(code.embedded_objects_cleared());
}
}
}
void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map dead_target) {
DCHECK(non_atomic_marking_state()->IsWhite(dead_target));
Object potential_parent = dead_target.constructor_or_back_pointer();
if (potential_parent.IsMap()) {
Map parent = Map::cast(potential_parent);
DisallowGarbageCollection no_gc_obviously;
if (non_atomic_marking_state()->IsBlackOrGrey(parent) &&
TransitionsAccessor(isolate(), parent)
.HasSimpleTransitionTo(dead_target)) {
ClearPotentialSimpleMapTransition(parent, dead_target);
}
}
}
void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map map,
Map dead_target) {
DCHECK(!map.is_prototype_map());
DCHECK(!dead_target.is_prototype_map());
DCHECK_EQ(map.raw_transitions(), HeapObjectReference::Weak(dead_target));
// Take ownership of the descriptor array.
int number_of_own_descriptors = map.NumberOfOwnDescriptors();
DescriptorArray descriptors = map.instance_descriptors(isolate());
if (descriptors == dead_target.instance_descriptors(isolate()) &&
number_of_own_descriptors > 0) {
TrimDescriptorArray(map, descriptors);
DCHECK(descriptors.number_of_descriptors() == number_of_own_descriptors);
}
}
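// Flushes the bytecode of a SharedFunctionInfo that is no longer needed:
// the BytecodeArray is converted in place into an
// UncompiledDataWithoutPreparseData object (with a filler for the remaining
// space), allowing the function to be lazily re-compiled later.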
void MarkCompactCollector::FlushBytecodeFromSFI(
SharedFunctionInfo shared_info) {
DCHECK(shared_info.HasBytecodeArray());
// Retain objects required for uncompiled data.
String inferred_name = shared_info.inferred_name();
int start_position = shared_info.StartPosition();
int end_position = shared_info.EndPosition();
shared_info.DiscardCompiledMetadata(
isolate(), [](HeapObject object, ObjectSlot slot, HeapObject target) {
RecordSlot(object, slot, target);
});
// The size of the bytecode array should always be at least as large as an
// UncompiledData object.
STATIC_ASSERT(BytecodeArray::SizeFor(0) >=
UncompiledDataWithoutPreparseData::kSize);
// Replace bytecode array with an uncompiled data array.
HeapObject compiled_data = shared_info.GetBytecodeArray(isolate());
Address compiled_data_start = compiled_data.address();
int compiled_data_size = compiled_data.Size();
MemoryChunk* chunk = MemoryChunk::FromAddress(compiled_data_start);
// Clear any recorded slots for the compiled data, as they are now invalid.
DCHECK_NULL(chunk->sweeping_slot_set());
RememberedSet<OLD_TO_NEW>::RemoveRange(
chunk, compiled_data_start, compiled_data_start + compiled_data_size,
SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(
chunk, compiled_data_start, compiled_data_start + compiled_data_size,
SlotSet::FREE_EMPTY_BUCKETS);
// Swap the map, using set_map_after_allocation to avoid verify heap checks
// which are not necessary since we are doing this during the GC atomic pause.
compiled_data.set_map_after_allocation(
ReadOnlyRoots(heap()).uncompiled_data_without_preparse_data_map(),
SKIP_WRITE_BARRIER);
// Create a filler object for any left over space in the bytecode array.
if (!heap()->IsLargeObject(compiled_data)) {
heap()->CreateFillerObjectAt(
compiled_data.address() + UncompiledDataWithoutPreparseData::kSize,
compiled_data_size - UncompiledDataWithoutPreparseData::kSize,
ClearRecordedSlots::kNo);
}
// Initialize the uncompiled data.
UncompiledData uncompiled_data = UncompiledData::cast(compiled_data);
uncompiled_data.InitAfterBytecodeFlush(
inferred_name, start_position, end_position,
[](HeapObject object, ObjectSlot slot, HeapObject target) {
RecordSlot(object, slot, target);
});
// Mark the uncompiled data as black, and ensure all fields have already been
// marked.
DCHECK(non_atomic_marking_state()->IsBlackOrGrey(inferred_name));
non_atomic_marking_state()->WhiteToBlack(uncompiled_data);
// Use the raw function data setter to avoid validity checks, since we're
// performing the unusual task of decompiling.
shared_info.set_function_data(uncompiled_data, kReleaseStore);
DCHECK(!shared_info.is_compiled());
}
void MarkCompactCollector::ProcessOldCodeCandidates() {
DCHECK(FLAG_flush_bytecode || FLAG_flush_baseline_code ||
weak_objects_.code_flushing_candidates.IsEmpty());
SharedFunctionInfo flushing_candidate;
while (local_weak_objects()->code_flushing_candidates_local.Pop(
&flushing_candidate)) {
bool is_bytecode_live = non_atomic_marking_state()->IsBlackOrGrey(
flushing_candidate.GetBytecodeArray(isolate()));
if (FLAG_flush_baseline_code && flushing_candidate.HasBaselineCode()) {
CodeT baseline_codet =
CodeT::cast(flushing_candidate.function_data(kAcquireLoad));
// Safe to do a relaxed load here since the CodeT was acquire-loaded.
Code baseline_code = FromCodeT(baseline_codet, kRelaxedLoad);
if (non_atomic_marking_state()->IsBlackOrGrey(baseline_code)) {
// Currently, baseline code holds the bytecode array strongly, and it is
// always ensured that the bytecode is live if baseline code is live. Hence
// baseline code can safely load the bytecode array without any additional
// checks. If this ever changes, these checks need to be updated to flush
// code if the bytecode is not live, and baseline code needs to be updated
// to bail out if there is no bytecode.
DCHECK(is_bytecode_live);
// Regardless of whether the CodeT is a CodeDataContainer or the Code
// itself, if the Code is live then the CodeT has to be live and will
// have been marked via the owning JSFunction.
DCHECK(non_atomic_marking_state()->IsBlackOrGrey(baseline_codet));
} else if (is_bytecode_live) {
// If baseline code is flushed but we have a valid bytecode array, reset
// the function_data field to the BytecodeArray/InterpreterData.
flushing_candidate.set_function_data(
baseline_code.bytecode_or_interpreter_data(), kReleaseStore);
}
}
if (!is_bytecode_live) {
// If baseline code flushing is disabled we should only flush bytecode
// from functions that don't have baseline data.
DCHECK(FLAG_flush_baseline_code || !flushing_candidate.HasBaselineCode());
// If the BytecodeArray is dead, flush it, which will replace the field
// with an uncompiled data object.
FlushBytecodeFromSFI(flushing_candidate);
}
// Now record the slot, which has either been updated to an UncompiledData,
// baseline code, or a BytecodeArray that is still alive.
ObjectSlot slot =
flushing_candidate.RawField(SharedFunctionInfo::kFunctionDataOffset);
RecordSlot(flushing_candidate, slot, HeapObject::cast(*slot));
}
}
void MarkCompactCollector::ClearFlushedJsFunctions() {
DCHECK(FLAG_flush_bytecode || weak_objects_.flushed_js_functions.IsEmpty());
JSFunction flushed_js_function;
while (local_weak_objects()->flushed_js_functions_local.Pop(
&flushed_js_function)) {
auto gc_notify_updated_slot = [](HeapObject object, ObjectSlot slot,
Object target) {
RecordSlot(object, slot, HeapObject::cast(target));
};
flushed_js_function.ResetIfCodeFlushed(gc_notify_updated_slot);
}
}
void MarkCompactCollector::ProcessFlushedBaselineCandidates() {
DCHECK(FLAG_flush_baseline_code ||
weak_objects_.baseline_flushing_candidates.IsEmpty());
JSFunction flushed_js_function;
while (local_weak_objects()->baseline_flushing_candidates_local.Pop(
&flushed_js_function)) {
auto gc_notify_updated_slot = [](HeapObject object, ObjectSlot slot,
Object target) {
RecordSlot(object, slot, HeapObject::cast(target));
};
flushed_js_function.ResetIfCodeFlushed(gc_notify_updated_slot);
// Record the code slot that has been updated either to CompileLazy,
// InterpreterEntryTrampoline or baseline code.
ObjectSlot slot = flushed_js_function.RawField(JSFunction::kCodeOffset);
RecordSlot(flushed_js_function, slot, HeapObject::cast(*slot));
}
}
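// For every recorded transition array, transitions to dead target maps are
// compacted away; if the owner of the descriptor array died, the descriptor
// array is trimmed back to the parent map's own descriptors.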
void MarkCompactCollector::ClearFullMapTransitions() {
TransitionArray array;
while (local_weak_objects()->transition_arrays_local.Pop(&array)) {
int num_transitions = array.number_of_entries();
if (num_transitions > 0) {
Map map;
// The array might contain "undefined" elements because it's not yet
// filled. Allow it.
if (array.GetTargetIfExists(0, isolate(), &map)) {
DCHECK(!map.is_null()); // Weak pointers aren't cleared yet.
Object constructor_or_back_pointer = map.constructor_or_back_pointer();
if (constructor_or_back_pointer.IsSmi()) {
DCHECK(isolate()->has_active_deserializer());
DCHECK_EQ(constructor_or_back_pointer,
Smi::uninitialized_deserialization_value());
continue;
}
Map parent = Map::cast(map.constructor_or_back_pointer());
bool parent_is_alive =
non_atomic_marking_state()->IsBlackOrGrey(parent);
DescriptorArray descriptors =
parent_is_alive ? parent.instance_descriptors(isolate())
: DescriptorArray();
bool descriptors_owner_died =
CompactTransitionArray(parent, array, descriptors);
if (descriptors_owner_died) {
TrimDescriptorArray(parent, descriptors);
}
}
}
}
}
// Returns false if no maps have died, or if the transition array is
// still being deserialized.
bool MarkCompactCollector::TransitionArrayNeedsCompaction(
TransitionArray transitions, int num_transitions) {
for (int i = 0; i < num_transitions; ++i) {
MaybeObject raw_target = transitions.GetRawTarget(i);
if (raw_target.IsSmi()) {
// This target is still being deserialized.
DCHECK(isolate()->has_active_deserializer());
DCHECK_EQ(raw_target.ToSmi(), Smi::uninitialized_deserialization_value());
#ifdef DEBUG
// Targets can be dead only if this array is fully deserialized.
for (int j = 0; j < num_transitions; ++j) {
DCHECK_IMPLIES(
!transitions.GetRawTarget(j).IsSmi(),
!non_atomic_marking_state()->IsWhite(transitions.GetTarget(j)));
}
#endif
return false;
} else if (non_atomic_marking_state()->IsWhite(
TransitionsAccessor::GetTargetFromRaw(raw_target))) {
#ifdef DEBUG
// Targets can be dead only if this array is fully deserialized.
for (int j = 0; j < num_transitions; ++j) {
DCHECK(!transitions.GetRawTarget(j).IsSmi());
}
#endif
return true;
}
}
return false;
}
bool MarkCompactCollector::CompactTransitionArray(Map map,
TransitionArray transitions,
DescriptorArray descriptors) {
DCHECK(!map.is_prototype_map());
int num_transitions = transitions.number_of_entries();
if (!TransitionArrayNeedsCompaction(transitions, num_transitions)) {
return false;
}
bool descriptors_owner_died = false;
int transition_index = 0;
// Compact all live transitions to the left.
for (int i = 0; i < num_transitions; ++i) {
Map target = transitions.GetTarget(i);
DCHECK_EQ(target.constructor_or_back_pointer(), map);
if (non_atomic_marking_state()->IsWhite(target)) {
if (!descriptors.is_null() &&
target.instance_descriptors(isolate()) == descriptors) {
DCHECK(!target.is_prototype_map());