// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/mark-compact.h"
#include <unordered_map>
#include "src/base/utils/random-number-generator.h"
#include "src/codegen/compilation-cache.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/execution.h"
#include "src/execution/frames-inl.h"
#include "src/execution/vm-state-inl.h"
#include "src/handles/global-handles.h"
#include "src/heap/array-buffer-collector.h"
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/item-parallel-job.h"
#include "src/heap/large-spaces.h"
#include "src/heap/local-allocator-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/marking-barrier.h"
#include "src/heap/marking-visitor-inl.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/memory-measurement-inl.h"
#include "src/heap/memory-measurement.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/read-only-spaces.h"
#include "src/heap/safepoint.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/sweeper.h"
#include "src/heap/worklist.h"
#include "src/ic/stub-cache.h"
#include "src/init/v8.h"
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/foreign.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-objects-inl.h"
#include "src/objects/maybe-object.h"
#include "src/objects/slots-inl.h"
#include "src/objects/transitions-inl.h"
#include "src/tasks/cancelable-task.h"
#include "src/utils/utils-inl.h"
namespace v8 {
namespace internal {
const char* Marking::kWhiteBitPattern = "00";
const char* Marking::kBlackBitPattern = "11";
const char* Marking::kGreyBitPattern = "10";
const char* Marking::kImpossibleBitPattern = "01";
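// Each object's color is encoded in the two mark bitmap bits at its start
// address (one bit per tagged word), using the patterns above.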
// The following has to hold in order for {MarkingState::MarkBitFrom} to not
// produce invalid {kImpossibleBitPattern} in the marking bitmap by overlapping.
STATIC_ASSERT(Heap::kMinObjectSizeInTaggedWords >= 2);
// =============================================================================
// Verifiers
// =============================================================================
#ifdef VERIFY_HEAP
namespace {
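// Base class for the heap verifiers below: re-walks roots and spaces after
// marking and checks that every strongly reachable object is marked and that
// the marking bitmap is consistent with the object layout on each page.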
class MarkingVerifier : public ObjectVisitor, public RootVisitor {
public:
virtual void Run() = 0;
protected:
explicit MarkingVerifier(Heap* heap) : heap_(heap) {}
virtual ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
const MemoryChunk* chunk) = 0;
virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
virtual bool IsMarked(HeapObject object) = 0;
virtual bool IsBlackOrGrey(HeapObject object) = 0;
void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) override {
VerifyPointers(start, end);
}
void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) override {
VerifyPointers(start, end);
}
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override {
VerifyRootPointers(start, end);
}
void VerifyRoots();
void VerifyMarkingOnPage(const Page* page, Address start, Address end);
void VerifyMarking(NewSpace* new_space);
void VerifyMarking(PagedSpace* paged_space);
void VerifyMarking(LargeObjectSpace* lo_space);
Heap* heap_;
};
void MarkingVerifier::VerifyRoots() {
heap_->IterateRoots(this, base::EnumSet<SkipRoot>{SkipRoot::kWeak});
}
void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
Address end) {
HeapObject object;
Address next_object_must_be_here_or_later = start;
for (Address current = start; current < end;) {
object = HeapObject::FromAddress(current);
// One word fillers at the end of a black area can be grey.
if (IsBlackOrGrey(object) &&
object.map() != ReadOnlyRoots(heap_).one_pointer_filler_map()) {
CHECK(IsMarked(object));
CHECK(current >= next_object_must_be_here_or_later);
object.Iterate(this);
next_object_must_be_here_or_later = current + object.Size();
// The object is either part of a black area created by black allocation
// (all mark bits covering the object are set) or a regular black object
// (only the object's own two mark bits are set and the remainder of its
// range is clear), which is exactly what the check below verifies.
CHECK(
bitmap(page)->AllBitsSetInRange(
page->AddressToMarkbitIndex(current),
page->AddressToMarkbitIndex(next_object_must_be_here_or_later)) ||
bitmap(page)->AllBitsClearInRange(
page->AddressToMarkbitIndex(current + kTaggedSize * 2),
page->AddressToMarkbitIndex(next_object_must_be_here_or_later)));
current = next_object_must_be_here_or_later;
} else {
current += kTaggedSize;
}
}
}
void MarkingVerifier::VerifyMarking(NewSpace* space) {
Address end = space->top();
// The bottom position is at the start of its page. Allows us to use
// page->area_start() as start of range on all pages.
CHECK_EQ(space->first_allocatable_address(),
space->first_page()->area_start());
PageRange range(space->first_allocatable_address(), end);
for (auto it = range.begin(); it != range.end();) {
Page* page = *(it++);
Address limit = it != range.end() ? page->area_end() : end;
CHECK(limit == end || !page->Contains(end));
VerifyMarkingOnPage(page, page->area_start(), limit);
}
}
void MarkingVerifier::VerifyMarking(PagedSpace* space) {
for (Page* p : *space) {
VerifyMarkingOnPage(p, p->area_start(), p->area_end());
}
}
void MarkingVerifier::VerifyMarking(LargeObjectSpace* lo_space) {
LargeObjectSpaceObjectIterator it(lo_space);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
if (IsBlackOrGrey(obj)) {
obj.Iterate(this);
}
}
}
class FullMarkingVerifier : public MarkingVerifier {
public:
explicit FullMarkingVerifier(Heap* heap)
: MarkingVerifier(heap),
marking_state_(
heap->mark_compact_collector()->non_atomic_marking_state()) {}
void Run() override {
VerifyRoots();
VerifyMarking(heap_->new_space());
VerifyMarking(heap_->new_lo_space());
VerifyMarking(heap_->old_space());
VerifyMarking(heap_->code_space());
VerifyMarking(heap_->map_space());
VerifyMarking(heap_->lo_space());
VerifyMarking(heap_->code_lo_space());
}
protected:
ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
const MemoryChunk* chunk) override {
return marking_state_->bitmap(chunk);
}
bool IsMarked(HeapObject object) override {
return marking_state_->IsBlack(object);
}
bool IsBlackOrGrey(HeapObject object) override {
return marking_state_->IsBlackOrGrey(object);
}
void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
VerifyPointersImpl(start, end);
}
void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
VerifyPointersImpl(start, end);
}
void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
VerifyPointersImpl(start, end);
}
void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
VerifyHeapObjectImpl(target);
}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
if (!host.IsWeakObject(rinfo->target_object())) {
HeapObject object = rinfo->target_object();
VerifyHeapObjectImpl(object);
}
}
private:
V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
CHECK(marking_state_->IsBlackOrGrey(heap_object));
}
template <typename TSlot>
V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end) {
for (TSlot slot = start; slot < end; ++slot) {
typename TSlot::TObject object = *slot;
HeapObject heap_object;
if (object.GetHeapObjectIfStrong(&heap_object)) {
VerifyHeapObjectImpl(heap_object);
}
}
}
MarkCompactCollector::NonAtomicMarkingState* marking_state_;
};
class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
public:
virtual void Run() = 0;
void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) override {
VerifyPointers(start, end);
}
void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) override {
VerifyPointers(start, end);
}
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override {
VerifyRootPointers(start, end);
}
protected:
explicit EvacuationVerifier(Heap* heap) : heap_(heap) {}
inline Heap* heap() { return heap_; }
virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
void VerifyRoots();
void VerifyEvacuationOnPage(Address start, Address end);
void VerifyEvacuation(NewSpace* new_space);
void VerifyEvacuation(PagedSpace* paged_space);
Heap* heap_;
};
void EvacuationVerifier::VerifyRoots() {
heap_->IterateRoots(this, base::EnumSet<SkipRoot>{SkipRoot::kWeak});
}
void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
Address current = start;
while (current < end) {
HeapObject object = HeapObject::FromAddress(current);
if (!object.IsFreeSpaceOrFiller()) object.Iterate(this);
current += object.Size();
}
}
void EvacuationVerifier::VerifyEvacuation(NewSpace* space) {
PageRange range(space->first_allocatable_address(), space->top());
for (auto it = range.begin(); it != range.end();) {
Page* page = *(it++);
Address current = page->area_start();
Address limit = it != range.end() ? page->area_end() : space->top();
CHECK(limit == space->top() || !page->Contains(space->top()));
VerifyEvacuationOnPage(current, limit);
}
}
void EvacuationVerifier::VerifyEvacuation(PagedSpace* space) {
for (Page* p : *space) {
if (p->IsEvacuationCandidate()) continue;
if (p->Contains(space->top())) {
CodePageMemoryModificationScope memory_modification_scope(p);
heap_->CreateFillerObjectAt(
space->top(), static_cast<int>(space->limit() - space->top()),
ClearRecordedSlots::kNo);
}
VerifyEvacuationOnPage(p->area_start(), p->area_end());
}
}
class FullEvacuationVerifier : public EvacuationVerifier {
public:
explicit FullEvacuationVerifier(Heap* heap) : EvacuationVerifier(heap) {}
void Run() override {
VerifyRoots();
VerifyEvacuation(heap_->new_space());
VerifyEvacuation(heap_->old_space());
VerifyEvacuation(heap_->code_space());
VerifyEvacuation(heap_->map_space());
}
protected:
V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
CHECK_IMPLIES(Heap::InYoungGeneration(heap_object),
Heap::InToPage(heap_object));
CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(heap_object));
}
template <typename TSlot>
void VerifyPointersImpl(TSlot start, TSlot end) {
for (TSlot current = start; current < end; ++current) {
typename TSlot::TObject object = *current;
HeapObject heap_object;
if (object.GetHeapObjectIfStrong(&heap_object)) {
VerifyHeapObjectImpl(heap_object);
}
}
}
void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
VerifyPointersImpl(start, end);
}
void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
VerifyPointersImpl(start, end);
}
void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
VerifyHeapObjectImpl(target);
}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
VerifyHeapObjectImpl(rinfo->target_object());
}
void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
VerifyPointersImpl(start, end);
}
};
} // namespace
#endif // VERIFY_HEAP
// =============================================================================
// MarkCompactCollectorBase, MinorMarkCompactCollector, MarkCompactCollector
// =============================================================================
namespace {
int NumberOfAvailableCores() {
static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
// This number of cores should be greater than zero and never change.
DCHECK_GE(num_cores, 1);
DCHECK_EQ(num_cores, V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1);
return num_cores;
}
} // namespace
int MarkCompactCollectorBase::NumberOfParallelCompactionTasks(int pages) {
DCHECK_GT(pages, 0);
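// Roughly one task per 1 MB of page payload (about four pages, assuming the
// default 256 KB page size), capped by the number of available cores.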
int tasks = FLAG_parallel_compaction ? Min(NumberOfAvailableCores(),
pages / (MB / Page::kPageSize) + 1)
: 1;
if (!heap_->CanExpandOldGeneration(
static_cast<size_t>(tasks * Page::kPageSize))) {
// Optimize for memory usage near the heap limit.
tasks = 1;
}
return tasks;
}
int MarkCompactCollectorBase::NumberOfParallelPointerUpdateTasks(int pages,
int slots) {
DCHECK_GT(pages, 0);
// Limit the number of update tasks as task creation often dominates the
// actual work that is being done.
const int kMaxPointerUpdateTasks = 8;
const int kSlotsPerTask = 600;
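// Example: 4800 slots spread over 10 pages want min(10, 4800 / 600) = 8
// tasks before the core-count and kMaxPointerUpdateTasks caps apply.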
const int wanted_tasks =
(slots >= 0) ? Max(1, Min(pages, slots / kSlotsPerTask)) : pages;
return FLAG_parallel_pointer_update
? Min(kMaxPointerUpdateTasks,
Min(NumberOfAvailableCores(), wanted_tasks))
: 1;
}
int MarkCompactCollectorBase::NumberOfParallelToSpacePointerUpdateTasks(
int pages) {
DCHECK_GT(pages, 0);
// No cap needed because all pages we need to process are fully filled with
// interesting objects.
return FLAG_parallel_pointer_update ? Min(NumberOfAvailableCores(), pages)
: 1;
}
MarkCompactCollector::MarkCompactCollector(Heap* heap)
: MarkCompactCollectorBase(heap),
page_parallel_job_semaphore_(0),
#ifdef DEBUG
state_(IDLE),
#endif
was_marked_incrementally_(false),
evacuation_(false),
compacting_(false),
black_allocation_(false),
have_code_to_deoptimize_(false),
sweeper_(new Sweeper(heap, non_atomic_marking_state())) {
old_to_new_slots_ = -1;
}
MarkCompactCollector::~MarkCompactCollector() { delete sweeper_; }
void MarkCompactCollector::SetUp() {
DCHECK_EQ(0, strcmp(Marking::kWhiteBitPattern, "00"));
DCHECK_EQ(0, strcmp(Marking::kBlackBitPattern, "11"));
DCHECK_EQ(0, strcmp(Marking::kGreyBitPattern, "10"));
DCHECK_EQ(0, strcmp(Marking::kImpossibleBitPattern, "01"));
}
void MarkCompactCollector::TearDown() {
AbortCompaction();
AbortWeakObjects();
if (heap()->incremental_marking()->IsMarking()) {
marking_worklists_holder()->Clear();
}
}
void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
DCHECK(!p->NeverEvacuate());
if (FLAG_trace_evacuation_candidates) {
PrintIsolate(
isolate(),
"Evacuation candidate: Free bytes: %6zu. Free Lists length: %4d.\n",
p->area_size() - p->allocated_bytes(), p->FreeListsLength());
}
p->MarkEvacuationCandidate();
evacuation_candidates_.push_back(p);
}
static void TraceFragmentation(PagedSpace* space) {
int number_of_pages = space->CountTotalPages();
intptr_t reserved = (number_of_pages * space->AreaSize());
intptr_t free = reserved - space->SizeOfObjects();
PrintF("[%s]: %d pages, %d (%.1f%%) free\n", space->name(), number_of_pages,
static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
}
bool MarkCompactCollector::StartCompaction() {
if (!compacting_) {
DCHECK(evacuation_candidates_.empty());
if (FLAG_gc_experiment_less_compaction && !heap_->ShouldReduceMemory())
return false;
CollectEvacuationCandidates(heap()->old_space());
if (FLAG_compact_code_space) {
CollectEvacuationCandidates(heap()->code_space());
} else if (FLAG_trace_fragmentation) {
TraceFragmentation(heap()->code_space());
}
if (FLAG_trace_fragmentation) {
TraceFragmentation(heap()->map_space());
}
compacting_ = !evacuation_candidates_.empty();
}
return compacting_;
}
void MarkCompactCollector::StartMarking() {
if (FLAG_concurrent_marking || FLAG_parallel_marking) {
heap_->new_space()->ResetOriginalTop();
heap_->new_lo_space()->ResetPendingObject();
}
std::vector<Address> contexts =
heap()->memory_measurement()->StartProcessing();
if (FLAG_stress_per_context_marking_worklist) {
contexts.clear();
HandleScope handle_scope(heap()->isolate());
for (auto context : heap()->FindAllNativeContexts()) {
contexts.push_back(context->ptr());
}
}
marking_worklists_holder()->CreateContextWorklists(contexts);
marking_worklists_ = std::make_unique<MarkingWorklists>(
kMainThreadTask, marking_worklists_holder());
marking_visitor_ = std::make_unique<MarkingVisitor>(
marking_state(), marking_worklists(), weak_objects(), heap_, epoch(),
Heap::GetBytecodeFlushMode(),
heap_->local_embedder_heap_tracer()->InUse(),
heap_->is_current_gc_forced());
// Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
VerifyMarkbitsAreClean();
}
#endif
}
void MarkCompactCollector::CollectGarbage() {
// Make sure that Prepare() has been called. The individual steps below will
// update the state as they proceed.
DCHECK(state_ == PREPARE_GC);
#ifdef ENABLE_MINOR_MC
heap()->minor_mark_compact_collector()->CleanupSweepToIteratePages();
#endif // ENABLE_MINOR_MC
MarkLiveObjects();
ClearNonLiveReferences();
VerifyMarking();
heap()->memory_measurement()->FinishProcessing(native_context_stats_);
RecordObjectStats();
StartSweepSpaces();
Evacuate();
Finish();
}
#ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreDirty(ReadOnlySpace* space) {
ReadOnlyHeapObjectIterator iterator(space);
for (HeapObject object = iterator.Next(); !object.is_null();
object = iterator.Next()) {
CHECK(non_atomic_marking_state()->IsBlack(object));
}
}
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
for (Page* p : *space) {
CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
}
}
void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
for (Page* p : PageRange(space->first_allocatable_address(), space->top())) {
CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
}
}
void MarkCompactCollector::VerifyMarkbitsAreClean(LargeObjectSpace* space) {
LargeObjectSpaceObjectIterator it(space);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
CHECK(non_atomic_marking_state()->IsWhite(obj));
CHECK_EQ(0, non_atomic_marking_state()->live_bytes(
MemoryChunk::FromHeapObject(obj)));
}
}
void MarkCompactCollector::VerifyMarkbitsAreClean() {
VerifyMarkbitsAreClean(heap_->old_space());
VerifyMarkbitsAreClean(heap_->code_space());
VerifyMarkbitsAreClean(heap_->map_space());
VerifyMarkbitsAreClean(heap_->new_space());
// Read-only space should always be black since we never collect any objects
// in it or linked from it.
VerifyMarkbitsAreDirty(heap_->read_only_space());
VerifyMarkbitsAreClean(heap_->lo_space());
VerifyMarkbitsAreClean(heap_->code_lo_space());
VerifyMarkbitsAreClean(heap_->new_lo_space());
}
#endif // VERIFY_HEAP
void MarkCompactCollector::EnsureSweepingCompleted() {
if (!sweeper()->sweeping_in_progress()) return;
sweeper()->EnsureCompleted();
heap()->old_space()->RefillFreeList();
heap()->code_space()->RefillFreeList();
heap()->map_space()->RefillFreeList();
heap()->map_space()->SortFreeList();
heap()->tracer()->NotifySweepingCompleted();
#ifdef VERIFY_HEAP
if (FLAG_verify_heap && !evacuation()) {
FullEvacuationVerifier verifier(heap());
verifier.Run();
}
#endif
}
void MarkCompactCollector::DrainSweepingWorklists() {
if (!sweeper()->sweeping_in_progress()) return;
sweeper()->DrainSweepingWorklists();
}
void MarkCompactCollector::DrainSweepingWorklistForSpace(
AllocationSpace space) {
if (!sweeper()->sweeping_in_progress()) return;
sweeper()->DrainSweepingWorklistForSpace(space);
}
void MarkCompactCollector::ComputeEvacuationHeuristics(
size_t area_size, int* target_fragmentation_percent,
size_t* max_evacuated_bytes) {
// For the memory-reducing and optimize-for-memory modes we define both
// constants directly.
const int kTargetFragmentationPercentForReduceMemory = 20;
const size_t kMaxEvacuatedBytesForReduceMemory = 12 * MB;
const int kTargetFragmentationPercentForOptimizeMemory = 20;
const size_t kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
// For regular mode (which is latency critical) we define less aggressive
// defaults to start and switch to a trace-based (using compaction speed)
// approach as soon as we have enough samples.
const int kTargetFragmentationPercent = 70;
const size_t kMaxEvacuatedBytes = 4 * MB;
// Time budget for compacting a single area (= payload of a page). Used as
// soon as enough compaction speed samples exist.
const float kTargetMsPerArea = .5;
if (heap()->ShouldReduceMemory()) {
*target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
*max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
} else if (heap()->ShouldOptimizeForMemoryUsage()) {
*target_fragmentation_percent =
kTargetFragmentationPercentForOptimizeMemory;
*max_evacuated_bytes = kMaxEvacuatedBytesForOptimizeMemory;
} else {
const double estimated_compaction_speed =
heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
if (estimated_compaction_speed != 0) {
// Estimate the target fragmentation based on traced compaction speed
// and a goal for a single page.
const double estimated_ms_per_area =
1 + area_size / estimated_compaction_speed;
*target_fragmentation_percent = static_cast<int>(
100 - 100 * kTargetMsPerArea / estimated_ms_per_area);
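// For illustration: an area of ~500 KB and a measured speed of ~1 MB/ms
// give an estimated_ms_per_area of roughly 1.5, so the target fragmentation
// becomes about 100 - 100 * 0.5 / 1.5 = 67 percent (clamped below).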
if (*target_fragmentation_percent <
kTargetFragmentationPercentForReduceMemory) {
*target_fragmentation_percent =
kTargetFragmentationPercentForReduceMemory;
}
} else {
*target_fragmentation_percent = kTargetFragmentationPercent;
}
*max_evacuated_bytes = kMaxEvacuatedBytes;
}
}
void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);
int number_of_pages = space->CountTotalPages();
size_t area_size = space->AreaSize();
const bool in_standard_path =
!(FLAG_manual_evacuation_candidates_selection ||
FLAG_stress_compaction_random || FLAG_stress_compaction ||
FLAG_always_compact);
// Those variables will only be initialized if |in_standard_path|, and are not
// used otherwise.
size_t max_evacuated_bytes;
int target_fragmentation_percent;
size_t free_bytes_threshold;
if (in_standard_path) {
// We use two conditions to decide whether a page qualifies as an evacuation
// candidate or not:
// * Target fragmentation: How fragmented is a page, i.e., what is the ratio
// between live bytes and the capacity of this page (= area).
// * Evacuation quota: A global quota determining how many bytes should be
// compacted.
ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
&max_evacuated_bytes);
free_bytes_threshold = target_fragmentation_percent * (area_size / 100);
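// E.g. a 70% target on a ~500 KB page area requires at least ~350 KB of
// free space for a page to become an evacuation candidate.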
}
// Pairs of (live_bytes_in_page, page).
using LiveBytesPagePair = std::pair<size_t, Page*>;
std::vector<LiveBytesPagePair> pages;
pages.reserve(number_of_pages);
DCHECK(!sweeping_in_progress());
Page* owner_of_linear_allocation_area =
space->top() == space->limit()
? nullptr
: Page::FromAllocationAreaAddress(space->top());
for (Page* p : *space) {
if (p->NeverEvacuate() || (p == owner_of_linear_allocation_area) ||
!p->CanAllocate())
continue;
if (p->IsPinned()) {
DCHECK(
!p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING));
continue;
}
// Invariant: Evacuation candidates are only created when marking starts,
// which means that sweeping has finished. Furthermore, at the end of a GC
// all evacuation candidates are cleared and their slot buffers are
// released.
CHECK(!p->IsEvacuationCandidate());
CHECK_NULL(p->slot_set<OLD_TO_OLD>());
CHECK_NULL(p->typed_slot_set<OLD_TO_OLD>());
CHECK(p->SweepingDone());
DCHECK(p->area_size() == area_size);
if (in_standard_path) {
// Only pages with at least |free_bytes_threshold| free bytes are
// considered for evacuation.
if (area_size - p->allocated_bytes() >= free_bytes_threshold) {
pages.push_back(std::make_pair(p->allocated_bytes(), p));
}
} else {
pages.push_back(std::make_pair(p->allocated_bytes(), p));
}
}
int candidate_count = 0;
size_t total_live_bytes = 0;
const bool reduce_memory = heap()->ShouldReduceMemory();
if (FLAG_manual_evacuation_candidates_selection) {
for (size_t i = 0; i < pages.size(); i++) {
Page* p = pages[i].second;
if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
candidate_count++;
total_live_bytes += pages[i].first;
p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
AddEvacuationCandidate(p);
}
}
} else if (FLAG_stress_compaction_random) {
double fraction = isolate()->fuzzer_rng()->NextDouble();
size_t pages_to_mark_count =
static_cast<size_t>(fraction * (pages.size() + 1));
for (uint64_t i : isolate()->fuzzer_rng()->NextSample(
pages.size(), pages_to_mark_count)) {
candidate_count++;
total_live_bytes += pages[i].first;
AddEvacuationCandidate(pages[i].second);
}
} else if (FLAG_stress_compaction) {
for (size_t i = 0; i < pages.size(); i++) {
Page* p = pages[i].second;
if (i % 2 == 0) {
candidate_count++;
total_live_bytes += pages[i].first;
AddEvacuationCandidate(p);
}
}
} else {
// The following approach determines the pages that should be evacuated.
//
// Sort pages from the most free to the least free, then select
// the first n pages for evacuation such that:
// - the total size of evacuated objects does not exceed the specified
// limit.
// - fragmentation of (n+1)-th page does not exceed the specified limit.
std::sort(pages.begin(), pages.end(),
[](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
return a.first < b.first;
});
for (size_t i = 0; i < pages.size(); i++) {
size_t live_bytes = pages[i].first;
DCHECK_GE(area_size, live_bytes);
if (FLAG_always_compact ||
((total_live_bytes + live_bytes) <= max_evacuated_bytes)) {
candidate_count++;
total_live_bytes += live_bytes;
}
if (FLAG_trace_fragmentation_verbose) {
PrintIsolate(isolate(),
"compaction-selection-page: space=%s free_bytes_page=%zu "
"fragmentation_limit_kb=%zu "
"fragmentation_limit_percent=%d sum_compaction_kb=%zu "
"compaction_limit_kb=%zu\n",
space->name(), (area_size - live_bytes) / KB,
free_bytes_threshold / KB, target_fragmentation_percent,
total_live_bytes / KB, max_evacuated_bytes / KB);
}
}
// How many pages we will allocate for the evacuated objects
// in the worst case: ceil(total_live_bytes / area_size)
int estimated_new_pages =
static_cast<int>((total_live_bytes + area_size - 1) / area_size);
DCHECK_LE(estimated_new_pages, candidate_count);
int estimated_released_pages = candidate_count - estimated_new_pages;
// Avoid (compact -> expand) cycles.
if ((estimated_released_pages == 0) && !FLAG_always_compact) {
candidate_count = 0;
}
for (int i = 0; i < candidate_count; i++) {
AddEvacuationCandidate(pages[i].second);
}
}
if (FLAG_trace_fragmentation) {
PrintIsolate(isolate(),
"compaction-selection: space=%s reduce_memory=%d pages=%d "
"total_live_bytes=%zu\n",
space->name(), reduce_memory, candidate_count,
total_live_bytes / KB);
}
}
void MarkCompactCollector::AbortCompaction() {
if (compacting_) {
RememberedSet<OLD_TO_OLD>::ClearAll(heap());
for (Page* p : evacuation_candidates_) {
p->ClearEvacuationCandidate();
}
compacting_ = false;
evacuation_candidates_.clear();
}
DCHECK(evacuation_candidates_.empty());
}
void MarkCompactCollector::Prepare() {
was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
#ifdef DEBUG
DCHECK(state_ == IDLE);
state_ = PREPARE_GC;
#endif
DCHECK(!FLAG_never_compact || !FLAG_always_compact);
// Instead of waiting we could also abort the sweeper threads here.
EnsureSweepingCompleted();
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_COMPLETE_SWEEP_ARRAY_BUFFERS);
heap_->array_buffer_sweeper()->EnsureFinished();
}
if (heap()->incremental_marking()->IsSweeping()) {
heap()->incremental_marking()->Stop();
}
if (!was_marked_incrementally_) {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE);
heap_->local_embedder_heap_tracer()->TracePrologue(
heap_->flags_for_embedder_tracer());
}
if (!FLAG_never_compact) {
StartCompaction();
}
StartMarking();
}
PagedSpaceIterator spaces(heap());
for (PagedSpace* space = spaces.Next(); space != nullptr;
space = spaces.Next()) {
space->PrepareForMarkCompact();
}
if (FLAG_local_heaps) {
// Fill and reset all background thread LABs
heap_->safepoint()->IterateLocalHeaps(
[](LocalHeap* local_heap) { local_heap->FreeLinearAllocationArea(); });
}
heap()->account_external_memory_concurrently_freed();
}
void MarkCompactCollector::FinishConcurrentMarking(
ConcurrentMarking::StopRequest stop_request) {
// FinishConcurrentMarking is called for both concurrent and parallel
// marking. It is safe to call this function when tasks are already finished.
if (FLAG_parallel_marking || FLAG_concurrent_marking) {
heap()->concurrent_marking()->Stop(stop_request);
heap()->concurrent_marking()->FlushMemoryChunkData(
non_atomic_marking_state());
heap()->concurrent_marking()->FlushNativeContexts(&native_context_stats_);
}
}
void MarkCompactCollector::VerifyMarking() {
CHECK(marking_worklists()->IsEmpty());
DCHECK(heap_->incremental_marking()->IsStopped());
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
FullMarkingVerifier verifier(heap());
verifier.Run();
}
#endif
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
heap()->old_space()->VerifyLiveBytes();
heap()->map_space()->VerifyLiveBytes();
heap()->code_space()->VerifyLiveBytes();
}
#endif
}
void MarkCompactCollector::Finish() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
SweepArrayBufferExtensions();
#ifdef DEBUG
heap()->VerifyCountersBeforeConcurrentSweeping();
#endif
marking_visitor_.reset();
marking_worklists_.reset();
marking_worklists_holder_.ReleaseContextWorklists();
native_context_stats_.Clear();
CHECK(weak_objects_.current_ephemerons.IsEmpty());
CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
weak_objects_.next_ephemerons.Clear();
sweeper()->StartSweeperTasks();
sweeper()->StartIterabilityTasks();
// Clear the marking state of live large objects.
heap_->lo_space()->ClearMarkingStateOfLiveObjects();
heap_->code_lo_space()->ClearMarkingStateOfLiveObjects();
#ifdef DEBUG
DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
state_ = IDLE;
#endif
heap_->isolate()->inner_pointer_to_code_cache()->Flush();
// The stub caches are not traversed during GC; clear them to force
// their lazy re-initialization. This must be done after the
// GC, because it relies on the new address of certain old space
// objects (empty string, illegal builtin).
isolate()->load_stub_cache()->Clear();
isolate()->store_stub_cache()->Clear();
if (have_code_to_deoptimize_) {
// Some code objects were marked for deoptimization during the GC.
Deoptimizer::DeoptimizeMarkedCode(isolate());
have_code_to_deoptimize_ = false;
}
}
void MarkCompactCollector::SweepArrayBufferExtensions() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH_SWEEP_ARRAY_BUFFERS);
heap_->array_buffer_sweeper()->RequestSweepFull();
}
class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
public:
explicit RootMarkingVisitor(MarkCompactCollector* collector)
: collector_(collector) {}
void VisitRootPointer(Root root, const char* description,
FullObjectSlot p) final {
MarkObjectByPointer(root, p);
}
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) final {
for (FullObjectSlot p = start; p < end; ++p) MarkObjectByPointer(root, p);
}
private:
V8_INLINE void MarkObjectByPointer(Root root, FullObjectSlot p) {
if (!(*p).IsHeapObject()) return;
collector_->MarkRootObject(root, HeapObject::cast(*p));
}
MarkCompactCollector* const collector_;
};
// This visitor is used to visit the body of special objects held alive by
// other roots.
//
// It is currently used for
// - Code held alive by the top optimized frame. This code cannot be deoptimized
// and thus has to be kept alive in an isolated way: it should not keep alive
// other code objects reachable through the weak list, but its embedded
// pointers (which would otherwise be dropped) should be kept alive.
// - Prefix of the string table.
class MarkCompactCollector::CustomRootBodyMarkingVisitor final
: public ObjectVisitor {
public:
explicit CustomRootBodyMarkingVisitor(MarkCompactCollector* collector)
: collector_(collector) {}
void VisitPointer(HeapObject host, ObjectSlot p) final {
MarkObject(host, *p);
}
void VisitPointers(HeapObject host, ObjectSlot start, ObjectSlot end) final {
for (ObjectSlot p = start; p < end; ++p) {
DCHECK(!HasWeakHeapObjectTag(*p));
MarkObject(host, *p);
}
}
void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) final {
// At the moment, custom roots cannot contain weak pointers.
UNREACHABLE();
}
// VisitEmbedderPointer is defined by ObjectVisitor to call VisitPointers.
void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
MarkObject(host, target);
}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
MarkObject(host, rinfo->target_object());
}
private:
V8_INLINE void MarkObject(HeapObject host, Object object) {
if (!object.IsHeapObject()) return;
collector_->MarkObject(host, HeapObject::cast(object));
}
MarkCompactCollector* const collector_;
};
class InternalizedStringTableCleaner : public ObjectVisitor {
public:
InternalizedStringTableCleaner(Heap* heap, HeapObject table)
: heap_(heap), pointers_removed_(0), table_(table) {}
void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) override {
// Visit all HeapObject pointers in [start, end).
Object the_hole = ReadOnlyRoots(heap_).the_hole_value();
MarkCompactCollector::NonAtomicMarkingState* marking_state =
heap_->mark_compact_collector()->non_atomic_marking_state();
for (ObjectSlot p = start; p < end; ++p) {
Object o = *p;
if (o.IsHeapObject()) {
HeapObject heap_object = HeapObject::cast(o);
if (marking_state->IsWhite(heap_object)) {
pointers_removed_++;
// Set the entry to the_hole_value (as deleted).
p.store(the_hole);
} else {
// StringTable contains only old space strings.
DCHECK(!Heap::InYoungGeneration(o));
MarkCompactCollector::RecordSlot(table_, p, heap_object);
}
}
}
}
void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) final {
UNREACHABLE();
}
void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
UNREACHABLE();
}
int PointersRemoved() { return pointers_removed_; }
private:
Heap* heap_;
int pointers_removed_;
HeapObject table_;
};
class ExternalStringTableCleaner : public RootVisitor {
public:
explicit ExternalStringTableCleaner(Heap* heap) : heap_(heap) {}
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override {
// Visit all HeapObject pointers in [start, end).
MarkCompactCollector::NonAtomicMarkingState* marking_state =
heap_->mark_compact_collector()->non_atomic_marking_state();
Object the_hole = ReadOnlyRoots(heap_).the_hole_value();
for (FullObjectSlot p = start; p < end; ++p) {
Object o = *p;
if (o.IsHeapObject()) {
HeapObject heap_object = HeapObject::cast(o);
if (marking_state->IsWhite(heap_object)) {
if (o.IsExternalString()) {
heap_->FinalizeExternalString(String::cast(o));
} else {
// The original external string may have been internalized.
DCHECK(o.IsThinString());
}
// Set the entry to the_hole_value (as deleted).
p.store(the_hole);
}
}
}
}
private:
Heap* heap_;
};
// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
// are retained.
class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
public:
explicit MarkCompactWeakObjectRetainer(
MarkCompactCollector::NonAtomicMarkingState* marking_state)
: marking_state_(marking_state) {}
Object RetainAs(Object object) override {
HeapObject heap_object = HeapObject::cast(object);
DCHECK(!marking_state_->IsGrey(heap_object));
if (marking_state_->IsBlack(heap_object)) {
return object;
} else if (object.IsAllocationSite() &&
!(AllocationSite::cast(object).IsZombie())) {
// "dead" AllocationSites need to live long enough for a traversal of new
// space. These sites get a one-time reprieve.
Object nested = object;
while (nested.IsAllocationSite()) {
AllocationSite current_site = AllocationSite::cast(nested);
// MarkZombie will overwrite the nested_site, so read it before marking.
nested = current_site.nested_site();
current_site.MarkZombie();
marking_state_->WhiteToBlack(current_site);
}
return object;
} else {
return Object();
}
}
private:
MarkCompactCollector::NonAtomicMarkingState* marking_state_;
};
class RecordMigratedSlotVisitor : public ObjectVisitor {
public:
explicit RecordMigratedSlotVisitor(
MarkCompactCollector* collector,
EphemeronRememberedSet* ephemeron_remembered_set)
: collector_(collector),
ephemeron_remembered_set_(ephemeron_remembered_set) {}
inline void VisitPointer(HeapObject host, ObjectSlot p) final {
DCHECK(!HasWeakHeapObjectTag(*p));
RecordMigratedSlot(host, MaybeObject::FromObject(*p), p.address());
}
inline void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
RecordMigratedSlot(host, *p, p.address());
}
inline void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) final {
while (start < end) {
VisitPointer(host, start);
++start;
}
}
inline void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) final {
while (start < end) {
VisitPointer(host, start);
++start;
}
}
inline void VisitEphemeron(HeapObject host, int index, ObjectSlot key,
ObjectSlot value) override {
DCHECK(host.IsEphemeronHashTable());
DCHECK(!Heap::InYoungGeneration(host));
VisitPointer(host, value);
if (ephemeron_remembered_set_ && Heap::InYoungGeneration(*key)) {
auto table = EphemeronHashTable::unchecked_cast(host);
auto insert_result =
ephemeron_remembered_set_->insert({table, std::unordered_set<int>()});
insert_result.first->second.insert(index);
} else {
VisitPointer(host, key);
}
}
inline void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
DCHECK_EQ(host, rinfo->host());
DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
// The target is always in old space, so we don't have to record the slot in
// the old-to-new remembered set.
DCHECK(!Heap::InYoungGeneration(target));
collector_->RecordRelocSlot(host, rinfo, target);
}
inline void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
DCHECK_EQ(host, rinfo->host());
DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
HeapObject object = HeapObject::cast(rinfo->target_object());
GenerationalBarrierForCode(host, rinfo, object);
collector_->RecordRelocSlot(host, rinfo, object);
}
// Entries that are skipped for recording.
inline void VisitExternalReference(Code host, RelocInfo* rinfo) final {}
inline void VisitExternalReference(Foreign host, Address* p) final {}
inline void VisitRuntimeEntry(Code host, RelocInfo* rinfo) final {}
inline void VisitInternalReference(Code host, RelocInfo* rinfo) final {}
virtual void MarkArrayBufferExtensionPromoted(HeapObject object) {}
protected:
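// Records |slot| in the appropriate remembered set of the host's chunk:
// OLD_TO_NEW if the migrated value points into the young generation,
// OLD_TO_OLD if it points into an evacuation candidate page.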
inline virtual void RecordMigratedSlot(HeapObject host, MaybeObject value,
Address slot) {
if (value->IsStrongOrWeak()) {
BasicMemoryChunk* p = BasicMemoryChunk::FromAddress(value.ptr());
if (p->InYoungGeneration()) {
DCHECK_IMPLIES(
p->IsToPage(),
p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) || p->IsLargePage());
MemoryChunk* chunk = MemoryChunk::FromHeapObject(host);
DCHECK(chunk->SweepingDone());
DCHECK_NULL(chunk->sweeping_slot_set<AccessMode::NON_ATOMIC>());
RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
} else if (p->IsEvacuationCandidate()) {
RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
MemoryChunk::FromHeapObject(host), slot);
}
}
}
MarkCompactCollector* collector_;
EphemeronRememberedSet* ephemeron_remembered_set_;
};
class MigrationObserver {
public:
explicit MigrationObserver(Heap* heap) : heap_(heap) {}
virtual ~MigrationObserver() = default;
virtual void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
int size) = 0;
protected:
Heap* heap_;
};
class ProfilingMigrationObserver final : public MigrationObserver {
public:
explicit ProfilingMigrationObserver(Heap* heap) : MigrationObserver(heap) {}
inline void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
int size) final {
if (dest == CODE_SPACE || (dest == OLD_SPACE && dst.IsBytecodeArray())) {
PROFILE(heap_->isolate(),
CodeMoveEvent(AbstractCode::cast(src), AbstractCode::cast(dst)));
}
heap_->OnMoveEvent(dst, src, size);
}
};
class HeapObjectVisitor {
public:
virtual ~HeapObjectVisitor() = default;
virtual bool Visit(HeapObject object, int size) = 0;
};
class EvacuateVisitorBase : public HeapObjectVisitor {
public:
void AddObserver(MigrationObserver* observer) {
migration_function_ = RawMigrateObject<MigrationMode::kObserved>;
observers_.push_back(observer);
}
protected:
enum MigrationMode { kFast, kObserved };
using MigrateFunction = void (*)(EvacuateVisitorBase* base, HeapObject dst,
HeapObject src, int size,
AllocationSpace dest);
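// Copies |src| to the already reserved |dst| in |dest| space, notifies
// migration observers in kObserved mode, revisits old- and code-space
// copies to record migrated slots, and installs a forwarding pointer in
// the map word of |src|.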
template <MigrationMode mode>
static void RawMigrateObject(EvacuateVisitorBase* base, HeapObject dst,
HeapObject src, int size, AllocationSpace dest) {
Address dst_addr = dst.address();
Address src_addr = src.address();
DCHECK(base->heap_->AllowedToBeMigrated(src.map(), src, dest));
DCHECK_NE(dest, LO_SPACE);
DCHECK_NE(dest, CODE_LO_SPACE);
if (dest == OLD_SPACE) {
DCHECK_OBJECT_SIZE(size);
DCHECK(IsAligned(size, kTaggedSize));
base->heap_->CopyBlock(dst_addr, src_addr, size);
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
dst.IterateBodyFast(dst.map(), size, base->record_visitor_);
if (V8_UNLIKELY(FLAG_minor_mc)) {
base->record_visitor_->MarkArrayBufferExtensionPromoted(dst);
}
} else if (dest == CODE_SPACE) {
DCHECK_CODEOBJECT_SIZE(size, base->heap_->code_space());
base->heap_->CopyBlock(dst_addr, src_addr, size);
Code::cast(dst).Relocate(dst_addr - src_addr);
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
dst.IterateBodyFast(dst.map(), size, base->record_visitor_);
} else {
DCHECK_OBJECT_SIZE(size);
DCHECK(dest == NEW_SPACE);
base->heap_->CopyBlock(dst_addr, src_addr, size);
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
}
src.set_map_word(MapWord::FromForwardingAddress(dst));
}
EvacuateVisitorBase(Heap* heap, EvacuationAllocator* local_allocator,
RecordMigratedSlotVisitor* record_visitor)
: heap_(heap),
local_allocator_(local_allocator),
record_visitor_(record_visitor) {
migration_function_ = RawMigrateObject<MigrationMode::kFast>;
}
inline bool TryEvacuateObject(AllocationSpace target_space, HeapObject object,
int size, HeapObject* target_object) {
#ifdef VERIFY_HEAP
if (AbortCompactionForTesting(object)) return false;
#endif // VERIFY_HEAP
AllocationAlignment alignment = HeapObject::RequiredAlignment(object.map());
AllocationResult allocation = local_allocator_->Allocate(
target_space, size, AllocationOrigin::kGC, alignment);
if (allocation.To(target_object)) {
MigrateObject(*target_object, object, size, target_space);
if (target_space == CODE_SPACE)
MemoryChunk::FromHeapObject(*target_object)
->GetCodeObjectRegistry()
->RegisterNewlyAllocatedCodeObject((*target_object).address());
return true;
}
return false;
}
inline void ExecuteMigrationObservers(AllocationSpace dest, HeapObject src,
HeapObject dst, int size) {
for (MigrationObserver* obs : observers_) {
obs->Move(dest, src, dst, size);
}
}
inline void MigrateObject(HeapObject dst, HeapObject src, int size,
AllocationSpace dest) {
migration_function_(this, dst, src, size, dest);
}
#ifdef VERIFY_HEAP
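// With --stress-compaction, pseudo-randomly (based on FLAG_random_seed)
// aborts evacuation of objects at a particular page offset; the page flag
// makes successive decisions for the same page alternate so that a page is
// not aborted forever.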
bool AbortCompactionForTesting(HeapObject object) {
if (FLAG_stress_compaction) {
const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
kPageAlignmentMask & ~kObjectAlignmentMask;
if ((object.ptr() & kPageAlignmentMask) == mask) {
Page* page = Page::FromHeapObject(object);
if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
} else {
page->SetFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
return true;
}
}
}
return false;
}
#endif // VERIFY_HEAP
Heap* heap_;
EvacuationAllocator* local_allocator_;
RecordMigratedSlotVisitor* record_visitor_;
std::vector<MigrationObserver*> observers_;
MigrateFunction migration_function_;
};
class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
public:
explicit EvacuateNewSpaceVisitor(
Heap* heap, EvacuationAllocator* local_allocator,
RecordMigratedSlotVisitor* record_visitor,
Heap::PretenuringFeedbackMap* local_pretenuring_feedback,
bool always_promote_young)
: EvacuateVisitorBase(heap, local_allocator, record_visitor),
buffer_(LocalAllocationBuffer::InvalidBuffer()),
promoted_size_(0),
semispace_copied_size_(0),
local_pretenuring_feedback_(local_pretenuring_feedback),
is_incremental_marking_(heap->incremental_marking()->IsMarking()),
always_promote_young_(always_promote_young) {}
inline bool Visit(HeapObject object, int size) override {
if (TryEvacuateWithoutCopy(object)) return true;
HeapObject target_object;
if (always_promote_young_) {
heap_->UpdateAllocationSite(object.map(), object,
local_pretenuring_feedback_);
if (!TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
heap_->FatalProcessOutOfMemory(
"MarkCompactCollector: young object promotion failed");
}
promoted_size_ += size;
return true;
}
if (heap_->ShouldBePromoted(object.address()) &&
TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
promoted_size_ += size;
return true;
}
heap_->UpdateAllocationSite(object.map(), object,
local_pretenuring_feedback_);
HeapObject target;
AllocationSpace space = AllocateTargetObject(object, size, &target);
MigrateObject(HeapObject::cast(target), object, size, space);
semispace_copied_size_ += size;
return true;
}
intptr_t promoted_size() { return promoted_size_; }
intptr_t semispace_copied_size() { return semispace_copied_size_; }
private:
inline bool TryEvacuateWithoutCopy(HeapObject object) {
if (is_incremental_marking_) return false;
Map map = object.map();
// Some objects can be evacuated without creating a copy.
if (map.visitor_id() == kVisitThinString) {
HeapObject actual = ThinString::cast(object).unchecked_actual();
if (MarkCompactCollector::IsOnEvacuationCandidate(actual)) return false;
object.set_map_word(MapWord::FromForwardingAddress(actual));
return true;
}
// TODO(mlippautz): Handle ConsString.
return false;
}
inline AllocationSpace AllocateTargetObject(HeapObject old_object, int size,
HeapObject* target_object) {
AllocationAlignment alignment =
HeapObject::RequiredAlignment(old_object.map());
AllocationSpace space_allocated_in = NEW_SPACE;
AllocationResult allocation = local_allocator_->Allocate(
NEW_SPACE, size, AllocationOrigin::kGC, alignment);
if (allocation.IsRetry()) {
allocation = AllocateInOldSpace(size, alignment);
space_allocated_in = OLD_SPACE;
}
bool ok = allocation.To(target_object);
DCHECK(ok);
USE(ok);
return space_allocated_in;
}
inline AllocationResult AllocateInOldSpace(int size_in_bytes,
AllocationAlignment alignment) {
AllocationResult allocation = local_allocator_->Allocate(
OLD_SPACE, size_in_bytes, AllocationOrigin::kGC, alignment);
if (allocation.IsRetry()) {
heap_->FatalProcessOutOfMemory(
"MarkCompactCollector: semi-space copy, fallback in old gen");
}
return allocation;
}
LocalAllocationBuffer buffer_;
intptr_t promoted_size_;
intptr_t semispace_copied_size_;
Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
bool is_incremental_marking_;
bool always_promote_young_;
};
template <PageEvacuationMode mode>
class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
public:
explicit EvacuateNewSpacePageVisitor(
Heap* heap, RecordMigratedSlotVisitor* record_visitor,
Heap::PretenuringFeedbackMap* local_pretenuring_feedback)
: heap_(heap),
record_visitor_(record_visitor),
moved_bytes_(0),
local_pretenuring_feedback_(local_pretenuring_feedback) {}
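// Moves an entire page out of its current new-space semispace: NEW_TO_NEW
// keeps the page in new space (moving it to the to-space), NEW_TO_OLD
// converts it into an old-space page in place, so its objects are promoted
// without being copied.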
static void Move(Page* page) {
switch (mode) {
case NEW_TO_NEW:
page->heap()->new_space()->MovePageFromSpaceToSpace(page);
page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
break;
case NEW_TO_OLD: {
page->heap()->new_space()->from_space().RemovePage(page);
Page* new_page = Page::ConvertNewToOld(page);
DCHECK(!new_page->InYoungGeneration());
new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
break;
}
}
}
inline bool Visit(HeapObject object, int size) override {
if (mode == NEW_TO_NEW) {
heap_->UpdateAllocationSite(object.map(), object,
local_pretenuring_feedback_);
} else if (mode == NEW_TO_OLD) {
object.IterateBodyFast(record_visitor_);
if (V8_UNLIKELY(FLAG_minor_mc)) {
record_visitor_->MarkArrayBufferExtensionPromoted(object);
}
}
return true;
}
intptr_t moved_bytes() { return moved_bytes_; }
void account_moved_bytes(intptr_t bytes) { moved_bytes_ += bytes; }
private:
Heap* heap_;
RecordMigratedSlotVisitor* record_visitor_;
intptr_t moved_bytes_;
Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
};
class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
public:
EvacuateOldSpaceVisitor(Heap* heap, EvacuationAllocator* local_allocator,
RecordMigratedSlotVisitor* record_visitor)
: EvacuateVisitorBase(heap, local_allocator, record_visitor) {}
inline bool Visit(HeapObject object, int size) override {
HeapObject target_object;
if (TryEvacuateObject(Page::FromHeapObject(object)->owner_identity(),
object, size, &target_object)) {
DCHECK(object.map_word().IsForwardingAddress());
return true;
}
return false;
}
};
class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
public:
explicit EvacuateRecordOnlyVisitor(Heap* heap) : heap_(heap) {}
inline bool Visit(HeapObject object, int size) override {
RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector(),
&heap_->ephemeron_remembered_set_);
object.IterateBodyFast(&visitor);
return true;
}
private:
Heap* heap_;
};
bool MarkCompactCollector::IsUnmarkedHeapObject(Heap* heap, FullObjectSlot p) {
Object o = *p;
if (!o.IsHeapObject()) return false;
HeapObject heap_object = HeapObject::cast(o);
return heap->mark_compact_collector()->non_atomic_marking_state()->IsWhite(
heap_object);
}
void MarkCompactCollector::MarkStringTable(
ObjectVisitor* custom_root_body_visitor) {
StringTable string_table = heap()->string_table();
// Mark the string table itself.
if (marking_state()->WhiteToBlack(string_table)) {
// Explicitly mark the prefix.
string_table.IteratePrefix(custom_root_body_visitor);
if (marking_worklists()->IsPerContextMode()) {
native_context_stats_.IncrementSize(MarkingWorklists::kSharedContext,
string_table.map(), string_table,
string_table.Size());
}
}
}
void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor,
ObjectVisitor* custom_root_body_visitor) {
// Mark the heap roots including global variables, stack variables,
// etc., and all objects reachable from them.
heap()->IterateRoots(root_visitor, base::EnumSet<SkipRoot>{SkipRoot::kWeak});
// Custom marking for string table and top optimized frame.
MarkStringTable(custom_root_body_visitor);
ProcessTopOptimizedFrame(custom_root_body_visitor);
}
void MarkCompactCollector::VisitObject(HeapObject obj) {
marking_visitor_->Visit(obj.map(), obj);
}
void MarkCompactCollector::RevisitObject(HeapObject obj) {
DCHECK(marking_state()->IsBlack(obj));
DCHECK_IMPLIES(MemoryChunk::FromHeapObject(obj)->IsFlagSet(
MemoryChunk::HAS_PROGRESS_BAR),
0u == MemoryChunk::FromHeapObject(obj)->ProgressBar());
MarkingVisitor::RevisitScope revisit(marking_visitor_.get());
marking_visitor_->Visit(obj.map(), obj);
}
void MarkCompactCollector::MarkDescriptorArrayFromWriteBarrier(
HeapObject host, DescriptorArray descriptors,
int number_of_own_descriptors) {
marking_visitor_->MarkDescriptorArrayFromWriteBarrier(
host, descriptors, number_of_own_descriptors);
}
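// Iterates ephemeron processing and marking until no new objects are
// discovered. If the fixpoint does not converge within
// FLAG_ephemeron_fixpoint_iterations rounds, falls back to the linear
// algorithm in ProcessEphemeronsLinear.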
void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
bool work_to_do = true;
int iterations = 0;
int max_iterations = FLAG_ephemeron_fixpoint_iterations;
while (work_to_do) {
PerformWrapperTracing();
if (iterations >= max_iterations) {
// Give up fixpoint iteration and switch to linear algorithm.
ProcessEphemeronsLinear();
break;
}
// Move ephemerons from next_ephemerons into current_ephemerons to
// drain them in this iteration.
weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
heap()->concurrent_marking()->set_ephemeron_marked(false);
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
if (FLAG_parallel_marking) {
heap_->concurrent_marking()->RescheduleTasksIfNeeded();
}
work_to_do = ProcessEphemerons();
FinishConcurrentMarking(
ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
}
CHECK(weak_objects_.current_ephemerons.IsEmpty());
CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
work_to_do = work_to_do || !marking_worklists()->IsEmpty() ||
heap()->concurrent_marking()->ephemeron_marked() ||
!marking_worklists()->IsEmbedderEmpty() ||
!heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
++iterations;
}
CHECK(marking_worklists()->IsEmpty());
CHECK(weak_objects_.current_ephemerons.IsEmpty());
CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
}
bool MarkCompactCollector::ProcessEphemerons() {
Ephemeron ephemeron;
bool ephemeron_marked = false;
// Drain current_ephemerons and push ephemerons whose key and value are still
// unreachable into next_ephemerons.
while (weak_objects_.current_ephemerons.Pop(kMainThreadTask, &ephemeron)) {
if (ProcessEphemeron(ephemeron.key, ephemeron.value)) {
ephemeron_marked = true;
}
}
// Drain marking worklist and push discovered ephemerons into
// discovered_ephemerons.
DrainMarkingWorklist();
// Drain discovered_ephemerons (filled while draining the marking worklist
// above) and push ephemerons whose key and value are still unreachable into
// next_ephemerons.
while (weak_objects_.discovered_ephemerons.Pop(kMainThreadTask, &ephemeron)) {
if (ProcessEphemeron(ephemeron.key, ephemeron.value)) {
ephemeron_marked = true;
}
}
// Flush local ephemerons for main task to global pool.
weak_objects_.ephemeron_hash_tables.FlushToGlobal(kMainThreadTask);
weak_objects_.next_ephemerons.FlushToGlobal(kMainThreadTask);
return ephemeron_marked;
}
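// Fallback for the rare case in which the fixpoint iteration above does not
// converge quickly: builds an explicit key->values multimap so that every
// newly marked key can immediately mark the values it keeps alive.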
void MarkCompactCollector::ProcessEphemeronsLinear() {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_LINEAR);
CHECK(heap()->concurrent_marking()->IsStopped());
std::unordered_multimap<HeapObject, HeapObject, Object::Hasher> key_to_values;
Ephemeron ephemeron;
DCHECK(weak_objects_.current_ephemerons.IsEmpty());
weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
while (weak_objects_.current_ephemerons.Pop(kMainThreadTask, &ephemeron)) {
ProcessEphemeron(ephemeron.key, ephemeron.value);
if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
key_to_values.insert(std::make_pair(ephemeron.key, ephemeron.value));
}
}
ephemeron_marking_.newly_discovered_limit = key_to_values.size();
bool work_to_do = true;
while (work_to_do) {
PerformWrapperTracing();
ResetNewlyDiscovered();
ephemeron_marking_.newly_discovered_limit = key_to_values.size();
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
// Drain marking worklist and push all discovered objects into
// newly_discovered.
ProcessMarkingWorklist<
MarkCompactCollector::MarkingWorklistProcessingMode::
kTrackNewlyDiscoveredObjects>(0);
}
while (
weak_objects_.discovered_ephemerons.Pop(kMainThreadTask, &ephemeron)) {
ProcessEphemeron(ephemeron.key, ephemeron.value);
if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
key_to_values.insert(std::make_pair(ephemeron.key, ephemeron.value));
}
}
if (ephemeron_marking_.newly_discovered_overflowed) {
// If newly_discovered overflowed, just visit all ephemerons in
// next_ephemerons.
weak_objects_.next_ephemerons.Iterate([&](Ephemeron ephemeron) {
if (non_atomic_marking_state()->IsBlackOrGrey(ephemeron.key) &&
non_atomic_marking_state()->WhiteToGrey(ephemeron.value)) {
marking_worklists()->Push(ephemeron.value);
}
});
} else {
// This is the good case: newly_discovered stores all discovered
// objects. Now use key_to_values to see if discovered objects keep more
// objects alive due to ephemeron semantics.
for (HeapObject object : ephemeron_marking_.newly_discovered) {
auto range = key_to_values.equal_range(object);
for (auto it = range.first; it != range.second; ++it) {
HeapObject value = it->second;
MarkObject(object, value);
}
}
}
// Do NOT drain marking worklist here, otherwise the current checks
// for work_to_do are not sufficient for determining if another iteration
// is necessary.
work_to_do = !marking_worklists()->IsEmpty() ||
!marking_worklists()->IsEmbedderEmpty() ||
!heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
}
ResetNewlyDiscovered();
ephemeron_marking_.newly_discovered.shrink_to_fit();
CHECK(marking_worklists()->IsEmpty());
}
void MarkCompactCollector::PerformWrapperTracing() {
if (heap_->local_embedder_heap_tracer()->InUse()) {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_TRACING);
{
LocalEmbedderHeapTracer::ProcessingScope scope(
heap_->local_embedder_heap_tracer());
HeapObject object;
while (marking_worklists()->PopEmbedder(&object)) {
scope.TracePossibleWrapper(JSObject::cast(object));
}
}
heap_->local_embedder_heap_tracer()->Trace(
std::numeric_limits<double>::infinity());
}
}
void MarkCompactCollector::DrainMarkingWorklist() { ProcessMarkingWorklist(0); }
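// Processes objects from the main marking worklists. A bytes_to_process
// value of zero means "drain completely"; otherwise processing stops once at
// least bytes_to_process bytes have been visited. Fillers left behind by
// left trimming are skipped, and in per-context mode the visited size is
// attributed to the inferred native context.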
template <MarkCompactCollector::MarkingWorklistProcessingMode mode>
size_t MarkCompactCollector::ProcessMarkingWorklist(size_t bytes_to_process) {
HeapObject object;
size_t bytes_processed = 0;
bool is_per_context_mode = marking_worklists()->IsPerContextMode();
Isolate* isolate = heap()->isolate();
while (marking_worklists()->Pop(&object) ||
marking_worklists()->PopOnHold(&object)) {
// Left trimming may result in grey or black filler objects on the marking
// worklist. Ignore these objects.
if (object.IsFreeSpaceOrFiller()) {
// Due to copying mark bits and the fact that grey and black have their
// first bit set, one word fillers are always black.
DCHECK_IMPLIES(
object.map() == ReadOnlyRoots(heap()).one_pointer_filler_map(),
marking_state()->IsBlack(object));
// Other fillers may be black or grey depending on the color of the object
// that was trimmed.
DCHECK_IMPLIES(
object.map() != ReadOnlyRoots(heap()).one_pointer_filler_map(),
marking_state()->IsBlackOrGrey(object));
continue;
}
DCHECK(object.IsHeapObject());
DCHECK(heap()->Contains(object));
DCHECK(!(marking_state()->IsWhite(object)));
if (mode == MarkCompactCollector::MarkingWorklistProcessingMode::
kTrackNewlyDiscoveredObjects) {
AddNewlyDiscovered(object);
}
Map map = object.map(isolate);
if (is_per_context_mode) {
Address context;
if (native_context_inferrer_.Infer(isolate, map, object, &context)) {
marking_worklists()->SwitchToContext(context);
}
}
size_t visited_size = marking_visitor_->Visit(map, object);
if (is_per_context_mode) {
native_context_stats_.IncrementSize(marking_worklists()->Context(), map,
object, visited_size);
}
bytes_processed += visited_size;
if (bytes_to_process && bytes_processed >= bytes_to_process) {
break;
}
}
return bytes_processed;
}
// Generate definitions for use in other files.
template size_t MarkCompactCollector::ProcessMarkingWorklist<
MarkCompactCollector::MarkingWorklistProcessingMode::kDefault>(
size_t bytes_to_process);
template size_t MarkCompactCollector::ProcessMarkingWorklist<
MarkCompactCollector::MarkingWorklistProcessingMode::
kTrackNewlyDiscoveredObjects>(size_t bytes_to_process);
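// Applies the ephemeron marking rule to a single (key, value) pair:
//   - if the key is already marked and the value is still white, the value
//     is marked grey and pushed onto the marking worklist (returns true to
//     signal progress),
//   - if both key and value are still white, the pair is deferred to
//     next_ephemerons for a later iteration,
//   - otherwise nothing needs to be done.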
bool MarkCompactCollector::ProcessEphemeron(HeapObject key, HeapObject value) {
if (marking_state()->IsBlackOrGrey(key)) {
if (marking_state()->WhiteToGrey(value)) {
marking_worklists()->Push(value);
return true;
}
} else if (marking_state()->IsWhite(value)) {
weak_objects_.next_ephemerons.Push(kMainThreadTask, Ephemeron{key, value});
}
return false;
}
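// Computes the weak closure over ephemerons after the regular marking
// closure is complete: flushes ephemerons buffered during incremental
// marking and iterates until the marking worklists are empty.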
void MarkCompactCollector::ProcessEphemeronMarking() {
DCHECK(marking_worklists()->IsEmpty());
// Incremental marking might leave ephemerons in the main task's local
// buffer; flush them into the global pool.
weak_objects_.next_ephemerons.FlushToGlobal(kMainThreadTask);
ProcessEphemeronsUntilFixpoint();
CHECK(marking_worklists()->IsEmpty());
CHECK(heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
}
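// Visits the code object of the topmost optimized frame, if any, so that its
// embedded objects stay alive when the code cannot be deoptimized at the
// current pc. Walking stops at the first interpreted or optimized frame.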
void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
!it.done(); it.Advance()) {
if (it.frame()->type() == StackFrame::INTERPRETED) {
return;
}
if (it.frame()->type() == StackFrame::OPTIMIZED) {
Code code = it.frame()->LookupCode();
if (!code.CanDeoptAt(it.frame()->pc())) {
Code::BodyDescriptor::IterateBody(code.map(), code, visitor);
}
return;
}
}
}
void MarkCompactCollector::RecordObjectStats() {
if (V8_UNLIKELY(TracingFlags::is_gc_stats_enabled())) {
heap()->CreateObjectStats();
ObjectStatsCollector collector(heap(), heap()->live_object_stats_.get(),
heap()->dead_object_stats_.get());
collector.Collect();
if (V8_UNLIKELY(TracingFlags::gc_stats.load(std::memory_order_relaxed) &
v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
std::stringstream live, dead;
heap()->live_object_stats_->Dump(live);
heap()->dead_object_stats_->Dump(dead);
TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
"V8.GC_Objects_Stats", TRACE_EVENT_SCOPE_THREAD,
"live", TRACE_STR_COPY(live.str().c_str()), "dead",
TRACE_STR_COPY(dead.str().c_str()));
}
if (FLAG_trace_gc_object_stats) {
heap()->live_object_stats_->PrintJSON("live");
heap()->dead_object_stats_->PrintJSON("dead");
}
heap()->live_object_stats_->CheckpointObjectStats();
heap()->dead_object_stats_->ClearObjectStats();
}
}
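// Marking entry point for the atomic pause: finalizes incremental marking if
// it was running (otherwise expects it to be stopped), marks the roots,
// drains the marking worklists together with the concurrent markers, and
// then iterates embedder tracing, ephemeron marking, and weak global handle
// processing until the full weak closure is established.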
void MarkCompactCollector::MarkLiveObjects() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
// The recursive GC marker detects when it is nearing stack overflow,
// and switches to a different marking system. JS interrupts interfere
// with the C stack limit check.
PostponeInterruptsScope postpone(isolate());
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
IncrementalMarking* incremental_marking = heap_->incremental_marking();
if (was_marked_incrementally_) {
incremental_marking->Finalize();
} else {
CHECK(incremental_marking->IsStopped());
}
}
#ifdef DEBUG
DCHECK(state_ == PREPARE_GC);
state_ = MARK_LIVE_OBJECTS;
#endif
heap_->local_embedder_heap_tracer()->EnterFinalPause();
RootMarkingVisitor root_visitor(this);
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
CustomRootBodyMarkingVisitor custom_root_body_visitor(this);
MarkRoots(&root_visitor, &custom_root_body_visitor);
}
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN);
if (FLAG_parallel_marking) {
heap_->concurrent_marking()->RescheduleTasksIfNeeded();
}
DrainMarkingWorklist();
FinishConcurrentMarking(
ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
DrainMarkingWorklist();
}
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
DCHECK(marking_worklists()->IsEmpty());
// Mark objects reachable through the embedder heap. This phase is
// opportunistic as it may not discover graphs that are only reachable
// through ephemerons.
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_EMBEDDER_TRACING_CLOSURE);
do {
// PerformWrapperTracing() also empties the work items collected by
// concurrent markers. As a result this call needs to happen at least
// once.
PerformWrapperTracing();
DrainMarkingWorklist();
} while (!heap_->local_embedder_heap_tracer()->IsRemoteTracingDone() ||
!marking_worklists()->IsEmbedderEmpty());
DCHECK(marking_worklists()->IsEmbedderEmpty());
DCHECK(marking_worklists()->IsEmpty());
}
// At this point the objects reachable from the roots are marked, while
// unreachable objects are still unmarked. Mark objects reachable due to
// embedder heap tracing or harmony weak maps.
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON);
ProcessEphemeronMarking();
DCHECK(marking_worklists()->IsEmpty());
}
// The objects reachable from the roots, weak maps, and embedder heap
// tracing are marked. Objects pointed to only by weak global handles cannot
// be immediately reclaimed. Instead, we have to mark them as pending and
// mark objects reachable from them.
//
// First we identify nonlive weak handles and mark them as pending
// destruction.
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES);
heap()->isolate()->global_handles()->IterateWeakRootsIdentifyFinalizers(
&IsUnmarkedHeapObject);
DrainMarkingWorklist();
}
// Process finalizers, effectively keeping them alive until the next
// garbage collection.
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS);
heap()->isolate()->global_handles()->IterateWeakRootsForFinalizers(
&root_visitor);
DrainMarkingWorklist();
}
// Repeat ephemeron processing from the newly marked objects.
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
ProcessEphemeronMarking();
DCHECK(marking_worklists()->IsEmbedderEmpty());
DCHECK(marking_worklists()->IsEmpty());
}
{
heap()->isolate()->global_handles()->IterateWeakRootsForPhantomHandles(
&IsUnmarkedHeapObject);
}
}
if (was_marked_incrementally_) {
heap()->marking_barrier()->Deactivate();
}
epoch_++;
}
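// Clears all references to objects that did not survive marking: dead string
// table entries and external strings, flushable bytecode, flushed
// JSFunctions, weak lists, full map transitions, weak references, weak
// collections, and JS weak refs. Code embedding dead objects is marked for
// deoptimization.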
void MarkCompactCollector::ClearNonLiveReferences() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE);
// Prune the string table, removing all strings only pointed to by the
// string table. We cannot use string_table() here because the string
// table is marked.
StringTable string_table = heap()->string_table();
InternalizedStringTableCleaner internalized_visitor(heap(), string_table);
string_table.IterateElements(&internalized_visitor);
string_table.ElementsRemoved(internalized_visitor.PointersRemoved());
ExternalStringTableCleaner external_visitor(heap());
heap()->external_string_table_.IterateAll(&external_visitor);
heap()->external_string_table_.CleanUpAll();
}
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_FLUSHABLE_BYTECODE);
ClearOldBytecodeCandidates();
}
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_FLUSHED_JS_FUNCTIONS);
ClearFlushedJsFunctions();
}
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
// Process the weak references.
MarkCompactWeakObjectRetainer mark_compact_object_retainer(
non_atomic_marking_state());
heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
}
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
// ClearFullMapTransitions must be called before weak references are
// cleared.
ClearFullMapTransitions();
}
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
ClearWeakReferences();
ClearWeakCollections();
ClearJSWeakRefs();
}
MarkDependentCodeForDeoptimization();
DCHECK(weak_objects_.transition_arrays.IsEmpty());
DCHECK(weak_objects_.weak_references.IsEmpty());
DCHECK(weak_objects_.weak_objects_in_code.IsEmpty());
DCHECK(weak_objects_.js_weak_refs.IsEmpty());
DCHECK(weak_objects_.weak_cells.IsEmpty());
DCHECK(weak_objects_.bytecode_flushing_candidates.IsEmpty());
DCHECK(weak_objects_.flushed_js_functions.IsEmpty());
}
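// Marks code for deoptimization if it embeds objects that are no longer
// reachable, and clears the dead embedded objects afterwards.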
void MarkCompactCollector::MarkDependentCodeForDeoptimization() {
std::pair<HeapObject, Code> weak_object_in_code;
while (weak_objects_.weak_objects_in_code.Pop(kMainThreadTask,
&weak_object_in_code)) {
HeapObject object = weak_object_in_code.first;
Code code = weak_object_in_code.second;
if (!non_atomic_marking_state()->IsBlackOrGrey(object) &&
!code.embedded_objects_cleared()) {
if (!code.marked_for_deoptimization()) {
code.SetMarkedForDeoptimization("weak objects");
have_code_to_deoptimize_ = true;
}
code.ClearEmbeddedObjects(heap_);
DCHECK(code.embedded_objects_cleared());
}
}
}
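// If the given dead map is the target of a simple transition from a live
// parent map, clears that transition and trims the shared descriptor array.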
void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map dead_target) {
DCHECK(non_atomic_marking_state()->IsWhite(dead_target));
Object potential_parent = dead_target.constructor_or_backpointer();
if (potential_parent.IsMap()) {
Map parent = Map::cast(potential_parent);
DisallowHeapAllocation no_gc_obviously;
if (non_atomic_marking_state()->IsBlackOrGrey(parent) &&
TransitionsAccessor(isolate(), parent, &no_gc_obviously)
.HasSimpleTransitionTo(dead_target)) {
ClearPotentialSimpleMapTransition(parent, dead_target);
}
}
}
void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map map,
Map dead_target) {
DCHECK(!map.is_prototype_map());
DCHECK(!dead_target.is_prototype_map());
DCHECK_EQ(map.raw_transitions(), HeapObjectReference::Weak(dead_target));
// Take ownership of the descriptor array.
int number_of_own_descriptors = map.NumberOfOwnDescriptors();
DescriptorArray descriptors = map.instance_descriptors();
if (descriptors == dead_target.instance_descriptors() &&
number_of_own_descriptors > 0) {
TrimDescriptorArray(map, descriptors);
DCHECK(descriptors.number_of_descriptors() == number_of_own_descriptors);
}
}
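// Flushes the bytecode of a SharedFunctionInfo whose BytecodeArray is dead
// by converting the BytecodeArray in place into an UncompiledData object and
// invalidating any recorded slots that pointed into it.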
void MarkCompactCollector::FlushBytecodeFromSFI(
SharedFunctionInfo shared_info) {
DCHECK(shared_info.HasBytecodeArray());
// Retain objects required for uncompiled data.
String inferred_name = shared_info.inferred_name();
int start_position = shared_info.StartPosition();
int end_position = shared_info.EndPosition();
shared_info.DiscardCompiledMetadata(
isolate(), [](HeapObject object, ObjectSlot slot, HeapObject target) {
RecordSlot(object, slot, target);
});
// The size of the bytecode array should always be at least as large as an
// UncompiledData object.
STATIC_ASSERT(BytecodeArray::SizeFor(0) >=
UncompiledDataWithoutPreparseData::kSize);
// Replace the bytecode array with an uncompiled data object.
HeapObject compiled_data = shared_info.GetBytecodeArray();
Address compiled_data_start = compiled_data.address();
int compiled_data_size = compiled_data.Size();
MemoryChunk* chunk = MemoryChunk::FromAddress(compiled_data_start);
// Clear any recorded slots for the compiled data, since they are now invalid.
DCHECK_NULL(chunk->sweeping_slot_set());
RememberedSet<OLD_TO_NEW>::RemoveRange(
chunk, compiled_data_start, compiled_data_start + compiled_data_size,
SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(
chunk, compiled_data_start, compiled_data_start + compiled_data_size,
SlotSet::FREE_EMPTY_BUCKETS);
// Swap the map, using set_map_after_allocation to avoid verify-heap checks,
// which are not necessary since we are doing this during the GC atomic pause.
compiled_data.set_map_after_allocation(
ReadOnlyRoots(heap()).uncompiled_data_without_preparse_data_map(),
SKIP_WRITE_BARRIER);
// Create a filler object for any left over space in the bytecode array.
if (!heap()->IsLargeObject(compiled_data)) {
heap()->CreateFillerObjectAt(
compiled_data.address() + UncompiledDataWithoutPreparseData::kSize,
compiled_data_size - UncompiledDataWithoutPreparseData::kSize,
ClearRecordedSlots::kNo);
}
// Initialize the uncompiled data.
UncompiledData uncompiled_data = UncompiledData::cast(compiled_data);
uncompiled_data.InitAfterBytecodeFlush(
inferred_name, start_position, end_position,
[](HeapObject object, ObjectSlot slot, HeapObject target) {
RecordSlot(object, slot, target);
});
// Mark the uncompiled data as black, and ensure all fields have already been
// marked.
DCHECK(non_atomic_marking_state()->IsBlackOrGrey(inferred_name));
non_atomic_marking_state()->WhiteToBlack(uncompiled_data);
// Use the raw function data setter to avoid validity checks, since we're
// performing the unusual task of decompiling.
shared_info.set_function_data(uncompiled_data);
DCHECK(!shared_info.is_compiled());
}
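// Processes all bytecode flushing candidates: flushes bytecode that did not
// get marked and records the (possibly updated) function_data slot in either
// case.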
void MarkCompactCollector::ClearOldBytecodeCandidates() {
DCHECK(FLAG_flush_bytecode ||
weak_objects_.bytecode_flushing_candidates.IsEmpty());
SharedFunctionInfo flushing_candidate;
while (weak_objects_.bytecode_flushing_candidates.Pop(kMainThreadTask,
&flushing_candidate)) {
// If the BytecodeArray is dead, flush it, which will replace the field with
// an uncompiled data object.
if (!non_atomic_marking_state()->IsBlackOrGrey(
flushing_candidate.GetBytecodeArray())) {
FlushBytecodeFromSFI(flushing_candidate);
}
// Now record the slot, which either points to the new uncompiled data or to
// the BytecodeArray, which is still alive.
ObjectSlot slot =
flushing_candidate.RawField(SharedFunctionInfo::kFunctionDataOffset);
RecordSlot(flushing_candidate, slot, HeapObject::cast(*slot));
}
}
void MarkCompactCollector::ClearFlushedJsFunctions() {
DCHECK(FLAG_flush_bytecode || weak_objects_.flushed_js_functions.IsEmpty());
JSFunction flushed_js_function;
while (weak_objects_.flushed_js_functions.Pop(kMainThreadTask,
&flushed_js_function)) {
auto gc_notify_updated_slot = [](HeapObject object, ObjectSlot slot,
Object target) {
RecordSlot(object, slot, HeapObject::cast(target));
};
flushed_js_function.ResetIfBytecodeFlushed(gc_notify_updated_slot);
}
}
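// Pops all transition arrays discovered during marking, compacts away
// transitions to dead target maps, and trims the descriptor array if its
// owning map died.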
void MarkCompactCollector::ClearFullMapTransitions() {
TransitionArray array;
while (weak_objects_.transition_arrays.Pop(kMainThreadTask, &array)) {
int num_transitions = array.number_of_entries();
if (num_transitions > 0) {
Map map;
// The array might contain "undefined" elements because it's not yet
// filled. Allow it.
if (array.GetTargetIfExists(0, isolate(), &map)) {
DCHECK(!map.is_null()); // Weak pointers aren't cleared yet.
Map parent = Map::cast(map.constructor_or_backpointer());
bool parent_is_alive =
non_atomic_marking_state()->IsBlackOrGrey(parent);
DescriptorArray descriptors =
parent_is_alive ? parent.instance_descriptors() : DescriptorArray();
bool descriptors_owner_died =
CompactTransitionArray(parent, array, descriptors);
if (descriptors_owner_died) {
TrimDescriptorArray(parent, descriptors);
}
}
}
}
}
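// Moves all live transitions of the given map to the front of its transition
// array, right-trims the unused tail, and returns whether the owner of the
// shared descriptor array died in the process.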
bool MarkCompactCollector::CompactTransitionArray(Map map,
TransitionArray transitions,
DescriptorArray descriptors) {
DCHECK(!map.is_prototype_map());
int num_transitions = transitions.number_of_entries();
bool descriptors_owner_died = false;
int transition_index = 0;
// Compact all live transitions to the left.
for (int i = 0; i < num_transitions; ++i) {
Map target = transitions.GetTarget(i);
DCHECK_EQ(target.constructor_or_backpointer(), map);
if (non_atomic_marking_state()->IsWhite(target)) {
if (!descriptors.is_null() &&
target.instance_descriptors() == descriptors) {
DCHECK(!target.is_prototype_map());
descriptors_owner_died = true;
}
} else {
if (i != transition_index) {
Name key = transitions.GetKey(i);
transitions.SetKey(transition_index, key);
HeapObjectSlot key_slot = transitions.GetKeySlot(transition_index);
RecordSlot(transitions, key_slot, key);
MaybeObject raw_target = transitions.GetRawTarget(i);
transitions.SetRawTarget(transition_index, raw_target);
HeapObjectSlot target_slot =
transitions.GetTargetSlot(transition_index);
RecordSlot(transitions, target_slot, raw_target->GetHeapObject());
}
transition_index++;
}
}
// If there are no transitions to be cleared, return.
if (transition_index == num_transitions) {
DCHECK(!descriptors_owner_died);
return false;
}
// Note that we never eliminate a transition array, though we might right-trim
// such that number_of_transitions() == 0. If this assumption changes,
// TransitionArray::Insert() will need to deal with the case that a transition
// array disappeared during GC.
int trim = transitions.Capacity() - transition_index;
if (trim > 0) {
heap_->RightTrimWeakFixedArray(transitions,
trim * TransitionArray::kEntrySize);
transitions.SetNumberOfTransitions(transition_index);
}
return descriptors_owner_died;
}
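// Shrinks a descriptor array by descriptors_to_trim entries: removes the
// recorded OLD_TO_NEW and OLD_TO_OLD slots covering the trimmed tail,
// replaces it with a filler object, and updates number_of_all_descriptors.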
void MarkCompactCollector::RightTrimDescriptorArray(DescriptorArray array,
int descriptors_to_trim) {
int old_nof_all_descriptors = array.number_of_all_descriptors();
int new_nof_all_descriptors = old_nof_all_descriptors - descriptors_to_trim;
DCHECK_LT(0, descriptors_to_trim);
DCHECK_LE(0, new_nof_all_descriptors);
Address start = array.GetDescriptorSlot(new_nof_all_descriptors).address();
Address end = array.GetDescriptorSlot(old_nof_all_descriptors).address();
MemoryChunk* chunk = MemoryChunk::FromHeapObject(array);
DCHECK_NULL(chunk->sweeping_slot_set());
RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, start, end,
SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(chunk, start, end,
SlotSet::FREE_EMPTY_BUCKETS);
heap()->CreateFillerObjectAt(start, static_cast<int>(end - start),
ClearRecordedSlots::kNo);
array.set_number_of_all_descriptors(new_nof_all_descriptors);
}
void MarkCompactCollector::TrimDescriptorArray(Map map,
DescriptorArray descriptors) {
int number_of_own_descriptors = map.NumberOfOwnDescriptors();
if (number_of_own_descriptors == 0) {
DCHECK(descriptors == ReadOnlyRoots(heap_).empty_descriptor_array());
return;
}
// TODO(ulan): Trim only if slack is greater than some percentage threshold.
int to_trim =
descriptors.number_of_all_descriptors() - number_of_own_descriptors;
if (to_trim > 0) {
descriptors.set_number_of_descriptors(number_of_own_descriptors);
RightTrimDescriptorArray(descriptors, to_trim);
TrimEnumCache(map, descriptors);
descriptors.Sort();
if (FLAG_unbox_double_fields) {
LayoutDescriptor layout_descriptor = map.layout_descriptor();
layout_descriptor = layout_descriptor.Trim(heap_, map, descriptors,
number_of_own_descriptors);
SLOW_DCHECK(layout_descriptor.IsConsistentWithMap(map, true));
}
}
DCHECK(descriptors.number_of_descriptors() == number_of_own_descriptors);
map.set_owns_descriptors(true);
}
void MarkCompactCollector::TrimEnumCache(Map map, DescriptorArray descriptors) {
int live_enum = map.EnumLength();
if (live_enum == kInvalidEnumCacheSentinel) {
live_enum = map.NumberOfEnumerableProperties();
}
if (live_enum == 0) return descriptors.ClearEnumCache();
EnumCache enum_cache = descriptors.enum_cache();
FixedArray keys = enum_cache.keys();
int to_trim = keys.length() - live_enum;
if (to_trim <= 0) return;
heap_->RightTrimFixedArray(keys, to_trim);
FixedArray indices = enum_cache.indices();
to_trim = indices.length() - live_enum;
if (to_trim <= 0) return;
heap_->RightTrimFixedArray(indices, to_trim);
}
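// Removes entries with unmarked keys from all ephemeron hash tables and
// drops dead tables from the ephemeron remembered set.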
void MarkCompactCollector::ClearWeakCollections() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
EphemeronHashTable table;
while (weak_objects_.ephemeron_hash_tables.Pop(kMainThreadTask, &table)) {
for (InternalIndex i : table.IterateEntries()) {
HeapObject key = HeapObject::cast(table.KeyAt(i));
#ifdef VERIFY_HEAP
Object value = table.ValueAt(i);
if (value.IsHeapObject()) {
CHECK_IMPLIES(
non_atomic_marking_state()->IsBlackOrGrey(key),
non_atomic_marking_state()->IsBlackOrGrey(HeapObject::cast(value)));
}
#endif
if (!non_atomic_marking_state()->IsBlackOrGrey(key)) {
table.RemoveEntry(i);
}
}
}
for (auto it = heap_->ephemeron_remembered_set_.begin();
it != heap_->ephemeron_remembered_set_.end();) {
if (!non_atomic_marking_state()->IsBlackOrGrey(it->first)) {
it = heap_->ephemeron_remembered_set_.erase(it);
} else {
++it;
}
}
}
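// Clears weak references whose targets did not survive marking; live targets
// have their slots re-recorded. A dead map target additionally clears a
// potential simple transition pointing at it.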
void MarkCompactCollector::ClearWeakReferences() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
std::pair<HeapObject, HeapObjectSlot> slot;
HeapObjectReference cleared_weak_ref =
HeapObjectReference::ClearedValue(isolate());
while (weak_objects_.weak_references.Pop(kMainThreadTask, &slot)) {
HeapObject value;
// The slot could have been overwritten, so we have to treat it
// as MaybeObjectSlot.
MaybeObjectSlot location(slot.second);
if ((*location)->GetHeapObjectIfWeak(&value)) {
DCHECK(!value.IsCell());
if (non_atomic_marking_state()->IsBlackOrGrey(value)) {
// The value of the weak reference is alive.
RecordSlot(slot.first, HeapObjectSlot(location), value);
} else {
if (value.IsMap()) {
// The map is non-live.
ClearPotentialSimpleMapTransition(Map::cast(value));
}
location.store(cleared_weak_ref);
}
}
}
}
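// Processes JSWeakRefs and WeakCells: dead targets are replaced by
// undefined, dead WeakCell targets enqueue their JSFinalizationRegistry for
// cleanup, and dead unregister tokens are removed from the registry's key
// map. Finally, a cleanup task is posted if needed.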
void MarkCompactCollector::ClearJSWeakRefs() {
if (!FLAG_harmony_weak_refs) {
return;
}
JSWeakRef weak_ref;
while (weak_objects_.js_weak_refs.Pop(kMainThreadTask, &weak_ref)) {
HeapObject target = HeapObject::cast(weak_ref.target());
if (!non_atomic_marking_state()->IsBlackOrGrey(target)) {
weak_ref.set_target(ReadOnlyRoots(isolate()).undefined_value());
} else {
// The value of the JSWeakRef is alive.
ObjectSlot slot = weak_ref.RawField(JSWeakRef::kTargetOffset);
RecordSlot(weak_ref, slot, target);
}
}
WeakCell weak_cell;
while (weak_objects_.weak_cells.Pop(kMainThreadTask, &weak_cell)) {
auto gc_notify_updated_slot = [](HeapObject object, ObjectSlot slot,
Object target) {
if (target.IsHeapObject()) {
RecordSlot(object, slot, HeapObject::cast(target));
}
};
HeapObject target = HeapObject::cast(weak_cell.target());
if (!non_atomic_marking_state()->IsBlackOrGrey(target)) {
DCHECK(!target.IsUndefined());
// The value of the WeakCell is dead.
JSFinalizationRegistry finalization_registry =
JSFinalizationRegistry::cast(weak_cell.finalization_registry());
if (!finalization_registry.scheduled_for_cleanup()) {
heap()->EnqueueDirtyJSFinalizationRegistry(finalization_registry,
gc_notify_updated_slot);
}
// We're modifying the pointers in WeakCell and JSFinalizationRegistry
// during GC; thus we need to record the slots that get written. The normal
// write barrier is not enough, since it's disabled before GC.
weak_cell.Nullify(isolate(), gc_notify_updated_slot);
DCHECK(finalization_registry.NeedsCleanup());
DCHECK(finalization_registry.scheduled_for_cleanup());
} else {
// The value of the WeakCell is alive.
ObjectSlot slot = weak_cell.RawField(WeakCell::kTargetOffset);
RecordSlot(weak_cell, slot, HeapObject::cast(*slot));
}
HeapObject unregister_token =
HeapObject::cast(weak_cell.unregister_token());
if (!non_atomic_marking_state()->IsBlackOrGrey(unregister_token)) {
// The unregister token is dead. Remove any corresponding entries in the
// key map. Multiple WeakCells with the same token will have their
// unregister_token fields set to undefined when the first such WeakCell is
// processed. Like above, we're modifying pointers during GC, so record the
// slots.
HeapObject undefined = ReadOnlyRoots(isolate()).undefined_value();
JSFinalizationRegistry finalization_registry =
JSFinalizationRegistry::cast(weak_cell.finalization_registry());
finalization_registry.RemoveUnregisterToken(
JSReceiver::cast(unregister_token), isolate(),
[undefined](WeakCell matched_cell) {
matched_cell.set_unregister_token(undefined);
},
gc_notify_updated_slot);
// The following is necessary because, if weak_cell has already been popped
// and removed from the FinalizationRegistry, the call to
// JSFinalizationRegistry::RemoveUnregisterToken above will not find
// weak_cell itself and therefore will not clear its unregister token.
weak_cell.set_unregister_token(undefined);
} else {
// The unregister_token is alive.
ObjectSlot slot = weak_cell.RawField(WeakCell::kUnregisterTokenOffset);
RecordSlot(weak_cell, slot, HeapObject::cast(*slot));
}
}
heap()->PostFinalizationRegistryCleanupTaskIfNeeded();
}
void MarkCompactCollector::AbortWeakObjects() {
weak_objects_.transition_arrays.Clear();
weak_objects_