// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/heap.h"
#include <atomic>
#include <cinttypes>
#include <iomanip>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include "include/v8-locker.h"
#include "src/api/api-inl.h"
#include "src/base/bits.h"
#include "src/base/flags.h"
#include "src/base/logging.h"
#include "src/base/once.h"
#include "src/base/platform/mutex.h"
#include "src/base/utils/random-number-generator.h"
#include "src/builtins/accessors.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/compilation-cache.h"
#include "src/common/assert-scope.h"
#include "src/common/globals.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/embedder-state.h"
#include "src/execution/isolate-utils-inl.h"
#include "src/execution/microtask-queue.h"
#include "src/execution/v8threads.h"
#include "src/execution/vm-state-inl.h"
#include "src/handles/global-handles-inl.h"
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/base/stack.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/code-range.h"
#include "src/heap/code-stats.h"
#include "src/heap/collection-barrier.h"
#include "src/heap/combined-heap.h"
#include "src/heap/concurrent-allocator.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/cppgc-js/cpp-heap.h"
#include "src/heap/embedder-tracing.h"
#include "src/heap/finalization-registry-cleanup-task.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-controller.h"
#include "src/heap/heap-layout-tracer.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/large-spaces.h"
#include "src/heap/local-heap.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking-barrier-inl.h"
#include "src/heap/marking-barrier.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-measurement.h"
#include "src/heap/memory-reducer.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/paged-spaces-inl.h"
#include "src/heap/parked-scope.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/remembered-set.h"
#include "src/heap/safepoint.h"
#include "src/heap/scavenge-job.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/stress-marking-observer.h"
#include "src/heap/stress-scavenge-observer.h"
#include "src/heap/sweeper.h"
#include "src/init/bootstrapper.h"
#include "src/init/v8.h"
#include "src/interpreter/interpreter.h"
#include "src/logging/log.h"
#include "src/logging/runtime-call-stats-scope.h"
#include "src/numbers/conversions.h"
#include "src/objects/data-handler.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/free-space-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/instance-type.h"
#include "src/objects/maybe-object.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/slots-atomic-inl.h"
#include "src/objects/slots-inl.h"
#include "src/regexp/regexp.h"
#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/serializer-deserializer.h"
#include "src/snapshot/snapshot.h"
#include "src/strings/string-stream.h"
#include "src/strings/unicode-decoder.h"
#include "src/strings/unicode-inl.h"
#include "src/tracing/trace-event.h"
#include "src/utils/utils-inl.h"
#include "src/utils/utils.h"
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
#include "src/heap/conservative-stack-visitor.h"
#endif
#include "src/base/platform/wrappers.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
namespace v8 {
namespace internal {
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
Isolate* Heap::GetIsolateFromWritableObject(HeapObject object) {
return reinterpret_cast<Isolate*>(
third_party_heap::Heap::GetIsolate(object.address()));
}
#endif
// These are outside the Heap class so they can be forward-declared
// in heap-write-barrier-inl.h.
bool Heap_PageFlagsAreConsistent(HeapObject object) {
return Heap::PageFlagsAreConsistent(object);
}
bool Heap_ValueMightRequireGenerationalWriteBarrier(HeapObject value) {
if (!value.IsCode()) return true;
// Code objects are never in new space and thus don't require generational
// write barrier.
DCHECK(!ObjectInYoungGeneration(value));
return false;
}
void Heap_GenerationalBarrierSlow(HeapObject object, Address slot,
HeapObject value) {
Heap::GenerationalBarrierSlow(object, slot, value);
}
void Heap_WriteBarrierForCodeSlow(Code host) {
Heap::WriteBarrierForCodeSlow(host);
}
void Heap_GenerationalBarrierForCodeSlow(Code host, RelocInfo* rinfo,
HeapObject object) {
Heap::GenerationalBarrierForCodeSlow(host, rinfo, object);
}
void Heap_GenerationalEphemeronKeyBarrierSlow(Heap* heap,
EphemeronHashTable table,
Address slot) {
heap->RecordEphemeronKeyWrite(table, slot);
}
void Heap::SetConstructStubCreateDeoptPCOffset(int pc_offset) {
DCHECK_EQ(Smi::zero(), construct_stub_create_deopt_pc_offset());
set_construct_stub_create_deopt_pc_offset(Smi::FromInt(pc_offset));
}
void Heap::SetConstructStubInvokeDeoptPCOffset(int pc_offset) {
DCHECK_EQ(Smi::zero(), construct_stub_invoke_deopt_pc_offset());
set_construct_stub_invoke_deopt_pc_offset(Smi::FromInt(pc_offset));
}
void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
DCHECK_EQ(Smi::zero(), interpreter_entry_return_pc_offset());
set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
}
void Heap::SetSerializedObjects(FixedArray objects) {
DCHECK(isolate()->serializer_enabled());
set_serialized_objects(objects);
}
void Heap::SetSerializedGlobalProxySizes(FixedArray sizes) {
DCHECK(isolate()->serializer_enabled());
set_serialized_global_proxy_sizes(sizes);
}
void Heap::SetBasicBlockProfilingData(Handle<ArrayList> list) {
set_basic_block_profiling_data(*list);
}
bool Heap::GCCallbackTuple::operator==(
const Heap::GCCallbackTuple& other) const {
return other.callback == callback && other.data == data;
}
class ScavengeTaskObserver : public AllocationObserver {
public:
ScavengeTaskObserver(Heap* heap, intptr_t step_size)
: AllocationObserver(step_size), heap_(heap) {}
void Step(int bytes_allocated, Address, size_t) override {
heap_->ScheduleScavengeTaskIfNeeded();
}
private:
Heap* heap_;
};
Heap::Heap()
: isolate_(isolate()),
heap_allocator_(this),
memory_pressure_level_(MemoryPressureLevel::kNone),
global_pretenuring_feedback_(kInitialFeedbackCapacity),
safepoint_(std::make_unique<IsolateSafepoint>(this)),
external_string_table_(this),
allocation_type_for_in_place_internalizable_strings_(
isolate()->OwnsStringTables() ? AllocationType::kOld
: AllocationType::kSharedOld),
collection_barrier_(new CollectionBarrier(this)) {
// Ensure old_generation_size_ is a multiple of kPageSize.
DCHECK_EQ(0, max_old_generation_size() & (Page::kPageSize - 1));
max_regular_code_object_size_ = MemoryChunkLayout::MaxRegularCodeObjectSize();
set_native_contexts_list(Smi::zero());
set_allocation_sites_list(Smi::zero());
set_dirty_js_finalization_registries_list(Smi::zero());
set_dirty_js_finalization_registries_list_tail(Smi::zero());
// Put a dummy entry in the remembered pages so we can find the list in the
// minidump even if there are no real unmapped pages.
RememberUnmappedPage(kNullAddress, false);
}
Heap::~Heap() = default;
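// MaxReserved() bounds the total memory the heap may reserve: two semi-spaces
// (from-space and to-space), the new large object space capped at one
// semi-space size, and the old generation limit.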
size_t Heap::MaxReserved() {
const size_t kMaxNewLargeObjectSpaceSize = max_semi_space_size_;
return static_cast<size_t>(2 * max_semi_space_size_ +
kMaxNewLargeObjectSpaceSize +
max_old_generation_size());
}
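// Derives a semi-space size from the old generation size via
// kOldGenerationToSemiSpaceRatio (or its low-memory variant), clamps the
// result to [kMinSemiSpaceSize, kMaxSemiSpaceSize], rounds it up to full
// pages, and converts it to a young generation size.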
size_t Heap::YoungGenerationSizeFromOldGenerationSize(size_t old_generation) {
// Compute the semi space size and cap it.
size_t ratio = old_generation <= kOldGenerationLowMemory
? kOldGenerationToSemiSpaceRatioLowMemory
: kOldGenerationToSemiSpaceRatio;
size_t semi_space = old_generation / ratio;
semi_space = std::min({semi_space, kMaxSemiSpaceSize});
semi_space = std::max({semi_space, kMinSemiSpaceSize});
semi_space = RoundUp(semi_space, Page::kPageSize);
return YoungGenerationSizeFromSemiSpaceSize(semi_space);
}
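// Derives a heap limit from the amount of physical memory: the old generation
// gets a fixed fraction of physical memory (scaled by kHeapLimitMultiplier),
// clamped to [V8HeapTrait::kMinSize, MaxOldGenerationSize(physical_memory)],
// and a matching young generation size is added on top.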
size_t Heap::HeapSizeFromPhysicalMemory(uint64_t physical_memory) {
// Compute the old generation size and cap it.
uint64_t old_generation = physical_memory /
kPhysicalMemoryToOldGenerationRatio *
kHeapLimitMultiplier;
old_generation =
std::min(old_generation,
static_cast<uint64_t>(MaxOldGenerationSize(physical_memory)));
old_generation =
std::max({old_generation, static_cast<uint64_t>(V8HeapTrait::kMinSize)});
old_generation = RoundUp(old_generation, Page::kPageSize);
size_t young_generation = YoungGenerationSizeFromOldGenerationSize(
static_cast<size_t>(old_generation));
return static_cast<size_t>(old_generation) + young_generation;
}
void Heap::GenerationSizesFromHeapSize(size_t heap_size,
size_t* young_generation_size,
size_t* old_generation_size) {
// Initialize values for the case when the given heap size is too small.
*young_generation_size = 0;
*old_generation_size = 0;
// Binary search for the largest old generation size that fits to the given
// heap limit considering the correspondingly sized young generation.
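// This assumes old_generation + young_generation is non-decreasing in
// old_generation; `lower` tracks the largest old generation size found to
// fit so far.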
size_t lower = 0, upper = heap_size;
while (lower + 1 < upper) {
size_t old_generation = lower + (upper - lower) / 2;
size_t young_generation =
YoungGenerationSizeFromOldGenerationSize(old_generation);
if (old_generation + young_generation <= heap_size) {
// This size configuration fits into the given heap limit.
*young_generation_size = young_generation;
*old_generation_size = old_generation;
lower = old_generation;
} else {
upper = old_generation;
}
}
}
size_t Heap::MinYoungGenerationSize() {
return YoungGenerationSizeFromSemiSpaceSize(kMinSemiSpaceSize);
}
size_t Heap::MinOldGenerationSize() {
size_t paged_space_count =
LAST_GROWABLE_PAGED_SPACE - FIRST_GROWABLE_PAGED_SPACE + 1;
return paged_space_count * Page::kPageSize;
}
size_t Heap::AllocatorLimitOnMaxOldGenerationSize() {
#ifdef V8_COMPRESS_POINTERS
// Isolate and the young generation are also allocated on the heap.
return kPtrComprCageReservationSize -
YoungGenerationSizeFromSemiSpaceSize(kMaxSemiSpaceSize) -
RoundUp(sizeof(Isolate), size_t{1} << kPageSizeBits);
#else
return std::numeric_limits<size_t>::max();
#endif
}
size_t Heap::MaxOldGenerationSize(uint64_t physical_memory) {
size_t max_size = V8HeapTrait::kMaxSize;
// Finch experiment: Increase the heap size from 2GB to 4GB for 64-bit
// systems with physical memory bigger than 16GB. The physical memory is
// rounded to the nearest GB before the comparison.
constexpr bool x64_bit = Heap::kHeapLimitMultiplier >= 2;
if (FLAG_huge_max_old_generation_size && x64_bit &&
(physical_memory + 512 * MB) / GB >= 16) {
DCHECK_EQ(max_size / GB, 2);
max_size *= 2;
}
return std::min(max_size, AllocatorLimitOnMaxOldGenerationSize());
}
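// The young generation consists of two semi-spaces (from- and to-space) plus
// the new large object space, which is sized at
// kNewLargeObjectSpaceToSemiSpaceRatio semi-spaces. Hence
// young_generation_size = semi_space_size * (2 + ratio); the second function
// below inverts this.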
size_t Heap::YoungGenerationSizeFromSemiSpaceSize(size_t semi_space_size) {
return semi_space_size * (2 + kNewLargeObjectSpaceToSemiSpaceRatio);
}
size_t Heap::SemiSpaceSizeFromYoungGenerationSize(
size_t young_generation_size) {
return young_generation_size / (2 + kNewLargeObjectSpaceToSemiSpaceRatio);
}
size_t Heap::Capacity() {
if (!HasBeenSetUp()) return 0;
if (FLAG_enable_third_party_heap) return tp_heap_->Capacity();
return NewSpaceCapacity() + OldGenerationCapacity();
}
size_t Heap::OldGenerationCapacity() {
if (!HasBeenSetUp()) return 0;
PagedSpaceIterator spaces(this);
size_t total = 0;
for (PagedSpace* space = spaces.Next(); space != nullptr;
space = spaces.Next()) {
total += space->Capacity();
}
return total + lo_space_->SizeOfObjects() + code_lo_space_->SizeOfObjects();
}
size_t Heap::CommittedOldGenerationMemory() {
if (!HasBeenSetUp()) return 0;
PagedSpaceIterator spaces(this);
size_t total = 0;
for (PagedSpace* space = spaces.Next(); space != nullptr;
space = spaces.Next()) {
total += space->CommittedMemory();
}
return total + lo_space_->Size() + code_lo_space_->Size();
}
size_t Heap::CommittedMemoryOfUnmapper() {
if (!HasBeenSetUp()) return 0;
return memory_allocator()->unmapper()->CommittedBufferedMemory();
}
size_t Heap::CommittedMemory() {
if (!HasBeenSetUp()) return 0;
size_t new_space_committed = new_space_ ? new_space_->CommittedMemory() : 0;
size_t new_lo_space_committed = new_lo_space_ ? new_lo_space_->Size() : 0;
return new_space_committed + new_lo_space_committed +
CommittedOldGenerationMemory();
}
size_t Heap::CommittedPhysicalMemory() {
if (!HasBeenSetUp()) return 0;
size_t total = 0;
for (SpaceIterator it(this); it.HasNext();) {
total += it.Next()->CommittedPhysicalMemory();
}
return total;
}
size_t Heap::CommittedMemoryExecutable() {
if (!HasBeenSetUp()) return 0;
return static_cast<size_t>(memory_allocator()->SizeExecutable());
}
void Heap::UpdateMaximumCommitted() {
if (!HasBeenSetUp()) return;
const size_t current_committed_memory = CommittedMemory();
if (current_committed_memory > maximum_committed_) {
maximum_committed_ = current_committed_memory;
}
}
size_t Heap::Available() {
if (!HasBeenSetUp()) return 0;
size_t total = 0;
for (SpaceIterator it(this); it.HasNext();) {
total += it.Next()->Available();
}
total += memory_allocator()->Available();
return total;
}
bool Heap::CanExpandOldGeneration(size_t size) {
if (force_oom_ || force_gc_on_next_allocation_) return false;
if (OldGenerationCapacity() + size > max_old_generation_size()) return false;
// The OldGenerationCapacity does not account for compaction spaces used
// during evacuation. Ensure that expanding the old generation does not push
// the total allocated memory size over the maximum heap size.
return memory_allocator()->Size() + size <= MaxReserved();
}
bool Heap::CanExpandOldGenerationBackground(LocalHeap* local_heap,
size_t size) {
if (force_oom_) return false;
// When the heap is tearing down, GC requests from background threads are not
// served and the threads are allowed to expand the heap to avoid OOM.
return gc_state() == TEAR_DOWN || IsMainThreadParked(local_heap) ||
memory_allocator()->Size() + size <= MaxReserved();
}
bool Heap::CanPromoteYoungAndExpandOldGeneration(size_t size) {
size_t new_space_capacity = NewSpaceCapacity();
size_t new_lo_space_capacity = new_lo_space_ ? new_lo_space_->Size() : 0;
// Over-estimate the new space size using capacity to allow some slack.
return CanExpandOldGeneration(size + new_space_capacity +
new_lo_space_capacity);
}
bool Heap::HasBeenSetUp() const {
// We will always have an old space when the heap is set up.
return old_space_ != nullptr;
}
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
const char** reason) {
// Is global GC requested?
if (space != NEW_SPACE && space != NEW_LO_SPACE) {
isolate_->counters()->gc_compactor_caused_by_request()->Increment();
*reason = "GC in old space requested";
return GarbageCollector::MARK_COMPACTOR;
}
if (FLAG_gc_global || ShouldStressCompaction() || !new_space()) {
*reason = "GC in old space forced by flags";
return GarbageCollector::MARK_COMPACTOR;
}
if (incremental_marking()->NeedsFinalization() &&
AllocationLimitOvershotByLargeMargin()) {
*reason = "Incremental marking needs finalization";
return GarbageCollector::MARK_COMPACTOR;
}
if (FLAG_separate_gc_phases && incremental_marking()->IsMarking()) {
// TODO(v8:12503): Remove previous condition when flag gets removed.
*reason = "Incremental marking forced finalization";
return GarbageCollector::MARK_COMPACTOR;
}
if (!CanPromoteYoungAndExpandOldGeneration(0)) {
isolate_->counters()
->gc_compactor_caused_by_oldspace_exhaustion()
->Increment();
*reason = "scavenge might not succeed";
return GarbageCollector::MARK_COMPACTOR;
}
DCHECK(!FLAG_single_generation);
DCHECK(!FLAG_gc_global);
// Default
*reason = nullptr;
return YoungGenerationCollector();
}
void Heap::SetGCState(HeapState state) {
gc_state_.store(state, std::memory_order_relaxed);
}
bool Heap::IsGCWithoutStack() const {
return local_embedder_heap_tracer()->embedder_stack_state() ==
cppgc::EmbedderStackState::kNoHeapPointers;
}
void Heap::PrintShortHeapStatistics() {
if (!FLAG_trace_gc_verbose) return;
PrintIsolate(isolate_,
"Memory allocator, used: %6zu KB,"
" available: %6zu KB\n",
memory_allocator()->Size() / KB,
memory_allocator()->Available() / KB);
PrintIsolate(isolate_,
"Read-only space, used: %6zu KB"
", available: %6zu KB"
", committed: %6zu KB\n",
read_only_space_->Size() / KB, size_t{0},
read_only_space_->CommittedMemory() / KB);
PrintIsolate(isolate_,
"New space, used: %6zu KB"
", available: %6zu KB"
", committed: %6zu KB\n",
NewSpaceSize() / KB, new_space_->Available() / KB,
new_space_->CommittedMemory() / KB);
PrintIsolate(isolate_,
"New large object space, used: %6zu KB"
", available: %6zu KB"
", committed: %6zu KB\n",
new_lo_space_->SizeOfObjects() / KB,
new_lo_space_->Available() / KB,
new_lo_space_->CommittedMemory() / KB);
PrintIsolate(isolate_,
"Old space, used: %6zu KB"
", available: %6zu KB"
", committed: %6zu KB\n",
old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
old_space_->CommittedMemory() / KB);
PrintIsolate(isolate_,
"Code space, used: %6zu KB"
", available: %6zu KB"
", committed: %6zu KB\n",
code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
code_space_->CommittedMemory() / KB);
if (map_space()) {
PrintIsolate(isolate_,
"Map space, used: %6zu KB"
", available: %6zu KB"
", committed: %6zu KB\n",
map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
map_space_->CommittedMemory() / KB);
}
PrintIsolate(isolate_,
"Large object space, used: %6zu KB"
", available: %6zu KB"
", committed: %6zu KB\n",
lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
lo_space_->CommittedMemory() / KB);
PrintIsolate(isolate_,
"Code large object space, used: %6zu KB"
", available: %6zu KB"
", committed: %6zu KB\n",
code_lo_space_->SizeOfObjects() / KB,
code_lo_space_->Available() / KB,
code_lo_space_->CommittedMemory() / KB);
ReadOnlySpace* const ro_space = read_only_space_;
PrintIsolate(isolate_,
"All spaces, used: %6zu KB"
", available: %6zu KB"
", committed: %6zu KB\n",
(this->SizeOfObjects() + ro_space->Size()) / KB,
(this->Available()) / KB,
(this->CommittedMemory() + ro_space->CommittedMemory()) / KB);
PrintIsolate(isolate_,
"Unmapper buffering %zu chunks of committed: %6zu KB\n",
memory_allocator()->unmapper()->NumberOfCommittedChunks(),
CommittedMemoryOfUnmapper() / KB);
PrintIsolate(isolate_, "External memory reported: %6" PRId64 " KB\n",
external_memory_.total() / KB);
PrintIsolate(isolate_, "Backing store memory: %6" PRIu64 " KB\n",
backing_store_bytes() / KB);
PrintIsolate(isolate_, "External memory global %zu KB\n",
external_memory_callback_() / KB);
PrintIsolate(isolate_, "Total time spent in GC : %.1f ms\n",
total_gc_time_ms_);
}
void Heap::PrintFreeListsStats() {
DCHECK(FLAG_trace_gc_freelists);
if (FLAG_trace_gc_freelists_verbose) {
PrintIsolate(isolate_,
"Freelists statistics per Page: "
"[category: length || total free bytes]\n");
}
std::vector<int> categories_lengths(
old_space()->free_list()->number_of_categories(), 0);
std::vector<size_t> categories_sums(
old_space()->free_list()->number_of_categories(), 0);
unsigned int pageCnt = 0;
// This loop computes freelist lengths and sums.
// If FLAG_trace_gc_freelists_verbose is enabled, it also prints
// the stats of each FreeListCategory of each Page.
for (Page* page : *old_space()) {
std::ostringstream out_str;
if (FLAG_trace_gc_freelists_verbose) {
out_str << "Page " << std::setw(4) << pageCnt;
}
for (int cat = kFirstCategory;
cat <= old_space()->free_list()->last_category(); cat++) {
FreeListCategory* free_list =
page->free_list_category(static_cast<FreeListCategoryType>(cat));
int length = free_list->FreeListLength();
size_t sum = free_list->SumFreeList();
if (FLAG_trace_gc_freelists_verbose) {
out_str << "[" << cat << ": " << std::setw(4) << length << " || "
<< std::setw(6) << sum << " ]"
<< (cat == old_space()->free_list()->last_category() ? "\n"
: ", ");
}
categories_lengths[cat] += length;
categories_sums[cat] += sum;
}
if (FLAG_trace_gc_freelists_verbose) {
PrintIsolate(isolate_, "%s", out_str.str().c_str());
}
pageCnt++;
}
// Print statistics about old_space (pages, free/wasted/used memory...).
PrintIsolate(
isolate_,
"%d pages. Free space: %.1f MB (waste: %.2f). "
"Usage: %.1f/%.1f (MB) -> %.2f%%.\n",
pageCnt, static_cast<double>(old_space_->Available()) / MB,
static_cast<double>(old_space_->Waste()) / MB,
static_cast<double>(old_space_->Size()) / MB,
static_cast<double>(old_space_->Capacity()) / MB,
static_cast<double>(old_space_->Size()) / old_space_->Capacity() * 100);
// Print global statistics of each FreeListCategory (length & sum).
PrintIsolate(isolate_,
"FreeLists global statistics: "
"[category: length || total free KB]\n");
std::ostringstream out_str;
for (int cat = kFirstCategory;
cat <= old_space()->free_list()->last_category(); cat++) {
out_str << "[" << cat << ": " << categories_lengths[cat] << " || "
<< std::fixed << std::setprecision(2)
<< static_cast<double>(categories_sums[cat]) / KB << " KB]"
<< (cat == old_space()->free_list()->last_category() ? "\n" : ", ");
}
PrintIsolate(isolate_, "%s", out_str.str().c_str());
}
void Heap::DumpJSONHeapStatistics(std::stringstream& stream) {
HeapStatistics stats;
reinterpret_cast<v8::Isolate*>(isolate())->GetHeapStatistics(&stats);
// clang-format off
#define DICT(s) "{" << s << "}"
#define LIST(s) "[" << s << "]"
#define QUOTE(s) "\"" << s << "\""
#define MEMBER(s) QUOTE(s) << ":"
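// The macros above expand to stream-insertion expressions producing the
// corresponding JSON punctuation, so the statements below read roughly like
// the JSON document they emit.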
auto SpaceStatistics = [this](int space_index) {
HeapSpaceStatistics space_stats;
reinterpret_cast<v8::Isolate*>(isolate())->GetHeapSpaceStatistics(
&space_stats, space_index);
std::stringstream stream;
stream << DICT(
MEMBER("name")
<< QUOTE(BaseSpace::GetSpaceName(
static_cast<AllocationSpace>(space_index)))
<< ","
MEMBER("size") << space_stats.space_size() << ","
MEMBER("used_size") << space_stats.space_used_size() << ","
MEMBER("available_size") << space_stats.space_available_size() << ","
MEMBER("physical_size") << space_stats.physical_space_size());
return stream.str();
};
stream << DICT(
MEMBER("isolate") << QUOTE(reinterpret_cast<void*>(isolate())) << ","
MEMBER("id") << gc_count() << ","
MEMBER("time_ms") << isolate()->time_millis_since_init() << ","
MEMBER("total_heap_size") << stats.total_heap_size() << ","
MEMBER("total_heap_size_executable")
<< stats.total_heap_size_executable() << ","
MEMBER("total_physical_size") << stats.total_physical_size() << ","
MEMBER("total_available_size") << stats.total_available_size() << ","
MEMBER("used_heap_size") << stats.used_heap_size() << ","
MEMBER("heap_size_limit") << stats.heap_size_limit() << ","
MEMBER("malloced_memory") << stats.malloced_memory() << ","
MEMBER("external_memory") << stats.external_memory() << ","
MEMBER("peak_malloced_memory") << stats.peak_malloced_memory() << ","
MEMBER("spaces") << LIST(
SpaceStatistics(RO_SPACE) << "," <<
SpaceStatistics(NEW_SPACE) << "," <<
SpaceStatistics(OLD_SPACE) << "," <<
SpaceStatistics(CODE_SPACE) << "," <<
SpaceStatistics(MAP_SPACE) << "," <<
SpaceStatistics(LO_SPACE) << "," <<
SpaceStatistics(CODE_LO_SPACE) << "," <<
SpaceStatistics(NEW_LO_SPACE)));
#undef DICT
#undef LIST
#undef QUOTE
#undef MEMBER
// clang-format on
}
void Heap::ReportStatisticsAfterGC() {
for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
++i) {
isolate()->CountUsage(static_cast<v8::Isolate::UseCounterFeature>(i),
deferred_counters_[i]);
deferred_counters_[i] = 0;
}
}
class Heap::AllocationTrackerForDebugging final
: public HeapObjectAllocationTracker {
public:
static bool IsNeeded() {
return FLAG_verify_predictable || FLAG_fuzzer_gc_analysis ||
(FLAG_trace_allocation_stack_interval > 0);
}
explicit AllocationTrackerForDebugging(Heap* heap) : heap_(heap) {
CHECK(IsNeeded());
heap_->AddHeapObjectAllocationTracker(this);
}
~AllocationTrackerForDebugging() final {
heap_->RemoveHeapObjectAllocationTracker(this);
if (FLAG_verify_predictable || FLAG_fuzzer_gc_analysis) {
PrintAllocationsHash();
}
}
void AllocationEvent(Address addr, int size) final {
if (FLAG_verify_predictable) {
allocations_count_.fetch_add(1, std::memory_order_relaxed);
// Advance synthetic time by making a time request.
heap_->MonotonicallyIncreasingTimeInMs();
UpdateAllocationsHash(HeapObject::FromAddress(addr));
UpdateAllocationsHash(size);
if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
PrintAllocationsHash();
}
} else if (FLAG_fuzzer_gc_analysis) {
allocations_count_.fetch_add(1, std::memory_order_relaxed);
} else if (FLAG_trace_allocation_stack_interval > 0) {
allocations_count_.fetch_add(1, std::memory_order_relaxed);
if (allocations_count_ % FLAG_trace_allocation_stack_interval == 0) {
heap_->isolate()->PrintStack(stdout, Isolate::kPrintStackConcise);
}
}
}
void MoveEvent(Address source, Address target, int size) final {
if (FLAG_verify_predictable) {
allocations_count_.fetch_add(1, std::memory_order_relaxed);
// Advance synthetic time by making a time request.
heap_->MonotonicallyIncreasingTimeInMs();
UpdateAllocationsHash(HeapObject::FromAddress(source));
UpdateAllocationsHash(HeapObject::FromAddress(target));
UpdateAllocationsHash(size);
if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
PrintAllocationsHash();
}
} else if (FLAG_fuzzer_gc_analysis) {
allocations_count_.fetch_add(1, std::memory_order_relaxed);
}
}
void UpdateObjectSizeEvent(Address, int) final {}
private:
void UpdateAllocationsHash(HeapObject object) {
Address object_address = object.address();
MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
AllocationSpace allocation_space = memory_chunk->owner_identity();
STATIC_ASSERT(kSpaceTagSize + kPageSizeBits <= 32);
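// Pack the chunk-relative offset of the object into the low kPageSizeBits
// bits and the space tag into the bits above; the static assert guarantees
// the combination fits into 32 bits.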
uint32_t value =
static_cast<uint32_t>(object_address - memory_chunk->address()) |
(static_cast<uint32_t>(allocation_space) << kPageSizeBits);
UpdateAllocationsHash(value);
}
void UpdateAllocationsHash(uint32_t value) {
const uint16_t c1 = static_cast<uint16_t>(value);
const uint16_t c2 = static_cast<uint16_t>(value >> 16);
raw_allocations_hash_ =
StringHasher::AddCharacterCore(raw_allocations_hash_, c1);
raw_allocations_hash_ =
StringHasher::AddCharacterCore(raw_allocations_hash_, c2);
}
void PrintAllocationsHash() {
uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
PrintF("\n### Allocations = %zu, hash = 0x%08x\n",
allocations_count_.load(std::memory_order_relaxed), hash);
}
Heap* const heap_;
// Count of all allocations performed through C++ bottlenecks. This needs to
// be atomic as objects are moved in parallel in the GC which counts as
// allocations.
std::atomic<size_t> allocations_count_{0};
// Running hash over allocations performed.
uint32_t raw_allocations_hash_ = 0;
};
void Heap::AddHeapObjectAllocationTracker(
HeapObjectAllocationTracker* tracker) {
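// Trackers must observe every allocation, so inline (generated-code)
// allocation is disabled while at least one tracker is installed; it is
// re-enabled in RemoveHeapObjectAllocationTracker() when the last tracker is
// removed.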
if (allocation_trackers_.empty() && FLAG_inline_new) {
DisableInlineAllocation();
}
allocation_trackers_.push_back(tracker);
}
void Heap::RemoveHeapObjectAllocationTracker(
HeapObjectAllocationTracker* tracker) {
allocation_trackers_.erase(std::remove(allocation_trackers_.begin(),
allocation_trackers_.end(), tracker),
allocation_trackers_.end());
if (allocation_trackers_.empty() && FLAG_inline_new) {
EnableInlineAllocation();
}
}
void Heap::AddRetainingPathTarget(Handle<HeapObject> object,
RetainingPathOption option) {
if (!FLAG_track_retaining_path) {
PrintF("Retaining path tracking requires --track-retaining-path\n");
} else {
Handle<WeakArrayList> array(retaining_path_targets(), isolate());
int index = array->length();
array = WeakArrayList::AddToEnd(isolate(), array,
MaybeObjectHandle::Weak(object));
set_retaining_path_targets(*array);
DCHECK_EQ(array->length(), index + 1);
retaining_path_target_option_[index] = option;
}
}
bool Heap::IsRetainingPathTarget(HeapObject object,
RetainingPathOption* option) {
WeakArrayList targets = retaining_path_targets();
int length = targets.length();
MaybeObject object_to_check = HeapObjectReference::Weak(object);
for (int i = 0; i < length; i++) {
MaybeObject target = targets.Get(i);
DCHECK(target->IsWeakOrCleared());
if (target == object_to_check) {
DCHECK(retaining_path_target_option_.count(i));
*option = retaining_path_target_option_[i];
return true;
}
}
return false;
}
void Heap::PrintRetainingPath(HeapObject target, RetainingPathOption option) {
PrintF("\n\n\n");
PrintF("#################################################\n");
PrintF("Retaining path for %p:\n", reinterpret_cast<void*>(target.ptr()));
HeapObject object = target;
std::vector<std::pair<HeapObject, bool>> retaining_path;
Root root = Root::kUnknown;
bool ephemeron = false;
while (true) {
retaining_path.push_back(std::make_pair(object, ephemeron));
if (option == RetainingPathOption::kTrackEphemeronPath &&
ephemeron_retainer_.count(object)) {
object = ephemeron_retainer_[object];
ephemeron = true;
} else if (retainer_.count(object)) {
object = retainer_[object];
ephemeron = false;
} else {
if (retaining_root_.count(object)) {
root = retaining_root_[object];
}
break;
}
}
int distance = static_cast<int>(retaining_path.size());
for (auto node : retaining_path) {
HeapObject node_object = node.first;
bool node_ephemeron = node.second;
PrintF("\n");
PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
PrintF("Distance from root %d%s: ", distance,
node_ephemeron ? " (ephemeron)" : "");
node_object.ShortPrint();
PrintF("\n");
#ifdef OBJECT_PRINT
node_object.Print();
PrintF("\n");
#endif
--distance;
}
PrintF("\n");
PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
PrintF("Root: %s\n", RootVisitor::RootName(root));
PrintF("-------------------------------------------------\n");
}
void UpdateRetainersMapAfterScavenge(
std::unordered_map<HeapObject, HeapObject, Object::Hasher>* map) {
std::unordered_map<HeapObject, HeapObject, Object::Hasher> updated_map;
for (auto pair : *map) {
HeapObject object = pair.first;
HeapObject retainer = pair.second;
if (Heap::InFromPage(object)) {
MapWord map_word = object.map_word(kRelaxedLoad);
if (!map_word.IsForwardingAddress()) continue;
object = map_word.ToForwardingAddress();
}
if (Heap::InFromPage(retainer)) {
MapWord map_word = retainer.map_word(kRelaxedLoad);
if (!map_word.IsForwardingAddress()) continue;
retainer = map_word.ToForwardingAddress();
}
updated_map[object] = retainer;
}
*map = std::move(updated_map);
}
void Heap::UpdateRetainersAfterScavenge() {
if (!incremental_marking()->IsMarking()) return;
// This isn't supported for Minor MC.
DCHECK(!FLAG_minor_mc);
UpdateRetainersMapAfterScavenge(&retainer_);
UpdateRetainersMapAfterScavenge(&ephemeron_retainer_);
std::unordered_map<HeapObject, Root, Object::Hasher> updated_retaining_root;
for (auto pair : retaining_root_) {
HeapObject object = pair.first;
if (Heap::InFromPage(object)) {
MapWord map_word = object.map_word(kRelaxedLoad);
if (!map_word.IsForwardingAddress()) continue;
object = map_word.ToForwardingAddress();
}
updated_retaining_root[object] = pair.second;
}
retaining_root_ = std::move(updated_retaining_root);
}
void Heap::AddRetainer(HeapObject retainer, HeapObject object) {
if (retainer_.count(object)) return;
retainer_[object] = retainer;
RetainingPathOption option = RetainingPathOption::kDefault;
if (IsRetainingPathTarget(object, &option)) {
// Check if the retaining path was already printed in
// AddEphemeronRetainer().
if (ephemeron_retainer_.count(object) == 0 ||
option == RetainingPathOption::kDefault) {
PrintRetainingPath(object, option);
}
}
}
void Heap::AddEphemeronRetainer(HeapObject retainer, HeapObject object) {
if (ephemeron_retainer_.count(object)) return;
ephemeron_retainer_[object] = retainer;
RetainingPathOption option = RetainingPathOption::kDefault;
if (IsRetainingPathTarget(object, &option) &&
option == RetainingPathOption::kTrackEphemeronPath) {
// Check if the retaining path was already printed in AddRetainer().
if (retainer_.count(object) == 0) {
PrintRetainingPath(object, option);
}
}
}
void Heap::AddRetainingRoot(Root root, HeapObject object) {
if (retaining_root_.count(object)) return;
retaining_root_[object] = root;
RetainingPathOption option = RetainingPathOption::kDefault;
if (IsRetainingPathTarget(object, &option)) {
PrintRetainingPath(object, option);
}
}
void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) {
deferred_counters_[feature]++;
}
bool Heap::UncommitFromSpace() { return new_space_->UncommitFromSpace(); }
void Heap::GarbageCollectionPrologue(
GarbageCollectionReason gc_reason,
const v8::GCCallbackFlags gc_callback_flags) {
TRACE_GC(tracer(), GCTracer::Scope::HEAP_PROLOGUE);
is_current_gc_forced_ = gc_callback_flags & v8::kGCCallbackFlagForced ||
current_gc_flags_ & kForcedGC ||
force_gc_on_next_allocation_;
is_current_gc_for_heap_profiler_ =
gc_reason == GarbageCollectionReason::kHeapProfiler;
if (force_gc_on_next_allocation_) force_gc_on_next_allocation_ = false;
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
heap_allocator_.UpdateAllocationTimeout();
#endif // V8_ENABLE_ALLOCATION_TIMEOUT
// There may be an allocation memento behind objects in new space. Upon
// evacuation of a non-full new space (or if we are on the last page) there
// may be uninitialized memory behind top. We fill the remainder of the page
// with a filler.
if (new_space()) new_space()->MakeLinearAllocationAreaIterable();
// Reset GC statistics.
promoted_objects_size_ = 0;
previous_semi_space_copied_object_size_ = semi_space_copied_object_size_;
semi_space_copied_object_size_ = 0;
nodes_died_in_new_space_ = 0;
nodes_copied_in_new_space_ = 0;
nodes_promoted_ = 0;
UpdateMaximumCommitted();
#ifdef DEBUG
DCHECK(!AllowGarbageCollection::IsAllowed());
DCHECK_EQ(gc_state(), NOT_IN_GC);
if (FLAG_gc_verbose) Print();
#endif // DEBUG
if (new_space_ && new_space_->IsAtMaximumCapacity()) {
maximum_size_scavenges_++;
} else {
maximum_size_scavenges_ = 0;
}
memory_allocator()->unmapper()->PrepareForGC();
}
void Heap::GarbageCollectionPrologueInSafepoint() {
TRACE_GC(tracer(), GCTracer::Scope::HEAP_PROLOGUE_SAFEPOINT);
gc_count_++;
if (new_space_) {
UpdateNewSpaceAllocationCounter();
CheckNewSpaceExpansionCriteria();
new_space_->ResetParkedAllocationBuffers();
}
}
void Heap::UpdateNewSpaceAllocationCounter() {
new_space_allocation_counter_ = NewSpaceAllocationCounter();
}
size_t Heap::NewSpaceAllocationCounter() {
return new_space_allocation_counter_ +
(new_space_ ? new_space()->AllocatedSinceLastGC() : 0);
}
size_t Heap::SizeOfObjects() {
size_t total = 0;
for (SpaceIterator it(this); it.HasNext();) {
total += it.Next()->SizeOfObjects();
}
return total;
}
size_t Heap::TotalGlobalHandlesSize() {
return isolate_->global_handles()->TotalSize();
}
size_t Heap::UsedGlobalHandlesSize() {
return isolate_->global_handles()->UsedSize();
}
void Heap::MergeAllocationSitePretenuringFeedback(
const PretenuringFeedbackMap& local_pretenuring_feedback) {
PtrComprCageBase cage_base(isolate());
AllocationSite site;
for (auto& site_and_count : local_pretenuring_feedback) {
site = site_and_count.first;
MapWord map_word = site.map_word(cage_base, kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
site = AllocationSite::cast(map_word.ToForwardingAddress());
}
// We have not validated the allocation site yet, since we have not
// dereferenced the site while collecting the feedback.
// This is an inlined check of AllocationMemento::IsValid.
if (!site.IsAllocationSite() || site.IsZombie()) continue;
const int value = static_cast<int>(site_and_count.second);
DCHECK_LT(0, value);
if (site.IncrementMementoFoundCount(value)) {
// For sites in the global map the count is accessed through the site.
global_pretenuring_feedback_.insert(std::make_pair(site, 0));
}
}
}
void Heap::AddAllocationObserversToAllSpaces(
AllocationObserver* observer, AllocationObserver* new_space_observer) {
DCHECK(observer && new_space_observer);
for (SpaceIterator it(this); it.HasNext();) {
Space* space = it.Next();
if (space == new_space()) {
space->AddAllocationObserver(new_space_observer);
} else {
space->AddAllocationObserver(observer);
}
}
}
void Heap::RemoveAllocationObserversFromAllSpaces(
AllocationObserver* observer, AllocationObserver* new_space_observer) {
DCHECK(observer && new_space_observer);
for (SpaceIterator it(this); it.HasNext();) {
Space* space = it.Next();
if (space == new_space()) {
space->RemoveAllocationObserver(new_space_observer);
} else {
space->RemoveAllocationObserver(observer);
}
}
}
void Heap::PublishPendingAllocations() {
if (FLAG_enable_third_party_heap) return;
if (new_space_) new_space_->MarkLabStartInitialized();
PagedSpaceIterator spaces(this);
for (PagedSpace* space = spaces.Next(); space != nullptr;
space = spaces.Next()) {
space->MoveOriginalTopForward();
}
lo_space_->ResetPendingObject();
if (new_lo_space_) new_lo_space_->ResetPendingObject();
code_lo_space_->ResetPendingObject();
}
namespace {
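// Pretenuring decisions form a small state machine over
// AllocationSite::PretenureDecision: from kUndecided or kMaybeTenure, a high
// survival ratio during a maximum-capacity scavenge moves the site to kTenure
// (requiring deoptimization of dependent code), a high ratio otherwise moves
// it to kMaybeTenure, and a low ratio moves it to kDontTenure.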
inline bool MakePretenureDecision(
AllocationSite site, AllocationSite::PretenureDecision current_decision,
double ratio, bool maximum_size_scavenge) {
// Here we just allow state transitions from undecided or maybe tenure
// to don't tenure, maybe tenure, or tenure.
if ((current_decision == AllocationSite::kUndecided ||
current_decision == AllocationSite::kMaybeTenure)) {
if (ratio >= AllocationSite::kPretenureRatio) {
// We just transition into tenure state when the semi-space was at
// maximum capacity.
if (maximum_size_scavenge) {
site.set_deopt_dependent_code(true);
site.set_pretenure_decision(AllocationSite::kTenure);
// Currently we just need to deopt when we make a state transition to
// tenure.
return true;
}
site.set_pretenure_decision(AllocationSite::kMaybeTenure);
} else {
site.set_pretenure_decision(AllocationSite::kDontTenure);
}
}
return false;
}
// Clear feedback calculation fields until the next GC.
inline void ResetPretenuringFeedback(AllocationSite site) {
site.set_memento_found_count(0);
site.set_memento_create_count(0);
}
inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite site,
bool maximum_size_scavenge) {
bool deopt = false;
int create_count = site.memento_create_count();
int found_count = site.memento_found_count();
bool minimum_mementos_created =
create_count >= AllocationSite::kPretenureMinimumCreated;
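// The ratio estimates the fraction of objects allocated at this site that
// survived the last scavenge: mementos found during GC over mementos created.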
double ratio = minimum_mementos_created || FLAG_trace_pretenuring_statistics
? static_cast<double>(found_count) / create_count
: 0.0;
AllocationSite::PretenureDecision current_decision =
site.pretenure_decision();
if (minimum_mementos_created) {
deopt = MakePretenureDecision(site, current_decision, ratio,
maximum_size_scavenge);
}
if (FLAG_trace_pretenuring_statistics) {
PrintIsolate(isolate,
"pretenuring: AllocationSite(%p): (created, found, ratio) "
"(%d, %d, %f) %s => %s\n",
reinterpret_cast<void*>(site.ptr()), create_count, found_count,
ratio, site.PretenureDecisionName(current_decision),
site.PretenureDecisionName(site.pretenure_decision()));
}
ResetPretenuringFeedback(site);
return deopt;
}
bool PretenureAllocationSiteManually(Isolate* isolate, AllocationSite site) {
AllocationSite::PretenureDecision current_decision =
site.pretenure_decision();
bool deopt = true;
if (current_decision == AllocationSite::kUndecided ||
current_decision == AllocationSite::kMaybeTenure) {
site.set_deopt_dependent_code(true);
site.set_pretenure_decision(AllocationSite::kTenure);
} else {
deopt = false;
}
if (FLAG_trace_pretenuring_statistics) {
PrintIsolate(isolate,
"pretenuring manually requested: AllocationSite(%p): "
"%s => %s\n",
reinterpret_cast<void*>(site.ptr()),
site.PretenureDecisionName(current_decision),
site.PretenureDecisionName(site.pretenure_decision()));
}
ResetPretenuringFeedback(site);
return deopt;
}
} // namespace
void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite site) {
global_pretenuring_feedback_.erase(site);
}
bool Heap::DeoptMaybeTenuredAllocationSites() {
return new_space_ && new_space_->IsAtMaximumCapacity() &&
maximum_size_scavenges_ == 0;
}
void Heap::ProcessPretenuringFeedback() {
bool trigger_deoptimization = false;
if (FLAG_allocation_site_pretenuring) {
int tenure_decisions = 0;
int dont_tenure_decisions = 0;
int allocation_mementos_found = 0;
int allocation_sites = 0;
int active_allocation_sites = 0;
AllocationSite site;
// Step 1: Digest feedback for recorded allocation sites.
bool maximum_size_scavenge = MaximumSizeScavenge();
for (auto& site_and_count : global_pretenuring_feedback_) {
allocation_sites++;
site = site_and_count.first;
// The count is always accessed through the site.
DCHECK_EQ(0, site_and_count.second);
int found_count = site.memento_found_count();
// An entry in the storage does not imply that the count is > 0 because
// allocation sites might have been reset due to too many objects dying
// in old space.
if (found_count > 0) {
DCHECK(site.IsAllocationSite());
active_allocation_sites++;
allocation_mementos_found += found_count;
if (DigestPretenuringFeedback(isolate_, site, maximum_size_scavenge)) {
trigger_deoptimization = true;
}
if (site.GetAllocationType() == AllocationType::kOld) {
tenure_decisions++;
} else {
dont_tenure_decisions++;
}
}
}
// Step 2: Pretenure allocation sites for manual requests.
if (allocation_sites_to_pretenure_) {
while (!allocation_sites_to_pretenure_->empty()) {
auto pretenure_site = allocation_sites_to_pretenure_->Pop();
if (PretenureAllocationSiteManually(isolate_, pretenure_site)) {
trigger_deoptimization = true;
}
}
allocation_sites_to_pretenure_.reset();
}
// Step 3: Deopt maybe tenured allocation sites if necessary.
bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
if (deopt_maybe_tenured) {
ForeachAllocationSite(
allocation_sites_list(),
[&allocation_sites, &trigger_deoptimization](AllocationSite site) {
DCHECK(site.IsAllocationSite());
allocation_sites++;
if (site.IsMaybeTenure()) {
site.set_deopt_dependent_code(true);
trigger_deoptimization = true;
}
});
}
if (trigger_deoptimization) {
isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
}
if (FLAG_trace_pretenuring_statistics &&
(allocation_mementos_found > 0 || tenure_decisions > 0 ||
dont_tenure_decisions > 0)) {
PrintIsolate(isolate(),
"pretenuring: deopt_maybe_tenured=%d visited_sites=%d "
"active_sites=%d "
"mementos=%d tenured=%d not_tenured=%d\n",
deopt_maybe_tenured ? 1 : 0, allocation_sites,
active_allocation_sites, allocation_mementos_found,
tenure_decisions, dont_tenure_decisions);
}
global_pretenuring_feedback_.clear();
global_pretenuring_feedback_.reserve(kInitialFeedbackCapacity);
}
}
void Heap::PretenureAllocationSiteOnNextCollection(AllocationSite site) {
if (!allocation_sites_to_pretenure_) {
allocation_sites_to_pretenure_.reset(
new GlobalHandleVector<AllocationSite>(this));
}
allocation_sites_to_pretenure_->Push(site);
}
void Heap::InvalidateCodeDeoptimizationData(Code code) {
CodePageMemoryModificationScope modification_scope(code);
code.set_deoptimization_data(ReadOnlyRoots(this).empty_fixed_array());
}
void Heap::DeoptMarkedAllocationSites() {
// TODO(hpayer): If iterating over the allocation sites list becomes a
// performance issue, use a cache data structure in heap instead.
ForeachAllocationSite(allocation_sites_list(), [](AllocationSite site) {
if (site.deopt_dependent_code()) {
site.dependent_code().MarkCodeForDeoptimization(
DependentCode::kAllocationSiteTenuringChangedGroup);
site.set_deopt_dependent_code(false);
}
});
Deoptimizer::DeoptimizeMarkedCode(isolate_);
}
void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
if (collector == GarbageCollector::MARK_COMPACTOR) {
memory_pressure_level_.store(MemoryPressureLevel::kNone,
std::memory_order_relaxed);
}
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_SAFEPOINT);
safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
local_heap->InvokeGCEpilogueCallbacksInSafepoint();
});
#define UPDATE_COUNTERS_FOR_SPACE(space) \
isolate_->counters()->space##_bytes_available()->Set( \
static_cast<int>(space()->Available())); \
isolate_->counters()->space##_bytes_committed()->Set( \
static_cast<int>(space()->CommittedMemory())); \
isolate_->counters()->space##_bytes_used()->Set( \
static_cast<int>(space()->SizeOfObjects()));
#define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
if (space()->CommittedMemory() > 0) { \
isolate_->counters()->external_fragmentation_##space()->AddSample( \
static_cast<int>(100 - (space()->SizeOfObjects() * 100.0) / \
space()->CommittedMemory())); \
}
#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
UPDATE_COUNTERS_FOR_SPACE(space) \
UPDATE_FRAGMENTATION_FOR_SPACE(space)
if (new_space()) {
UPDATE_COUNTERS_FOR_SPACE(new_space)
}
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space)
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
if (map_space()) {
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
}
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
#undef UPDATE_COUNTERS_FOR_SPACE
#undef UPDATE_FRAGMENTATION_FOR_SPACE
#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
#ifdef DEBUG
// Old-to-new slot sets must be empty after each collection.
for (SpaceIterator it(this); it.HasNext();) {
Space* space = it.Next();
for (MemoryChunk* chunk = space->first_page(); chunk != space->last_page();
chunk = chunk->list_node().next())
DCHECK_NULL(chunk->invalidated_slots<OLD_TO_NEW>());
}
if (FLAG_print_global_handles) isolate_->global_handles()->Print();
if (FLAG_print_handles) PrintHandles();
if (FLAG_code_stats) ReportCodeStatistics("After GC");
if (FLAG_check_handle_count) CheckHandleCount();
#endif
if (Heap::ShouldZapGarbage() || FLAG_clear_free_memory) {
ZapFromSpace();
}
if (new_space()) {
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE);
ReduceNewSpaceSize();
}
// Remove CollectionRequested flag from main thread state, as the collection
// was just performed.
safepoint()->AssertActive();
LocalHeap::ThreadState old_state =
main_thread_local_heap()->state_.ClearCollectionRequested();
CHECK(old_state.IsRunning());
// Resume all threads waiting for the GC.
collection_barrier_->ResumeThreadsAwaitingCollection();
}
void Heap::GarbageCollectionEpilogue(GarbageCollector collector) {
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE);
AllowGarbageCollection for_the_rest_of_the_epilogue;
UpdateMaximumCommitted();
if (FLAG_track_retaining_path &&
collector == GarbageCollector::MARK_COMPACTOR) {
retainer_.clear();
ephemeron_retainer_.clear();
retaining_root_.clear();
}
isolate_->counters()->alive_after_last_gc()->Set(
static_cast<int>(SizeOfObjects()));
if (CommittedMemory() > 0) {
isolate_->counters()->external_fragmentation_total()->AddSample(
static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
isolate_->counters()->heap_sample_total_committed()->AddSample(
static_cast<int>(CommittedMemory() / KB));
isolate_->counters()->heap_sample_total_used()->AddSample(
static_cast<int>(SizeOfObjects() / KB));
if (map_space()) {
isolate_->counters()->heap_sample_map_space_committed()->AddSample(
static_cast<int>(map_space()->CommittedMemory() / KB));
}
isolate_->counters()->heap_sample_code_space_committed()->AddSample(
static_cast<int>(code_space()->CommittedMemory() / KB));
isolate_->counters()->heap_sample_maximum_committed()->AddSample(
static_cast<int>(MaximumCommittedMemory() / KB));
}
#ifdef DEBUG
ReportStatisticsAfterGC();
#endif // DEBUG
last_gc_time_ = MonotonicallyIncreasingTimeInMs();
}
GCCallbacksScope::GCCallbacksScope(Heap* heap) : heap_(heap) {
heap_->gc_callbacks_depth_++;
}
GCCallbacksScope::~GCCallbacksScope() { heap_->gc_callbacks_depth_--; }
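// True only for the outermost GCCallbacksScope; used to avoid invoking GC
// prologue/epilogue callbacks reentrantly when the callbacks themselves
// trigger another GC.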
bool GCCallbacksScope::CheckReenter() const {
return heap_->gc_callbacks_depth_ == 1;
}
void Heap::HandleGCRequest() {
if (IsStressingScavenge() && stress_scavenge_observer_->HasRequestedGC()) {
CollectAllGarbage(NEW_SPACE, GarbageCollectionReason::kTesting);
stress_scavenge_observer_->RequestedGCDone();
} else if (HighMemoryPressure()) {
incremental_marking()->reset_request_type();
CheckMemoryPressure();
} else if (CollectionRequested()) {
CheckCollectionRequested();
} else if (incremental_marking()->request_type() ==
IncrementalMarking::GCRequestType::COMPLETE_MARKING) {
incremental_marking()->reset_request_type();
CollectAllGarbage(current_gc_flags_,
GarbageCollectionReason::kFinalizeMarkingViaStackGuard,
current_gc_callback_flags_);
} else if (incremental_marking()->request_type() ==
IncrementalMarking::GCRequestType::FINALIZATION &&
incremental_marking()->IsMarking() &&
!incremental_marking()->finalize_marking_completed()) {
incremental_marking()->reset_request_type();
FinalizeIncrementalMarkingIncrementally(
GarbageCollectionReason::kFinalizeMarkingViaStackGuard);
}
}
void Heap::ScheduleScavengeTaskIfNeeded() {
DCHECK_NOT_NULL(scavenge_job_);
scavenge_job_->ScheduleTaskIfNeeded(this);
}
void Heap::CollectAllGarbage(int flags, GarbageCollectionReason gc_reason,
const v8::GCCallbackFlags gc_callback_flags) {
// Since we are ignoring the return value, the exact choice of space does
// not matter, so long as we do not specify NEW_SPACE, which would not
// cause a full GC.
set_current_gc_flags(flags);
CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags);
set_current_gc_flags(kNoGCFlags);
}
namespace {
intptr_t CompareWords(int size, HeapObject a, HeapObject b) {
int slots = size / kTaggedSize;
DCHECK_EQ(a.Size(), size);
DCHECK_EQ(b.Size(), size);
Tagged_t* slot_a = reinterpret_cast<Tagged_t*>(a.address());
Tagged_t* slot_b = reinterpret_cast<Tagged_t*>(b.address());
for (int i = 0; i < slots; i++) {
if (*slot_a != *slot_b) {
return *slot_a - *slot_b;
}
slot_a++;
slot_b++;
}
return 0;
}
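// Reports groups of byte-identical objects of a given size: sorts objects by
// content, groups equal neighbors in one linear scan, and prints the largest
// groups, stopping once a group's duplicated bytes fall below
// FLAG_trace_duplicate_threshold_kb.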
void ReportDuplicates(int size, std::vector<HeapObject>* objects) {
if (objects->size() == 0) return;
sort(objects->begin(), objects->end(), [size](HeapObject a, HeapObject b) {
intptr_t c = CompareWords(size, a, b);
if (c != 0) return c < 0;
return a < b;
});
std::vector<std::pair<int, HeapObject>> duplicates;
HeapObject current = (*objects)[0];
int count = 1;
for (size_t i = 1; i < objects->size(); i++) {
if (CompareWords(size, current, (*objects)[i]) == 0) {
count++;
} else {
if (count > 1) {
duplicates.push_back(std::make_pair(count - 1, current));
}
count = 1;
current = (*objects)[i];
}
}
if (count > 1) {
duplicates.push_back(std::make_pair(count - 1, current));
}
int threshold = FLAG_trace_duplicate_threshold_kb * KB;
sort(duplicates.begin(), duplicates.end());
for (auto it = duplicates.rbegin(); it != duplicates.rend(); ++it) {
int duplicate_bytes = it->first * size;
if (duplicate_bytes < threshold) break;
PrintF("%d duplicates of size %d each (%dKB)\n", it->first, size,
duplicate_bytes / KB);
PrintF("Sample object: ");
it->second.Print();
PrintF("============================\n");
}
}
} // anonymous namespace
void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
// Since we are ignoring the return value, the exact choice of space does
// not matter, so long as we do not specify NEW_SPACE, which would not
// cause a full GC.
// Major GC would invoke weak handle callbacks on weakly reachable
// handles, but won't collect weakly reachable objects until the next
// major GC. Therefore, if we collect aggressively and a weak handle callback
// has been invoked, we rerun a major GC to release objects which became
// garbage.
// Note: as weak callbacks can execute arbitrary code, we cannot
// hope that eventually there will be no weak callback invocations.
// Therefore stop recollecting after several attempts.
if (gc_reason == GarbageCollectionReason::kLastResort) {
InvokeNearHeapLimitCallback();
}
RCS_SCOPE(isolate(), RuntimeCallCounterId::kGC_Custom_AllAvailableGarbage);
// The optimizing compiler may be unnecessarily holding on to memory.
isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
isolate()->ClearSerializerData();
set_current_gc_flags(
kReduceMemoryFootprintMask |
(gc_reason == GarbageCollectionReason::kLowMemoryNotification ? kForcedGC
: 0));
isolate_->compilation_cache()->Clear();
const int kMaxNumberOfAttempts = 7;
const int kMinNumberOfAttempts = 2;
for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
if (!CollectGarbage(OLD_SPACE, gc_reason, kNoGCCallbackFlags) &&
attempt + 1 >= kMinNumberOfAttempts) {
break;
}
}
set_current_gc_flags(kNoGCFlags);
EagerlyFreeExternalMemory();
if (FLAG_trace_duplicate_threshold_kb) {
std::map<int, std::vector<HeapObject>> objects_by_size;
PagedSpaceIterator spaces(this);
for (PagedSpace* space = spaces.Next(); space != nullptr;
space = spaces.Next()) {
PagedSpaceObjectIterator it(this, space);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
objects_by_size[obj.Size()].push_back(obj);
}
}
{
LargeObjectSpaceObjectIterator it(lo_space());
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
objects_by_size[obj.Size()].push_back(obj);
}
}
for (auto it = objects_by_size.rbegin(); it != objects_by_size.rend();
++it) {
ReportDuplicates(it->first, &it->second);
}
}
}
void Heap::PreciseCollectAllGarbage(int flags,
GarbageCollectionReason gc_reason,
const GCCallbackFlags gc_callback_flags) {
if (!incremental_marking()->IsStopped()) {
FinalizeIncrementalMarkingAtomically(gc_reason);
}
CollectAllGarbage(flags, gc_reason, gc_callback_flags);
}
void Heap::ReportExternalMemoryPressure() {
const GCCallbackFlags kGCCallbackFlagsForExternalMemory =
static_cast<GCCallbackFlags>(
kGCCallbackFlagSynchronousPhantomCallbackProcessing |
kGCCallbackFlagCollectAllExternalMemory);
int64_t current = external_memory_.total();
int64_t baseline = external_memory_.low_since_mark_compact();
int64_t limit = external_memory_.limit();
TRACE_EVENT2(
"devtools.timeline,v8", "V8.ExternalMemoryPressure", "external_memory_mb",
static_cast<int>((current - baseline) / MB), "external_memory_limit_mb",
static_cast<int>((limit - baseline) / MB));
if (current > baseline + external_memory_hard_limit()) {
CollectAllGarbage(
kReduceMemoryFootprintMask,
GarbageCollectionReason::kExternalMemoryPressure,
static_cast<GCCallbackFlags>(kGCCallbackFlagCollectAllAvailableGarbage |
kGCCallbackFlagsForExternalMemory));
return;
}
if (incremental_marking()->IsStopped()) {
if (incremental_marking()->CanBeActivated()) {
StartIncrementalMarking(GCFlagsForIncrementalMarking(),
GarbageCollectionReason::kExternalMemoryPressure,
kGCCallbackFlagsForExternalMemory);
} else {
CollectAllGarbage(i::Heap::kNoGCFlags,
GarbageCollectionReason::kExternalMemoryPressure,
kGCCallbackFlagsForExternalMemory);
}
} else {
// Incremental marking is turned on and has already been started.
const double kMinStepSize = 5;
const double kMaxStepSize = 10;
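// Scale the marking step duration with how close external memory usage is to
// its limit, clamped to [kMinStepSize, kMaxStepSize] milliseconds.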
const double ms_step = std::min(
kMaxStepSize, std::max(kMinStepSize, static_cast<double>(current) /
limit * kMinStepSize));
const double deadline = MonotonicallyIncreasingTimeInMs() + ms_step;
// Extend the gc callback flags with external memory flags.
current_gc_callback_flags_ = static_cast<GCCallbackFlags>(
current_gc_callback_flags_ | kGCCallbackFlagsForExternalMemory);
incremental_marking()->AdvanceWithDeadline(
deadline, IncrementalMarking::GC_VIA_STACK_GUARD, StepOrigin::kV8);
}
}
int64_t Heap::external_memory_limit() { return external_memory_.limit(); }
Heap::DevToolsTraceEventScope::DevToolsTraceEventScope(Heap* heap,
const char* event_name,
const char* event_type)
: heap_(heap), event_name_(event_name) {
TRACE_EVENT_BEGIN2("devtools.timeline,v8", event_name_, "usedHeapSizeBefore",
heap_->SizeOfObjects(), "type", event_type);
}
Heap::DevToolsTraceEventScope::~DevToolsTraceEventScope() {
TRACE_EVENT_END1("devtools.timeline,v8", event_name_, "usedHeapSizeAfter",
heap_->SizeOfObjects());
}
static GCType GetGCTypeFromGarbageCollector(GarbageCollector collector) {
switch (collector) {
case GarbageCollector::MARK_COMPACTOR:
return kGCTypeMarkSweepCompact;
case GarbageCollector::SCAVENGER:
return kGCTypeScavenge;
case GarbageCollector::MINOR_MARK_COMPACTOR:
return kGCTypeMinorMarkCompact;
default:
UNREACHABLE();
}
}
bool Heap::CollectGarbage(AllocationSpace space,
GarbageCollectionReason gc_reason,
const v8::GCCallbackFlags gc_callback_flags) {
if (V8_UNLIKELY(!deserialization_complete_)) {
// During isolate initialization the heap always grows. GC is only requested
// if a new page allocation fails. In such a case we should crash with an
// out-of-memory error instead of performing GC, because the prologue/epilogue
// callbacks may see objects that are not yet deserialized.
CHECK(always_allocate());
FatalProcessOutOfMemory("GC during deserialization");
}
// CollectGarbage consists of three parts:
// 1. The prologue part which may execute callbacks. These callbacks may
// allocate and trigger another garbage collection.
// 2. The main garbage collection phase.
// 3. The epilogue part which may execute callbacks. These callbacks may
// allocate and trigger another garbage collection.
// Part 1: Invoke all callbacks which should happen before the actual garbage
// collection is triggered. Note that these callbacks may trigger another
// garbage collection since they may allocate.
DCHECK(AllowGarbageCollection::IsAllowed());
// Ensure that all pending phantom callbacks are invoked.
isolate()->global_handles()->InvokeSecondPassPhantomCallbacks();
const char* collector_reason = nullptr;
GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
GCType gc_type = GetGCTypeFromGarbageCollector(collector);
{
GCCallbacksScope scope(this);
// Temporarily override any embedder stack state as callbacks may create
// their own state on the stack and recursively trigger GC.
EmbedderStackStateScope embedder_scope(
this, EmbedderStackStateScope::kExplicitInvocation,
EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers);
if (scope.CheckReenter()) {
AllowGarbageCollection allow_gc;
AllowJavascriptExecution allow_js(isolate());
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE);
VMState<EXTERNAL> callback_state(isolate_);
HandleScope handle_scope(isolate_);
CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
}
}
// Part 2: The main garbage collection phase.
DisallowGarbageCollection no_gc_during_gc;
size_t freed_global_handles = 0;
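// The committed old-generation memory before the GC is only needed to
// compute the MemoryReducer event emitted after a mark-compact below.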
size_t committed_memory_before = collector == GarbageCollector::MARK_COMPACTOR
? CommittedOldGenerationMemory()
: 0;
{
tracer()->StartObservablePause();
VMState<GC> state(isolate());
DevToolsTraceEventScope devtools_trace_event_scope(
this, IsYoungGenerationCollector(collector) ? "MinorGC" : "MajorGC",
GarbageCollectionReasonToString(gc_reason));
// Filter out on-stack references below this method's stack position.
isolate()
->global_handles()
->CleanupOnStackReferencesBelowCurrentStackPosition();
if (collector == GarbageCollector::MARK_COMPACTOR && cpp_heap()) {
// CppHeap needs a stack marker at the top of all entry points to allow
// deterministic passes over the stack, e.g., a verifier that should only
// find the subset of references located below the marker.
//
// TODO(chromium:1056170): Consider adding a component that keeps track
// of relevant GC stack regions where interesting pointers can be found.
static_cast<v8::internal::CppHeap*>(cpp_heap())
->SetStackEndOfCurrentGC(v8::base::Stack::GetCurrentStackPosition());
}
GarbageCollectionPrologue(gc_reason, gc_callback_flags);
{
GCTracer::RecordGCPhasesInfo record_gc_phases_info(this, collector);
base::Optional<TimedHistogramScope> histogram_timer_scope;
base::Optional<OptionalTimedHistogramScope>
histogram_timer_priority_scope;
TRACE_EVENT0("v8", record_gc_phases_info.trace_event_name());
if (record_gc_phases_info.type_timer()) {
histogram_timer_scope.emplace(record_gc_phases_info.type_timer(),
isolate_);
}
if (record_gc_phases_info.type_priority_timer()) {
OptionalTimedHistogramScopeMode mode =
isolate_->IsMemorySavingsModeActive()
? OptionalTimedHistogramScopeMode::DONT_TAKE_TIME
: OptionalTimedHistogramScopeMode::TAKE_TIME;
histogram_timer_priority_scope.emplace(
record_gc_phases_info.type_priority_timer(), isolate_, mode);
}
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
tp_heap_->CollectGarbage();
} else {
freed_global_handles += PerformGarbageCollection(
collector, gc_reason, collector_reason, gc_callback_flags);
}
// Clear flags describing the current GC now that it is complete. Do this
// before GarbageCollectionEpilogue() since that could trigger another
// unforced GC.
is_current_gc_forced_ = false;
is_current_gc_for_heap_profiler_ = false;
if (collector == GarbageCollector::MARK_COMPACTOR ||
collector == GarbageCollector::SCAVENGER) {
tracer()->RecordGCPhasesHistograms(record_gc_phases_info.mode());
}
}
GarbageCollectionEpilogue(collector);
if (collector == GarbageCollector::MARK_COMPACTOR &&
FLAG_track_detached_contexts) {
isolate()->CheckDetachedContextsAfterGC();
}
if (collector == GarbageCollector::MARK_COMPACTOR) {
// Calculate used memory first, then committed memory. Following code
// assumes that committed >= used, which might not hold when this is
// calculated in the wrong order and background threads allocate
// in-between.
size_t used_memory_after = OldGenerationSizeOfObjects();
size_t committed_memory_after = CommittedOldGenerationMemory();
MemoryReducer::Event event;
event.type = MemoryReducer::kMarkCompact;
event.time_ms = MonotonicallyIncreasingTimeInMs();
// Trigger one more GC if
// - this GC decreased committed memory, or
// - there is high fragmentation.
event.next_gc_likely_to_collect_more =
(committed_memory_before > committed_memory_after + MB) ||
HasHighFragmentation(used_memory_after, committed_memory_after);
event.committed_memory = committed_memory_after;
if (deserialization_complete_) {
memory_reducer_->NotifyMarkCompact(event);
}
if (initial_max_old_generation_size_ < max_old_generation_size() &&
used_memory_after < initial_max_old_generation_size_threshold_) {
set_max_old_generation_size(initial_max_old_generation_size_);
}
}
tracer()->StopAtomicPause();
tracer()->StopObservablePause();
tracer()->UpdateStatistics(collector);
// Young generation cycles finish atomically. It is important that
// StopObservablePause, UpdateStatistics, and StopYoungCycleIfNeeded/
// StopFullCycleIfNeeded are called in this order; the latter may replace the
// current event with that of an interrupted full cycle.
if (IsYoungGenerationCollector(collector)) {
tracer()->StopYoungCycleIfNeeded();
} else {
tracer()->StopFullCycleIfNeeded();
}
}
// Part 3: Invoke all callbacks which should happen after the actual garbage
// collection is triggered. Note that these callbacks may trigger another
// garbage collection since they may allocate.
{
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
gc_post_processing_depth_++;
{
AllowGarbageCollection allow_gc;
AllowJavascriptExecution allow_js(isolate());
isolate_->global_handles()->PostGarbageCollectionProcessing(
collector, gc_callback_flags);
}
gc_post_processing_depth_--;
}
{
GCCallbacksScope scope(this);
if (scope.CheckReenter()) {
AllowGarbageCollection allow_gc;
AllowJavascriptExecution allow_js(isolate());
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_EPILOGUE);
VMState<EXTERNAL> callback_state(isolate_);
HandleScope handle_scope(isolate_);
CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
}
}
if (collector == GarbageCollector::MARK_COMPACTOR &&
(gc_callback_flags & (kGCCallbackFlagForced |
kGCCallbackFlagCollectAllAvailableGarbage)) != 0) {
isolate()->CountUsage(v8::Isolate::kForcedGC);
}
// Start incremental marking for the next cycle. We do this only after young
// generation collections, to avoid a loop where mark-compact causes another
// mark-compact.
if (IsYoungGenerationCollector(collector)) {
StartIncrementalMarkingIfAllocationLimitIsReached(
GCFlagsForIncrementalMarking(),
kGCCallbackScheduleIdleGarbageCollection);
}
if (!CanExpandOldGeneration(0)) {
InvokeNearHeapLimitCallback();
if (!CanExpandOldGeneration(0)) {
FatalProcessOutOfMemory("Reached heap limit");
}
}
return freed_global_handles > 0;
}
int Heap::NotifyContextDisposed(bool dependant_context) {
if (!dependant_context) {
tracer()->ResetSurvivalEvents();
old_generation_size_configured_ = false;
set_old_generation_allocation_limit(initial_old_generation_size_);
MemoryReducer::Event event;
event.type = MemoryReducer::kPossibleGarbage;
event.time_ms = MonotonicallyIncreasingTimeInMs();
memory_reducer_->NotifyPossibleGarbage(event);
}
isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
if (!isolate()->context().is_null()) {
RemoveDirtyFinalizationRegistriesOnContext(isolate()->raw_native_context());
isolate()->raw_native_context().set_retained_maps(
ReadOnlyRoots(this).empty_weak_array_list());
}
return ++contexts_disposed_;
}
void Heap::StartIncrementalMarking(int gc_flags,
GarbageCollectionReason gc_reason,
GCCallbackFlags gc_callback_flags) {
DCHECK(incremental_marking()->IsStopped());
// Sweeping needs to be completed such that markbits are all cleared before
// starting marking again.
CompleteSweepingFull();
base::Optional<SafepointScope> safepoint_scope;
{
AllowGarbageCollection allow_shared_gc;
IgnoreLocalGCRequests ignore_gc_requests(this);
safepoint_scope.emplace(this);
}
#ifdef DEBUG
VerifyCountersAfterSweeping();
#endif
// Now that sweeping is completed, we can start the next full GC cycle.
tracer()->StartCycle(GarbageCollector::MARK_COMPACTOR, gc_reason, nullptr,
GCTracer::MarkingType::kIncremental);
set_current_gc_flags(gc_flags);
current_gc_callback_flags_ = gc_callback_flags;
incremental_marking()->Start(gc_reason);
}
void Heap::CompleteSweepingFull() {
array_buffer_sweeper()->EnsureFinished();
mark_compact_collector()->EnsureSweepingCompleted(
MarkCompactCollector::SweepingForcedFinalizationMode::kUnifiedHeap);
DCHECK(!mark_compact_collector()->sweeping_in_progress());
DCHECK_IMPLIES(cpp_heap(),
!CppHeap::From(cpp_heap())->sweeper().IsSweepingInProgress());
DCHECK(!tracer()->IsSweepingInProgress());
}
void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
int gc_flags, const GCCallbackFlags gc_callback_flags) {
if (incremental_marking()->IsStopped()) {
switch (IncrementalMarkingLimitReached()) {
case IncrementalMarkingLimit::kHardLimit:
StartIncrementalMarking(
gc_flags,
OldGenerationSpaceAvailable() <= NewSpaceCapacity()
? GarbageCollectionReason::kAllocationLimit
: GarbageCollectionReason::kGlobalAllocationLimit,
gc_callback_flags);
break;
case IncrementalMarkingLimit::kSoftLimit:
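// The soft limit does not start marking immediately but schedules a task
// that will start incremental marking later.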
incremental_marking()->incremental_marking_job()->ScheduleTask(this);
break;
case IncrementalMarkingLimit::kFallbackForEmbedderLimit:
// This is a fallback case where no appropriate limits have been
// configured yet.
MemoryReducer::Event event;
event.type = MemoryReducer::kPossibleGarbage;
event.time_ms = MonotonicallyIncreasingTimeInMs();
memory_reducer()->NotifyPossibleGarbage(event);
break;
case IncrementalMarkingLimit::kNoLimit:
break;
}
}
}
void Heap::StartIncrementalMarkingIfAllocationLimitIsReachedBackground() {
if (!incremental_marking()->IsStopped() ||
!incremental_marking()->CanBeActivated()) {
return;
}
const size_t old_generation_space_available = OldGenerationSpaceAvailable();
if (old_generation_space_available < NewSpaceCapacity()) {
incremental_marking()->incremental_marking_job()->ScheduleTask(this);
}
}
void Heap::StartIdleIncrementalMarking(
GarbageCollectionReason gc_reason,
const GCCallbackFlags gc_callback_flags) {
StartIncrementalMarking(kReduceMemoryFootprintMask, gc_reason,
gc_callback_flags);
}
void Heap::MoveRange(HeapObject dst_object, const ObjectSlot dst_slot,
const ObjectSlot src_slot, int len,
WriteBarrierMode mode) {
DCHECK_NE(len, 0);
DCHECK_NE(dst_object.map(), ReadOnlyRoots(this).fixed_cow_array_map());
const ObjectSlot dst_end(dst_slot + len);
// Ensure no range overflow.
DCHECK(dst_slot < dst_end);
DCHECK(src_slot < src_slot + len);
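// While concurrent markers are active they may read these slots, so copy
// word-by-word with relaxed atomic stores; otherwise a plain MemMove
// suffices.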
if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
if (dst_slot < src_slot) {
// Copy tagged values forward using relaxed load/stores that do not
// involve value decompression.
const AtomicSlot atomic_dst_end(dst_end);
AtomicSlot dst(dst_slot);
AtomicSlot src(src_slot);
while (dst < atomic_dst_end) {
*dst = *src;
++dst;
++src;
}
} else {
// Copy tagged values backwards using relaxed load/stores that do not
// involve value decompression.
const AtomicSlot atomic_dst_begin(dst_slot);
AtomicSlot dst(dst_slot + len - 1);
AtomicSlot src(src_slot + len - 1);
while (dst >= atomic_dst_begin) {
*dst = *src;
--dst;
--src;
}
}
} else {
MemMove(dst_slot.ToVoidPtr(), src_slot.ToVoidPtr(), len * kTaggedSize);
}
if (mode == SKIP_WRITE_BARRIER) return;
WriteBarrierForRange(dst_object, dst_slot, dst_end);
}
// Instantiate Heap::CopyRange() for ObjectSlot and MaybeObjectSlot.
template void Heap::CopyRange<ObjectSlot>(HeapObject dst_object,
ObjectSlot dst_slot,
ObjectSlot src_slot, int len,
WriteBarrierMode mode);
template void Heap::CopyRange<MaybeObjectSlot>(HeapObject dst_object,
MaybeObjectSlot dst_slot,
MaybeObjectSlot src_slot,
int len, WriteBarrierMode mode);
template <typename TSlot>
void Heap::CopyRange(HeapObject dst_object, const TSlot dst_slot,
const TSlot src_slot, int len, WriteBarrierMode mode) {
DCHECK_NE(len, 0);
DCHECK_NE(dst_object.map(), ReadOnlyRoots(this).fixed_cow_array_map());
const TSlot dst_end(dst_slot + len);
// Ensure ranges do not overlap.
DCHECK(dst_end <= src_slot || (src_slot + len) <= dst_slot);
if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
// Copy tagged values using relaxed load/stores that do not involve value
// decompression.
const AtomicSlot atomic_dst_end(dst_end);
AtomicSlot dst(dst_slot);
AtomicSlot src(src_slot);
while (dst < atomic_dst_end) {
*dst = *src;
++dst;
++src;
}
} else {
MemCopy(dst_slot.ToVoidPtr(), src_slot.ToVoidPtr(), len * kTaggedSize);
}
if (mode == SKIP_WRITE_BARRIER) return;
WriteBarrierForRange(dst_object, dst_slot, dst_end);
}
void Heap::EnsureFromSpaceIsCommitted() {
if (!new_space_) return;
if (new_space_->CommitFromSpaceIfNeeded()) return;
// Committing memory to from space failed.
// Memory is exhausted and we will die.
FatalProcessOutOfMemory("Committing semi space failed.");
}
bool Heap::CollectionRequested() {
return collection_barrier_->WasGCRequested();
}
void Heap::CollectGarbageForBackground(LocalHeap* local_heap) {
CHECK(local_heap->is_main_thread());
CollectAllGarbage(current_gc_flags_,
GarbageCollectionReason::kBackgroundAllocationFailure,
current_gc_callback_flags_);
}
void Heap::CheckCollectionRequested() {
if (!CollectionRequested()) return;
CollectAllGarbage(current_gc_flags_,
GarbageCollectionReason::kBackgroundAllocationFailure,
current_gc_callback_flags_);
}
#if V8_ENABLE_WEBASSEMBLY
void Heap::EnsureWasmCanonicalRttsSize(int length) {
Handle<WeakArrayList> current_rtts = handle(wasm_canonical_rtts(), isolate_);
if (length <= current_rtts->length()) return;
Handle<WeakArrayList> result = WeakArrayList::EnsureSpace(
isolate(), current_rtts, length, AllocationType::kOld);
result->set_length(length);
set_wasm_canonical_rtts(*result);
}
#endif
void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
if (start_new_space_size == 0) return;
promotion_ratio_ = (static_cast<double>(promoted_objects_size_) /
static_cast<double>(start_new_space_size) * 100);
if (previous_semi_space_copied_object_size_ > 0) {
promotion_rate_ =
(static_cast<double>(promoted_objects_size_) /
static_cast<double>(previous_semi_space_copied_object_size_) * 100);
} else {
promotion_rate_ = 0;
}
semi_space_copied_rate_ =
(static_cast<double>(semi_space_copied_object_size_) /
static_cast<double>(start_new_space_size) * 100);
double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
tracer()->AddSurvivalRatio(survival_rate);
}
namespace {
GCTracer::Scope::ScopeId CollectorScopeId(GarbageCollector collector) {
switch (collector) {
case GarbageCollector::MARK_COMPACTOR:
return GCTracer::Scope::ScopeId::MARK_COMPACTOR;
case GarbageCollector::MINOR_MARK_COMPACTOR:
return GCTracer::Scope::ScopeId::MINOR_MARK_COMPACTOR;
case GarbageCollector::SCAVENGER:
return GCTracer::Scope::ScopeId::SCAVENGER;
}
UNREACHABLE();
}
} // namespace
size_t Heap::PerformGarbageCollection(
GarbageCollector collector, GarbageCollectionReason gc_reason,
const char* collector_reason, const v8::GCCallbackFlags gc_callback_flags) {
DisallowJavascriptExecution no_js(isolate());
if (IsYoungGenerationCollector(collector)) {
CompleteSweepingYoung(collector);
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
// If heap verification is enabled, we want to ensure that sweeping is
// completed here, as it will be triggered from Heap::Verify anyway.
// In this way, sweeping finalization is accounted to the corresponding
// full GC cycle.
CompleteSweepingFull();
}
#endif // VERIFY_HEAP
tracer()->StartCycle(collector, gc_reason, collector_reason,
GCTracer::MarkingType::kAtomic);
} else {
DCHECK_EQ(GarbageCollector::MARK_COMPACTOR, collector);
CompleteSweepingFull();
// If incremental marking has been activated, the full GC cycle has already
// started, so don't start a new one.
if (!incremental_marking_->WasActivated()) {
tracer()->StartCycle(collector, gc_reason, collector_reason,
GCTracer::MarkingType::kAtomic);
}
}
tracer()->StartAtomicPause();
if (!Heap::IsYoungGenerationCollector(collector) &&
incremental_marking_->WasActivated()) {
tracer()->UpdateCurrentEvent(gc_reason, collector_reason);
}
DCHECK(tracer()->IsConsistentWithCollector(collector));
TRACE_GC_EPOCH(tracer(), CollectorScopeId(collector), ThreadKind::kMain);
base::Optional<SafepointScope> safepoint_scope;
{
AllowGarbageCollection allow_shared_gc;
IgnoreLocalGCRequests ignore_gc_requests(this);
safepoint_scope.emplace(this);
}
collection_barrier_->StopTimeToCollectionTimer();
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
// We don't really perform a GC here but need this scope for the nested
// SafepointScope inside Verify().
AllowGarbageCollection allow_gc;
Verify();
}
#endif // VERIFY_HEAP
tracer()->StartInSafepoint();
GarbageCollectionPrologueInSafepoint();
EnsureFromSpaceIsCommitted();
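// The young generation size used for survival statistics includes the young
// large object space, if present.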
size_t start_young_generation_size =
NewSpaceSize() + (new_lo_space() ? new_lo_space()->SizeOfObjects() : 0);
switch (collector) {
case GarbageCollector::MARK_COMPACTOR:
MarkCompact();
break;
case GarbageCollector::MINOR_MARK_COMPACTOR:
MinorMarkCompact();
break;
case GarbageCollector::SCAVENGER:
Scavenge();
break;
}
ProcessPretenuringFeedback();
UpdateSurvivalStatistics(static_cast<int>(start_young_generation_size));
ConfigureInitialOldGenerationSize();
if (collector != GarbageCollector::MARK_COMPACTOR) {
// Objects that died in the new space might have been accounted
// as bytes marked ahead of schedule by the incremental marker.
incremental_marking()->UpdateMarkedBytesAfterScavenge(
start_young_generation_size - SurvivedYoungObjectSize());
}
if (!fast_promotion_mode_ || collector == GarbageCollector::MARK_COMPACTOR) {
ComputeFastPromotionMode();
}
isolate_->counters()->objs_since_last_young()->Set(0);
isolate_->eternal_handles()->PostGarbageCollectionProcessing();
// Update relocatables.
Relocatable::PostGarbageCollectionProcessing(isolate_);
size_t freed_global_handles;
{
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
// First-round weak callbacks are not supposed to allocate or trigger
// nested GCs.
freed_global_handles =
isolate_->global_handles()->InvokeFirstPassWeakCallbacks();
}
if (collector == GarbageCollector::MARK_COMPACTOR) {
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EMBEDDER_TRACING_EPILOGUE);
// TraceEpilogue may trigger operations that invalidate global handles. It
// has to be called *after* all other operations that potentially touch and
// reset global handles. It is also still part of the main garbage
// collection pause and thus needs to be called *before* any operation that
// can potentially trigger recursive garbage collection.
local_embedder_heap_tracer()->TraceEpilogue();
}
#if defined(CPPGC_YOUNG_GENERATION)
// Schedule Oilpan's minor GC. Since the minor GC doesn't support
// conservative stack scanning, do it only when the Scavenger runs from a
// task, which is non-nestable.
if (cpp_heap() && IsYoungGenerationCollector(collector)) {
const bool with_stack = (gc_reason != GarbageCollectionReason::kTask);
CppHeap::From(cpp_heap())
->RunMinorGC(with_stack ? CppHeap::StackState::kMayContainHeapPointers
: CppHeap::StackState::kNoHeapPointers);
}
#endif // defined(CPPGC_YOUNG_GENERATION)
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
// We don't really perform a GC here but need this scope for the nested
// SafepointScope inside Verify().
AllowGarbageCollection allow_gc;
Verify();
}
#endif // VERIFY_HEAP
RecomputeLimits(collector);
GarbageCollectionEpilogueInSafepoint(collector);
tracer()->StopInSafepoint();
return freed_global_handles;
}
void Heap::CollectSharedGarbage(GarbageCollectionReason gc_reason) {
CHECK(deserialization_complete());
DCHECK(!IsShared());
DCHECK_NOT_NULL(isolate()->shared_isolate());
isolate()->shared_isolate()->heap()->PerformSharedGarbageCollection(
isolate(), gc_reason);
}
void Heap::PerformSharedGarbageCollection(Isolate* initiator,
GarbageCollectionReason gc_reason) {
DCHECK(IsShared());
// Stop all client isolates attached to this isolate.
GlobalSafepointScope global_safepoint(initiator);
// Migrate shared isolate to the main thread of the initiator isolate.
v8::Locker locker(reinterpret_cast<v8::Isolate*>(isolate()));
v8::Isolate::Scope isolate_scope(reinterpret_cast<v8::Isolate*>(isolate()));
tracer()->StartObservablePause();
DCHECK(!incremental_marking_->WasActivated());
DCHECK_NOT_NULL(isolate()->global_safepoint());
isolate()->global_safepoint()->IterateClientIsolates([](Isolate* client) {
client->heap()->FreeSharedLinearAllocationAreas();
// As long as we need to iterate the client heap to find references into the
// shared heap, all client heaps need to be iterable.
client->heap()->MakeHeapIterable();
if (FLAG_concurrent_marking) {
client->heap()->concurrent_marking()->Pause();
}
});
const GarbageCollector collector = GarbageCollector::MARK_COMPACTOR;
PerformGarbageCollection(collector, gc_reason, nullptr);
isolate()->global_safepoint()->IterateClientIsolates([](Isolate* client) {
if (FLAG_concurrent_marking &&
client->heap()->incremental_marking()->IsMarking()) {
client->heap()->concurrent_marking()->RescheduleJobIfNeeded();
}
});
tracer()->StopAtomicPause();
tracer()->StopObservablePause();
tracer()->UpdateStatistics(collector);
tracer()->StopFullCycleIfNeeded();
}
void Heap::CompleteSweepingYoung(GarbageCollector collector) {
GCTracer::Scope::ScopeId scope_id;
switch (collector) {
case GarbageCollector::MINOR_MARK_COMPACTOR:
scope_id = GCTracer::Scope::MINOR_MC_COMPLETE_SWEEP_ARRAY_BUFFERS;
break;
case GarbageCollector::SCAVENGER:
scope_id = GCTracer::Scope::SCAVENGER_COMPLETE_SWEEP_ARRAY_BUFFERS;
break;
default:
UNREACHABLE();
}
{
TRACE_GC_EPOCH(tracer(), scope_id, ThreadKind::kMain);
array_buffer_sweeper()->EnsureFinished();
}
// If sweeping is in progress and there are no sweeper tasks running, finish
// the sweeping here, to avoid having to pause and resume during the young
// generation GC.
mark_compact_collector()->FinishSweepingIfOutOfWork();
#if defined(CPPGC_YOUNG_GENERATION)
// Always complete sweeping if young generation is enabled.
if (cpp_heap()) CppHeap::From(cpp_heap())->FinishSweepingIfRunning();
#endif // defined(CPPGC_YOUNG_GENERATION)
}
void Heap::EnsureSweepingCompleted(HeapObject object) {
if (!mark_compact_collector()->sweeping_in_progress()) return;
BasicMemoryChunk* basic_chunk = BasicMemoryChunk::FromHeapObject(object);
if (basic_chunk->InReadOnlySpace()) return;
MemoryChunk* chunk = MemoryChunk::cast(basic_chunk);
if (chunk->SweepingDone()) return;
// SweepingDone() is always true for large pages.
DCHECK(!chunk->IsLargePage());
Page* page = Page::cast(chunk);
mark_compact_collector()->EnsurePageIsSwept(page);
}
void Heap::RecomputeLimits(GarbageCollector collector) {
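// Limits are recomputed only after a full GC, or after a young GC when the
// young generation allocation rate is low and the old generation size has
// already been configured.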
if (!((collector == GarbageCollector::MARK_COMPACTOR) ||
(HasLowYoungGenerationAllocationRate() &&
old_generation_size_configured_))) {
return;
}
double v8_gc_speed =
tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
double v8_mutator_speed =
tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond();
double v8_growing_factor = MemoryController<V8HeapTrait>::GrowingFactor(
this, max_old_generation_size(), v8_gc_speed, v8_mutator_speed);
double global_growing_factor = 0;
if (UseGlobalMemoryScheduling()) {
DCHECK_NOT_NULL(local_embedder_heap_tracer());
double embedder_gc_speed = tracer()->EmbedderSpeedInBytesPerMillisecond();
double embedder_speed =
tracer()->CurrentEmbedderAllocationThroughputInBytesPerMillisecond();
double embedder_growing_factor =
(embedder_gc_speed > 0 && embedder_speed > 0)
? MemoryController<GlobalMemoryTrait>::GrowingFactor(
this, max_global_memory_size_, embedder_gc_speed,
embedder_speed)
: 0;
global_growing_factor =
std::max(v8_growing_factor, embedder_growing_factor);
}
size_t old_gen_size = OldGenerationSizeOfObjects();
size_t new_space_capacity = NewSpaceCapacity();
HeapGrowingMode mode = CurrentHeapGrowingMode();
if (collector == GarbageCollector::MARK_COMPACTOR) {
external_memory_.ResetAfterGC();
set_old_generation_allocation_limit(
MemoryController<V8HeapTrait>::CalculateAllocationLimit(
this, old_gen_size, min_old_generation_size_,
max_old_generation_size(), new_space_capacity, v8_growing_factor,
mode));
if (UseGlobalMemoryScheduling()) {
DCHECK_GT(global_growing_factor, 0);
global_allocation_limit_ =
MemoryController<GlobalMemoryTrait>::CalculateAllocationLimit(
this, GlobalSizeOfObjects(), min_global_memory_size_,
max_global_memory_size_, new_space_capacity,
global_growing_factor, mode);
}
CheckIneffectiveMarkCompact(
old_gen_size, tracer()->AverageMarkCompactMutatorUtilization());
} else if (HasLowYoungGenerationAllocationRate() &&
old_generation_size_configured_) {
size_t new_old_generation_limit =
MemoryController<V8HeapTrait>::CalculateAllocationLimit(
this, old_gen_size, min_old_generation_size_,
max_old_generation_size(), new_space_capacity, v8_growing_factor,
mode);
if (new_old_generation_limit < old_generation_allocation_limit()) {
set_old_generation_allocation_limit(new_old_generation_limit);
}
if (UseGlobalMemoryScheduling()) {
DCHECK_GT(global_growing_factor, 0);
size_t new_global_limit =
MemoryController<GlobalMemoryTrait>::CalculateAllocationLimit(
this, GlobalSizeOfObjects(), min_global_memory_size_,
max_global_memory_size_, new_space_capacity,
global_growing_factor, mode);
if (new_global_limit < global_allocation_limit_) {
global_allocation_limit_ = new_global_limit;
}
}
}
}
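// Embedders register prologue/epilogue callbacks through the public API. A
// minimal sketch (the callback name and type filter are illustrative only):
//
//   void OnGCPrologue(v8::Isolate* isolate, v8::GCType type,
//                     v8::GCCallbackFlags flags, void* data) { /* ... */ }
//   isolate->AddGCPrologueCallback(OnGCPrologue, /*data=*/nullptr,
//                                  v8::kGCTypeMarkSweepCompact);
//
// The loops below only invoke callbacks whose registered gc_type filter
// matches the current GC type.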
void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
RCS_SCOPE(isolate(), RuntimeCallCounterId::kGCPrologueCallback);
for (const GCCallbackTuple& info : gc_prologue_callbacks_) {
if (gc_type & info.gc_type) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
info.callback(isolate, gc_type, flags, info.data);
}
}
}
void Heap::CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags) {
RCS_SCOPE(isolate(), RuntimeCallCounterId::kGCEpilogueCallback);
for (const GCCallbackTuple& info : gc_epilogue_callbacks_) {
if (gc_type & info.gc_type) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
info.callback(isolate, gc_type, flags, info.data);
}
}
}
void Heap::MarkCompact() {
PauseAllocationObserversScope pause_observers(this);
SetGCState(MARK_COMPACT);
PROFILE(isolate_, CodeMovingGCEvent());
CodeSpaceMemoryModificationScope code_modification(this);
// Disable soft allocation limits in the shared heap, if one exists, as
// promotions into the shared heap should always succeed.
OptionalAlwaysAllocateScope always_allocate_shared_heap(
isolate()->shared_isolate() ? isolate()->shared_isolate()->heap()
: nullptr);
UpdateOldGenerationAllocationCounter();
uint64_t size_of_objects_before_gc = SizeOfObjects();
mark_compact_collector()->Prepare();
ms_count_++;
contexts_disposed_ = 0;
MarkCompactPrologue();
mark_compact_collector()->CollectGarbage();
MarkCompactEpilogue();
if (FLAG_allocation_site_pretenuring) {
EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
}
old_generation_size_configured_ = true;
// This should be updated before PostGarbageCollectionProcessing, which
// can cause another GC. Take into account the objects promoted during
// GC.
old_generation_allocation_counter_at_last_gc_ +=
static_cast<size_t>(promoted_objects_size_);
old_generation_size_at_last_gc_ = OldGenerationSizeOfObjects();
global_memory_at_last_gc_ = GlobalSizeOfObjects();
}
void Heap::MinorMarkCompact() {
DCHECK(FLAG_minor_mc);
DCHECK(new_space());
if (FLAG_trace_incremental_marking && !incremental_marking()->IsStopped()) {
isolate()->PrintWithTimestamp(
"[IncrementalMarking] MinorMarkCompact during marking.\n");
}
PauseAllocationObserversScope pause_observers(this);
SetGCState(MINOR_MARK_COMPACT);
TRACE_GC(tracer(), GCTracer::Scope::MINOR_MC);
AlwaysAllocateScope always_allocate(this);
// Disable soft allocation limits in the shared heap, if one exists, as
// promotions into the shared heap should always succeed.
OptionalAlwaysAllocateScope always_allocate_shared_heap(
isolate()->shared_isolate() ? isolate()->shared_isolate()->heap()
: nullptr);
IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
incremental_marking());
// Young generation garbage collection is orthogonal to full GC marking: it
// is possible that objects currently being processed by concurrent markers
// are reclaimed by the young generation GC that interleaves with it. Pause
// the concurrent markers so that such objects can be processed via
// `UpdateMarkingWorklistAfterYoungGenGC()`.
ConcurrentMarking::PauseScope pause_js_marking(concurrent_marking());
CppHeap::PauseConcurrentMarkingScope pause_cpp_marking(
CppHeap::From(cpp_heap_));
minor_mark_compact_collector_->CollectGarbage();
SetGCState(NOT_IN_GC);
}
void Heap::MarkCompactEpilogue() {
TRACE_GC(tracer(), GCTracer::Scope::MC_EPILOGUE);
SetGCState(NOT_IN_GC);
isolate_->counters()->objs_since_last_full()->Set(0);
incremental_marking()->Epilogue();
DCHECK(incremental_marking()->IsStopped());
}
void Heap::MarkCompactPrologue() {
TRACE_GC(tracer(), GCTracer::Scope::MC_PROLOGUE);
isolate_->descriptor_lookup_cache()->Clear();
RegExpResultsCache::Clear(string_split_cache());
RegExpResultsCache::Clear(regexp_multiple_cache());
isolate_->compilation_cache()->MarkCompactPrologue();
FlushNumberStringCache();
}
void Heap::CheckNewSpaceExpansionCriteria() {
if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
survived_since_last_expansion_ > new_space_->TotalCapacity()) {
// Grow the size of new space if there is room to grow, and enough data
// has survived scavenge since the last expansion.
new_space_->Grow();
survived_since_last_expansion_ = 0;
}
new_lo_space()->SetCapacity(new_space()->Capacity());
}
void Heap::EvacuateYoungGeneration() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_FAST_PROMOTE);
base::MutexGuard guard(relocation_mutex());
// Young generation garbage collection is orthogonal to full GC marking: it
// is possible that objects currently being processed by concurrent markers
// are reclaimed by the young generation GC that interleaves with it. Pause
// the concurrent markers so that such objects can be processed via
// `UpdateMarkingWorklistAfterYoungGenGC()`.
ConcurrentMarking::PauseScope pause_js_marking(concurrent_marking());
CppHeap::PauseConcurrentMarkingScope pause_cpp_marking(
CppHeap::From(cpp_heap_));
if (!FLAG_concurrent_marking) {
DCHECK(fast_promotion_mode_);
DCHECK(CanPromoteYoungAndExpandOldGeneration(0));
}
mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
// Move pages from new->old generation.
PageRange range(new_space()->first_allocatable_address(), new_space()->top());
for (auto it = range.begin(); it != range.end();) {
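// Advance the iterator before taking prev_page(): the page is removed from
// the space below, which would otherwise invalidate the iterator.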
Page* p = (*++it)->prev_page();
new_space()->from_space().RemovePage(p);
Page::ConvertNewToOld(p);
if (incremental_marking()->IsMarking())
mark_compact_collector()->RecordLiveSlotsOnPage(p);
}
// Reset new space.
if (!new_space()->Rebalance()) {
FatalProcessOutOfMemory("NewSpace::Rebalance");
}
new_space()->ResetLinearAllocationArea();
new_space()->set_age_mark(new_space()->top());
for (auto it = new_lo_space()->begin(); it != new_lo_space()->end();) {
LargePage* page = *it;
// The increment has to happen after we save the page, because removing the
// page from the space below would otherwise invalidate the iterator.
it++;
lo_space()->PromoteNewLargeObject(page);
}
// Fix up special trackers.
external_string_table_.PromoteYoung();
// GlobalHandles are updated in PostGarbageCollectionProcessing().
size_t promoted = new_space()->Size() + new_lo_space()->Size();
IncrementYoungSurvivorsCounter(promoted);
IncrementPromotedObjectsSize(promoted);
IncrementSemiSpaceCopiedObjectSize(0);
}
void Heap::Scavenge() {
DCHECK_NOT_NULL(new_space());
DCHECK_IMPLIES(FLAG_separate_gc_phases, !incremental_marking()->IsMarking());
if (FLAG_trace_incremental_marking && !incremental_marking()->IsStopped()) {
isolate()->PrintWithTimestamp(
"[IncrementalMarking] Scavenge during marking.\n");
}
if (fast_promotion_mode_ && CanPromoteYoungAndExpandOldGeneration(0)) {
tracer()->NotifyYoungGenerationHandling(
YoungGenerationHandling::kFastPromotionDuringScavenge);
EvacuateYoungGeneration();
return;
}
tracer()->NotifyYoungGenerationHandling(
YoungGenerationHandling::kRegularScavenge);
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
base::MutexGuard guard(relocation_mutex());
// Young generation garbage collection is orthogonal to full GC marking: it
// is possible that objects currently being processed by concurrent markers
// are reclaimed by the young generation GC that interleaves with it. Pause
// the concurrent markers so that such objects can be processed via
// `UpdateMarkingWorklistAfterYoungGenGC()`.
ConcurrentMarking::PauseScope pause_js_marking(concurrent_marking());
CppHeap::PauseConcurrentMarkingScope pause_cpp_marking(
CppHeap::From(cpp_heap_));
// There are soft limits in the allocation code, designed to trigger a
// mark-sweep collection by failing allocations. There is no sense in trying
// to trigger one during scavenge: scavenge allocations should always succeed.
AlwaysAllocateScope scope(this);
// Disable soft allocation limits in the shared heap, if one exists, as
// promotions into the shared heap should always succeed.
OptionalAlwaysAllocateScope always_allocate_shared_heap(
isolate()->shared_isolate() ? isolate()->shared_isolate()->heap()
: nullptr);
// Bump-pointer allocations done during scavenge are not real allocations.
// Pause the inline allocation steps.
PauseAllocationObserversScope pause_observers(this);
IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
incremental_marking());
mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
SetGCState(SCAVENGE);
// Flip the semispaces. After flipping, the to-space is empty and the
// from-space holds the live objects.
new_space()->Flip();
new_space()->ResetLinearAllocationArea();
// We also flip the young generation large object space. All large objects
// will be in the from space.
new_lo_space()->Flip();
new_lo_space()->ResetPendingObject();
// Implements Cheney's copying algorithm.
scavenger_collector_->CollectGarbage();
SetGCState(NOT_IN_GC);
}
void Heap::ComputeFastPromotionMode() {
if (!new_space_) return;
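// Percentage of the new space capacity that survived the last scavenge.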
const size_t survived_in_new_space =
survived_last_scavenge_ * 100 / NewSpaceCapacity();
fast_promotion_mode_ =
!FLAG_optimize_for_size && FLAG_fast_promotion_new_space &&
!ShouldReduceMemory() && new_space_->IsAtMaximumCapacity() &&
survived_in_new_space >= kMinPromotedPercentForFastPromotionMode;
if (FLAG_trace_gc_verbose && !FLAG_trace_gc_ignore_scavenger) {
PrintIsolate(isolate(), "Fast promotion mode: %s survival rate: %zu%%\n",
fast_promotion_mode_ ? "true" : "false",
survived_in_new_space);
}
}
void Heap::UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk,
UnprotectMemoryOrigin origin) {
if (!write_protect_code_memory()) return;
if (code_page_collection_memory_modification_scope_depth_ > 0) {
base::MutexGuard guard(&unprotected_memory_chunks_mutex_);
if (unprotected_memory_chunks_.insert(chunk).second) {
chunk->SetCodeModificationPermissions();
}
} else {
DCHECK_GT(code_space_memory_modification_scope_depth_, 0);
}
}
void Heap::UnprotectAndRegisterMemoryChunk(HeapObject object,
UnprotectMemoryOrigin origin) {
if (!write_protect_code_memory()) return;
UnprotectAndRegisterMemoryChunk(MemoryChunk::FromHeapObject(object), origin);
}
void Heap::UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk) {
unprotected_memory_chunks_.erase(chunk);
}
void Heap::ProtectUnprotectedMemoryChunks() {
base::MutexGuard guard(&unprotected_memory_chunks_mutex_);
for (auto chunk = unprotected_memory_chunks_.begin();
chunk != unprotected_memory_chunks_.end(); chunk++) {
DCHECK(memory_allocator()->IsMemoryChunkExecutable(*chunk));
(*chunk)->SetDefaultCodePermissions();
}
unprotected_memory_chunks_.clear();
}
bool Heap::ExternalStringTable::Contains(String string) {
for (size_t i = 0; i < young_strings_.size(); ++i) {
if (young_strings_[i] == string) return true;
}
for (size_t i = 0; i < old_strings_.size(); ++i) {
if (old_strings_[i] == string) return true;
}
return false;
}
void Heap::UpdateExternalString(String string, size_t old_payload,
size_t new_payload) {
DCHECK(string.IsExternalString());
if (FLAG_enable_third_party_heap) return;
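// Adjust the owning page's external backing-store accounting by the delta
// between the old and new payload sizes.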
Page* page = Page::FromHeapObject(string);
if (old_payload > new_payload) {
page->DecrementExternalBackingStoreBytes(
ExternalBackingStoreType::kExternalString, old_payload - new_payload);
} else {
page->IncrementExternalBackingStoreBytes(
ExternalBackingStoreType::kExternalString, new_payload - old_payload);
}
}
String Heap::UpdateYoungReferenceInExternalStringTableEntry(Heap* heap,
FullObjectSlot p) {
PtrComprCageBase cage_base(heap->isolate());
HeapObject obj = HeapObject::cast(*p);
MapWord first_word = obj.map_word(cage_base, kRelaxedLoad);
String new_string;
if (InFromPage(obj)) {
if (!first_word.IsForwardingAddress()) {
// Unreachable external strings can be finalized.
String string = String::cast(obj);
if (!string.IsExternalString(cage_base)) {
// Original external string has been internalized.
DCHECK(string.IsThinString(cage_base));
return String();
}
heap->FinalizeExternalString(string);
return String();
}
new_string = String::cast(first_word.ToForwardingAddress());
} else {
new_string = String::cast(obj);
}
// String is still reachable.
if (new_string.IsThinString(cage_base)) {
// Filter thin strings out of the external string table.
return String();
} else if (new_string.IsExternalString(cage_base)) {
MemoryChunk::MoveExternalBackingStoreBytes(
ExternalBackingStoreType::kExternalString,
Page::FromAddress((*p).ptr()), Page::FromHeapObject(new_string),
ExternalString::cast(new_string).ExternalPayloadSize());
return new_string;
}
// Internalization can replace external strings with non-external strings.
return new_string.IsExternalString(cage_base) ? new_string : String();
}
void Heap::ExternalStringTable::VerifyYoung() {
#ifdef DEBUG
std::set<String> visited_map;