// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/heap.h"
#include <unordered_map>
#include <unordered_set>
#include "src/accessors.h"
#include "src/api.h"
#include "src/assembler-inl.h"
#include "src/ast/context-slot-cache.h"
#include "src/base/bits.h"
#include "src/base/once.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/compilation-cache.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/conversions.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/feedback-vector.h"
#include "src/global-handles.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/code-stats.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/embedder-tracing.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-reducer.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/remembered-set.h"
#include "src/heap/scavenge-job.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/store-buffer.h"
#include "src/interpreter/interpreter.h"
#include "src/regexp/jsregexp.h"
#include "src/runtime-profiler.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot.h"
#include "src/tracing/trace-event.h"
#include "src/utils.h"
#include "src/v8.h"
#include "src/v8threads.h"
#include "src/vm-state-inl.h"
namespace v8 {
namespace internal {
struct Heap::StrongRootsList {
Object** start;
Object** end;
StrongRootsList* next;
};
class IdleScavengeObserver : public AllocationObserver {
public:
IdleScavengeObserver(Heap& heap, intptr_t step_size)
: AllocationObserver(step_size), heap_(heap) {}
void Step(int bytes_allocated, Address, size_t) override {
heap_.ScheduleIdleScavengeIfNeeded(bytes_allocated);
}
private:
Heap& heap_;
};
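// A minimal sketch of another observer following the same pattern. The
// Step() hook fires roughly once per step_size allocated bytes; the
// registration call shown is an assumption here (observers are attached
// to a space during heap setup, outside this excerpt):
//
//   class LoggingObserver : public AllocationObserver {
//    public:
//     explicit LoggingObserver(intptr_t step_size)
//         : AllocationObserver(step_size) {}
//     void Step(int bytes_allocated, Address soon_object,
//               size_t size) override {
//       PrintF("observed %d newly allocated bytes\n", bytes_allocated);
//     }
//   };
//
//   // Hypothetical attachment point:
//   // new_space_->AddAllocationObserver(new LoggingObserver(16 * KB));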
Heap::Heap()
: external_memory_(0),
external_memory_limit_(kExternalAllocationSoftLimit),
external_memory_at_last_mark_compact_(0),
isolate_(nullptr),
code_range_size_(0),
// semispace_size_ should be a power of 2 and old_generation_size_ should
// be a multiple of Page::kPageSize.
max_semi_space_size_(8 * (kPointerSize / 4) * MB),
initial_semispace_size_(MB),
max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
initial_max_old_generation_size_(max_old_generation_size_),
initial_old_generation_size_(max_old_generation_size_ /
kInitalOldGenerationLimitFactor),
old_generation_size_configured_(false),
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap.
// Will be 4 * reserved_semispace_size_ to ensure that the young
// generation can be aligned to its size.
maximum_committed_(0),
survived_since_last_expansion_(0),
survived_last_scavenge_(0),
always_allocate_scope_count_(0),
memory_pressure_level_(MemoryPressureLevel::kNone),
out_of_memory_callback_(nullptr),
out_of_memory_callback_data_(nullptr),
contexts_disposed_(0),
number_of_disposed_maps_(0),
global_ic_age_(0),
new_space_(nullptr),
old_space_(nullptr),
code_space_(nullptr),
map_space_(nullptr),
lo_space_(nullptr),
gc_state_(NOT_IN_GC),
gc_post_processing_depth_(0),
allocations_count_(0),
raw_allocations_hash_(0),
ms_count_(0),
gc_count_(0),
remembered_unmapped_pages_index_(0),
#ifdef DEBUG
allocation_timeout_(0),
#endif // DEBUG
old_generation_allocation_limit_(initial_old_generation_size_),
inline_allocation_disabled_(false),
tracer_(nullptr),
promoted_objects_size_(0),
promotion_ratio_(0),
semi_space_copied_object_size_(0),
previous_semi_space_copied_object_size_(0),
semi_space_copied_rate_(0),
nodes_died_in_new_space_(0),
nodes_copied_in_new_space_(0),
nodes_promoted_(0),
maximum_size_scavenges_(0),
last_idle_notification_time_(0.0),
last_gc_time_(0.0),
scavenge_collector_(nullptr),
mark_compact_collector_(nullptr),
minor_mark_compact_collector_(nullptr),
memory_allocator_(nullptr),
store_buffer_(nullptr),
incremental_marking_(nullptr),
concurrent_marking_(nullptr),
gc_idle_time_handler_(nullptr),
memory_reducer_(nullptr),
live_object_stats_(nullptr),
dead_object_stats_(nullptr),
scavenge_job_(nullptr),
idle_scavenge_observer_(nullptr),
new_space_allocation_counter_(0),
old_generation_allocation_counter_at_last_gc_(0),
old_generation_size_at_last_gc_(0),
gcs_since_last_deopt_(0),
global_pretenuring_feedback_(nullptr),
ring_buffer_full_(false),
ring_buffer_end_(0),
promotion_queue_(this),
configured_(false),
current_gc_flags_(Heap::kNoGCFlags),
current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
external_string_table_(this),
gc_callbacks_depth_(0),
deserialization_complete_(false),
strong_roots_list_(nullptr),
heap_iterator_depth_(0),
local_embedder_heap_tracer_(nullptr),
fast_promotion_mode_(false),
force_oom_(false),
delay_sweeper_tasks_for_testing_(false),
pending_layout_change_object_(nullptr) {
// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
// easier if you can define it as part of the build environment.
#if defined(V8_MAX_SEMISPACE_SIZE)
max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif
// Ensure old_generation_size_ is a multiple of kPageSize.
DCHECK((max_old_generation_size_ & (Page::kPageSize - 1)) == 0);
memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
set_native_contexts_list(NULL);
set_allocation_sites_list(Smi::kZero);
set_encountered_weak_collections(Smi::kZero);
set_encountered_weak_cells(Smi::kZero);
set_encountered_transition_arrays(Smi::kZero);
// Put a dummy entry in the remembered pages so we can find the list in
// the minidump even if there are no real unmapped pages.
RememberUnmappedPage(NULL, false);
}
size_t Heap::Capacity() {
if (!HasBeenSetUp()) return 0;
return new_space_->Capacity() + OldGenerationCapacity();
}
size_t Heap::OldGenerationCapacity() {
if (!HasBeenSetUp()) return 0;
return old_space_->Capacity() + code_space_->Capacity() +
map_space_->Capacity() + lo_space_->SizeOfObjects();
}
size_t Heap::CommittedOldGenerationMemory() {
if (!HasBeenSetUp()) return 0;
return old_space_->CommittedMemory() + code_space_->CommittedMemory() +
map_space_->CommittedMemory() + lo_space_->Size();
}
size_t Heap::CommittedMemory() {
if (!HasBeenSetUp()) return 0;
return new_space_->CommittedMemory() + CommittedOldGenerationMemory();
}
size_t Heap::CommittedPhysicalMemory() {
if (!HasBeenSetUp()) return 0;
return new_space_->CommittedPhysicalMemory() +
old_space_->CommittedPhysicalMemory() +
code_space_->CommittedPhysicalMemory() +
map_space_->CommittedPhysicalMemory() +
lo_space_->CommittedPhysicalMemory();
}
size_t Heap::CommittedMemoryExecutable() {
if (!HasBeenSetUp()) return 0;
return static_cast<size_t>(memory_allocator()->SizeExecutable());
}
void Heap::UpdateMaximumCommitted() {
if (!HasBeenSetUp()) return;
const size_t current_committed_memory = CommittedMemory();
if (current_committed_memory > maximum_committed_) {
maximum_committed_ = current_committed_memory;
}
}
size_t Heap::Available() {
if (!HasBeenSetUp()) return 0;
size_t total = 0;
AllSpaces spaces(this);
for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
total += space->Available();
}
return total;
}
bool Heap::HasBeenSetUp() {
return old_space_ != NULL && code_space_ != NULL && map_space_ != NULL &&
lo_space_ != NULL;
}
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
const char** reason) {
// Is global GC requested?
if (space != NEW_SPACE) {
isolate_->counters()->gc_compactor_caused_by_request()->Increment();
*reason = "GC in old space requested";
return MARK_COMPACTOR;
}
if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
*reason = "GC in old space forced by flags";
return MARK_COMPACTOR;
}
if (incremental_marking()->NeedsFinalization() &&
AllocationLimitOvershotByLargeMargin()) {
*reason = "Incremental marking needs finalization";
return MARK_COMPACTOR;
}
// Is there enough space left in OLD to guarantee that a scavenge can
// succeed?
//
// Note that MemoryAllocator->MaxAvailable() undercounts the memory available
// for object promotion. It counts only the bytes that the memory
// allocator has not yet allocated from the OS and assigned to any space,
// and does not count available bytes already in the old space or code
// space. Undercounting is safe---we may get an unrequested full GC when
// a scavenge would have succeeded.
if (memory_allocator()->MaxAvailable() <= new_space_->Size()) {
isolate_->counters()
->gc_compactor_caused_by_oldspace_exhaustion()
->Increment();
*reason = "scavenge might not succeed";
return MARK_COMPACTOR;
}
// Default
*reason = NULL;
return YoungGenerationCollector();
}
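// Worked example of the old-space exhaustion check above (numbers are
// illustrative): with new_space_->Size() == 8 * MB of live semispace data
// and memory_allocator()->MaxAvailable() == 6 * MB of memory not yet
// handed to any space, promotion of everything cannot be guaranteed, so
// MARK_COMPACTOR is selected. Because MaxAvailable() ignores free bytes
// already inside the old and code spaces, the worst case is an unneeded
// full GC, never a failed scavenge.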
void Heap::SetGCState(HeapState state) {
gc_state_ = state;
}
// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsBeforeGC() {
// Heap::ReportHeapStatistics will also log NewSpace statistics when
// --log-gc is set. The following logic is used to avoid double logging.
#ifdef DEBUG
if (FLAG_heap_stats || FLAG_log_gc) new_space_->CollectStatistics();
if (FLAG_heap_stats) {
ReportHeapStatistics("Before GC");
} else if (FLAG_log_gc) {
new_space_->ReportStatistics();
}
if (FLAG_heap_stats || FLAG_log_gc) new_space_->ClearHistograms();
#else
if (FLAG_log_gc) {
new_space_->CollectStatistics();
new_space_->ReportStatistics();
new_space_->ClearHistograms();
}
#endif // DEBUG
}
void Heap::PrintShortHeapStatistics() {
if (!FLAG_trace_gc_verbose) return;
PrintIsolate(isolate_, "Memory allocator, used: %6" PRIuS
" KB,"
" available: %6" PRIuS " KB\n",
memory_allocator()->Size() / KB,
memory_allocator()->Available() / KB);
PrintIsolate(isolate_, "New space, used: %6" PRIuS
" KB"
", available: %6" PRIuS
" KB"
", committed: %6" PRIuS " KB\n",
new_space_->Size() / KB, new_space_->Available() / KB,
new_space_->CommittedMemory() / KB);
PrintIsolate(isolate_, "Old space, used: %6" PRIuS
" KB"
", available: %6" PRIuS
" KB"
", committed: %6" PRIuS " KB\n",
old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
old_space_->CommittedMemory() / KB);
PrintIsolate(isolate_, "Code space, used: %6" PRIuS
" KB"
", available: %6" PRIuS
" KB"
", committed: %6" PRIuS " KB\n",
code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
code_space_->CommittedMemory() / KB);
PrintIsolate(isolate_, "Map space, used: %6" PRIuS
" KB"
", available: %6" PRIuS
" KB"
", committed: %6" PRIuS " KB\n",
map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
map_space_->CommittedMemory() / KB);
PrintIsolate(isolate_, "Large object space, used: %6" PRIuS
" KB"
", available: %6" PRIuS
" KB"
", committed: %6" PRIuS " KB\n",
lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
lo_space_->CommittedMemory() / KB);
PrintIsolate(isolate_, "All spaces, used: %6" PRIuS
" KB"
", available: %6" PRIuS
" KB"
", committed: %6" PRIuS " KB\n",
this->SizeOfObjects() / KB, this->Available() / KB,
this->CommittedMemory() / KB);
PrintIsolate(isolate_, "External memory reported: %6" PRId64 " KB\n",
external_memory_ / KB);
PrintIsolate(isolate_, "Total time spent in GC : %.1f ms\n",
total_gc_time_ms_);
}
// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
// As with the pre-GC reporting, we use some complicated logic to ensure
// that NewSpace statistics are logged exactly once when --log-gc is
// turned on.
#if defined(DEBUG)
if (FLAG_heap_stats) {
new_space_->CollectStatistics();
ReportHeapStatistics("After GC");
} else if (FLAG_log_gc) {
new_space_->ReportStatistics();
}
#else
if (FLAG_log_gc) new_space_->ReportStatistics();
#endif // DEBUG
for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
++i) {
int count = deferred_counters_[i];
deferred_counters_[i] = 0;
while (count > 0) {
count--;
isolate()->CountUsage(static_cast<v8::Isolate::UseCounterFeature>(i));
}
}
}
void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) {
deferred_counters_[feature]++;
}
bool Heap::UncommitFromSpace() { return new_space_->UncommitFromSpace(); }
void Heap::GarbageCollectionPrologue() {
TRACE_GC(tracer(), GCTracer::Scope::HEAP_PROLOGUE);
{
AllowHeapAllocation for_the_first_part_of_prologue;
gc_count_++;
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
}
#endif
}
// Reset GC statistics.
promoted_objects_size_ = 0;
previous_semi_space_copied_object_size_ = semi_space_copied_object_size_;
semi_space_copied_object_size_ = 0;
nodes_died_in_new_space_ = 0;
nodes_copied_in_new_space_ = 0;
nodes_promoted_ = 0;
UpdateMaximumCommitted();
#ifdef DEBUG
DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
if (FLAG_gc_verbose) Print();
ReportStatisticsBeforeGC();
#endif // DEBUG
if (new_space_->IsAtMaximumCapacity()) {
maximum_size_scavenges_++;
} else {
maximum_size_scavenges_ = 0;
}
CheckNewSpaceExpansionCriteria();
UpdateNewSpaceAllocationCounter();
}
size_t Heap::SizeOfObjects() {
size_t total = 0;
AllSpaces spaces(this);
for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
total += space->SizeOfObjects();
}
return total;
}
const char* Heap::GetSpaceName(int idx) {
switch (idx) {
case NEW_SPACE:
return "new_space";
case OLD_SPACE:
return "old_space";
case MAP_SPACE:
return "map_space";
case CODE_SPACE:
return "code_space";
case LO_SPACE:
return "large_object_space";
default:
UNREACHABLE();
}
return nullptr;
}
void Heap::SetRootCodeStubs(UnseededNumberDictionary* value) {
roots_[kCodeStubsRootIndex] = value;
}
void Heap::RepairFreeListsAfterDeserialization() {
PagedSpaces spaces(this);
for (PagedSpace* space = spaces.next(); space != NULL;
space = spaces.next()) {
space->RepairFreeListsAfterDeserialization();
}
}
void Heap::MergeAllocationSitePretenuringFeedback(
const base::HashMap& local_pretenuring_feedback) {
AllocationSite* site = nullptr;
for (base::HashMap::Entry* local_entry = local_pretenuring_feedback.Start();
local_entry != nullptr;
local_entry = local_pretenuring_feedback.Next(local_entry)) {
site = reinterpret_cast<AllocationSite*>(local_entry->key);
MapWord map_word = site->map_word();
if (map_word.IsForwardingAddress()) {
site = AllocationSite::cast(map_word.ToForwardingAddress());
}
// We have not validated the allocation site yet, since we did not
// dereference the site while collecting the feedback.
// This is an inlined check of AllocationMemento::IsValid.
if (!site->IsAllocationSite() || site->IsZombie()) continue;
int value =
static_cast<int>(reinterpret_cast<intptr_t>(local_entry->value));
DCHECK_GT(value, 0);
if (site->IncrementMementoFoundCount(value)) {
global_pretenuring_feedback_->LookupOrInsert(site,
ObjectHash(site->address()));
}
}
}
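// The forwarding check above is the standard idiom for touching an object
// that a copying phase may have moved. The same idiom in isolation (a
// sketch; `object` stands for any heap object possibly located in an
// evacuated space):
//
//   HeapObject* object = ...;
//   MapWord map_word = object->map_word();
//   if (map_word.IsForwardingAddress()) {
//     // The object was evacuated; follow the pointer to its new copy.
//     object = map_word.ToForwardingAddress();
//   }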
class Heap::SkipStoreBufferScope {
public:
explicit SkipStoreBufferScope(StoreBuffer* store_buffer)
: store_buffer_(store_buffer) {
store_buffer_->MoveAllEntriesToRememberedSet();
store_buffer_->SetMode(StoreBuffer::IN_GC);
}
~SkipStoreBufferScope() {
DCHECK(store_buffer_->Empty());
store_buffer_->SetMode(StoreBuffer::NOT_IN_GC);
}
private:
StoreBuffer* store_buffer_;
};
class Heap::PretenuringScope {
public:
explicit PretenuringScope(Heap* heap) : heap_(heap) {
heap_->global_pretenuring_feedback_ =
new base::HashMap(kInitialFeedbackCapacity);
}
~PretenuringScope() {
delete heap_->global_pretenuring_feedback_;
heap_->global_pretenuring_feedback_ = nullptr;
}
private:
Heap* heap_;
};
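// PretenuringScope is the RAII bracket that bounds the lifetime of
// global_pretenuring_feedback_; local feedback may only be merged while
// one is live. Its use in Heap::PerformGarbageCollection below:
//
//   {
//     Heap::PretenuringScope pretenuring_scope(this);
//     ...run the collector, merging local feedback into the global map...
//     ProcessPretenuringFeedback();
//   }  // feedback map deleted, pointer reset to nullptr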
void Heap::ProcessPretenuringFeedback() {
bool trigger_deoptimization = false;
if (FLAG_allocation_site_pretenuring) {
int tenure_decisions = 0;
int dont_tenure_decisions = 0;
int allocation_mementos_found = 0;
int allocation_sites = 0;
int active_allocation_sites = 0;
AllocationSite* site = nullptr;
// Step 1: Digest feedback for recorded allocation sites.
bool maximum_size_scavenge = MaximumSizeScavenge();
for (base::HashMap::Entry* e = global_pretenuring_feedback_->Start();
e != nullptr; e = global_pretenuring_feedback_->Next(e)) {
allocation_sites++;
site = reinterpret_cast<AllocationSite*>(e->key);
int found_count = site->memento_found_count();
// An entry in the storage does not imply that the count is > 0 because
// allocation sites might have been reset due to too many objects dying
// in old space.
if (found_count > 0) {
DCHECK(site->IsAllocationSite());
active_allocation_sites++;
allocation_mementos_found += found_count;
if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
trigger_deoptimization = true;
}
if (site->GetPretenureMode() == TENURED) {
tenure_decisions++;
} else {
dont_tenure_decisions++;
}
}
}
// Step 2: Deopt maybe tenured allocation sites if necessary.
bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
if (deopt_maybe_tenured) {
Object* list_element = allocation_sites_list();
while (list_element->IsAllocationSite()) {
site = AllocationSite::cast(list_element);
DCHECK(site->IsAllocationSite());
allocation_sites++;
if (site->IsMaybeTenure()) {
site->set_deopt_dependent_code(true);
trigger_deoptimization = true;
}
list_element = site->weak_next();
}
}
if (trigger_deoptimization) {
isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
}
if (FLAG_trace_pretenuring_statistics &&
(allocation_mementos_found > 0 || tenure_decisions > 0 ||
dont_tenure_decisions > 0)) {
PrintIsolate(isolate(),
"pretenuring: deopt_maybe_tenured=%d visited_sites=%d "
"active_sites=%d "
"mementos=%d tenured=%d not_tenured=%d\n",
deopt_maybe_tenured ? 1 : 0, allocation_sites,
active_allocation_sites, allocation_mementos_found,
tenure_decisions, dont_tenure_decisions);
}
}
}
void Heap::DeoptMarkedAllocationSites() {
// TODO(hpayer): If iterating over the allocation sites list becomes a
// performance issue, use a cache data structure in heap instead.
Object* list_element = allocation_sites_list();
while (list_element->IsAllocationSite()) {
AllocationSite* site = AllocationSite::cast(list_element);
if (site->deopt_dependent_code()) {
site->dependent_code()->MarkCodeForDeoptimization(
isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
site->set_deopt_dependent_code(false);
}
list_element = site->weak_next();
}
Deoptimizer::DeoptimizeMarkedCode(isolate_);
}
void Heap::GarbageCollectionEpilogue() {
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE);
// In release mode, we only zap the from space under heap verification.
if (Heap::ShouldZapGarbage()) {
ZapFromSpace();
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
}
#endif
AllowHeapAllocation for_the_rest_of_the_epilogue;
#ifdef DEBUG
if (FLAG_print_global_handles) isolate_->global_handles()->Print();
if (FLAG_print_handles) PrintHandles();
if (FLAG_gc_verbose) Print();
if (FLAG_code_stats) ReportCodeStatistics("After GC");
if (FLAG_check_handle_count) CheckHandleCount();
#endif
if (FLAG_deopt_every_n_garbage_collections > 0) {
// TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that
// the topmost optimized frame can be deoptimized safely, because it
// might not have a lazy bailout point right after its current PC.
if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
Deoptimizer::DeoptimizeAll(isolate());
gcs_since_last_deopt_ = 0;
}
}
UpdateMaximumCommitted();
isolate_->counters()->alive_after_last_gc()->Set(
static_cast<int>(SizeOfObjects()));
isolate_->counters()->string_table_capacity()->Set(
string_table()->Capacity());
isolate_->counters()->number_of_symbols()->Set(
string_table()->NumberOfElements());
if (CommittedMemory() > 0) {
isolate_->counters()->external_fragmentation_total()->AddSample(
static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
isolate_->counters()->heap_fraction_new_space()->AddSample(static_cast<int>(
(new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
isolate_->counters()->heap_fraction_old_space()->AddSample(static_cast<int>(
(old_space()->CommittedMemory() * 100.0) / CommittedMemory()));
isolate_->counters()->heap_fraction_code_space()->AddSample(
static_cast<int>((code_space()->CommittedMemory() * 100.0) /
CommittedMemory()));
isolate_->counters()->heap_fraction_map_space()->AddSample(static_cast<int>(
(map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
isolate_->counters()->heap_fraction_lo_space()->AddSample(static_cast<int>(
(lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
isolate_->counters()->heap_sample_total_committed()->AddSample(
static_cast<int>(CommittedMemory() / KB));
isolate_->counters()->heap_sample_total_used()->AddSample(
static_cast<int>(SizeOfObjects() / KB));
isolate_->counters()->heap_sample_map_space_committed()->AddSample(
static_cast<int>(map_space()->CommittedMemory() / KB));
isolate_->counters()->heap_sample_code_space_committed()->AddSample(
static_cast<int>(code_space()->CommittedMemory() / KB));
isolate_->counters()->heap_sample_maximum_committed()->AddSample(
static_cast<int>(MaximumCommittedMemory() / KB));
}
#define UPDATE_COUNTERS_FOR_SPACE(space) \
isolate_->counters()->space##_bytes_available()->Set( \
static_cast<int>(space()->Available())); \
isolate_->counters()->space##_bytes_committed()->Set( \
static_cast<int>(space()->CommittedMemory())); \
isolate_->counters()->space##_bytes_used()->Set( \
static_cast<int>(space()->SizeOfObjects()));
#define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
if (space()->CommittedMemory() > 0) { \
isolate_->counters()->external_fragmentation_##space()->AddSample( \
static_cast<int>(100 - \
(space()->SizeOfObjects() * 100.0) / \
space()->CommittedMemory())); \
}
#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
UPDATE_COUNTERS_FOR_SPACE(space) \
UPDATE_FRAGMENTATION_FOR_SPACE(space)
UPDATE_COUNTERS_FOR_SPACE(new_space)
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space)
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
#undef UPDATE_COUNTERS_FOR_SPACE
#undef UPDATE_FRAGMENTATION_FOR_SPACE
#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
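// Worked example for the fragmentation samples recorded above (numbers
// are illustrative): with old_space()->CommittedMemory() == 100 * MB and
// old_space()->SizeOfObjects() == 60 * MB, the sample is
//   100 - (60 * MB * 100.0) / (100 * MB) == 40
// i.e. 40% of the committed old-space memory holds no live objects.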
#ifdef DEBUG
ReportStatisticsAfterGC();
#endif // DEBUG
// Remember the last top pointer so that we can later find out
// whether we allocated in new space since the last GC.
new_space_top_after_last_gc_ = new_space()->top();
last_gc_time_ = MonotonicallyIncreasingTimeInMs();
{
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE);
ReduceNewSpaceSize();
}
}
void Heap::PreprocessStackTraces() {
WeakFixedArray::Iterator iterator(weak_stack_trace_list());
FixedArray* elements;
while ((elements = iterator.Next<FixedArray>()) != nullptr) {
for (int j = 1; j < elements->length(); j += 4) {
Object* maybe_code = elements->get(j + 2);
// If GC happens while adding a stack trace to the weak fixed array,
// which has been copied into a larger backing store, we may run into
// a stack trace that has already been preprocessed. Guard against this.
if (!maybe_code->IsAbstractCode()) break;
AbstractCode* abstract_code = AbstractCode::cast(maybe_code);
int offset = Smi::cast(elements->get(j + 3))->value();
int pos = abstract_code->SourcePosition(offset);
elements->set(j + 2, Smi::FromInt(pos));
}
}
// We must not compact the weak fixed array here, as we may have been in
// the middle of writing to it when the GC was triggered. Instead, we
// reset the root value.
set_weak_stack_trace_list(Smi::kZero);
}
class GCCallbacksScope {
public:
explicit GCCallbacksScope(Heap* heap) : heap_(heap) {
heap_->gc_callbacks_depth_++;
}
~GCCallbacksScope() { heap_->gc_callbacks_depth_--; }
bool CheckReenter() { return heap_->gc_callbacks_depth_ == 1; }
private:
Heap* heap_;
};
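// CheckReenter() is true only in the outermost scope, so embedder
// callbacks run once even when nested scopes are opened during callback
// processing; see the prologue/epilogue blocks in
// Heap::FinalizeIncrementalMarking and Heap::PerformGarbageCollection:
//
//   {
//     GCCallbacksScope scope(this);   // depth 0 -> 1
//     if (scope.CheckReenter()) {     // true only at depth 1
//       CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
//     }
//   }                                 // depth 1 -> 0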
void Heap::HandleGCRequest() {
if (HighMemoryPressure()) {
incremental_marking()->reset_request_type();
CheckMemoryPressure();
} else if (incremental_marking()->request_type() ==
IncrementalMarking::COMPLETE_MARKING) {
incremental_marking()->reset_request_type();
CollectAllGarbage(current_gc_flags_,
GarbageCollectionReason::kFinalizeMarkingViaStackGuard,
current_gc_callback_flags_);
} else if (incremental_marking()->request_type() ==
IncrementalMarking::FINALIZATION &&
incremental_marking()->IsMarking() &&
!incremental_marking()->finalize_marking_completed()) {
incremental_marking()->reset_request_type();
FinalizeIncrementalMarking(
GarbageCollectionReason::kFinalizeMarkingViaStackGuard);
}
}
void Heap::ScheduleIdleScavengeIfNeeded(int bytes_allocated) {
scavenge_job_->ScheduleIdleTaskIfNeeded(this, bytes_allocated);
}
void Heap::FinalizeIncrementalMarking(GarbageCollectionReason gc_reason) {
if (FLAG_trace_incremental_marking) {
isolate()->PrintWithTimestamp(
"[IncrementalMarking] (%s).\n",
Heap::GarbageCollectionReasonToString(gc_reason));
}
HistogramTimerScope incremental_marking_scope(
isolate()->counters()->gc_incremental_marking_finalize());
TRACE_EVENT0("v8", "V8.GCIncrementalMarkingFinalize");
TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);
{
GCCallbacksScope scope(this);
if (scope.CheckReenter()) {
AllowHeapAllocation allow_allocation;
TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE);
VMState<EXTERNAL> state(isolate_);
HandleScope handle_scope(isolate_);
CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
}
}
incremental_marking()->FinalizeIncrementally();
{
GCCallbacksScope scope(this);
if (scope.CheckReenter()) {
AllowHeapAllocation allow_allocation;
TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE);
VMState<EXTERNAL> state(isolate_);
HandleScope handle_scope(isolate_);
CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
}
}
}
HistogramTimer* Heap::GCTypeTimer(GarbageCollector collector) {
if (IsYoungGenerationCollector(collector)) {
return isolate_->counters()->gc_scavenger();
} else {
if (!incremental_marking()->IsStopped()) {
if (ShouldReduceMemory()) {
return isolate_->counters()->gc_finalize_reduce_memory();
} else {
return isolate_->counters()->gc_finalize();
}
} else {
return isolate_->counters()->gc_compactor();
}
}
}
void Heap::CollectAllGarbage(int flags, GarbageCollectionReason gc_reason,
const v8::GCCallbackFlags gc_callback_flags) {
// Since we are ignoring the return value, the exact choice of space does
// not matter, so long as we do not specify NEW_SPACE, which would not
// cause a full GC.
set_current_gc_flags(flags);
CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags);
set_current_gc_flags(kNoGCFlags);
}
void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
// Since we are ignoring the return value, the exact choice of space does
// not matter, so long as we do not specify NEW_SPACE, which would not
// cause a full GC.
// A major GC invokes weak handle callbacks on weakly reachable handles,
// but does not collect the weakly reachable objects themselves until the
// next major GC. Therefore, if we collect aggressively and a weak handle
// callback has been invoked, we rerun the major GC to release the objects
// that became garbage.
// Note: as weak callbacks can execute arbitrary code, we cannot hope that
// eventually there will be no weak callback invocations. Therefore, stop
// recollecting after several attempts.
if (gc_reason == GarbageCollectionReason::kLastResort) {
InvokeOutOfMemoryCallback();
}
RuntimeCallTimerScope runtime_timer(
isolate(), &RuntimeCallStats::GC_AllAvailableGarbage);
if (isolate()->concurrent_recompilation_enabled()) {
// The optimizing compiler may be unnecessarily holding on to memory.
DisallowHeapAllocation no_recursive_gc;
isolate()->optimizing_compile_dispatcher()->Flush(
OptimizingCompileDispatcher::BlockingBehavior::kDontBlock);
}
isolate()->ClearSerializerData();
set_current_gc_flags(kMakeHeapIterableMask | kReduceMemoryFootprintMask);
isolate_->compilation_cache()->Clear();
const int kMaxNumberOfAttempts = 7;
const int kMinNumberOfAttempts = 2;
for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL,
v8::kGCCallbackFlagCollectAllAvailableGarbage) &&
attempt + 1 >= kMinNumberOfAttempts) {
break;
}
}
set_current_gc_flags(kNoGCFlags);
new_space_->Shrink();
UncommitFromSpace();
}
void Heap::ReportExternalMemoryPressure() {
if (external_memory_ >
(external_memory_at_last_mark_compact_ + external_memory_hard_limit())) {
CollectAllGarbage(
kReduceMemoryFootprintMask | kFinalizeIncrementalMarkingMask,
GarbageCollectionReason::kExternalMemoryPressure,
static_cast<GCCallbackFlags>(kGCCallbackFlagCollectAllAvailableGarbage |
kGCCallbackFlagCollectAllExternalMemory));
return;
}
if (incremental_marking()->IsStopped()) {
if (incremental_marking()->CanBeActivated()) {
StartIncrementalMarking(
i::Heap::kNoGCFlags, GarbageCollectionReason::kExternalMemoryPressure,
static_cast<GCCallbackFlags>(
kGCCallbackFlagSynchronousPhantomCallbackProcessing |
kGCCallbackFlagCollectAllExternalMemory));
} else {
CollectAllGarbage(i::Heap::kNoGCFlags,
GarbageCollectionReason::kExternalMemoryPressure,
kGCCallbackFlagSynchronousPhantomCallbackProcessing);
}
} else {
// Incremental marking is turned on and has already been started.
const double pressure =
static_cast<double>(external_memory_ -
external_memory_at_last_mark_compact_ -
kExternalAllocationSoftLimit) /
external_memory_hard_limit();
DCHECK_GE(1, pressure);
const double kMaxStepSizeOnExternalLimit = 25;
const double deadline = MonotonicallyIncreasingTimeInMs() +
pressure * kMaxStepSizeOnExternalLimit;
incremental_marking()->AdvanceIncrementalMarking(
deadline, IncrementalMarking::GC_VIA_STACK_GUARD,
IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
}
}
void Heap::EnsureFillerObjectAtTop() {
// There may be an allocation memento behind objects in new space. Upon
// evacuation of a non-full new space (or if we are on the last page) there
// may be uninitialized memory behind top. We fill the remainder of the page
// with a filler.
Address to_top = new_space_->top();
Page* page = Page::FromAddress(to_top - kPointerSize);
if (page->Contains(to_top)) {
int remaining_in_page = static_cast<int>(page->area_end() - to_top);
CreateFillerObjectAt(to_top, remaining_in_page, ClearRecordedSlots::kNo);
}
}
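// Example of the effect (illustrative numbers): if top sits 128 bytes
// before page->area_end(), a 128-byte filler (free-space object) is
// written at top, so heap iterators and allocation-memento lookups never
// read uninitialized memory behind the allocation pointer.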
bool Heap::CollectGarbage(GarbageCollector collector,
GarbageCollectionReason gc_reason,
const char* collector_reason,
const v8::GCCallbackFlags gc_callback_flags) {
// The VM is in the GC state until exiting this function.
VMState<GC> state(isolate());
RuntimeCallTimerScope runtime_timer(isolate(), &RuntimeCallStats::GC);
#ifdef DEBUG
// Reset the allocation timeout to the GC interval, but make sure to
// allow at least a few allocations after a collection. The reason
// for this is that we have a lot of allocation sequences and we
// assume that a garbage collection will allow the subsequent
// allocation attempts to go through.
allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif
EnsureFillerObjectAtTop();
if (IsYoungGenerationCollector(collector) &&
!incremental_marking()->IsStopped()) {
if (FLAG_trace_incremental_marking) {
isolate()->PrintWithTimestamp(
"[IncrementalMarking] Scavenge during marking.\n");
}
}
bool next_gc_likely_to_collect_more = false;
size_t committed_memory_before = 0;
if (collector == MARK_COMPACTOR) {
committed_memory_before = CommittedOldGenerationMemory();
}
{
tracer()->Start(collector, gc_reason, collector_reason);
DCHECK(AllowHeapAllocation::IsAllowed());
DisallowHeapAllocation no_allocation_during_gc;
GarbageCollectionPrologue();
{
HistogramTimer* gc_type_timer = GCTypeTimer(collector);
HistogramTimerScope histogram_timer_scope(gc_type_timer);
TRACE_EVENT0("v8", gc_type_timer->name());
next_gc_likely_to_collect_more =
PerformGarbageCollection(collector, gc_callback_flags);
}
GarbageCollectionEpilogue();
if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) {
isolate()->CheckDetachedContextsAfterGC();
}
if (collector == MARK_COMPACTOR) {
size_t committed_memory_after = CommittedOldGenerationMemory();
size_t used_memory_after = PromotedSpaceSizeOfObjects();
MemoryReducer::Event event;
event.type = MemoryReducer::kMarkCompact;
event.time_ms = MonotonicallyIncreasingTimeInMs();
// Trigger one more GC if
// - this GC decreased committed memory,
// - there is high fragmentation,
// - there are live detached contexts.
event.next_gc_likely_to_collect_more =
(committed_memory_before > committed_memory_after + MB) ||
HasHighFragmentation(used_memory_after, committed_memory_after) ||
(detached_contexts()->length() > 0);
event.committed_memory = committed_memory_after;
if (deserialization_complete_) {
memory_reducer_->NotifyMarkCompact(event);
}
memory_pressure_level_.SetValue(MemoryPressureLevel::kNone);
}
tracer()->Stop(collector);
}
if (collector == MARK_COMPACTOR &&
(gc_callback_flags & (kGCCallbackFlagForced |
kGCCallbackFlagCollectAllAvailableGarbage)) != 0) {
isolate()->CountUsage(v8::Isolate::kForcedGC);
}
// Start incremental marking for the next cycle. The heap snapshot
// generator needs incremental marking to stay off after it aborted.
// We do this only for the scavenger to avoid a loop where one
// mark-compact causes another mark-compact.
if (IsYoungGenerationCollector(collector) &&
!ShouldAbortIncrementalMarking()) {
StartIncrementalMarkingIfAllocationLimitIsReached(kNoGCFlags,
kNoGCCallbackFlags);
}
return next_gc_likely_to_collect_more;
}
int Heap::NotifyContextDisposed(bool dependant_context) {
if (!dependant_context) {
tracer()->ResetSurvivalEvents();
old_generation_size_configured_ = false;
MemoryReducer::Event event;
event.type = MemoryReducer::kPossibleGarbage;
event.time_ms = MonotonicallyIncreasingTimeInMs();
memory_reducer_->NotifyPossibleGarbage(event);
}
if (isolate()->concurrent_recompilation_enabled()) {
// Flush the queued recompilation tasks.
isolate()->optimizing_compile_dispatcher()->Flush(
OptimizingCompileDispatcher::BlockingBehavior::kDontBlock);
}
AgeInlineCaches();
number_of_disposed_maps_ = retained_maps()->Length();
tracer()->AddContextDisposalTime(MonotonicallyIncreasingTimeInMs());
return ++contexts_disposed_;
}
void Heap::StartIncrementalMarking(int gc_flags,
GarbageCollectionReason gc_reason,
GCCallbackFlags gc_callback_flags) {
DCHECK(incremental_marking()->IsStopped());
set_current_gc_flags(gc_flags);
current_gc_callback_flags_ = gc_callback_flags;
incremental_marking()->Start(gc_reason);
}
void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
int gc_flags, const GCCallbackFlags gc_callback_flags) {
if (incremental_marking()->IsStopped()) {
IncrementalMarkingLimit reached_limit = IncrementalMarkingLimitReached();
if (reached_limit == IncrementalMarkingLimit::kSoftLimit) {
incremental_marking()->incremental_marking_job()->ScheduleTask(this);
} else if (reached_limit == IncrementalMarkingLimit::kHardLimit) {
StartIncrementalMarking(gc_flags,
GarbageCollectionReason::kAllocationLimit,
gc_callback_flags);
}
}
}
void Heap::StartIdleIncrementalMarking(
GarbageCollectionReason gc_reason,
const GCCallbackFlags gc_callback_flags) {
gc_idle_time_handler_->ResetNoProgressCounter();
StartIncrementalMarking(kReduceMemoryFootprintMask, gc_reason,
gc_callback_flags);
}
void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
int len) {
if (len == 0) return;
DCHECK(array->map() != fixed_cow_array_map());
Object** dst_objects = array->data_start() + dst_index;
MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(this, array, dst_index, len);
}
#ifdef VERIFY_HEAP
// Helper class for verifying the string table.
class StringTableVerifier : public ObjectVisitor {
public:
void VisitPointers(HeapObject* host, Object** start, Object** end) override {
// Visit all HeapObject pointers in [start, end).
for (Object** p = start; p < end; p++) {
if ((*p)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*p);
Isolate* isolate = object->GetIsolate();
// Check that the string is actually internalized.
CHECK(object->IsTheHole(isolate) || object->IsUndefined(isolate) ||
object->IsInternalizedString());
}
}
}
};
static void VerifyStringTable(Heap* heap) {
StringTableVerifier verifier;
heap->string_table()->IterateElements(&verifier);
}
#endif // VERIFY_HEAP
bool Heap::ReserveSpace(Reservation* reservations, List<Address>* maps) {
bool gc_performed = true;
int counter = 0;
static const int kThreshold = 20;
while (gc_performed && counter++ < kThreshold) {
gc_performed = false;
for (int space = NEW_SPACE; space < SerializerDeserializer::kNumberOfSpaces;
space++) {
Reservation* reservation = &reservations[space];
DCHECK_LE(1, reservation->size());
if (reservation->at(0).size == 0) continue;
bool perform_gc = false;
if (space == MAP_SPACE) {
// We allocate each map individually to avoid fragmentation.
maps->Clear();
DCHECK_EQ(1, reservation->size());
int num_maps = reservation->at(0).size / Map::kSize;
for (int i = 0; i < num_maps; i++) {
// The deserializer will update the skip list.
AllocationResult allocation = map_space()->AllocateRawUnaligned(
Map::kSize, PagedSpace::IGNORE_SKIP_LIST);
HeapObject* free_space = nullptr;
if (allocation.To(&free_space)) {
// Mark with a free list node, in case we have a GC before
// deserializing.
Address free_space_address = free_space->address();
CreateFillerObjectAt(free_space_address, Map::kSize,
ClearRecordedSlots::kNo);
maps->Add(free_space_address);
} else {
perform_gc = true;
break;
}
}
} else if (space == LO_SPACE) {
// Just check that we can allocate during deserialization.
DCHECK_EQ(1, reservation->size());
perform_gc = !CanExpandOldGeneration(reservation->at(0).size);
} else {
for (auto& chunk : *reservation) {
AllocationResult allocation;
int size = chunk.size;
DCHECK_LE(static_cast<size_t>(size),
MemoryAllocator::PageAreaSize(
static_cast<AllocationSpace>(space)));
if (space == NEW_SPACE) {
allocation = new_space()->AllocateRawUnaligned(size);
} else {
// The deserializer will update the skip list.
allocation = paged_space(space)->AllocateRawUnaligned(
size, PagedSpace::IGNORE_SKIP_LIST);
}
HeapObject* free_space = nullptr;
if (allocation.To(&free_space)) {
// Mark with a free list node, in case we have a GC before
// deserializing.
Address free_space_address = free_space->address();
CreateFillerObjectAt(free_space_address, size,
ClearRecordedSlots::kNo);
DCHECK(space < SerializerDeserializer::kNumberOfPreallocatedSpaces);
chunk.start = free_space_address;
chunk.end = free_space_address + size;
} else {
perform_gc = true;
break;
}
}
}
if (perform_gc) {
if (space == NEW_SPACE) {
CollectGarbage(NEW_SPACE, GarbageCollectionReason::kDeserializer);
} else {
if (counter > 1) {
CollectAllGarbage(
kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
GarbageCollectionReason::kDeserializer);
} else {
CollectAllGarbage(kAbortIncrementalMarkingMask,
GarbageCollectionReason::kDeserializer);
}
}
gc_performed = true;
break; // Abort for-loop over spaces and retry.
}
}
}
return !gc_performed;
}
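// Retry contract of the loop above, as a trace (kThreshold == 20):
//   attempt 1: some chunk fails to allocate -> GC and restart the space
//              loop (a scavenge for NEW_SPACE, otherwise a full GC with
//              kAbortIncrementalMarkingMask)
//   attempt 2+: full GCs additionally pass kReduceMemoryFootprintMask
//   attempt 20: still failing -> return false (the caller is out of
//               memory for the snapshot reservations)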
void Heap::EnsureFromSpaceIsCommitted() {
if (new_space_->CommitFromSpaceIfNeeded()) return;
// Committing memory to from space failed.
// Memory is exhausted and we will die.
V8::FatalProcessOutOfMemory("Committing semi space failed.");
}
void Heap::ClearNormalizedMapCaches() {
if (isolate_->bootstrapper()->IsActive() &&
!incremental_marking()->IsMarking()) {
return;
}
Object* context = native_contexts_list();
while (!context->IsUndefined(isolate())) {
// GC can happen when the context is not fully initialized,
// so the cache can be undefined.
Object* cache =
Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
if (!cache->IsUndefined(isolate())) {
NormalizedMapCache::cast(cache)->Clear();
}
context = Context::cast(context)->next_context_link();
}
}
void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
if (start_new_space_size == 0) return;
promotion_ratio_ = (static_cast<double>(promoted_objects_size_) /
static_cast<double>(start_new_space_size) * 100);
if (previous_semi_space_copied_object_size_ > 0) {
promotion_rate_ =
(static_cast<double>(promoted_objects_size_) /
static_cast<double>(previous_semi_space_copied_object_size_) * 100);
} else {
promotion_rate_ = 0;
}
semi_space_copied_rate_ =
(static_cast<double>(semi_space_copied_object_size_) /
static_cast<double>(start_new_space_size) * 100);
double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
tracer()->AddSurvivalRatio(survival_rate);
}
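// Worked example (illustrative numbers): with start_new_space_size ==
// 1 * MB, promoted_objects_size_ == 100 * KB and
// semi_space_copied_object_size_ == 300 * KB:
//   promotion_ratio_        == 100.0 / 1024 * 100 ~  9.8
//   semi_space_copied_rate_ == 300.0 / 1024 * 100 ~ 29.3
//   survival_rate           ~ 39.1  (the ratio fed to the tracer)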
bool Heap::PerformGarbageCollection(
GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
int freed_global_handles = 0;
if (!IsYoungGenerationCollector(collector)) {
PROFILE(isolate_, CodeMovingGCEvent());
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
VerifyStringTable(this);
}
#endif
GCType gc_type =
collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
{
GCCallbacksScope scope(this);
if (scope.CheckReenter()) {
AllowHeapAllocation allow_allocation;
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE);
VMState<EXTERNAL> state(isolate_);
HandleScope handle_scope(isolate_);
CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
}
}
EnsureFromSpaceIsCommitted();
int start_new_space_size = static_cast<int>(Heap::new_space()->Size());
{
Heap::PretenuringScope pretenuring_scope(this);
Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_);
switch (collector) {
case MARK_COMPACTOR:
UpdateOldGenerationAllocationCounter();
// Perform mark-sweep with optional compaction.
MarkCompact();
old_generation_size_configured_ = true;
// This should be updated before PostGarbageCollectionProcessing, which
// can cause another GC. Take into account the objects promoted during
// GC.
old_generation_allocation_counter_at_last_gc_ +=
static_cast<size_t>(promoted_objects_size_);
old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
break;
case MINOR_MARK_COMPACTOR:
MinorMarkCompact();
break;
case SCAVENGER:
if ((fast_promotion_mode_ &&
CanExpandOldGeneration(new_space()->Size()))) {
tracer()->NotifyYoungGenerationHandling(
YoungGenerationHandling::kFastPromotionDuringScavenge);
EvacuateYoungGeneration();
} else {
tracer()->NotifyYoungGenerationHandling(
YoungGenerationHandling::kRegularScavenge);
Scavenge();
}
break;
}
ProcessPretenuringFeedback();
}
UpdateSurvivalStatistics(start_new_space_size);
ConfigureInitialOldGenerationSize();
if (!fast_promotion_mode_ || collector == MARK_COMPACTOR) {
ComputeFastPromotionMode(promotion_ratio_ + semi_space_copied_rate_);
}
isolate_->counters()->objs_since_last_young()->Set(0);
gc_post_processing_depth_++;
{
AllowHeapAllocation allow_allocation;
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
freed_global_handles =
isolate_->global_handles()->PostGarbageCollectionProcessing(
collector, gc_callback_flags);
}
gc_post_processing_depth_--;
isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
// Update relocatables.
Relocatable::PostGarbageCollectionProcessing(isolate_);
double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
double mutator_speed =
tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond();
size_t old_gen_size = PromotedSpaceSizeOfObjects();
if (collector == MARK_COMPACTOR) {
// Register the amount of external allocated memory.
external_memory_at_last_mark_compact_ = external_memory_;
external_memory_limit_ = external_memory_ + kExternalAllocationSoftLimit;
SetOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
} else if (HasLowYoungGenerationAllocationRate() &&
old_generation_size_configured_) {
DampenOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
}
{
GCCallbacksScope scope(this);
if (scope.CheckReenter()) {
AllowHeapAllocation allow_allocation;
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_EPILOGUE);
VMState<EXTERNAL> state(isolate_);
HandleScope handle_scope(isolate_);
CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
}
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
VerifyStringTable(this);
}
#endif
return freed_global_handles > 0;
}
void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
RuntimeCallTimerScope runtime_timer(isolate(),
&RuntimeCallStats::GCPrologueCallback);
for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
if (gc_type & gc_prologue_callbacks_[i].gc_type) {
if (!gc_prologue_callbacks_[i].pass_isolate) {
v8::GCCallback callback = reinterpret_cast<v8::GCCallback>(
gc_prologue_callbacks_[i].callback);
callback(gc_type, flags);
} else {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
}
}
}
}
void Heap::CallGCEpilogueCallbacks(GCType gc_type,
GCCallbackFlags gc_callback_flags) {
RuntimeCallTimerScope runtime_timer(isolate(),
&RuntimeCallStats::GCEpilogueCallback);
for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
if (!gc_epilogue_callbacks_[i].pass_isolate) {
v8::GCCallback callback = reinterpret_cast<v8::GCCallback>(
gc_epilogue_callbacks_[i].callback);
callback(gc_type, gc_callback_flags);
} else {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
gc_epilogue_callbacks_[i].callback(isolate, gc_type, gc_callback_flags);
}
}
}
}
void Heap::MarkCompact() {
PauseAllocationObserversScope pause_observers(this);
SetGCState(MARK_COMPACT);
LOG(isolate_, ResourceEvent("markcompact", "begin"));
uint64_t size_of_objects_before_gc = SizeOfObjects();
mark_compact_collector()->Prepare();
ms_count_++;
MarkCompactPrologue();
mark_compact_collector()->CollectGarbage();
LOG(isolate_, ResourceEvent("markcompact", "end"));
MarkCompactEpilogue();
if (FLAG_allocation_site_pretenuring) {
EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
}
}
void Heap::MinorMarkCompact() {
DCHECK(FLAG_minor_mc);
SetGCState(MINOR_MARK_COMPACT);
LOG(isolate_, ResourceEvent("MinorMarkCompact", "begin"));
TRACE_GC(tracer(), GCTracer::Scope::MINOR_MC);
AlwaysAllocateScope always_allocate(isolate());
PauseAllocationObserversScope pause_observers(this);
IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
incremental_marking());
minor_mark_compact_collector()->CollectGarbage();
LOG(isolate_, ResourceEvent("MinorMarkCompact", "end"));
SetGCState(NOT_IN_GC);
}
void Heap::MarkCompactEpilogue() {
TRACE_GC(tracer(), GCTracer::Scope::MC_EPILOGUE);
SetGCState(NOT_IN_GC);
isolate_->counters()->objs_since_last_full()->Set(0);
incremental_marking()->Epilogue();
PreprocessStackTraces();
DCHECK(incremental_marking()->IsStopped());
mark_compact_collector()->marking_deque()->StopUsing();
}
void Heap::MarkCompactPrologue() {
TRACE_GC(tracer(), GCTracer::Scope::MC_PROLOGUE);
isolate_->context_slot_cache()->Clear();
isolate_->descriptor_lookup_cache()->Clear();
RegExpResultsCache::Clear(string_split_cache());
RegExpResultsCache::Clear(regexp_multiple_cache());
isolate_->compilation_cache()->MarkCompactPrologue();
CompletelyClearInstanceofCache();
FlushNumberStringCache();
ClearNormalizedMapCaches();
}
void Heap::CheckNewSpaceExpansionCriteria() {
if (FLAG_experimental_new_space_growth_heuristic) {
if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
survived_last_scavenge_ * 100 / new_space_->TotalCapacity() >= 10) {
// Grow the size of new space if there is room to grow, and more than 10%
// have survived the last scavenge.
new_space_->Grow();
survived_since_last_expansion_ = 0;
}
} else if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
survived_since_last_expansion_ > new_space_->TotalCapacity()) {
// Grow the size of new space if there is room to grow, and enough data
// has survived scavenge since the last expansion.
new_space_->Grow();
survived_since_last_expansion_ = 0;
}
}
static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
return heap->InNewSpace(*p) &&
!HeapObject::cast(*p)->map_word().IsForwardingAddress();
}
void PromotionQueue::Initialize() {
// The last to-space page may be used for the promotion queue. On
// promotion conflict, we use the emergency stack.
DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
0);
front_ = rear_ =
reinterpret_cast<struct Entry*>(heap_->new_space()->ToSpaceEnd());
limit_ = reinterpret_cast<struct Entry*>(
Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_))
->area_start());
emergency_stack_ = NULL;
}
void PromotionQueue::Destroy() {
DCHECK(is_empty());
delete emergency_stack_;
emergency_stack_ = NULL;
}
void PromotionQueue::RelocateQueueHead() {
DCHECK(emergency_stack_ == NULL);
Page* p = Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
struct Entry* head_start = rear_;
struct Entry* head_end =
Min(front_, reinterpret_cast<struct Entry*>(p->area_end()));
int entries_count =
static_cast<int>(head_end - head_start) / sizeof(struct Entry);
emergency_stack_ = new List<Entry>(2 * entries_count);
while (head_start != head_end) {
struct Entry* entry = head_start++;
// New space allocation in SemiSpaceCopyObject marked the region
// overlapping with the promotion queue as uninitialized.
MSAN_MEMORY_IS_INITIALIZED(entry, sizeof(struct Entry));
emergency_stack_->Add(*entry);
}
rear_ = head_end;
}
class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
public:
explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) {}
virtual Object* RetainAs(Object* object) {
if (!heap_->InFromSpace(object)) {
return object;
}
MapWord map_word = HeapObject::cast(object)->map_word();
if (map_word.IsForwardingAddress()) {
return map_word.ToForwardingAddress();
}
return NULL;
}
private:
Heap* heap_;
};
void Heap::EvacuateYoungGeneration() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_EVACUATE);
base::LockGuard<base::Mutex> guard(relocation_mutex());
if (!FLAG_concurrent_marking) {
DCHECK(fast_promotion_mode_);
DCHECK(CanExpandOldGeneration(new_space()->Size()));
}
mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
SetGCState(SCAVENGE);
LOG(isolate_, ResourceEvent("scavenge", "begin"));
// Move pages from new->old generation.
PageRange range(new_space()->bottom(), new_space()->top());
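// The iterator is advanced before the page is unlinked: p->Unlink()
// removes the page from the space's page list, so the iterator must
// already point past it; prev_page() then yields the page that was just
// stepped over.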
for (auto it = range.begin(); it != range.end();) {
Page* p = (*++it)->prev_page();
p->Unlink();
Page::ConvertNewToOld(p);
if (incremental_marking()->IsMarking())
mark_compact_collector()->RecordLiveSlotsOnPage(p);
}
// Reset new space.
if (!new_space()->Rebalance()) {
FatalProcessOutOfMemory("NewSpace::Rebalance");
}
new_space()->ResetAllocationInfo();
new_space()->set_age_mark(new_space()->top());
// Fix up special trackers.
external_string_table_.PromoteAllNewSpaceStrings();
// GlobalHandles are updated in PostGarbageCollectionProcessing.
IncrementYoungSurvivorsCounter(new_space()->Size());
IncrementPromotedObjectsSize(new_space()->Size());
IncrementSemiSpaceCopiedObjectSize(0);
LOG(isolate_, ResourceEvent("scavenge", "end"));
SetGCState(NOT_IN_GC);
}
void Heap::Scavenge() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
base::LockGuard<base::Mutex> guard(relocation_mutex());
// There are soft limits in the allocation code, designed to trigger a
// mark-sweep collection by failing allocations. There is no sense in
// trying to trigger one during scavenge: allocations during scavenge
// should always succeed.
AlwaysAllocateScope scope(isolate());
// Bump-pointer allocations done during scavenge are not real allocations.
// Pause the inline allocation steps.
PauseAllocationObserversScope pause_observers(this);
IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
incremental_marking());
mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
SetGCState(SCAVENGE);
// Implements Cheney's copying algorithm.
LOG(isolate_, ResourceEvent("scavenge", "begin"));
// Used for updating survived_since_last_expansion_ at function end.
size_t survived_watermark = PromotedSpaceSizeOfObjects();
scavenge_collector_->SelectScavengingVisitorsTable();
// Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
new_space_->Flip();
new_space_->ResetAllocationInfo();
// We need to sweep newly copied objects which can be either in the
// to space or promoted to the old generation. For to-space
// objects, we treat the bottom of the to space as a queue. Newly
// copied and unswept objects lie between a 'front' mark and the
// allocation pointer.
//
// Promoted objects can go into various old-generation spaces, and
// can be allocated internally in the spaces (from the free list).
// We treat the top of the to space as a queue of addresses of
// promoted objects. The addresses of newly promoted and unswept
// objects lie between a 'front' mark and a 'rear' mark that is
// updated as a side effect of promoting an object.
//
// There is guaranteed to be enough room at the top of the to space
// for the addresses of promoted objects: every object promoted
// frees up its size in bytes from the top of the new space, and
// objects are at least one pointer in size.
Address new_space_front = new_space_->ToSpaceStart();
promotion_queue_.Initialize();
RootScavengeVisitor root_scavenge_visitor(this);
isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
&JSObject::IsUnmodifiedApiObject);
{
// Copy roots.
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_ROOTS);
IterateRoots(&root_scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
}
{
// Copy objects reachable from the old generation.
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_OLD_TO_NEW_POINTERS);
RememberedSet<OLD_TO_NEW>::Iterate(
this, SYNCHRONIZED, [this](Address addr) {
return Scavenger::CheckAndScavengeObject(this, addr);
});
RememberedSet<OLD_TO_NEW>::IterateTyped(
this, SYNCHRONIZED,
[this](SlotType type, Address host_addr, Address addr) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
isolate(), type, addr, [this](Object** addr) {
// We expect that objects referenced by code are long living.
// If we do not force promotion, then we need to clear
// old_to_new slots in dead code objects after mark-compact.
return Scavenger::CheckAndScavengeObject(
this, reinterpret_cast<Address>(addr));
});
});
}
{
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_WEAK);
IterateEncounteredWeakCollections(&root_scavenge_visitor);
}
{
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SEMISPACE);
new_space_front = DoScavenge(new_space_front);
}
isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
&IsUnscavengedHeapObject);
isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
&root_scavenge_visitor);
new_space_front = DoScavenge(new_space_front);
UpdateNewSpaceReferencesInExternalStringTable(
&UpdateNewSpaceReferenceInExternalStringTableEntry);
promotion_queue_.Destroy();
incremental_marking()->UpdateMarkingDequeAfterScavenge();
ScavengeWeakObjectRetainer weak_object_retainer(this);
ProcessYoungWeakReferences(&weak_object_retainer);
DCHECK(new_space_front == new_space_->top());
// Set age mark.
new_space_->set_age_mark(new_space_->top());
ArrayBufferTracker::FreeDeadInNewSpace(this);
// Update how much has survived scavenge.
DCHECK_GE(PromotedSpaceSizeOfObjects(), survived_watermark);
IncrementYoungSurvivorsCounter(PromotedSpaceSizeOfObjects() +
new_space_->Size() - survived_watermark);
// The scavenger may find new wrappers while iterating objects promoted
// onto a black page.
local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
LOG(isolate_, ResourceEvent("scavenge", "end"));
SetGCState(NOT_IN_GC);
}
void Heap::ComputeFastPromotionMode(double survival_rate) {
const size_t survived_in_new_space =
survived_last_scavenge_ * 100 / new_space_->Capacity();
fast_promotion_mode_ =
!FLAG_optimize_for_size && FLAG_fast_promotion_new_space &&
!ShouldReduceMemory() && new_space_->IsAtMaximumCapacity() &&
survived_in_new_space >= kMinPromotedPercentForFastPromotionMode;
if (FLAG_trace_gc_verbose) {
PrintIsolate(
isolate(), "Fast promotion mode: %s survival rate: %" PRIuS "%%\n",
fast_promotion_mode_ ? "true" : "false", survived_in_new_space);
}
}
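// Worked example (illustrative; the threshold constant is
// kMinPromotedPercentForFastPromotionMode): with new_space_->Capacity()
// == 16 * MB and survived_last_scavenge_ == 12 * MB,
// survived_in_new_space == 12 * 100 / 16 == 75, so fast promotion is
// enabled only if 75 meets the threshold and the flag, memory-reduction
// and capacity conditions above also hold.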
String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
Object** p) {
MapWord first_word = HeapObject::cast(*p)->map_word();
if (!first_word.IsForwardingAddress()) {
// Unreachable external string can be finalized.
String* string = String::cast(*p);
if (!string->IsExternalString()) {
// Original external string has been internalized.
DCHECK(string->IsThinString());
return NULL;
}
heap->FinalizeExternalString(string);
return NULL;
}
// String is still reachable.
String* string = String::cast(first_word.ToForwardingAddress());
if (string->IsThinString()) string = ThinString::cast(string)->actual();
// Internalization can replace external strings with non-external strings.
return string->IsExternalString() ? string : nullptr;
}
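// Compacts the new-space part of the external string table in place:
// surviving new-space strings keep their entries, promoted strings move to
// the old-space list, and dead entries are dropped.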
void Heap::UpdateNewSpaceReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func) {
if (external_string_table_.new_space_strings_.is_empty()) return;
Object** start = &external_string_table_.new_space_strings_[0];
Object** end = start + external_string_table_.new_space_strings_.length();
Object** last = start;
for (Object** p = start; p < end; ++p) {
String* target = updater_func(this, p);
if (target == nullptr) continue;
DCHECK(target->IsExternalString());
if (InNewSpace(target)) {
// String is still in new space. Update the table entry.
*last = target;
++last;
} else {
// String got promoted. Move it to the old string list.
external_string_table_.AddOldString(target);
}
}
DCHECK(last <= end);
external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
}
void Heap::UpdateReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func) {
// Update old space string references.
if (external_string_table_.old_space_strings_.length() > 0) {
Object** start = &external_string_table_.old_space_strings_[0];
Object** end = start + external_string_table_.old_space_strings_.length();
for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
}
UpdateNewSpaceReferencesInExternalStringTable(updater_func);
}
void Heap::ProcessAllWeakReferences(WeakObjectRetainer* retainer) {
ProcessNativeContexts(retainer);
ProcessAllocationSites(retainer);
}
void Heap::ProcessYoungWeakReferences(WeakObjectRetainer* retainer) {
ProcessNativeContexts(retainer);
}
void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
// Update the head of the list of contexts.
set_native_contexts_list(head);
}
void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
Object* allocation_site_obj =
VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
set_allocation_sites_list(allocation_site_obj);
}
void Heap::ProcessWeakListRoots(WeakObjectRetainer* retainer) {
set_native_contexts_list(retainer->RetainAs(native_contexts_list()));
set_allocation_sites_list(retainer->RetainAs(allocation_sites_list()));
}
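// Walks the weak list of allocation sites and resets the pretenuring
// decision of every site whose current mode matches |flag|. Code dependent
// on such sites is deoptimized lazily via a stack guard request.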
void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
DisallowHeapAllocation no_allocation_scope;
Object* cur = allocation_sites_list();
bool marked = false;
while (cur->IsAllocationSite()) {
AllocationSite* casted = AllocationSite::cast(cur);
if (casted->GetPretenureMode() == flag) {
casted->ResetPretenureDecision();
casted->set_deopt_dependent_code(true);
marked = true;
RemoveAllocationSitePretenuringFeedback(casted);
}
cur = casted->weak_next();
}
if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
}
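// Compares the old-generation size before and after a mark-compact. A low
// survival rate suggests that objects were pretenured incorrectly, so all
// pretenuring decisions are reset and re-evaluated.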
void Heap::EvaluateOldSpaceLocalPretenuring(
uint64_t size_of_objects_before_gc) {
uint64_t size_of_objects_after_gc = SizeOfObjects();
double old_generation_survival_rate =
(static_cast<double>(size_of_objects_after_gc) * 100) /
static_cast<double>(size_of_objects_before_gc);
if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
// Too many objects died in the old generation, pretenuring of wrong
// allocation sites may be the cause for that. We have to deopt all
// dependent code registered in the allocation sites to re-evaluate
// our pretenuring decisions.
ResetAllAllocationSitesDependentCode(TENURED);
if (FLAG_trace_pretenuring) {
PrintF(
"Deopt all allocation sites dependent code due to low survival "
"rate in the old generation %f\n",
old_generation_survival_rate);
}
}
}
void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
DisallowHeapAllocation no_allocation;
// All external strings are listed in the external string table.
class ExternalStringTableVisitorAdapter : public RootVisitor {
public:
explicit ExternalStringTableVisitorAdapter(
v8::ExternalResourceVisitor* visitor)
: visitor_(visitor) {}
virtual void VisitRootPointers(Root root, Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
DCHECK((*p)->IsExternalString());
visitor_->VisitExternalString(
Utils::ToLocal(Handle<String>(String::cast(*p))));
}
}
private:
v8::ExternalResourceVisitor* visitor_;
} external_string_table_visitor(visitor);
external_string_table_.IterateAll(&external_string_table_visitor);
}
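// Drains the scavenger's work lists: objects copied to new space between
// |new_space_front| and the allocation top are iterated for outgoing
// pointers, and promoted objects are removed from the promotion queue and
// rescanned. Returns the new front of the unprocessed-object queue.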
Address Heap::DoScavenge(Address new_space_front) {
do {
SemiSpace::AssertValidRange(new_space_front, new_space_->top());
// The addresses new_space_front and new_space_->top() define a
// queue of unprocessed copied objects. Process them until the
// queue is empty.
while (new_space_front != new_space_->top()) {
if (!Page::IsAlignedToPageSize(new_space_front)) {
HeapObject* object = HeapObject::FromAddress(new_space_front);
new_space_front +=
StaticScavengeVisitor::IterateBody(object->map(), object);
} else {
new_space_front = Page::FromAllocationAreaAddress(new_space_front)
->next_page()
->area_start();
}
}
// Promote and process all the to-be-promoted objects.
{
while (!promotion_queue()->is_empty()) {
HeapObject* target;
int32_t size;
promotion_queue()->remove(&target, &size);
// A promoted object might already have been partially visited
// during old-space pointer iteration. Thus we search specifically
// for pointers into the from-semispace instead of looking for
// pointers into new space.
DCHECK(!target->IsMap());
IterateAndScavengePromotedObject(target, static_cast<int>(size));
}
}
// Take another round if there are now unprocessed objects in new space
// (there are currently no more unprocessed promoted objects).
} while (new_space_front != new_space_->top());
return new_space_front;
}
STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
0); // NOLINT
STATIC_ASSERT((FixedTypedArrayBase::kDataOffset & kDoubleAlignmentMask) ==
0); // NOLINT
#ifdef V8_HOST_ARCH_32_BIT
STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) !=
0); // NOLINT
#endif
int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
switch (alignment) {
case kWordAligned:
return 0;
case kDoubleAligned:
case kDoubleUnaligned:
return kDoubleSize - kPointerSize;
default:
UNREACHABLE();
}
return 0;
}
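// Returns the number of filler bytes needed at |address| to establish
// |alignment|, or zero if the address is already suitably (mis)aligned.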
int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
intptr_t offset = OffsetFrom(address);
if (alignment == kDoubleAligned && (offset & kDoubleAlignmentMask) != 0)
return kPointerSize;
if (alignment == kDoubleUnaligned && (offset & kDoubleAlignmentMask) == 0)
return kDoubleSize - kPointerSize;  // Fill so the object starts unaligned.
return 0;
}
HeapObject* Heap::PrecedeWithFiller(HeapObject* object, int filler_size) {
CreateFillerObjectAt(object->address(), filler_size, ClearRecordedSlots::kNo);
return HeapObject::FromAddress(object->address() + filler_size);
}
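// Carves a correctly aligned object of |object_size| out of a raw
// |allocation_size| allocation by placing filler objects before and/or
// after it, keeping the heap iterable.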
HeapObject* Heap::AlignWithFiller(HeapObject* object, int object_size,
int allocation_size,
AllocationAlignment alignment) {
int filler_size = allocation_size - object_size;
DCHECK(filler_size > 0);
int pre_filler = GetFillToAlign(object->address(), alignment);
if (pre_filler) {
object = PrecedeWithFiller(object, pre_filler);
filler_size -= pre_filler;
}
if (filler_size)
CreateFillerObjectAt(object->address() + object_size, filler_size,
ClearRecordedSlots::kNo);
return object;
}
HeapObject* Heap::DoubleAlignForDeserialization(HeapObject* object, int size) {
return AlignWithFiller(object, size - kPointerSize, size, kDoubleAligned);
}
void Heap::RegisterNewArrayBuffer(JSArrayBuffer* buffer) {
ArrayBufferTracker::RegisterNew(this, buffer);
}
void Heap::UnregisterArrayBuffer(JSArrayBuffer* buffer) {
ArrayBufferTracker::Unregister(this, buffer);
}
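// Scales the initial old-generation allocation limit by the average
// survival ratio once the tracer has recorded survival events, bounded
// from below by the minimum allocation limit growing step.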
void Heap::ConfigureInitialOldGenerationSize() {
if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
old_generation_allocation_limit_ =
Max(MinimumAllocationLimitGrowingStep(),
static_cast<size_t>(
static_cast<double>(old_generation_allocation_limit_) *
(tracer()->AverageSurvivalRatio() / 100)));
}
}
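// Allocates a map with only the fields needed during bootstrapping
// initialized; the remaining fields are filled in later by
// FinalizePartialMap() once the objects they refer to exist.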
AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
int instance_size) {
Object* result = nullptr;
AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
if (!allocation.To(&result)) return allocation;
// Map::cast cannot be used due to uninitialized map field.
reinterpret_cast<Map*>(result)->set_map_after_allocation(
reinterpret_cast<Map*>(root(kMetaMapRootIndex)), SKIP_WRITE_BARRIER);
reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
// Initialize to only containing tagged fields.
reinterpret_cast<Map*>(result)->set_visitor_id(
StaticVisitorBase::GetVisitorId(instance_type, instance_size, false));
if (FLAG_unbox_double_fields) {
reinterpret_cast<Map*>(result)
->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
}
reinterpret_cast<Map*>(result)->clear_unused();
reinterpret_cast<Map*>(result)
->set_inobject_properties_or_constructor_function_index(0);
reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
reinterpret_cast<Map*>(result)->set_bit_field(0);
reinterpret_cast<Map*>(result)->set_bit_field2(0);
int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
Map::OwnsDescriptors::encode(true) |
Map::ConstructionCounter::encode(Map::kNoSlackTracking);
reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
reinterpret_cast<Map*>(result)->set_weak_cell_cache(Smi::kZero);
return result;
}
AllocationResult Heap::AllocateMap(InstanceType instance_type,
int instance_size,
ElementsKind elements_kind) {
HeapObject* result = nullptr;
AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
if (!allocation.To(&result)) return allocation;
isolate()->counters()->maps_created()->Increment();
result->set_map_after_allocation(meta_map(), SKIP_WRITE_BARRIER);
Map* map = Map::cast(result);
map->set_instance_type(instance_type);
map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
map->set_constructor_or_backpointer(null_value(), SKIP_WRITE_BARRIER);
map->set_instance_size(instance_size);
map->clear_unused();
map->set_inobject_properties_or_constructor_function_index(0);
map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
SKIP_WRITE_BARRIER);
map->set_weak_cell_cache(Smi::kZero);
map->set_raw_transitions(Smi::kZero);
map->set_unused_property_fields(0);
map->set_instance_descriptors(empty_descriptor_array());
if (FLAG_unbox_double_fields) {
map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
}
// Must be called only after |instance_type|, |instance_size| and
// |layout_descriptor| are set.
map->set_visitor_id(Heap::GetStaticVisitorIdForMap(map));
map->set_bit_field(0);
map->set_bit_field2(1 << Map::kIsExtensible);
int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
Map::OwnsDescriptors::encode(true) |
Map::ConstructionCounter::encode(Map::kNoSlackTracking);
map->set_bit_field3(bit_field3);
map->set_elements_kind(elements_kind);
map->set_new_target_is_base(true);
return map;
}
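// Allocates a block of |size| bytes in |space| and immediately converts it
// into a filler object, keeping the heap iterable.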
AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
AllocationSpace space) {
HeapObject* obj = nullptr;
{
AllocationAlignment align = double_align ? kDoubleAligned : kWordAligned;
AllocationResult allocation = AllocateRaw(size, space, align);
if (!allocation.To(&obj)) return allocation;
}
#ifdef DEBUG
MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
DCHECK(chunk->owner()->identity() == space);
#endif
CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
return obj;
}
const Heap::StringTypeTable Heap::string_type_table[] = {
#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
{ type, size, k##camel_name##MapRootIndex } \
,
STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
#undef STRING_TYPE_ELEMENT
};
const Heap::ConstantStringTable Heap::constant_string_table[] = {
{"", kempty_stringRootIndex},
#define CONSTANT_STRING_ELEMENT(name, contents) \
{ contents, k##name##RootIndex } \
,
INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
#undef CONSTANT_STRING_ELEMENT
};
const Heap::StructTable Heap::struct_table[] = {
#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
{ NAME##_TYPE, Name::kSize, k##Name##MapRootIndex } \
,
STRUCT_LIST(STRUCT_TABLE_ELEMENT)
#undef STRUCT_TABLE_ELEMENT
};
namespace {
void FinalizePartialMap(Heap* heap, Map* map) {
map->set_code_cache(heap->empty_fixed_array());
map->set_dependent_code(DependentCode::cast(heap->empty_fixed_array()));
map->set_raw_transitions(Smi::kZero);
map->set_instance_descriptors(heap->empty_descriptor_array());
if (FLAG_unbox_double_fields) {
map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
}
map->set_prototype(heap->null_value());
map->set_constructor_or_backpointer(heap->null_value());
}
} // namespace
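// Bootstraps the map circularity: the meta map (which is its own map) and a
// few partial maps are allocated first, then the empty fixed array and the
// oddballs they depend on, after which the partial maps are finalized and
// the remaining initial maps can be allocated normally.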
bool Heap::CreateInitialMaps() {
HeapObject* obj = nullptr;
{
AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
if (!allocation.To(&obj)) return false;
}
// Map::cast cannot be used due to uninitialized map field.
Map* new_meta_map = reinterpret_cast<Map*>(obj);
set_meta_map(new_meta_map);
new_meta_map->set_map_after_allocation(new_meta_map);
{ // Partial map allocation
#define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name) \
{ \
Map* map; \
if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
set_##field_name##_map(map); \
}
ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
fixed_array_map()->set_elements_kind(FAST_HOLEY_ELEMENTS);
ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
#undef ALLOCATE_PARTIAL_MAP
}
// Allocate the empty array.
{
AllocationResult allocation = AllocateEmptyFixedArray();
if (!allocation.To(&obj)) return false;
}
set_empty_fixed_array(FixedArray::cast(obj));
{
AllocationResult allocation = Allocate(null_map(), OLD_SPACE);
if (!allocation.To(&obj)) return false;
}
set_null_value(Oddball::cast(obj));
Oddball::cast(obj)->set_kind(Oddball::kNull);
{
AllocationResult allocation = Allocate(undefined_map(), OLD_SPACE);
if (!allocation.To(&obj)) return false;
}
set_undefined_value(Oddball::cast(obj));
Oddball::cast(obj)->set_kind(Oddball::kUndefined);
DCHECK(!InNewSpace(undefined_value()));
{
AllocationResult allocation = Allocate(the_hole_map(), OLD_SPACE);
if (!allocation.To(&obj)) return false;
}
set_the_hole_value(Oddball::cast(obj));
Oddball::cast(obj)->set_kind(Oddball::kTheHole);
// Set preliminary exception sentinel value before actually initializing it.
set_exception(null_value());
// Allocate the empty descriptor array.
{
AllocationResult allocation = AllocateEmptyFixedArray();
if (!allocation.To(&obj)) return false;
}
set_empty_descriptor_array(DescriptorArray::cast(obj));
// Fix the instance_descriptors for the existing maps.
FinalizePartialMap(this, meta_map());
FinalizePartialMap(this, fixed_array_map());
FinalizePartialMap(this, undefined_map());
undefined_map()->set_is_undetectable();
FinalizePartialMap(this, null_map());
null_map()->set_is_undetectable();
FinalizePartialMap(this, the_hole_map());
{ // Map allocation
#define ALLOCATE_MAP(instance_type, size, field_name) \
{ \
Map* map; \
if (!AllocateMap((instance_type), size).To(&map)) return false; \
set_##field_name##_map(map); \
}
#define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
#define ALLOCATE_PRIMITIVE_MAP(instance_type, size, field_name, \
constructor_function_index) \
{ \
ALLOCATE_MAP((instance_type), (size), field_name); \
field_name##_map()->SetConstructorFunctionIndex( \
(constructor_function_index)); \
}
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
fixed_cow_array_map()->set_elements_kind(FAST_HOLEY_ELEMENTS);
DCHECK_NE(fixed_array_map(), fixed_cow_array_map());
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_info)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, feedback_vector)
ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number,
Context::NUMBER_FUNCTION_INDEX)
ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
mutable_heap_number)
ALLOCATE_PRIMITIVE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol,
Context::SYMBOL_FUNCTION_INDEX)
ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
ALLOCATE_PRIMITIVE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean,
Context::BOOLEAN_FUNCTION_INDEX);
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, optimized_out);
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, stale_register);
ALLOCATE_MAP(JS_PROMISE_CAPABILITY_TYPE, JSPromiseCapability::kSize,
js_promise_capability);
for (unsigned i = 0; i < arraysize(string_type_table); i++) {
const StringTypeTable& entry = string_type_table[i];
{
AllocationResult allocation = AllocateMap(entry.type, entry.size);
if (!allocation.To(&obj)) return false;
}
Map* map = Map::cast(obj);
map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
// Mark cons string maps as unstable, because their objects can change
// maps during GC.
if (StringShape(entry.type).IsCons()) map->mark_unstable();
roots_[entry.index] = map;
}
{ // Create a separate external one byte string map for native sources.
AllocationResult allocation =
AllocateMap(SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE,
ExternalOneByteString::kShortSize);
if (!allocation.To(&obj)) return false;
Map* map = Map::cast(obj);
map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
set_native_source_string_map(map);
}
ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
fixed_double_array_map()->set_elements_kind(FAST_HOLEY_DOUBLE_ELEMENTS);
ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
ALLOCATE_VARSIZE_MAP(BYTECODE_ARRAY_TYPE, bytecode_array)
ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
ALLOCATE_VARSIZE_MAP(SMALL_ORDERED_HASH_SET_TYPE, small_ordered_hash_set)
#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
#undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
ALLOCATE_MAP(WEAK_CELL_TYPE, WeakCell::kSize, weak_cell)
ALLOCATE_MAP(CELL_TYPE, Cell::kSize, no_closures_cell)
ALLOCATE_MAP(CELL_TYPE, Cell::kSize, one_closure_cell)
ALLOCATE_MAP(CELL_TYPE, Cell::kSize, many_closures_cell)
ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
ALLOCATE_VARSIZE_MAP(TRANSITION_ARRAY_TYPE, transition_array)
for (unsigned i = 0; i < arraysize(struct_table); i++) {
const StructTable& entry = struct_table[i];
Map* map;
if (!AllocateMap(entry.type, entry.size).To(&map)) return false;
roots_[entry.index] = map;
}
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, ordered_hash_table)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, unseeded_number_dictionary)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, debug_evaluate_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, eval_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context_table)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
native_context_map()->set_dictionary_map(true);
native_context_map()->set_visitor_id(kVisitNativeContext);
ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
shared_function_info)
ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
external_map()->set_is_extensible(false);
#undef ALLOCATE_PRIMITIVE_MAP
#undef ALLOCATE_VARSIZE_MAP
#undef ALLOCATE_MAP
}
{
AllocationResult allocation = AllocateEmptyScopeInfo();
if (!allocation.To(&obj)) return false;
}
set_empty_scope_info(ScopeInfo::cast(obj));
{
AllocationResult allocation = Allocate(boolean_map(), OLD_SPACE);
if (!allocation.To(&obj)) return false;
}
set_true_value(Oddball::cast(obj));
Oddball::cast(obj)->set_kind(Oddball::kTrue);
{
AllocationResult allocation = Allocate(boolean_map(), OLD_SPACE);
if (!allocation.To(&obj)) return false;
}
set_false_value(Oddball::cast(obj));
Oddball::cast(obj)->set_kind(Oddball::kFalse);
{ // Empty arrays
{
ByteArray* byte_array;
if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
set_empty_byte_array(byte_array);
}
#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
{ \
FixedTypedArrayBase* obj; \
if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
return false; \
set_empty_fixed_##type##_array(obj); \
}
TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
#undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
}
DCHECK(!InNewSpace(empty_fixed_array()));
return true;
}
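// Allocates a (possibly mutable) HeapNumber. The raw allocation uses
// kDoubleUnaligned so that on 32-bit platforms the value field, rather
// than the object start, ends up double-aligned (see the STATIC_ASSERT on
// HeapNumber::kValueOffset above).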
AllocationResult Heap::AllocateHeapNumber(MutableMode mode,
PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate heap numbers in paged
// spaces.
int size = HeapNumber::kSize;
STATIC_ASSERT(HeapNumber::kSize <= kMaxRegularHeapObjectSize);
AllocationSpace space = SelectSpace(pretenure);
HeapObject* result = nullptr;
{
AllocationResult allocation = AllocateRaw(size, space, kDoubleUnaligned);
if (!allocation.To(&result)) return allocation;
}
Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map();
HeapObject::cast(result)->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
return result;
}
AllocationResult Heap::AllocateCell(Object* value) {
int size = Cell::kSize;
STATIC_ASSERT(Cell::kSize <= kMaxRegularHeapObjectSize);
HeapObject* result = nullptr;
{
AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
result->set_map_after_allocation(cell_map(), SKIP_WRITE_BARRIER);
Cell::cast(result)->set_value(value);
return result;
}
AllocationResult Heap::AllocatePropertyCell() {
int size = PropertyCell::kSize;
STATIC_ASSERT(PropertyCell::kSize <= kMaxRegularHeapObjectSize);
HeapObject* result = nullptr;
AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
result->set_map_after_allocation(global_property_cell_map(),
SKIP_WRITE_BARRIER);
PropertyCell* cell = PropertyCell::cast(result);
cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
SKIP_WRITE_BARRIER);
cell->set_property_details(PropertyDetails(Smi::kZero));
cell->set_value(the_hole_value());
return result;
}
AllocationResult Heap::AllocateWeakCell(HeapObject* value) {
int size = WeakCell::kSize;
STATIC_ASSERT(WeakCell::kSize <= kMaxRegularHeapObjectSize);
HeapObject* result = nullptr;
{
AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
result->set_map_after_allocation(weak_cell_map(), SKIP_WRITE_BARRIER);
WeakCell::cast(result)->initialize(value);
WeakCell::cast(result)->clear_next(the_hole_value());
return result;
}
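// Allocates a tenured TransitionArray with |capacity| slots, initially
// filled with undefined values.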
AllocationResult Heap::AllocateTransitionArray(int capacity) {
DCHECK(capacity > 0);
HeapObject* raw_array = nullptr;
{
AllocationResult allocation = AllocateRawFixedArray(capacity, TENURED);
if (!allocation.To(&raw_array)) return allocation;
}
raw_array->set_map_after_allocation(transition_array_map(),
SKIP_WRITE_BARRIER);
TransitionArray* array = TransitionArray::cast(raw_array);
array->set_length(capacity);
MemsetPointer(array->data_start(), undefined_value(), capacity);
// Transition arrays are tenured. When black allocation is on we have to
// add the transition array to the list of encountered_transition_arrays.
if (incremental_marking()->black_allocation()) {
array->set_next_link(encountered_transition_arrays(),
UPDATE_WEAK_WRITE_BARRIER);
set_encountered_transition_arrays(array);
} else {
array->set_next_link(undefined_value(), SKIP_WRITE_BARRIER);
}
return array;
}
bool Heap::CreateApiObjects() {
HandleScope scope(isolate());
set_message_listeners(*TemplateList::New(isolate(), 2));
HeapObject* obj = nullptr;
{
AllocationResult allocation = AllocateStruct(INTERCEPTOR_INFO_TYPE);
if (!allocation.To(&obj)) return false;
}
InterceptorInfo* info = InterceptorInfo::cast(obj);
info->set_flags(0);
set_noop_interceptor_info(info);
return true;
}
void Heap::CreateJSEntryStub() {
JSEntryStub stub(isolate(), StackFrame::ENTRY);
set_js_entry_code(*stub.GetCode());
}
void Heap::CreateJSConstructEntryStub() {
JSEntryStub stub(isolate(), StackFrame::ENTRY_CONSTRUCT);
set_js_construct_entry_code(*stub.GetCode());
}
void Heap::CreateFixedStubs() {
// Here we create roots for fixed stubs. They are needed at GC
// for cooking and uncooking (check out frames.cc).
// This eliminates the need for a dictionary lookup in the stub
// cache for these stubs.
HandleScope scope(isolate());
// Canonicalize handles, so that we can share constant pool entries pointing
// to code targets without dereferencing their handles.
CanonicalHandleScope canonical(isolate());
// Create stubs that should be there ahead of time, so we don't unexpectedly
// have to create them if we need them during the creation of another stub.
// Stub creation mixes raw pointers and handles in an unsafe manner, so we
// cannot create stubs while we are already creating other stubs.
CodeStub::GenerateStubsAheadOfTime(isolate());
// MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
// CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub
// is created.
// gcc-4.4 has a problem generating correct code for the following snippet:
// { JSEntryStub stub;
// js_entry_code_ = *stub.GetCode();
// }
// { JSConstructEntryStub stub;
// js_construct_entry_code_ = *stub.GetCode();
// }
// To work around the problem, make separate functions without inlining.
Heap::CreateJSEntryStub();
Heap::CreateJSConstructEntryStub();
}
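// Creates the non-map root objects. This runs after CreateInitialMaps(),
// so the factory can already allocate fully initialized objects here.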
void Heap::CreateInitialObjects() {
HandleScope scope(isolate());
Factory* factory = isolate()->factory();
// The -0 value must be set before NewNumber works.
set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED));
DCHECK(std::signbit(minus_zero_value()->Number()) != 0);
set_nan_value(*factory->NewHeapNumber(
std::numeric_limits<double>::quiet_NaN(), IMMUTABLE, TENURED));