| // Copyright 2020 the V8 project authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| |
| #include "src/heap/memory-allocator.h" |
| |
| #include <cinttypes> |
| |
| #include "src/base/address-region.h" |
| #include "src/common/globals.h" |
| #include "src/execution/isolate.h" |
| #include "src/flags/flags.h" |
| #include "src/heap/gc-tracer.h" |
| #include "src/heap/heap-inl.h" |
| #include "src/heap/memory-chunk.h" |
| #include "src/heap/read-only-spaces.h" |
| #include "src/logging/log.h" |
| #include "src/utils/allocation.h" |
| |
| namespace v8 { |
| namespace internal { |
| |
| // ----------------------------------------------------------------------------- |
| // MemoryAllocator |
| // |
| |
| size_t MemoryAllocator::commit_page_size_ = 0; |
| size_t MemoryAllocator::commit_page_size_bits_ = 0; |
| |
| MemoryAllocator::MemoryAllocator(Isolate* isolate, |
| v8::PageAllocator* code_page_allocator, |
| size_t capacity) |
| : isolate_(isolate), |
| data_page_allocator_(isolate->page_allocator()), |
| code_page_allocator_(code_page_allocator), |
| capacity_(RoundUp(capacity, Page::kPageSize)), |
| size_(0), |
| size_executable_(0), |
| lowest_ever_allocated_(static_cast<Address>(-1ll)), |
| highest_ever_allocated_(kNullAddress), |
| unmapper_(isolate->heap(), this) { |
| DCHECK_NOT_NULL(code_page_allocator); |
| } |
| |
| void MemoryAllocator::TearDown() { |
| unmapper()->TearDown(); |
| |
| // Check that spaces were torn down before MemoryAllocator. |
| DCHECK_EQ(size_, 0u); |
| // TODO(gc) this will be true again when we fix FreeMemory. |
| // DCHECK_EQ(0, size_executable_); |
| capacity_ = 0; |
| |
| if (last_chunk_.IsReserved()) { |
| last_chunk_.Free(); |
| } |
| |
| code_page_allocator_ = nullptr; |
| data_page_allocator_ = nullptr; |
| } |
| |
| class MemoryAllocator::Unmapper::UnmapFreeMemoryJob : public JobTask { |
| public: |
| explicit UnmapFreeMemoryJob(Isolate* isolate, Unmapper* unmapper) |
| : unmapper_(unmapper), tracer_(isolate->heap()->tracer()) {} |
| |
| UnmapFreeMemoryJob(const UnmapFreeMemoryJob&) = delete; |
| UnmapFreeMemoryJob& operator=(const UnmapFreeMemoryJob&) = delete; |
| |
| void Run(JobDelegate* delegate) override { |
| if (delegate->IsJoiningThread()) { |
| TRACE_GC(tracer_, GCTracer::Scope::UNMAPPER); |
| RunImpl(delegate); |
| |
| } else { |
| TRACE_GC1(tracer_, GCTracer::Scope::BACKGROUND_UNMAPPER, |
| ThreadKind::kBackground); |
| RunImpl(delegate); |
| } |
| } |
| |
| size_t GetMaxConcurrency(size_t worker_count) const override { |
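    // Aim for roughly one task per kTaskPerChunk committed chunks, in
    // addition to the currently active workers, but never exceed
    // kMaxUnmapperTasks.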
| const size_t kTaskPerChunk = 8; |
| return std::min<size_t>( |
| kMaxUnmapperTasks, |
| worker_count + |
| (unmapper_->NumberOfCommittedChunks() + kTaskPerChunk - 1) / |
| kTaskPerChunk); |
| } |
| |
| private: |
| void RunImpl(JobDelegate* delegate) { |
| unmapper_->PerformFreeMemoryOnQueuedChunks(FreeMode::kUncommitPooled, |
| delegate); |
| if (FLAG_trace_unmapper) { |
| PrintIsolate(unmapper_->heap_->isolate(), "UnmapFreeMemoryTask Done\n"); |
| } |
| } |
| Unmapper* const unmapper_; |
| GCTracer* const tracer_; |
| }; |
| |
| void MemoryAllocator::Unmapper::FreeQueuedChunks() { |
| if (!heap_->IsTearingDown() && FLAG_concurrent_sweeping) { |
| if (job_handle_ && job_handle_->IsValid()) { |
| job_handle_->NotifyConcurrencyIncrease(); |
| } else { |
| job_handle_ = V8::GetCurrentPlatform()->PostJob( |
| TaskPriority::kUserVisible, |
| std::make_unique<UnmapFreeMemoryJob>(heap_->isolate(), this)); |
| if (FLAG_trace_unmapper) { |
| PrintIsolate(heap_->isolate(), "Unmapper::FreeQueuedChunks: new Job\n"); |
| } |
| } |
| } else { |
| PerformFreeMemoryOnQueuedChunks(FreeMode::kUncommitPooled); |
| } |
| } |
| |
| void MemoryAllocator::Unmapper::CancelAndWaitForPendingTasks() { |
| if (job_handle_ && job_handle_->IsValid()) job_handle_->Join(); |
| |
| if (FLAG_trace_unmapper) { |
| PrintIsolate( |
| heap_->isolate(), |
| "Unmapper::CancelAndWaitForPendingTasks: no tasks remaining\n"); |
| } |
| } |
| |
| void MemoryAllocator::Unmapper::PrepareForGC() { |
| // Free non-regular chunks because they cannot be re-used. |
| PerformFreeMemoryOnQueuedNonRegularChunks(); |
| } |
| |
| void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() { |
| CancelAndWaitForPendingTasks(); |
| PerformFreeMemoryOnQueuedChunks(FreeMode::kFreePooled); |
| } |
| |
| void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks( |
| JobDelegate* delegate) { |
| MemoryChunk* chunk = nullptr; |
| while ((chunk = GetMemoryChunkSafe(kNonRegular)) != nullptr) { |
| allocator_->PerformFreeMemory(chunk); |
| if (delegate && delegate->ShouldYield()) return; |
| } |
| } |
| |
| void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks( |
| MemoryAllocator::Unmapper::FreeMode mode, JobDelegate* delegate) { |
| MemoryChunk* chunk = nullptr; |
| if (FLAG_trace_unmapper) { |
| PrintIsolate( |
| heap_->isolate(), |
| "Unmapper::PerformFreeMemoryOnQueuedChunks: %d queued chunks\n", |
| NumberOfChunks()); |
| } |
| // Regular chunks. |
| while ((chunk = GetMemoryChunkSafe(kRegular)) != nullptr) { |
| bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED); |
| allocator_->PerformFreeMemory(chunk); |
| if (pooled) AddMemoryChunkSafe(kPooled, chunk); |
| if (delegate && delegate->ShouldYield()) return; |
| } |
| if (mode == MemoryAllocator::Unmapper::FreeMode::kFreePooled) { |
    // The previous loop uncommitted any pages marked as pooled and added them
    // to the pooled list. For kFreePooled, these pooled pages also need to be
    // freed.
| while ((chunk = GetMemoryChunkSafe(kPooled)) != nullptr) { |
| allocator_->FreePooledChunk(chunk); |
| if (delegate && delegate->ShouldYield()) return; |
| } |
| } |
| PerformFreeMemoryOnQueuedNonRegularChunks(); |
| } |
| |
| void MemoryAllocator::Unmapper::TearDown() { |
| CHECK(!job_handle_ || !job_handle_->IsValid()); |
| PerformFreeMemoryOnQueuedChunks(FreeMode::kFreePooled); |
| for (int i = 0; i < kNumberOfChunkQueues; i++) { |
| DCHECK(chunks_[i].empty()); |
| } |
| } |
| |
| size_t MemoryAllocator::Unmapper::NumberOfCommittedChunks() { |
| base::MutexGuard guard(&mutex_); |
| return chunks_[kRegular].size() + chunks_[kNonRegular].size(); |
| } |
| |
| int MemoryAllocator::Unmapper::NumberOfChunks() { |
| base::MutexGuard guard(&mutex_); |
| size_t result = 0; |
| for (int i = 0; i < kNumberOfChunkQueues; i++) { |
| result += chunks_[i].size(); |
| } |
| return static_cast<int>(result); |
| } |
| |
| size_t MemoryAllocator::Unmapper::CommittedBufferedMemory() { |
| base::MutexGuard guard(&mutex_); |
| |
| size_t sum = 0; |
  // kPooled chunks are already uncommitted. We only have to account for
  // kRegular and kNonRegular chunks.
| for (auto& chunk : chunks_[kRegular]) { |
| sum += chunk->size(); |
| } |
| for (auto& chunk : chunks_[kNonRegular]) { |
| sum += chunk->size(); |
| } |
| return sum; |
| } |
| |
| bool MemoryAllocator::CommitMemory(VirtualMemory* reservation) { |
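  // Make the whole reservation read/write accessible and extend the
  // lowest/highest-ever-allocated bookkeeping accordingly.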
| Address base = reservation->address(); |
| size_t size = reservation->size(); |
| if (!reservation->SetPermissions(base, size, PageAllocator::kReadWrite)) { |
| return false; |
| } |
| UpdateAllocatedSpaceLimits(base, base + size); |
| return true; |
| } |
| |
| bool MemoryAllocator::UncommitMemory(VirtualMemory* reservation) { |
| size_t size = reservation->size(); |
| if (!reservation->SetPermissions(reservation->address(), size, |
| PageAllocator::kNoAccess)) { |
| return false; |
| } |
| return true; |
| } |
| |
| void MemoryAllocator::FreeMemoryRegion(v8::PageAllocator* page_allocator, |
| Address base, size_t size) { |
| FreePages(page_allocator, reinterpret_cast<void*>(base), size); |
| } |
| |
| Address MemoryAllocator::AllocateAlignedMemory( |
| size_t reserve_size, size_t commit_size, size_t alignment, |
| Executability executable, void* hint, VirtualMemory* controller) { |
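  // Reserve |reserve_size| bytes aligned to |alignment| but commit only the
  // leading |commit_size| bytes (for executable memory this also sets up the
  // guard pages around the code area). On failure the reservation is freed
  // and kNullAddress is returned; on success the reservation is moved into
  // |controller|.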
| v8::PageAllocator* page_allocator = this->page_allocator(executable); |
| DCHECK(commit_size <= reserve_size); |
| VirtualMemory reservation(page_allocator, reserve_size, hint, alignment); |
| if (!reservation.IsReserved()) return kNullAddress; |
| Address base = reservation.address(); |
| size_ += reservation.size(); |
| |
| if (executable == EXECUTABLE) { |
| if (!CommitExecutableMemory(&reservation, base, commit_size, |
| reserve_size)) { |
| base = kNullAddress; |
| } |
| } else { |
| if (reservation.SetPermissions(base, commit_size, |
| PageAllocator::kReadWrite)) { |
| UpdateAllocatedSpaceLimits(base, base + commit_size); |
| } else { |
| base = kNullAddress; |
| } |
| } |
| |
| if (base == kNullAddress) { |
| // Failed to commit the body. Free the mapping and any partially committed |
| // regions inside it. |
| reservation.Free(); |
| size_ -= reserve_size; |
| return kNullAddress; |
| } |
| |
| *controller = std::move(reservation); |
| return base; |
| } |
| |
| V8_EXPORT_PRIVATE BasicMemoryChunk* MemoryAllocator::AllocateBasicChunk( |
| size_t reserve_area_size, size_t commit_area_size, Executability executable, |
| BaseSpace* owner) { |
| DCHECK_LE(commit_area_size, reserve_area_size); |
| |
| size_t chunk_size; |
| Heap* heap = isolate_->heap(); |
| Address base = kNullAddress; |
| VirtualMemory reservation; |
| Address area_start = kNullAddress; |
| Address area_end = kNullAddress; |
| #ifdef V8_COMPRESS_POINTERS |
| // When pointer compression is enabled, spaces are expected to be at a |
| // predictable address (see mkgrokdump) so we don't supply a hint and rely on |
| // the deterministic behaviour of the BoundedPageAllocator. |
| void* address_hint = nullptr; |
| #else |
| void* address_hint = |
| AlignedAddress(heap->GetRandomMmapAddr(), MemoryChunk::kAlignment); |
| #endif |
| |
| // |
| // MemoryChunk layout: |
| // |
| // Executable |
| // +----------------------------+<- base aligned with MemoryChunk::kAlignment |
| // | Header | |
| // +----------------------------+<- base + CodePageGuardStartOffset |
| // | Guard | |
| // +----------------------------+<- area_start_ |
| // | Area | |
| // +----------------------------+<- area_end_ (area_start + commit_area_size) |
| // | Committed but not used | |
| // +----------------------------+<- aligned at OS page boundary |
| // | Reserved but not committed | |
| // +----------------------------+<- aligned at OS page boundary |
| // | Guard | |
| // +----------------------------+<- base + chunk_size |
| // |
| // Non-executable |
| // +----------------------------+<- base aligned with MemoryChunk::kAlignment |
| // | Header | |
| // +----------------------------+<- area_start_ (base + area_start_) |
| // | Area | |
| // +----------------------------+<- area_end_ (area_start + commit_area_size) |
| // | Committed but not used | |
| // +----------------------------+<- aligned at OS page boundary |
| // | Reserved but not committed | |
| // +----------------------------+<- base + chunk_size |
| // |
| |
| if (executable == EXECUTABLE) { |
| chunk_size = ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInCodePage() + |
| reserve_area_size + |
| MemoryChunkLayout::CodePageGuardSize(), |
| GetCommitPageSize()); |
| |
| // Size of header (not executable) plus area (executable). |
| size_t commit_size = ::RoundUp( |
| MemoryChunkLayout::CodePageGuardStartOffset() + commit_area_size, |
| GetCommitPageSize()); |
| base = |
| AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment, |
| executable, address_hint, &reservation); |
| if (base == kNullAddress) return nullptr; |
| // Update executable memory size. |
| size_executable_ += reservation.size(); |
| |
| if (Heap::ShouldZapGarbage()) { |
| ZapBlock(base, MemoryChunkLayout::CodePageGuardStartOffset(), kZapValue); |
| ZapBlock(base + MemoryChunkLayout::ObjectStartOffsetInCodePage(), |
| commit_area_size, kZapValue); |
| } |
| |
| area_start = base + MemoryChunkLayout::ObjectStartOffsetInCodePage(); |
| area_end = area_start + commit_area_size; |
| } else { |
| chunk_size = ::RoundUp( |
| MemoryChunkLayout::ObjectStartOffsetInDataPage() + reserve_area_size, |
| GetCommitPageSize()); |
| size_t commit_size = ::RoundUp( |
| MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size, |
| GetCommitPageSize()); |
| base = |
| AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment, |
| executable, address_hint, &reservation); |
| |
| if (base == kNullAddress) return nullptr; |
| |
| if (Heap::ShouldZapGarbage()) { |
| ZapBlock( |
| base, |
| MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size, |
| kZapValue); |
| } |
| |
| area_start = base + MemoryChunkLayout::ObjectStartOffsetInDataPage(); |
| area_end = area_start + commit_area_size; |
| } |
| |
  // Use chunk_size for statistics because we treat reserved but not-yet
  // committed memory regions of chunks as allocated.
| LOG(isolate_, |
| NewEvent("MemoryChunk", reinterpret_cast<void*>(base), chunk_size)); |
| |
| // We cannot use the last chunk in the address space because we would |
| // overflow when comparing top and limit if this chunk is used for a |
| // linear allocation area. |
| if ((base + chunk_size) == 0u) { |
| CHECK(!last_chunk_.IsReserved()); |
| last_chunk_ = std::move(reservation); |
| UncommitMemory(&last_chunk_); |
| size_ -= chunk_size; |
| if (executable == EXECUTABLE) { |
| size_executable_ -= chunk_size; |
| } |
| CHECK(last_chunk_.IsReserved()); |
| return AllocateBasicChunk(reserve_area_size, commit_area_size, executable, |
| owner); |
| } |
| |
| BasicMemoryChunk* chunk = |
| BasicMemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end, |
| owner, std::move(reservation)); |
| |
| return chunk; |
| } |
| |
| MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size, |
| size_t commit_area_size, |
| Executability executable, |
| PageSize page_size, |
| BaseSpace* owner) { |
| BasicMemoryChunk* basic_chunk = AllocateBasicChunk( |
| reserve_area_size, commit_area_size, executable, owner); |
| |
| if (basic_chunk == nullptr) return nullptr; |
| |
| MemoryChunk* chunk = MemoryChunk::Initialize(basic_chunk, isolate_->heap(), |
| executable, page_size); |
| |
| #ifdef DEBUG |
| if (chunk->executable()) RegisterExecutableMemoryChunk(chunk); |
| #endif // DEBUG |
| return chunk; |
| } |
| |
| void MemoryAllocator::PartialFreeMemory(BasicMemoryChunk* chunk, |
| Address start_free, |
| size_t bytes_to_free, |
| Address new_area_end) { |
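  // Shrink the chunk by releasing the tail of its reservation starting at
  // |start_free|. For executable chunks a new trailing guard page is
  // installed at the shrunken area end before the tail is released.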
| VirtualMemory* reservation = chunk->reserved_memory(); |
| DCHECK(reservation->IsReserved()); |
| chunk->set_size(chunk->size() - bytes_to_free); |
| chunk->set_area_end(new_area_end); |
| if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) { |
| // Add guard page at the end. |
| size_t page_size = GetCommitPageSize(); |
| DCHECK_EQ(0, chunk->area_end() % static_cast<Address>(page_size)); |
| DCHECK_EQ(chunk->address() + chunk->size(), |
| chunk->area_end() + MemoryChunkLayout::CodePageGuardSize()); |
| reservation->SetPermissions(chunk->area_end(), page_size, |
| PageAllocator::kNoAccess); |
| } |
  // On e.g. Windows, a reservation may be larger than a page, and releasing it
  // partially, starting at |start_free|, also releases the potentially unused
  // part behind the current page.
| const size_t released_bytes = reservation->Release(start_free); |
| DCHECK_GE(size_, released_bytes); |
| size_ -= released_bytes; |
| } |
| |
| void MemoryAllocator::UnregisterSharedBasicMemoryChunk( |
| BasicMemoryChunk* chunk) { |
| VirtualMemory* reservation = chunk->reserved_memory(); |
| const size_t size = |
| reservation->IsReserved() ? reservation->size() : chunk->size(); |
| DCHECK_GE(size_, static_cast<size_t>(size)); |
| size_ -= size; |
| } |
| |
| void MemoryAllocator::UnregisterBasicMemoryChunk(BasicMemoryChunk* chunk, |
| Executability executable) { |
| DCHECK(!chunk->IsFlagSet(MemoryChunk::UNREGISTERED)); |
| VirtualMemory* reservation = chunk->reserved_memory(); |
| const size_t size = |
| reservation->IsReserved() ? reservation->size() : chunk->size(); |
| DCHECK_GE(size_, static_cast<size_t>(size)); |
| |
| size_ -= size; |
| if (executable == EXECUTABLE) { |
| DCHECK_GE(size_executable_, size); |
| size_executable_ -= size; |
| #ifdef DEBUG |
| UnregisterExecutableMemoryChunk(static_cast<MemoryChunk*>(chunk)); |
| #endif // DEBUG |
| chunk->heap()->UnregisterUnprotectedMemoryChunk( |
| static_cast<MemoryChunk*>(chunk)); |
| } |
| chunk->SetFlag(MemoryChunk::UNREGISTERED); |
| } |
| |
| void MemoryAllocator::UnregisterMemoryChunk(MemoryChunk* chunk) { |
| UnregisterBasicMemoryChunk(chunk, chunk->executable()); |
| } |
| |
| void MemoryAllocator::UnregisterReadOnlyPage(ReadOnlyPage* page) { |
| DCHECK(!page->executable()); |
| UnregisterBasicMemoryChunk(page, NOT_EXECUTABLE); |
| } |
| |
| void MemoryAllocator::FreeReadOnlyPage(ReadOnlyPage* chunk) { |
| DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED)); |
| LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); |
| |
| UnregisterSharedBasicMemoryChunk(chunk); |
| |
| v8::PageAllocator* allocator = page_allocator(NOT_EXECUTABLE); |
| VirtualMemory* reservation = chunk->reserved_memory(); |
| if (reservation->IsReserved()) { |
| reservation->FreeReadOnly(); |
| } else { |
    // Only read-only pages can have a non-initialized reservation object.
    // This happens when the page is remapped to multiple locations, in which
    // case the reservation would be invalid.
| FreeMemoryRegion(allocator, chunk->address(), |
| RoundUp(chunk->size(), allocator->AllocatePageSize())); |
| } |
| } |
| |
| void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) { |
| DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED)); |
| LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); |
| UnregisterMemoryChunk(chunk); |
| isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk), |
| chunk->IsEvacuationCandidate()); |
| chunk->SetFlag(MemoryChunk::PRE_FREED); |
| } |
| |
| void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) { |
| DCHECK(chunk->IsFlagSet(MemoryChunk::UNREGISTERED)); |
| DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED)); |
| DCHECK(!chunk->InReadOnlySpace()); |
| chunk->ReleaseAllAllocatedMemory(); |
| |
| VirtualMemory* reservation = chunk->reserved_memory(); |
| if (chunk->IsFlagSet(MemoryChunk::POOLED)) { |
| UncommitMemory(reservation); |
| } else { |
| DCHECK(reservation->IsReserved()); |
| reservation->Free(); |
| } |
| } |
| |
| void MemoryAllocator::Free(MemoryAllocator::FreeMode mode, MemoryChunk* chunk) { |
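  // kImmediately releases the chunk on this thread. kConcurrentlyAndPool is
  // only valid for regular, non-executable pages: the page is marked POOLED
  // so the unmapper merely uncommits it and keeps it for reuse.
  // kConcurrently queues the chunk for the concurrent unmapper.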
| switch (mode) { |
| case kImmediately: |
| PreFreeMemory(chunk); |
| PerformFreeMemory(chunk); |
| break; |
| case kConcurrentlyAndPool: |
| DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize)); |
| DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE); |
| chunk->SetFlag(MemoryChunk::POOLED); |
| V8_FALLTHROUGH; |
| case kConcurrently: |
| PreFreeMemory(chunk); |
| // The chunks added to this queue will be freed by a concurrent thread. |
| unmapper()->AddMemoryChunkSafe(chunk); |
| break; |
| } |
| } |
| |
| void MemoryAllocator::FreePooledChunk(MemoryChunk* chunk) { |
  // Pooled pages cannot be touched anymore as their memory is uncommitted.
  // Pooled pages are non-executable.
| FreeMemoryRegion(data_page_allocator(), chunk->address(), |
| static_cast<size_t>(MemoryChunk::kPageSize)); |
| } |
| |
| Page* MemoryAllocator::AllocatePage(MemoryAllocator::AllocationMode alloc_mode, |
| size_t size, Space* owner, |
| Executability executable) { |
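  // For kUsePool, try to reuse an uncommitted page from the unmapper's pool;
  // fall back to allocating a fresh chunk if the pool is empty.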
| MemoryChunk* chunk = nullptr; |
| if (alloc_mode == kUsePool) { |
| DCHECK_EQ(size, static_cast<size_t>( |
| MemoryChunkLayout::AllocatableMemoryInMemoryChunk( |
| owner->identity()))); |
| DCHECK_EQ(executable, NOT_EXECUTABLE); |
| chunk = AllocatePagePooled(owner); |
| } |
| if (chunk == nullptr) { |
| chunk = AllocateChunk(size, size, executable, PageSize::kRegular, owner); |
| } |
| if (chunk == nullptr) return nullptr; |
| return owner->InitializePage(chunk); |
| } |
| |
| ReadOnlyPage* MemoryAllocator::AllocateReadOnlyPage(size_t size, |
| ReadOnlySpace* owner) { |
| BasicMemoryChunk* chunk = |
| AllocateBasicChunk(size, size, NOT_EXECUTABLE, owner); |
| if (chunk == nullptr) return nullptr; |
| return owner->InitializePage(chunk); |
| } |
| |
| std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping> |
| MemoryAllocator::RemapSharedPage( |
| ::v8::PageAllocator::SharedMemory* shared_memory, Address new_address) { |
| return shared_memory->RemapTo(reinterpret_cast<void*>(new_address)); |
| } |
| |
| LargePage* MemoryAllocator::AllocateLargePage(size_t size, |
| LargeObjectSpace* owner, |
| Executability executable) { |
| MemoryChunk* chunk = |
| AllocateChunk(size, size, executable, PageSize::kLarge, owner); |
| if (chunk == nullptr) return nullptr; |
| return LargePage::Initialize(isolate_->heap(), chunk, executable); |
| } |
| |
| MemoryChunk* MemoryAllocator::AllocatePagePooled(Space* owner) { |
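  // Reuse a page from the unmapper's pool: recommit its memory and
  // reinitialize the chunk metadata. Returns nullptr if the pool is empty or
  // the memory cannot be committed.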
| MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe(); |
| if (chunk == nullptr) return nullptr; |
| const int size = MemoryChunk::kPageSize; |
| const Address start = reinterpret_cast<Address>(chunk); |
| const Address area_start = |
| start + |
| MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(owner->identity()); |
| const Address area_end = start + size; |
| // Pooled pages are always regular data pages. |
| DCHECK_NE(CODE_SPACE, owner->identity()); |
| VirtualMemory reservation(data_page_allocator(), start, size); |
| if (!CommitMemory(&reservation)) return nullptr; |
| if (Heap::ShouldZapGarbage()) { |
| ZapBlock(start, size, kZapValue); |
| } |
| BasicMemoryChunk* basic_chunk = |
| BasicMemoryChunk::Initialize(isolate_->heap(), start, size, area_start, |
| area_end, owner, std::move(reservation)); |
| MemoryChunk::Initialize(basic_chunk, isolate_->heap(), NOT_EXECUTABLE, |
| PageSize::kRegular); |
| size_ += size; |
| return chunk; |
| } |
| |
| void MemoryAllocator::ZapBlock(Address start, size_t size, |
| uintptr_t zap_value) { |
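  // Fill [start, start + size) with |zap_value| as tagged words so that stale
  // memory is easily recognizable.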
| DCHECK(IsAligned(start, kTaggedSize)); |
| DCHECK(IsAligned(size, kTaggedSize)); |
| MemsetTagged(ObjectSlot(start), Object(static_cast<Address>(zap_value)), |
| size >> kTaggedSizeLog2); |
| } |
| |
| void MemoryAllocator::InitializeOncePerProcess() { |
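  // Cache the commit page size (FLAG_v8_os_page_size, given in KB, overrides
  // the OS page size) and its log2 for cheap page-size arithmetic.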
| commit_page_size_ = |
| FLAG_v8_os_page_size > 0 ? FLAG_v8_os_page_size * KB : CommitPageSize(); |
| CHECK(base::bits::IsPowerOfTwo(commit_page_size_)); |
| commit_page_size_bits_ = base::bits::WhichPowerOfTwo(commit_page_size_); |
| } |
| |
| base::AddressRegion MemoryAllocator::ComputeDiscardMemoryArea(Address addr, |
| size_t size) { |
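  // Compute the largest page-aligned sub-region of [addr, addr + size) that
  // can be discarded while leaving room for a FreeSpace filler at |addr|.
  // Returns an empty region if no full page can be discarded.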
| size_t page_size = GetCommitPageSize(); |
| if (size < page_size + FreeSpace::kSize) { |
| return base::AddressRegion(0, 0); |
| } |
| Address discardable_start = RoundUp(addr + FreeSpace::kSize, page_size); |
| Address discardable_end = RoundDown(addr + size, page_size); |
| if (discardable_start >= discardable_end) return base::AddressRegion(0, 0); |
| return base::AddressRegion(discardable_start, |
| discardable_end - discardable_start); |
| } |
| |
| bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start, |
| size_t commit_size, |
| size_t reserved_size) { |
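  // Commit the chunk in stages: the read/write header, an inaccessible
  // pre-code guard page, the code area itself, and an inaccessible trailing
  // guard page. Permissions already granted are reverted if a later stage
  // fails.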
| const size_t page_size = GetCommitPageSize(); |
| // All addresses and sizes must be aligned to the commit page size. |
| DCHECK(IsAligned(start, page_size)); |
| DCHECK_EQ(0, commit_size % page_size); |
| DCHECK_EQ(0, reserved_size % page_size); |
| const size_t guard_size = MemoryChunkLayout::CodePageGuardSize(); |
| const size_t pre_guard_offset = MemoryChunkLayout::CodePageGuardStartOffset(); |
| const size_t code_area_offset = |
| MemoryChunkLayout::ObjectStartOffsetInCodePage(); |
| // reserved_size includes two guard regions, commit_size does not. |
| DCHECK_LE(commit_size, reserved_size - 2 * guard_size); |
| const Address pre_guard_page = start + pre_guard_offset; |
| const Address code_area = start + code_area_offset; |
| const Address post_guard_page = start + reserved_size - guard_size; |
| // Commit the non-executable header, from start to pre-code guard page. |
| if (vm->SetPermissions(start, pre_guard_offset, PageAllocator::kReadWrite)) { |
| // Create the pre-code guard page, following the header. |
| if (vm->SetPermissions(pre_guard_page, page_size, |
| PageAllocator::kNoAccess)) { |
| // Commit the executable code body. |
| if (vm->SetPermissions(code_area, commit_size - pre_guard_offset, |
| MemoryChunk::GetCodeModificationPermission())) { |
| // Create the post-code guard page. |
| if (vm->SetPermissions(post_guard_page, page_size, |
| PageAllocator::kNoAccess)) { |
| UpdateAllocatedSpaceLimits(start, code_area + commit_size); |
| return true; |
| } |
| vm->SetPermissions(code_area, commit_size, PageAllocator::kNoAccess); |
| } |
| } |
| vm->SetPermissions(start, pre_guard_offset, PageAllocator::kNoAccess); |
| } |
| return false; |
| } |
| |
| } // namespace internal |
| } // namespace v8 |