[heap] Merge NewSpacePage into Page
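
Use Page for new-space pages as well and remove the separate NewSpacePage
class, which only differed from Page in a handful of static helpers.
Notable renames: Page::FromAllocationTop becomes
Page::FromAllocationAreaAddress, Page::Convert becomes
Page::ConvertNewToOld, and NewSpacePage::IsAtEnd becomes
Page::IsAlignedToPageSize. The is_valid() predicate is replaced by a static
MemoryChunk::IsValid(), anchor pages are tagged with an explicit ANCHOR
flag, MemoryAllocator::AllocatePage is no longer templated on the page
type, and large pages are allocated through a dedicated AllocateLargePage().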

BUG=chromium:581412
LOG=N

Review URL: https://codereview.chromium.org/1900423002

Cr-Commit-Position: refs/heads/master@{#35768}
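
For readers skimming the diff: the renamed Page::FromAllocationAreaAddress
(formerly FromAllocationTop) must handle a top/limit pointer that can sit
one past the last byte of a page. Below is a minimal standalone sketch of
that arithmetic, not part of the patch; the 512 KB page size is assumed
purely for illustration:

    #include <cassert>
    #include <cstdint>

    constexpr uintptr_t kPageSize = uintptr_t{1} << 19;  // illustrative only
    constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;
    constexpr uintptr_t kPointerSize = sizeof(void*);

    // Mask off the in-page offset (cf. Page::FromAddress).
    uintptr_t PageFromAddress(uintptr_t addr) {
      return addr & ~kPageAlignmentMask;
    }

    // An allocation top/limit may point one past the last byte of its page,
    // so step back one word before masking
    // (cf. Page::FromAllocationAreaAddress).
    uintptr_t PageFromAllocationAreaAddress(uintptr_t addr) {
      return PageFromAddress(addr - kPointerSize);
    }

    int main() {
      const uintptr_t page = 7 * kPageSize;
      assert(PageFromAddress(page + 128) == page);
      // A top exactly at the page limit still maps back to the same page.
      assert(PageFromAllocationAreaAddress(page + kPageSize) == page);
      return 0;
    }

This is also why PromotionQueue::GetHeadPage resolves rear_ with
FromAllocationAreaAddress: rear_ starts at ToSpaceEnd() and may sit exactly
on a page limit.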
diff --git a/src/heap/heap-inl.h b/src/heap/heap-inl.h
index da6a481..9643739 100644
--- a/src/heap/heap-inl.h
+++ b/src/heap/heap-inl.h
@@ -395,7 +395,7 @@
 
 
 bool Heap::ShouldBePromoted(Address old_address, int object_size) {
-  NewSpacePage* page = NewSpacePage::FromAddress(old_address);
+  Page* page = Page::FromAddress(old_address);
   Address age_mark = new_space_.age_mark();
   return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
          (!page->ContainsLimit(age_mark) || old_address < age_mark);
@@ -476,7 +476,7 @@
   Address object_address = object->address();
   Address memento_address = object_address + object->Size();
   Address last_memento_word_address = memento_address + kPointerSize;
-  if (!NewSpacePage::OnSamePage(object_address, last_memento_word_address)) {
+  if (!Page::OnSamePage(object_address, last_memento_word_address)) {
     return nullptr;
   }
   HeapObject* candidate = HeapObject::FromAddress(memento_address);
@@ -504,7 +504,7 @@
       top = NewSpaceTop();
       DCHECK(memento_address == top ||
              memento_address + HeapObject::kHeaderSize <= top ||
-             !NewSpacePage::OnSamePage(memento_address, top - 1));
+             !Page::OnSamePage(memento_address, top - 1));
       if ((memento_address != top) && memento_candidate->IsValid()) {
         return memento_candidate;
       }
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index 51d09ed..d95da5e 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -945,7 +945,7 @@
   // may be uninitialized memory behind top. We fill the remainder of the page
   // with a filler.
   Address to_top = new_space_.top();
-  NewSpacePage* page = NewSpacePage::FromAddress(to_top - kPointerSize);
+  Page* page = Page::FromAddress(to_top - kPointerSize);
   if (page->Contains(to_top)) {
     int remaining_in_page = static_cast<int>(page->area_end() - to_top);
     CreateFillerObjectAt(to_top, remaining_in_page, ClearRecordedSlots::kNo);
@@ -1552,7 +1552,8 @@
   front_ = rear_ =
       reinterpret_cast<struct Entry*>(heap_->new_space()->ToSpaceEnd());
   limit_ = reinterpret_cast<struct Entry*>(
-      Page::FromAllocationTop(reinterpret_cast<Address>(rear_))->area_start());
+      Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_))
+          ->area_start());
   emergency_stack_ = NULL;
 }
 
@@ -1560,7 +1561,7 @@
 void PromotionQueue::RelocateQueueHead() {
   DCHECK(emergency_stack_ == NULL);
 
-  Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
+  Page* p = Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
   struct Entry* head_start = rear_;
   struct Entry* head_end =
       Min(front_, reinterpret_cast<struct Entry*>(p->area_end()));
@@ -1909,13 +1910,14 @@
     // queue of unprocessed copied objects.  Process them until the
     // queue is empty.
     while (new_space_front != new_space_.top()) {
-      if (!NewSpacePage::IsAtEnd(new_space_front)) {
+      if (!Page::IsAlignedToPageSize(new_space_front)) {
         HeapObject* object = HeapObject::FromAddress(new_space_front);
         new_space_front +=
             StaticScavengeVisitor::IterateBody(object->map(), object);
       } else {
-        new_space_front =
-            NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
+        new_space_front = Page::FromAllocationAreaAddress(new_space_front)
+                              ->next_page()
+                              ->area_start();
       }
     }
 
@@ -4629,7 +4631,7 @@
   NewSpacePageIterator it(new_space_.FromSpaceStart(),
                           new_space_.FromSpaceEnd());
   while (it.has_next()) {
-    NewSpacePage* page = it.next();
+    Page* page = it.next();
     for (Address cursor = page->area_start(), limit = page->area_end();
          cursor < limit; cursor += kPointerSize) {
       Memory::Address_at(cursor) = kFromSpaceZapValue;
diff --git a/src/heap/heap.h b/src/heap/heap.h
index f933294..2628898 100644
--- a/src/heap/heap.h
+++ b/src/heap/heap.h
@@ -322,7 +322,7 @@
   }
 
   Page* GetHeadPage() {
-    return Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
+    return Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
   }
 
   void SetNewLimit(Address limit) {
@@ -330,7 +330,7 @@
     if (emergency_stack_) return;
 
     // If the limit is not on the same page, we can ignore it.
-    if (Page::FromAllocationTop(limit) != GetHeadPage()) return;
+    if (Page::FromAllocationAreaAddress(limit) != GetHeadPage()) return;
 
     limit_ = reinterpret_cast<struct Entry*>(limit);
 
diff --git a/src/heap/incremental-marking.cc b/src/heap/incremental-marking.cc
index 167c13a..471f7a6 100644
--- a/src/heap/incremental-marking.cc
+++ b/src/heap/incremental-marking.cc
@@ -348,7 +348,7 @@
     NewSpace* space) {
   NewSpacePageIterator it(space);
   while (it.has_next()) {
-    NewSpacePage* p = it.next();
+    Page* p = it.next();
     SetNewSpacePageFlags(p, false);
   }
 }
@@ -361,7 +361,7 @@
   DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
 
   LargePage* lop = heap_->lo_space()->first_page();
-  while (lop->is_valid()) {
+  while (LargePage::IsValid(lop)) {
     SetOldSpacePageFlags(lop, false, false);
     lop = lop->next_page();
   }
@@ -380,7 +380,7 @@
 void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
   NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
   while (it.has_next()) {
-    NewSpacePage* p = it.next();
+    Page* p = it.next();
     SetNewSpacePageFlags(p, true);
   }
 }
@@ -393,7 +393,7 @@
   ActivateIncrementalWriteBarrier(heap_->new_space());
 
   LargePage* lop = heap_->lo_space()->first_page();
-  while (lop->is_valid()) {
+  while (LargePage::IsValid(lop)) {
     SetOldSpacePageFlags(lop, true, is_compacting_);
     lop = lop->next_page();
   }
diff --git a/src/heap/incremental-marking.h b/src/heap/incremental-marking.h
index a332b88..9c5a3b5 100644
--- a/src/heap/incremental-marking.h
+++ b/src/heap/incremental-marking.h
@@ -185,7 +185,7 @@
     SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
   }
 
-  inline void SetNewSpacePageFlags(MemoryChunk* chunk) {
+  inline void SetNewSpacePageFlags(Page* chunk) {
     SetNewSpacePageFlags(chunk, IsMarking());
   }
 
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index 4fa3341..d39c9ee 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -134,10 +134,9 @@
   NewSpacePageIterator it(space->bottom(), end);
   // The bottom position is at the start of its page. Allows us to use
   // page->area_start() as start of range on all pages.
-  CHECK_EQ(space->bottom(),
-           NewSpacePage::FromAddress(space->bottom())->area_start());
+  CHECK_EQ(space->bottom(), Page::FromAddress(space->bottom())->area_start());
   while (it.has_next()) {
-    NewSpacePage* page = it.next();
+    Page* page = it.next();
     Address limit = it.has_next() ? page->area_end() : end;
     CHECK(limit == end || !page->Contains(end));
     VerifyMarking(space->heap(), page->area_start(), limit);
@@ -209,7 +208,7 @@
   VerifyEvacuationVisitor visitor;
 
   while (it.has_next()) {
-    NewSpacePage* page = it.next();
+    Page* page = it.next();
     Address current = page->area_start();
     Address limit = it.has_next() ? page->area_end() : space->top();
     CHECK(limit == space->top() || !page->Contains(space->top()));
@@ -375,7 +374,7 @@
   NewSpacePageIterator it(space->bottom(), space->top());
 
   while (it.has_next()) {
-    NewSpacePage* p = it.next();
+    Page* p = it.next();
     CHECK(p->markbits()->IsClean());
     CHECK_EQ(0, p->LiveBytes());
   }
@@ -1803,9 +1802,9 @@
  public:
   EvacuateNewSpacePageVisitor() : promoted_size_(0) {}
 
-  static void MoveToOldSpace(NewSpacePage* page, PagedSpace* owner) {
+  static void MoveToOldSpace(Page* page, PagedSpace* owner) {
     page->heap()->new_space()->ReplaceWithEmptyPage(page);
-    Page* new_page = Page::Convert(page, owner);
+    Page* new_page = Page::ConvertNewToOld(page, owner);
     new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
   }
 
@@ -1884,7 +1883,7 @@
   NewSpace* space = heap()->new_space();
   NewSpacePageIterator it(space->bottom(), space->top());
   while (it.has_next()) {
-    NewSpacePage* page = it.next();
+    Page* page = it.next();
     DiscoverGreyObjectsOnPage(page);
     if (marking_deque()->IsFull()) return;
   }
@@ -3050,9 +3049,8 @@
   // evacuation.
   static int PageEvacuationThreshold() {
     if (FLAG_page_promotion)
-      return FLAG_page_promotion_threshold * NewSpacePage::kAllocatableMemory /
-             100;
-    return NewSpacePage::kAllocatableMemory + kPointerSize;
+      return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
+    return Page::kAllocatableMemory + kPointerSize;
   }
 
   explicit Evacuator(MarkCompactCollector* collector)
@@ -3067,7 +3065,7 @@
         duration_(0.0),
         bytes_compacted_(0) {}
 
-  inline bool EvacuatePage(MemoryChunk* chunk);
+  inline bool EvacuatePage(Page* page);
 
   // Merge back locally cached info sequentially. Note that this method needs
   // to be called from the main thread.
@@ -3101,7 +3099,7 @@
   }
 
   template <IterationMode mode, class Visitor>
-  inline bool EvacuateSinglePage(MemoryChunk* p, Visitor* visitor);
+  inline bool EvacuateSinglePage(Page* p, Visitor* visitor);
 
   MarkCompactCollector* collector_;
 
@@ -3120,7 +3118,7 @@
 };
 
 template <MarkCompactCollector::IterationMode mode, class Visitor>
-bool MarkCompactCollector::Evacuator::EvacuateSinglePage(MemoryChunk* p,
+bool MarkCompactCollector::Evacuator::EvacuateSinglePage(Page* p,
                                                          Visitor* visitor) {
   bool success = false;
   DCHECK(p->IsEvacuationCandidate() || p->InNewSpace() ||
@@ -3154,28 +3152,27 @@
   return success;
 }
 
-bool MarkCompactCollector::Evacuator::EvacuatePage(MemoryChunk* chunk) {
+bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
   bool result = false;
-  DCHECK_EQ(chunk->concurrent_sweeping_state().Value(),
-            NewSpacePage::kSweepingDone);
-  switch (ComputeEvacuationMode(chunk)) {
+  DCHECK(page->SweepingDone());
+  switch (ComputeEvacuationMode(page)) {
     case kObjectsNewToOld:
-      result = EvacuateSinglePage<kClearMarkbits>(chunk, &new_space_visitor_);
+      result = EvacuateSinglePage<kClearMarkbits>(page, &new_space_visitor_);
       DCHECK(result);
       USE(result);
       break;
     case kPageNewToOld:
-      result = EvacuateSinglePage<kKeepMarking>(chunk, &new_space_page_visitor);
+      result = EvacuateSinglePage<kKeepMarking>(page, &new_space_page_visitor);
       DCHECK(result);
       USE(result);
       break;
     case kObjectsOldToOld:
-      result = EvacuateSinglePage<kClearMarkbits>(chunk, &old_space_visitor_);
+      result = EvacuateSinglePage<kClearMarkbits>(page, &old_space_visitor_);
       if (!result) {
         // Aborted compaction page. We can record slots here to have them
         // processed in parallel later on.
-        EvacuateRecordOnlyVisitor record_visitor(chunk->owner()->identity());
-        result = EvacuateSinglePage<kKeepMarking>(chunk, &record_visitor);
+        EvacuateRecordOnlyVisitor record_visitor(page->owner()->identity());
+        result = EvacuateSinglePage<kKeepMarking>(page, &record_visitor);
         DCHECK(result);
         USE(result);
         // We need to return failure here to indicate that we want this page
@@ -3244,7 +3241,7 @@
 
   static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
                                     MemoryChunk* chunk, PerPageData) {
-    return evacuator->EvacuatePage(chunk);
+    return evacuator->EvacuatePage(static_cast<Page*>(chunk));
   }
 
   static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk,
@@ -3288,8 +3285,9 @@
     live_bytes += page->LiveBytes();
     job.AddPage(page, &abandoned_pages);
   }
+
   const Address age_mark = heap()->new_space()->age_mark();
-  for (NewSpacePage* page : newspace_evacuation_candidates_) {
+  for (Page* page : newspace_evacuation_candidates_) {
     live_bytes += page->LiveBytes();
     if (!page->NeverEvacuate() &&
         (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
@@ -3674,7 +3672,7 @@
   Address space_end = heap->new_space()->top();
   NewSpacePageIterator it(space_start, space_end);
   while (it.has_next()) {
-    NewSpacePage* page = it.next();
+    Page* page = it.next();
     Address start =
         page->Contains(space_start) ? space_start : page->area_start();
     Address end = page->Contains(space_end) ? space_end : page->area_end();
diff --git a/src/heap/mark-compact.h b/src/heap/mark-compact.h
index 58bf2a5..8c8955e 100644
--- a/src/heap/mark-compact.h
+++ b/src/heap/mark-compact.h
@@ -862,7 +862,7 @@
   bool have_code_to_deoptimize_;
 
   List<Page*> evacuation_candidates_;
-  List<NewSpacePage*> newspace_evacuation_candidates_;
+  List<Page*> newspace_evacuation_candidates_;
 
   // True if we are collecting slots to perform evacuation from evacuation
   // candidates.
diff --git a/src/heap/spaces-inl.h b/src/heap/spaces-inl.h
index 3213d17..26d9560 100644
--- a/src/heap/spaces-inl.h
+++ b/src/heap/spaces-inl.h
@@ -56,8 +56,8 @@
 
 HeapObject* SemiSpaceIterator::Next() {
   while (current_ != limit_) {
-    if (NewSpacePage::IsAtEnd(current_)) {
-      NewSpacePage* page = NewSpacePage::FromLimit(current_);
+    if (Page::IsAlignedToPageSize(current_)) {
+      Page* page = Page::FromAllocationAreaAddress(current_);
       page = page->next_page();
       DCHECK(!page->is_anchor());
       current_ = page->area_start();
@@ -80,9 +80,9 @@
 // NewSpacePageIterator
 
 NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
-    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
-      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
-      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) {}
+    : prev_page_(Page::FromAddress(space->ToSpaceStart())->prev_page()),
+      next_page_(Page::FromAddress(space->ToSpaceStart())),
+      last_page_(Page::FromAllocationAreaAddress(space->ToSpaceEnd())) {}
 
 NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
     : prev_page_(space->anchor()),
@@ -90,17 +90,16 @@
       last_page_(prev_page_->prev_page()) {}
 
 NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
-    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
-      next_page_(NewSpacePage::FromAddress(start)),
-      last_page_(NewSpacePage::FromLimit(limit)) {
+    : prev_page_(Page::FromAddress(start)->prev_page()),
+      next_page_(Page::FromAddress(start)),
+      last_page_(Page::FromAllocationAreaAddress(limit)) {
   SemiSpace::AssertValidRange(start, limit);
 }
 
 
 bool NewSpacePageIterator::has_next() { return prev_page_ != last_page_; }
 
-
-NewSpacePage* NewSpacePageIterator::next() {
+Page* NewSpacePageIterator::next() {
   DCHECK(has_next());
   prev_page_ = next_page_;
   next_page_ = next_page_->next_page();
@@ -244,20 +243,18 @@
 bool NewSpace::FromSpaceContains(Object* o) { return from_space_.Contains(o); }
 
 size_t NewSpace::AllocatedSinceLastGC() {
-  const intptr_t age_mark_offset =
-      NewSpacePage::OffsetInPage(to_space_.age_mark());
-  const intptr_t top_offset =
-      NewSpacePage::OffsetInPage(allocation_info_.top());
+  const intptr_t age_mark_offset = Page::OffsetInPage(to_space_.age_mark());
+  const intptr_t top_offset = Page::OffsetInPage(allocation_info_.top());
   const intptr_t age_mark_delta =
-      age_mark_offset >= NewSpacePage::kObjectStartOffset
-          ? age_mark_offset - NewSpacePage::kObjectStartOffset
-          : NewSpacePage::kAllocatableMemory;
-  const intptr_t top_delta = top_offset >= NewSpacePage::kObjectStartOffset
-                                 ? top_offset - NewSpacePage::kObjectStartOffset
-                                 : NewSpacePage::kAllocatableMemory;
+      age_mark_offset >= Page::kObjectStartOffset
+          ? age_mark_offset - Page::kObjectStartOffset
+          : Page::kAllocatableMemory;
+  const intptr_t top_delta = top_offset >= Page::kObjectStartOffset
+                                 ? top_offset - Page::kObjectStartOffset
+                                 : Page::kAllocatableMemory;
   DCHECK((allocated_since_last_gc_ > 0) ||
-         (NewSpacePage::FromLimit(allocation_info_.top()) ==
-          NewSpacePage::FromLimit(to_space_.age_mark())));
+         (Page::FromAllocationAreaAddress(allocation_info_.top()) ==
+          Page::FromAllocationAreaAddress(to_space_.age_mark())));
   return static_cast<size_t>(allocated_since_last_gc_ + top_delta -
                              age_mark_delta);
 }
@@ -270,16 +267,15 @@
   return static_cast<AllocationSpace>(Smi::cast(object_)->value());
 }
 
-NewSpacePage* NewSpacePage::Initialize(Heap* heap, MemoryChunk* chunk,
-                                       Executability executable,
-                                       SemiSpace* owner) {
+Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
+                       SemiSpace* owner) {
   DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
   bool in_to_space = (owner->id() != kFromSpace);
   chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
                              : MemoryChunk::IN_FROM_SPACE);
   DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
                                        : MemoryChunk::IN_TO_SPACE));
-  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
+  Page* page = static_cast<Page*>(chunk);
   heap->incremental_marking()->SetNewSpacePageFlags(page);
   return page;
 }
@@ -309,7 +305,8 @@
   return page;
 }
 
-Page* Page::Convert(NewSpacePage* old_page, PagedSpace* new_owner) {
+Page* Page::ConvertNewToOld(Page* old_page, PagedSpace* new_owner) {
+  DCHECK(old_page->InNewSpace());
   old_page->set_owner(new_owner);
   old_page->SetFlags(0, ~0);
   new_owner->AccountCommitted(old_page->size());
@@ -359,14 +356,14 @@
 
 bool PagedSpace::Contains(Address addr) {
   Page* p = Page::FromAddress(addr);
-  if (!p->is_valid()) return false;
+  if (!Page::IsValid(p)) return false;
   return p->owner() == this;
 }
 
 bool PagedSpace::Contains(Object* o) {
   if (!o->IsHeapObject()) return false;
   Page* p = Page::FromAddress(HeapObject::cast(o)->address());
-  if (!p->is_valid()) return false;
+  if (!Page::IsValid(p)) return false;
   return p->owner() == this;
 }
 
@@ -472,16 +469,6 @@
   return nullptr;
 }
 
-void Page::set_next_page(Page* page) {
-  DCHECK(page->owner() == owner());
-  set_next_chunk(page);
-}
-
-void Page::set_prev_page(Page* page) {
-  DCHECK(page->owner() == owner());
-  set_prev_chunk(page);
-}
-
 Page* FreeListCategory::page() {
   return Page::FromAddress(reinterpret_cast<Address>(this));
 }
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index ff5a704..98c0c4a 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -425,20 +425,12 @@
   return base;
 }
 
-
-void Page::InitializeAsAnchor(PagedSpace* owner) {
-  set_owner(owner);
-  set_prev_page(this);
-  set_next_page(this);
-}
-
-void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
-  set_owner(semi_space);
+void Page::InitializeAsAnchor(Space* space) {
+  set_owner(space);
   set_next_chunk(this);
   set_prev_chunk(this);
-  // Flags marks this invalid page as not being in new-space.
-  // All real new-space pages will be in new-space.
   SetFlags(0, ~0);
+  SetFlag(ANCHOR);
 }
 
 MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
@@ -772,12 +764,11 @@
 template void MemoryAllocator::Free<MemoryAllocator::kPooled>(
     MemoryChunk* chunk);
 
-template <typename PageType, MemoryAllocator::AllocationMode mode,
-          typename SpaceType>
-PageType* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
-                                        Executability executable) {
+template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
+Page* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
+                                    Executability executable) {
   MemoryChunk* chunk = nullptr;
-  if (mode == kPooled) {
+  if (alloc_mode == kPooled) {
     DCHECK_EQ(size, static_cast<intptr_t>(MemoryChunk::kAllocatableMemory));
     DCHECK_EQ(executable, NOT_EXECUTABLE);
     chunk = AllocatePagePooled(owner);
@@ -786,20 +777,26 @@
     chunk = AllocateChunk(size, size, executable, owner);
   }
   if (chunk == nullptr) return nullptr;
-  return PageType::Initialize(isolate_->heap(), chunk, executable, owner);
+  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
 }
 
-template Page* MemoryAllocator::AllocatePage<Page, MemoryAllocator::kRegular,
-                                             PagedSpace>(intptr_t, PagedSpace*,
-                                                         Executability);
+template Page*
+MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
+    intptr_t size, PagedSpace* owner, Executability executable);
+template Page*
+MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
+    intptr_t size, SemiSpace* owner, Executability executable);
+template Page*
+MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
+    intptr_t size, SemiSpace* owner, Executability executable);
 
-template LargePage*
-MemoryAllocator::AllocatePage<LargePage, MemoryAllocator::kRegular, Space>(
-    intptr_t, Space*, Executability);
-
-template NewSpacePage* MemoryAllocator::AllocatePage<
-    NewSpacePage, MemoryAllocator::kPooled, SemiSpace>(intptr_t, SemiSpace*,
-                                                       Executability);
+LargePage* MemoryAllocator::AllocateLargePage(intptr_t size,
+                                              LargeObjectSpace* owner,
+                                              Executability executable) {
+  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
+  if (chunk == nullptr) return nullptr;
+  return LargePage::Initialize(isolate_->heap(), chunk, executable, owner);
+}
 
 template <typename SpaceType>
 MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
@@ -1041,13 +1038,11 @@
 
 PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
                        Executability executable)
-    : Space(heap, space, executable), free_list_(this) {
+    : Space(heap, space, executable), anchor_(this), free_list_(this) {
   area_size_ = MemoryAllocator::PageAreaSize(space);
   accounting_stats_.Clear();
 
   allocation_info_.Reset(nullptr, nullptr);
-
-  anchor_.InitializeAsAnchor(this);
 }
 
 
@@ -1180,8 +1175,7 @@
 
   if (!heap()->CanExpandOldGeneration(size)) return false;
 
-  Page* p =
-      heap()->memory_allocator()->AllocatePage<Page>(size, this, executable());
+  Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable());
   if (p == nullptr) return false;
 
   AccountCommitted(static_cast<intptr_t>(p->size()));
@@ -1240,7 +1234,7 @@
   free_list_.EvictFreeListItems(page);
   DCHECK(!free_list_.ContainsPageFreeListItems(page));
 
-  if (Page::FromAllocationTop(allocation_info_.top()) == page) {
+  if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
     allocation_info_.Reset(nullptr, nullptr);
   }
 
@@ -1269,7 +1263,7 @@
   while (page_iterator.has_next()) {
     Page* page = page_iterator.next();
     CHECK(page->owner() == this);
-    if (page == Page::FromAllocationTop(allocation_info_.top())) {
+    if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
       allocation_pointer_found_in_space = true;
     }
     CHECK(page->SweepingDone());
@@ -1488,14 +1482,14 @@
 
 bool NewSpace::AddFreshPage() {
   Address top = allocation_info_.top();
-  DCHECK(!NewSpacePage::IsAtStart(top));
+  DCHECK(!Page::IsAtObjectStart(top));
   if (!to_space_.AdvancePage()) {
     // No more pages left to advance.
     return false;
   }
 
   // Clear remainder of current page.
-  Address limit = NewSpacePage::FromLimit(top)->area_end();
+  Address limit = Page::FromAllocationAreaAddress(top)->area_end();
   if (heap()->gc_state() == Heap::SCAVENGE) {
     heap()->promotion_queue()->SetNewLimit(limit);
   }
@@ -1503,7 +1497,7 @@
   int remaining_in_page = static_cast<int>(limit - top);
   heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
   pages_used_++;
-  allocated_since_last_gc_ += NewSpacePage::kAllocatableMemory;
+  allocated_since_last_gc_ += Page::kAllocatableMemory;
   UpdateAllocationInfo();
 
   return true;
@@ -1622,9 +1616,9 @@
   CHECK_EQ(current, to_space_.space_start());
 
   while (current != top()) {
-    if (!NewSpacePage::IsAtEnd(current)) {
+    if (!Page::IsAlignedToPageSize(current)) {
       // The allocation pointer should not be in the middle of an object.
-      CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
+      CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
             current < top());
 
       HeapObject* object = HeapObject::FromAddress(current);
@@ -1650,7 +1644,7 @@
       current += size;
     } else {
       // At end of page, switch to next page.
-      NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
+      Page* page = Page::FromAllocationAreaAddress(current)->next_page();
       // Next page should be valid.
       CHECK(!page->is_anchor());
       current = page->area_start();
@@ -1686,14 +1680,12 @@
 
 bool SemiSpace::Commit() {
   DCHECK(!is_committed());
-  NewSpacePage* current = anchor();
+  Page* current = anchor();
   const int num_pages = current_capacity_ / Page::kPageSize;
   for (int pages_added = 0; pages_added < num_pages; pages_added++) {
-    NewSpacePage* new_page =
-        heap()
-            ->memory_allocator()
-            ->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
-                NewSpacePage::kAllocatableMemory, this, executable());
+    Page* new_page =
+        heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
+            Page::kAllocatableMemory, this, executable());
     if (new_page == nullptr) {
       RewindPages(current, pages_added);
       return false;
@@ -1740,20 +1732,18 @@
   if (!is_committed()) {
     if (!Commit()) return false;
   }
-  DCHECK_EQ(new_capacity & NewSpacePage::kPageAlignmentMask, 0);
+  DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
   DCHECK_LE(new_capacity, maximum_capacity_);
   DCHECK_GT(new_capacity, current_capacity_);
   const int delta = new_capacity - current_capacity_;
   DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
-  const int delta_pages = delta / NewSpacePage::kPageSize;
-  NewSpacePage* last_page = anchor()->prev_page();
+  const int delta_pages = delta / Page::kPageSize;
+  Page* last_page = anchor()->prev_page();
   DCHECK_NE(last_page, anchor());
   for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
-    NewSpacePage* new_page =
-        heap()
-            ->memory_allocator()
-            ->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
-                NewSpacePage::kAllocatableMemory, this, executable());
+    Page* new_page =
+        heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
+            Page::kAllocatableMemory, this, executable());
     if (new_page == nullptr) {
       RewindPages(last_page, pages_added);
       return false;
@@ -1761,8 +1751,7 @@
     new_page->InsertAfter(last_page);
     Bitmap::Clear(new_page);
     // Duplicate the flags that were set on the old page.
-    new_page->SetFlags(last_page->GetFlags(),
-                       NewSpacePage::kCopyOnFlipFlagsMask);
+    new_page->SetFlags(last_page->GetFlags(), Page::kCopyOnFlipFlagsMask);
     last_page = new_page;
   }
   AccountCommitted(static_cast<intptr_t>(delta));
@@ -1770,9 +1759,9 @@
   return true;
 }
 
-void SemiSpace::RewindPages(NewSpacePage* start, int num_pages) {
-  NewSpacePage* new_last_page = nullptr;
-  NewSpacePage* last_page = start;
+void SemiSpace::RewindPages(Page* start, int num_pages) {
+  Page* new_last_page = nullptr;
+  Page* last_page = start;
   while (num_pages > 0) {
     DCHECK_NE(last_page, anchor());
     new_last_page = last_page->prev_page();
@@ -1784,15 +1773,15 @@
 }
 
 bool SemiSpace::ShrinkTo(int new_capacity) {
-  DCHECK_EQ(new_capacity & NewSpacePage::kPageAlignmentMask, 0);
+  DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
   DCHECK_GE(new_capacity, minimum_capacity_);
   DCHECK_LT(new_capacity, current_capacity_);
   if (is_committed()) {
     const int delta = current_capacity_ - new_capacity;
     DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
-    int delta_pages = delta / NewSpacePage::kPageSize;
-    NewSpacePage* new_last_page;
-    NewSpacePage* last_page;
+    int delta_pages = delta / Page::kPageSize;
+    Page* new_last_page;
+    Page* last_page;
     while (delta_pages > 0) {
       last_page = anchor()->prev_page();
       new_last_page = last_page->prev_page();
@@ -1809,13 +1798,12 @@
 
 void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
   anchor_.set_owner(this);
-  // Fixup back-pointers to anchor. Address of anchor changes when we swap.
   anchor_.prev_page()->set_next_page(&anchor_);
   anchor_.next_page()->set_prev_page(&anchor_);
 
   NewSpacePageIterator it(this);
   while (it.has_next()) {
-    NewSpacePage* page = it.next();
+    Page* page = it.next();
     page->set_owner(this);
     page->SetFlags(flags, mask);
     if (id_ == kToSpace) {
@@ -1838,12 +1826,11 @@
   current_page_ = anchor_.next_page();
 }
 
-void SemiSpace::ReplaceWithEmptyPage(NewSpacePage* old_page) {
-  NewSpacePage* new_page =
-      heap()->memory_allocator()->AllocatePage<NewSpacePage>(
-          NewSpacePage::kAllocatableMemory, this, executable());
+void SemiSpace::ReplaceWithEmptyPage(Page* old_page) {
+  Page* new_page = heap()->memory_allocator()->AllocatePage(
+      Page::kAllocatableMemory, this, executable());
   Bitmap::Clear(new_page);
-  new_page->SetFlags(old_page->GetFlags(), NewSpacePage::kCopyAllFlags);
+  new_page->SetFlags(old_page->GetFlags(), Page::kCopyAllFlags);
   new_page->set_next_page(old_page->next_page());
   new_page->set_prev_page(old_page->prev_page());
   old_page->next_page()->set_prev_page(new_page);
@@ -1868,13 +1855,13 @@
   std::swap(from->anchor_, to->anchor_);
   std::swap(from->current_page_, to->current_page_);
 
-  to->FixPagesFlags(saved_to_space_flags, NewSpacePage::kCopyOnFlipFlagsMask);
+  to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
   from->FixPagesFlags(0, 0);
 }
 
 
 void SemiSpace::set_age_mark(Address mark) {
-  DCHECK_EQ(NewSpacePage::FromLimit(mark)->semi_space(), this);
+  DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
   age_mark_ = mark;
   // Mark all pages up to the one containing mark.
   NewSpacePageIterator it(space_start(), mark);
@@ -1891,10 +1878,10 @@
 #ifdef VERIFY_HEAP
 void SemiSpace::Verify() {
   bool is_from_space = (id_ == kFromSpace);
-  NewSpacePage* page = anchor_.next_page();
-  CHECK(anchor_.semi_space() == this);
+  Page* page = anchor_.next_page();
+  CHECK(anchor_.owner() == this);
   while (page != &anchor_) {
-    CHECK_EQ(page->semi_space(), this);
+    CHECK_EQ(page->owner(), this);
     CHECK(page->InNewSpace());
     CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
                                         : MemoryChunk::IN_TO_SPACE));
@@ -1922,10 +1909,10 @@
 #ifdef DEBUG
 void SemiSpace::AssertValidRange(Address start, Address end) {
   // Addresses belong to the same semi-space.
-  NewSpacePage* page = NewSpacePage::FromLimit(start);
-  NewSpacePage* end_page = NewSpacePage::FromLimit(end);
-  SemiSpace* space = page->semi_space();
-  CHECK_EQ(space, end_page->semi_space());
+  Page* page = Page::FromAllocationAreaAddress(start);
+  Page* end_page = Page::FromAllocationAreaAddress(end);
+  SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
+  CHECK_EQ(space, end_page->owner());
   // Start address is before end address, either on same page,
   // or end address is on a later page in the linked list of
   // semi-space pages.
@@ -2599,7 +2586,7 @@
 void PagedSpace::EvictEvacuationCandidatesFromLinearAllocationArea() {
   if (allocation_info_.top() >= allocation_info_.limit()) return;
 
-  if (!Page::FromAllocationTop(allocation_info_.top())->CanAllocate()) {
+  if (!Page::FromAllocationAreaAddress(allocation_info_.top())->CanAllocate()) {
     // Create filler object to keep page iterable if it was iterable.
     int remaining =
         static_cast<int>(allocation_info_.limit() - allocation_info_.top());
@@ -2908,7 +2895,7 @@
     return AllocationResult::Retry(identity());
   }
 
-  LargePage* page = heap()->memory_allocator()->AllocatePage<LargePage>(
+  LargePage* page = heap()->memory_allocator()->AllocateLargePage(
       object_size, this, executable);
   if (page == NULL) return AllocationResult::Retry(identity());
   DCHECK(page->area_size() >= object_size);
@@ -2977,7 +2964,7 @@
   if (e != NULL) {
     DCHECK(e->value != NULL);
     LargePage* page = reinterpret_cast<LargePage*>(e->value);
-    DCHECK(page->is_valid());
+    DCHECK(LargePage::IsValid(page));
     if (page->Contains(a)) {
       return page;
     }
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index 791ee6c..e995cc9 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -27,7 +27,6 @@
 class Isolate;
 class MemoryAllocator;
 class MemoryChunk;
-class NewSpacePage;
 class Page;
 class PagedSpace;
 class SemiSpace;
@@ -445,6 +444,9 @@
     //   has been aborted and needs special handling by the sweeper.
     COMPACTION_WAS_ABORTED,
 
+    // |ANCHOR|: Flag is set if page is an anchor.
+    ANCHOR,
+
     // Last flag, keep at bottom.
     NUM_MEMORY_CHUNK_FLAGS
   };
@@ -556,7 +558,7 @@
     if (mark == nullptr) return;
     // Need to subtract one from the mark because when a chunk is full the
     // top points to the next address after the chunk, which effectively belongs
-    // to another chunk. See the comment to Page::FromAllocationTop.
+    // to another chunk. See the comment to Page::FromAllocationAreaAddress.
     MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
     intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
     intptr_t old_mark = 0;
@@ -566,9 +568,9 @@
              !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
   }
 
-  Address address() { return reinterpret_cast<Address>(this); }
+  static bool IsValid(MemoryChunk* chunk) { return chunk != nullptr; }
 
-  bool is_valid() { return address() != NULL; }
+  Address address() { return reinterpret_cast<Address>(this); }
 
   base::Mutex* mutex() { return mutex_; }
 
@@ -825,61 +827,15 @@
 //
 // The only way to get a page pointer is by calling factory methods:
 //   Page* p = Page::FromAddress(addr); or
-//   Page* p = Page::FromAllocationTop(top);
+//   Page* p = Page::FromAllocationAreaAddress(top);
 class Page : public MemoryChunk {
  public:
-  static inline Page* Convert(NewSpacePage* old_page, PagedSpace* new_owner);
+  static const intptr_t kCopyAllFlags = ~0;
 
-  // Returns the page containing a given address. The address ranges
-  // from [page_addr .. page_addr + kPageSize[
-  // This only works if the object is in fact in a page.  See also MemoryChunk::
-  // FromAddress() and FromAnyAddress().
-  INLINE(static Page* FromAddress(Address a)) {
-    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
-  }
-
-  // Only works for addresses in pointer spaces, not code space.
-  inline static Page* FromAnyPointerAddress(Heap* heap, Address addr);
-
-  // Returns the page containing an allocation top. Because an allocation
-  // top address can be the upper bound of the page, we need to subtract
-  // it with kPointerSize first. The address ranges from
-  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
-  INLINE(static Page* FromAllocationTop(Address top)) {
-    Page* p = FromAddress(top - kPointerSize);
-    return p;
-  }
-
-  // Returns the next page in the chain of pages owned by a space.
-  inline Page* next_page() {
-    DCHECK(next_chunk()->owner() == owner());
-    return static_cast<Page*>(next_chunk());
-  }
-  inline Page* prev_page() {
-    DCHECK(prev_chunk()->owner() == owner());
-    return static_cast<Page*>(prev_chunk());
-  }
-  inline void set_next_page(Page* page);
-  inline void set_prev_page(Page* page);
-
-  // Checks whether an address is page aligned.
-  static bool IsAlignedToPageSize(Address a) {
-    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
-  }
-
-  // Returns the offset of a given address to this page.
-  INLINE(int Offset(Address a)) {
-    int offset = static_cast<int>(a - address());
-    return offset;
-  }
-
-  // Returns the address for a given offset to the this page.
-  Address OffsetToAddress(int offset) {
-    DCHECK_PAGE_OFFSET(offset);
-    return address() + offset;
-  }
-
-  // ---------------------------------------------------------------------
+  // Page flags copied from from-space to to-space when flipping semispaces.
+  static const intptr_t kCopyOnFlipFlagsMask =
+      (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
+      (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
 
   // Maximum object size that gets allocated into regular pages. Objects larger
   // than that size are allocated in large object space and are never moved in
@@ -890,9 +846,71 @@
   // short living objects >256K.
   static const int kMaxRegularHeapObjectSize = 600 * KB;
 
-  inline void ClearGCFields();
+  static inline Page* ConvertNewToOld(Page* old_page, PagedSpace* new_owner);
 
-  void InitializeAsAnchor(PagedSpace* owner);
+  // Returns the page containing a given address. The address ranges
+  // from [page_addr .. page_addr + kPageSize[. This only works if the object
+  // is in fact in a page.
+  static Page* FromAddress(Address addr) {
+    return reinterpret_cast<Page*>(OffsetFrom(addr) & ~kPageAlignmentMask);
+  }
+
+  // Returns the page containing the address provided. The address can
+  // potentially point right after the page. To also be safe for tagged values
+  // we subtract a whole word. The valid address ranges from
+  // [page_addr + kObjectStartOffset .. page_addr + kPageSize + kPointerSize].
+  static Page* FromAllocationAreaAddress(Address address) {
+    return Page::FromAddress(address - kPointerSize);
+  }
+
+  // Checks whether address1 and address2 are on the same page.
+  static bool OnSamePage(Address address1, Address address2) {
+    return Page::FromAddress(address1) == Page::FromAddress(address2);
+  }
+
+  // Checks whether an address is page aligned.
+  static bool IsAlignedToPageSize(Address addr) {
+    return (OffsetFrom(addr) & kPageAlignmentMask) == 0;
+  }
+
+  static bool IsAtObjectStart(Address addr) {
+    return (reinterpret_cast<intptr_t>(addr) & kPageAlignmentMask) ==
+           kObjectStartOffset;
+  }
+
+  inline static Page* FromAnyPointerAddress(Heap* heap, Address addr);
+
+  // Create a Page object that is only used as anchor for the doubly-linked
+  // list of real pages.
+  explicit Page(Space* owner) { InitializeAsAnchor(owner); }
+
+  inline void MarkNeverAllocateForTesting();
+  inline void MarkEvacuationCandidate();
+  inline void ClearEvacuationCandidate();
+
+  Page* next_page() { return static_cast<Page*>(next_chunk()); }
+  Page* prev_page() { return static_cast<Page*>(prev_chunk()); }
+  void set_next_page(Page* page) { set_next_chunk(page); }
+  void set_prev_page(Page* page) { set_prev_chunk(page); }
+
+  template <typename Callback>
+  inline void ForAllFreeListCategories(Callback callback) {
+    for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+      callback(&categories_[i]);
+    }
+  }
+
+  // Returns the offset of a given address within this page.
+  inline int Offset(Address a) {
+    int offset = static_cast<int>(a - address());
+    return offset;
+  }
+
+  // Returns the address for a given offset within this page.
+  Address OffsetToAddress(int offset) {
+    DCHECK_PAGE_OFFSET(offset);
+    return address() + offset;
+  }
 
   // WaitUntilSweepingCompleted only works when concurrent sweeping is in
   // progress. In particular, when we know that right before this call a
@@ -914,48 +932,39 @@
                             available_in_free_list());
   }
 
-  template <typename Callback>
-  inline void ForAllFreeListCategories(Callback callback) {
-    for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
-      callback(&categories_[i]);
-    }
-  }
-
   FreeListCategory* free_list_category(FreeListCategoryType type) {
     return &categories_[type];
   }
 
-#define FRAGMENTATION_STATS_ACCESSORS(type, name)        \
-  type name() { return name##_.Value(); }                \
-  void set_##name(type name) { name##_.SetValue(name); } \
-  void add_##name(type name) { name##_.Increment(name); }
+  bool is_anchor() { return IsFlagSet(Page::ANCHOR); }
 
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, wasted_memory)
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_free_list)
-
-#undef FRAGMENTATION_STATS_ACCESSORS
+  intptr_t wasted_memory() { return wasted_memory_.Value(); }
+  void add_wasted_memory(intptr_t waste) { wasted_memory_.Increment(waste); }
+  intptr_t available_in_free_list() { return available_in_free_list_.Value(); }
+  void add_available_in_free_list(intptr_t available) {
+    available_in_free_list_.Increment(available);
+  }
 
 #ifdef DEBUG
   void Print();
 #endif  // DEBUG
 
-  inline void MarkNeverAllocateForTesting();
-  inline void MarkEvacuationCandidate();
-  inline void ClearEvacuationCandidate();
-
  private:
   enum InitializationMode { kFreeMemory, kDoNotFreeMemory };
 
   template <InitializationMode mode = kFreeMemory>
   static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
                                  Executability executable, PagedSpace* owner);
+  static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
+                                 Executability executable, SemiSpace* owner);
 
   inline void InitializeFreeListCategories();
 
+  void InitializeAsAnchor(Space* owner);
+
   friend class MemoryAllocator;
 };
 
-
 class LargePage : public MemoryChunk {
  public:
   HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }
@@ -1275,13 +1284,15 @@
 
   void TearDown();
 
-  // Allocates either Page or NewSpacePage from the allocator. AllocationMode
-  // is used to indicate whether pooled allocation, which only works for
-  // MemoryChunk::kPageSize, should be tried first.
-  template <typename PageType, MemoryAllocator::AllocationMode mode = kRegular,
+  // Allocates a Page from the allocator. AllocationMode is used to indicate
+  // whether pooled allocation, which only works for MemoryChunk::kPageSize,
+  // should be tried first.
+  template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
             typename SpaceType>
-  PageType* AllocatePage(intptr_t size, SpaceType* owner,
-                         Executability executable);
+  Page* AllocatePage(intptr_t size, SpaceType* owner, Executability executable);
+
+  LargePage* AllocateLargePage(intptr_t size, LargeObjectSpace* owner,
+                               Executability executable);
 
   // PreFree logically frees the object, i.e., it takes care of the size
   // bookkeeping and calls the allocation callback.
@@ -1592,7 +1603,8 @@
 
 #ifdef DEBUG
   bool VerifyPagedAllocation() {
-    return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_)) &&
+    return (Page::FromAllocationAreaAddress(top_) ==
+            Page::FromAllocationAreaAddress(limit_)) &&
            (top_ <= limit_);
   }
 #endif
@@ -2303,86 +2315,8 @@
   const char* name_;
 };
 
-
 enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
 
-
-class NewSpacePage : public MemoryChunk {
- public:
-  static bool IsAtStart(Address addr) {
-    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) ==
-           kObjectStartOffset;
-  }
-
-  static bool IsAtEnd(Address addr) {
-    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
-  }
-
-  // Finds the NewSpacePage containing the given address.
-  static inline NewSpacePage* FromAddress(Address address_in_page) {
-    Address page_start =
-        reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
-                                  ~Page::kPageAlignmentMask);
-    NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
-    return page;
-  }
-
-  // Find the page for a limit address. A limit address is either an address
-  // inside a page, or the address right after the last byte of a page.
-  static inline NewSpacePage* FromLimit(Address address_limit) {
-    return NewSpacePage::FromAddress(address_limit - 1);
-  }
-
-  // Checks if address1 and address2 are on the same new space page.
-  static inline bool OnSamePage(Address address1, Address address2) {
-    return NewSpacePage::FromAddress(address1) ==
-           NewSpacePage::FromAddress(address2);
-  }
-
-  inline NewSpacePage* next_page() {
-    return static_cast<NewSpacePage*>(next_chunk());
-  }
-
-  inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); }
-
-  inline NewSpacePage* prev_page() {
-    return static_cast<NewSpacePage*>(prev_chunk());
-  }
-
-  inline void set_prev_page(NewSpacePage* page) { set_prev_chunk(page); }
-
-  SemiSpace* semi_space() { return reinterpret_cast<SemiSpace*>(owner()); }
-
-  bool is_anchor() { return !this->InNewSpace(); }
-
- private:
-  static inline NewSpacePage* Initialize(Heap* heap, MemoryChunk* chunk,
-                                         Executability executable,
-                                         SemiSpace* owner);
-
-  // GC related flags copied from from-space to to-space when
-  // flipping semispaces.
-  static const intptr_t kCopyOnFlipFlagsMask =
-      (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
-      (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-
-  static const intptr_t kCopyAllFlags = ~0;
-
-  // Create a NewSpacePage object that is only used as anchor
-  // for the doubly-linked list of real pages.
-  explicit NewSpacePage(SemiSpace* owner) { InitializeAsAnchor(owner); }
-
-  // Intialize a fake NewSpacePage used as sentinel at the ends
-  // of a doubly-linked list of real NewSpacePages.
-  // Only uses the prev/next links, and sets flags to not be in new-space.
-  void InitializeAsAnchor(SemiSpace* owner);
-
-  friend class MemoryAllocator;
-  friend class SemiSpace;
-  friend class SemiSpaceIterator;
-};
-
-
 // -----------------------------------------------------------------------------
 // SemiSpace in young generation
 //
@@ -2431,8 +2365,8 @@
     return anchor_.next_page()->area_start();
   }
 
-  NewSpacePage* first_page() { return anchor_.next_page(); }
-  NewSpacePage* current_page() { return current_page_; }
+  Page* first_page() { return anchor_.next_page(); }
+  Page* current_page() { return current_page_; }
 
   // Returns one past the end address of the space.
   Address space_end() { return anchor_.prev_page()->area_end(); }
@@ -2444,7 +2378,7 @@
   Address page_high() { return current_page_->area_end(); }
 
   bool AdvancePage() {
-    NewSpacePage* next_page = current_page_->next_page();
+    Page* next_page = current_page_->next_page();
     if (next_page == anchor()) return false;
     current_page_ = next_page;
     return true;
@@ -2453,7 +2387,7 @@
   // Resets the space to using the first page.
   void Reset();
 
-  void ReplaceWithEmptyPage(NewSpacePage* page);
+  void ReplaceWithEmptyPage(Page* page);
 
   // Age mark accessors.
   Address age_mark() { return age_mark_; }
@@ -2504,9 +2438,9 @@
 #endif
 
  private:
-  void RewindPages(NewSpacePage* start, int num_pages);
+  void RewindPages(Page* start, int num_pages);
 
-  inline NewSpacePage* anchor() { return &anchor_; }
+  inline Page* anchor() { return &anchor_; }
 
   // Copies the flags into the masked positions on all pages in the space.
   void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
@@ -2526,8 +2460,8 @@
   bool committed_;
   SemiSpaceId id_;
 
-  NewSpacePage anchor_;
-  NewSpacePage* current_page_;
+  Page anchor_;
+  Page* current_page_;
 
   friend class SemiSpaceIterator;
   friend class NewSpacePageIterator;
@@ -2575,15 +2509,15 @@
   inline NewSpacePageIterator(Address start, Address limit);
 
   inline bool has_next();
-  inline NewSpacePage* next();
+  inline Page* next();
 
  private:
-  NewSpacePage* prev_page_;  // Previous page returned.
+  Page* prev_page_;  // Previous page returned.
   // Next page that will be returned.  Cached here so that we can use this
   // iterator for operations that deallocate pages.
-  NewSpacePage* next_page_;
+  Page* next_page_;
   // Last page returned.
-  NewSpacePage* last_page_;
+  Page* last_page_;
 };
 
 
@@ -2633,7 +2567,7 @@
 
   // Return the allocated bytes in the active semispace.
   intptr_t Size() override {
-    return pages_used_ * NewSpacePage::kAllocatableMemory +
+    return pages_used_ * Page::kAllocatableMemory +
            static_cast<int>(top() - to_space_.page_low());
   }
 
@@ -2646,7 +2580,7 @@
   intptr_t Capacity() {
     SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
     return (to_space_.current_capacity() / Page::kPageSize) *
-           NewSpacePage::kAllocatableMemory;
+           Page::kAllocatableMemory;
   }
 
   // Return the current size of a semispace, allocatable and non-allocatable
@@ -2675,7 +2609,7 @@
 
   inline size_t AllocatedSinceLastGC();
 
-  void ReplaceWithEmptyPage(NewSpacePage* page) {
+  void ReplaceWithEmptyPage(Page* page) {
     // This method is called after flipping the semispace.
     DCHECK(page->InFromSpace());
     from_space_.ReplaceWithEmptyPage(page);
diff --git a/test/cctest/heap/test-heap.cc b/test/cctest/heap/test-heap.cc
index 290ecdd..89b0c6f 100644
--- a/test/cctest/heap/test-heap.cc
+++ b/test/cctest/heap/test-heap.cc
@@ -6622,15 +6622,14 @@
     CHECK_GT(handles.size(), 0u);
     // First object in handle should be on the first page.
     Handle<FixedArray> first_object = handles.front();
-    NewSpacePage* first_page =
-        NewSpacePage::FromAddress(first_object->address());
+    Page* first_page = Page::FromAddress(first_object->address());
     // The age mark should not be on the first page.
     CHECK(!first_page->ContainsLimit(heap->new_space()->age_mark()));
     // To perform a sanity check on live bytes we need to mark the heap.
     SimulateIncrementalMarking(heap, true);
     // Sanity check that the page meets the requirements for promotion.
     const int threshold_bytes =
-        FLAG_page_promotion_threshold * NewSpacePage::kAllocatableMemory / 100;
+        FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
     CHECK_GE(first_page->LiveBytes(), threshold_bytes);
 
     // Actual checks: The page is in new space first, but is moved to old space
diff --git a/test/cctest/heap/test-spaces.cc b/test/cctest/heap/test-spaces.cc
index 711b2a5..1ed07c5 100644
--- a/test/cctest/heap/test-spaces.cc
+++ b/test/cctest/heap/test-spaces.cc
@@ -315,12 +315,12 @@
   {
     int total_pages = 0;
     OldSpace faked_space(heap, OLD_SPACE, NOT_EXECUTABLE);
-    Page* first_page = memory_allocator->AllocatePage<Page>(
+    Page* first_page = memory_allocator->AllocatePage(
         faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
         NOT_EXECUTABLE);
 
     first_page->InsertAfter(faked_space.anchor()->prev_page());
-    CHECK(first_page->is_valid());
+    CHECK(Page::IsValid(first_page));
     CHECK(first_page->next_page() == faked_space.anchor());
     total_pages++;
 
@@ -329,10 +329,10 @@
     }
 
     // Again, we should get n or n - 1 pages.
-    Page* other = memory_allocator->AllocatePage<Page>(
+    Page* other = memory_allocator->AllocatePage(
         faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
         NOT_EXECUTABLE);
-    CHECK(other->is_valid());
+    CHECK(Page::IsValid(other));
     total_pages++;
     other->InsertAfter(first_page);
     int page_count = 0;
@@ -343,7 +343,7 @@
     CHECK(total_pages == page_count);
 
     Page* second_page = first_page->next_page();
-    CHECK(second_page->is_valid());
+    CHECK(Page::IsValid(second_page));
 
     // OldSpace's destructor will tear down the space and free up all pages.
   }