Reland "Reland "[Memory] Move VirtualMemory out of base:: platform.""

This is a reland of f2cd10db1414f496984e79bf6f1a23393d60b3c1
Original change's description:
> Reland "[Memory] Move VirtualMemory out of base:: platform."
> 
> This is a reland of 4dd293d922dfaefb2b9d144971070574d0fb9933
> Original change's description:
> > [Memory] Move VirtualMemory out of base:: platform.
> > 
> > - Moves base::VirtualMemory to v8::internal::VirtualMemory.
> > - Makes VirtualMemory platform-independent by moving internals to new
> >   OS:: static methods, for each platform.
> > 
> > This will make it easier to delegate memory management in VirtualMemory
> > to V8::Platform, so that embedders like Blink can override it. We can't
> > depend on V8::Platform in base/platform.
> > 
> > Bug: chromium:756050
> > Cq-Include-Trybots: master.tryserver.chromium.linux:linux_chromium_rel_ng
> > Change-Id: Iadfe230b6850bd917727a373f277afded9883adf
> > Reviewed-on: https://chromium-review.googlesource.com/653214
> > Commit-Queue: Bill Budge <bbudge@chromium.org>
> > Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> > Cr-Commit-Position: refs/heads/master@{#48048}
> 
> Bug: chromium:756050
> Change-Id: Ib492c7c69f1833be127a571808301e96b84b8aa2
> Cq-Include-Trybots: master.tryserver.chromium.linux:linux_chromium_rel_ng
> Reviewed-on: https://chromium-review.googlesource.com/671125
> Commit-Queue: Bill Budge <bbudge@chromium.org>
> Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#48084}

Bug: chromium:756050
Change-Id: Ie596730b5cefc38137cab7fc1f76613f5af7b825
Cq-Include-Trybots: master.tryserver.chromium.linux:linux_chromium_rel_ng
Reviewed-on: https://chromium-review.googlesource.com/675283
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Bill Budge <bbudge@chromium.org>
Cr-Commit-Position: refs/heads/master@{#48113}
diff --git a/src/allocation.cc b/src/allocation.cc
index d64a476..bd92739 100644
--- a/src/allocation.cc
+++ b/src/allocation.cc
@@ -99,29 +99,106 @@
 #endif
 }
 
-bool AllocVirtualMemory(size_t size, void* hint, base::VirtualMemory* result) {
-  base::VirtualMemory first_try(size, hint);
+VirtualMemory::VirtualMemory() : address_(nullptr), size_(0) {}
+
+VirtualMemory::VirtualMemory(size_t size, void* hint)
+    : address_(base::OS::ReserveRegion(size, hint)), size_(size) {}
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
+    : address_(nullptr), size_(0) {
+  address_ = base::OS::ReserveAlignedRegion(size, alignment, hint, &size_);
+}
+
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    bool result = base::OS::ReleaseRegion(address(), size());
+    DCHECK(result);
+    USE(result);
+  }
+}
+
+void VirtualMemory::Reset() {
+  address_ = nullptr;
+  size_ = 0;
+}
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  CHECK(InVM(address, size));
+  return base::OS::CommitRegion(address, size, is_executable);
+}
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  CHECK(InVM(address, size));
+  return base::OS::UncommitRegion(address, size);
+}
+
+bool VirtualMemory::Guard(void* address) {
+  CHECK(InVM(address, base::OS::CommitPageSize()));
+  base::OS::Guard(address, base::OS::CommitPageSize());
+  return true;
+}
+
+size_t VirtualMemory::ReleasePartial(void* free_start) {
+  DCHECK(IsReserved());
+  // Notice: Order is important here. The VirtualMemory object might live
+  // inside the allocated region.
+  const size_t size = size_ - (reinterpret_cast<size_t>(free_start) -
+                               reinterpret_cast<size_t>(address_));
+  CHECK(InVM(free_start, size));
+  DCHECK_LT(address_, free_start);
+  DCHECK_LT(free_start, reinterpret_cast<void*>(
+                            reinterpret_cast<size_t>(address_) + size_));
+  const bool result =
+      base::OS::ReleasePartialRegion(address_, size_, free_start, size);
+  USE(result);
+  DCHECK(result);
+  size_ -= size;
+  return size;
+}
+
+void VirtualMemory::Release() {
+  DCHECK(IsReserved());
+  // Notice: Order is important here. The VirtualMemory object might live
+  // inside the allocated region.
+  void* address = address_;
+  size_t size = size_;
+  CHECK(InVM(address, size));
+  Reset();
+  bool result = base::OS::ReleaseRegion(address, size);
+  USE(result);
+  DCHECK(result);
+}
+
+void VirtualMemory::TakeControl(VirtualMemory* from) {
+  DCHECK(!IsReserved());
+  address_ = from->address_;
+  size_ = from->size_;
+  from->Reset();
+}
+
+bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result) {
+  VirtualMemory first_try(size, hint);
   if (first_try.IsReserved()) {
     result->TakeControl(&first_try);
     return true;
   }
 
   V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
-  base::VirtualMemory second_try(size, hint);
+  VirtualMemory second_try(size, hint);
   result->TakeControl(&second_try);
   return result->IsReserved();
 }
 
 bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
-                               base::VirtualMemory* result) {
-  base::VirtualMemory first_try(size, alignment, hint);
+                               VirtualMemory* result) {
+  VirtualMemory first_try(size, alignment, hint);
   if (first_try.IsReserved()) {
     result->TakeControl(&first_try);
     return true;
   }
 
   V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
-  base::VirtualMemory second_try(size, alignment, hint);
+  VirtualMemory second_try(size, alignment, hint);
   result->TakeControl(&second_try);
   return result->IsReserved();
 }
diff --git a/src/allocation.h b/src/allocation.h
index 18dbaea..4b63479 100644
--- a/src/allocation.h
+++ b/src/allocation.h
@@ -76,9 +76,88 @@
 void* AlignedAlloc(size_t size, size_t alignment);
 void AlignedFree(void *ptr);
 
-bool AllocVirtualMemory(size_t size, void* hint, base::VirtualMemory* result);
+// Represents and controls an area of reserved memory.
+class V8_EXPORT_PRIVATE VirtualMemory {
+ public:
+  // Empty VirtualMemory object, controlling no reserved memory.
+  VirtualMemory();
+
+  // Reserves virtual memory with size.
+  explicit VirtualMemory(size_t size, void* hint);
+
+  // Reserves virtual memory containing an area of the given size that
+  // is aligned per alignment. This may not be at the position returned
+  // by address().
+  VirtualMemory(size_t size, size_t alignment, void* hint);
+
+  // Construct a virtual memory by assigning it some already mapped address
+  // and size.
+  VirtualMemory(void* address, size_t size) : address_(address), size_(size) {}
+
+  // Releases the reserved memory, if any, controlled by this VirtualMemory
+  // object.
+  ~VirtualMemory();
+
+  // Returns whether the memory has been reserved.
+  bool IsReserved() const { return address_ != nullptr; }
+
+  // Initializes or resets an embedded VirtualMemory object.
+  void Reset();
+
+  // Returns the start address of the reserved memory.
+  // If the memory was reserved with an alignment, this address is not
+  // necessarily aligned. The user might need to round it up to a multiple of
+  // the alignment to get the start of the aligned block.
+  void* address() const {
+    DCHECK(IsReserved());
+    return address_;
+  }
+
+  void* end() const {
+    DCHECK(IsReserved());
+    return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address_) +
+                                   size_);
+  }
+
+  // Returns the size of the reserved memory. The returned value is only
+  // meaningful when IsReserved() returns true.
+  // If the memory was reserved with an alignment, this size may be larger
+  // than the requested size.
+  size_t size() const { return size_; }
+
+  // Commits real memory. Returns whether the operation succeeded.
+  bool Commit(void* address, size_t size, bool is_executable);
+
+  // Uncommit real memory.  Returns whether the operation succeeded.
+  bool Uncommit(void* address, size_t size);
+
+  // Creates a single guard page at the given address.
+  bool Guard(void* address);
+
+  // Releases the memory after |free_start|. Returns the bytes released.
+  size_t ReleasePartial(void* free_start);
+
+  void Release();
+
+  // Assign control of the reserved region to a different VirtualMemory object.
+  // The old object is no longer functional (IsReserved() returns false).
+  void TakeControl(VirtualMemory* from);
+
+  bool InVM(void* address, size_t size) {
+    return (reinterpret_cast<uintptr_t>(address_) <=
+            reinterpret_cast<uintptr_t>(address)) &&
+           ((reinterpret_cast<uintptr_t>(address_) + size_) >=
+            (reinterpret_cast<uintptr_t>(address) + size));
+  }
+
+ private:
+  void* address_;  // Start address of the virtual memory.
+  size_t size_;    // Size of the virtual memory.
+};
+
+bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result);
 bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
-                               base::VirtualMemory* result);
+                               VirtualMemory* result);
 
 }  // namespace internal
 }  // namespace v8
diff --git a/src/api.cc b/src/api.cc
index e83adbe..59467b4 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -485,8 +485,7 @@
   virtual void Free(void* data, size_t) { free(data); }
 
   virtual void* Reserve(size_t length) {
-    return base::VirtualMemory::ReserveRegion(length,
-                                              base::OS::GetRandomMmapAddr());
+    return base::OS::ReserveRegion(length, base::OS::GetRandomMmapAddr());
   }
 
   virtual void Free(void* data, size_t length,
@@ -496,7 +495,7 @@
         return Free(data, length);
       }
       case v8::ArrayBuffer::Allocator::AllocationMode::kReservation: {
-        base::VirtualMemory::ReleaseRegion(data, length);
+        base::OS::ReleaseRegion(data, length);
         return;
       }
     }
diff --git a/src/base/platform/platform-aix.cc b/src/base/platform/platform-aix.cc
index c14a363..ef86b9f 100644
--- a/src/base/platform/platform-aix.cc
+++ b/src/base/platform/platform-aix.cc
@@ -65,23 +65,106 @@
 
 TimezoneCache* OS::CreateTimezoneCache() { return new AIXTimezoneCache(); }
 
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
 void* OS::Allocate(const size_t requested, size_t* allocated,
                    OS::MemoryPermission access, void* hint) {
   const size_t msize = RoundUp(requested, getpagesize());
   int prot = GetProtectionFromMemoryPermission(access);
-  void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd,
+                     kMmapFdOffset);
 
   if (mbase == MAP_FAILED) return NULL;
   *allocated = msize;
   return mbase;
 }
 
+// static
+void* OS::ReserveRegion(size_t size, void* hint) {
+  void* result = mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS,
+                      kMmapFd, kMmapFdOffset);
+
+  if (result == MAP_FAILED) return nullptr;
+
+  return result;
+}
+
+// static
+void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
+                               size_t* allocated) {
+  DCHECK((alignment % OS::AllocateAlignment()) == 0);
+  hint = AlignedAddress(hint, alignment);
+  size_t request_size =
+      RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* result = ReserveRegion(request_size, hint);
+  if (result == nullptr) {
+    *allocated = 0;
+    return nullptr;
+  }
+
+  uint8_t* base = static_cast<uint8_t*>(result);
+  uint8_t* aligned_base = RoundUp(base, alignment);
+  DCHECK_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  DCHECK_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  DCHECK(aligned_size == request_size);
+
+  // Note: the old VirtualMemory constructor's member assignments do not
+  // belong in this static helper; results are reported via |allocated|.
+
+  *allocated = aligned_size;
+  return static_cast<void*>(aligned_base);
+}
+
+// static
+bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+
+  if (mprotect(address, size, prot) == -1) return false;
+
+  return true;
+}
+
+// static
+bool OS::UncommitRegion(void* address, size_t size) {
+  return mprotect(address, size, PROT_NONE) != -1;
+}
+
+// static
+bool OS::ReleasePartialRegion(void* address, size_t size, void* free_start,
+                              size_t free_size) {
+  return munmap(free_start, free_size) == 0;
+}
+
+// static
+bool OS::ReleaseRegion(void* address, size_t size) {
+  return munmap(address, size) == 0;
+}
+
+// static
+bool OS::HasLazyCommits() { return true; }
 
 static unsigned StringToLong(char* buffer) {
   return static_cast<unsigned>(strtol(buffer, NULL, 16));  // NOLINT
 }
 
-
 std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
   std::vector<SharedLibraryAddress> result;
   static const int MAP_LENGTH = 1024;
@@ -122,119 +205,7 @@
   return result;
 }
 
-
 void OS::SignalCodeMovingGC() {}
 
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) {}
-
-VirtualMemory::VirtualMemory(size_t size, void* hint)
-    : address_(ReserveRegion(size, hint)), size_(size) {}
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
-    : address_(NULL), size_(0) {
-  DCHECK((alignment % OS::AllocateAlignment()) == 0);
-  hint = AlignedAddress(hint, alignment);
-  size_t request_size =
-      RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* reservation = mmap(hint, request_size, PROT_NONE,
-                           MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd, kMmapFdOffset);
-  if (reservation == MAP_FAILED) return;
-
-  uint8_t* base = static_cast<uint8_t*>(reservation);
-  uint8_t* aligned_base = RoundUp(base, alignment);
-  DCHECK_LE(base, aligned_base);
-
-  // Unmap extra memory reserved before and after the desired block.
-  if (aligned_base != base) {
-    size_t prefix_size = static_cast<size_t>(aligned_base - base);
-    OS::Free(base, prefix_size);
-    request_size -= prefix_size;
-  }
-
-  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
-  DCHECK_LE(aligned_size, request_size);
-
-  if (aligned_size != request_size) {
-    size_t suffix_size = request_size - aligned_size;
-    OS::Free(aligned_base + aligned_size, suffix_size);
-    request_size -= suffix_size;
-  }
-
-  DCHECK(aligned_size == request_size);
-
-  address_ = static_cast<void*>(aligned_base);
-  size_ = aligned_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
-  if (IsReserved()) {
-    bool result = ReleaseRegion(address(), size());
-    DCHECK(result);
-    USE(result);
-  }
-}
-
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
-  OS::Guard(address, OS::CommitPageSize());
-  return true;
-}
-
-void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
-  void* result = mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS,
-                      kMmapFd, kMmapFdOffset);
-
-  if (result == MAP_FAILED) return NULL;
-
-  return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-
-  if (mprotect(base, size, prot) == -1) return false;
-
-  return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
-  return mprotect(base, size, PROT_NONE) != -1;
-}
-
-bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
-                                         void* free_start, size_t free_size) {
-  return munmap(free_start, free_size) == 0;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-  return munmap(base, size) == 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() { return true; }
 }  // namespace base
 }  // namespace v8
diff --git a/src/base/platform/platform-cygwin.cc b/src/base/platform/platform-cygwin.cc
index 6868fd9..4aeb712 100644
--- a/src/base/platform/platform-cygwin.cc
+++ b/src/base/platform/platform-cygwin.cc
@@ -26,6 +26,31 @@
 namespace v8 {
 namespace base {
 
+namespace {
+
+// The VirtualMemory implementation is taken from platform-win32.cc.
+// The mmap-based virtual memory implementation as it is used on most posix
+// platforms does not work well because Cygwin does not support MAP_FIXED.
+// This causes VirtualMemory::Commit to not always commit the memory region
+// specified.
+
+static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
+                                    void* hint) {
+  LPVOID base = NULL;
+
+  if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
+    // For exectutable pages try and randomize the allocation address
+    base = VirtualAlloc(hint, size, action, protection);
+  }
+
+  // After three attempts give up and let the OS find an address to use.
+  if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
+
+  return base;
+}
+
+}  // namespace
+
 class CygwinTimezoneCache : public PosixTimezoneCache {
   const char* LocalTimezone(double time) override;
 
@@ -65,6 +90,75 @@
   return mbase;
 }
 
+// static
+void* OS::ReserveRegion(size_t size, void* hint) {
+  return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS, hint);
+}
+
+// static
+void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
+                               size_t* allocated) {
+  hint = AlignedAddress(hint, alignment);
+  DCHECK((alignment % OS::AllocateAlignment()) == 0);
+  size_t request_size =
+      RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* address = ReserveRegion(request_size, hint);
+  if (address == NULL) {
+    *allocated = 0;
+    return nullptr;
+  }
+  uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
+  // Try reducing the size by freeing and then reallocating a specific area.
+  bool result = ReleaseRegion(address, request_size);
+  USE(result);
+  DCHECK(result);
+  address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
+  if (address != nullptr) {
+    request_size = size;
+    DCHECK(base == static_cast<uint8_t*>(address));
+  } else {
+    // Resizing failed, just go with a bigger area.
+    address = ReserveRegion(request_size, hint);
+    if (address == nullptr) {
+      *allocated = 0;
+      return nullptr;
+    }
+  }
+
+  *allocated = request_size;
+  return static_cast<void*>(address);
+}
+
+// static
+bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
+  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+  if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
+    return false;
+  }
+  return true;
+}
+
+// static
+bool OS::UncommitRegion(void* address, size_t size) {
+  return VirtualFree(address, size, MEM_DECOMMIT) != 0;
+}
+
+// static
+bool OS::ReleasePartialRegion(void* address, size_t size, void* free_start,
+                              size_t free_size) {
+  return VirtualFree(free_start, free_size, MEM_DECOMMIT) != 0;
+}
+
+// static
+bool OS::ReleaseRegion(void* address, size_t size) {
+  return VirtualFree(address, 0, MEM_RELEASE) != 0;
+}
+
+// static
+bool OS::HasLazyCommits() {
+  // TODO(alph): implement for the platform.
+  return false;
+}
 
 std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
   std::vector<SharedLibraryAddresses> result;
@@ -131,124 +225,5 @@
   // Nothing to do on Cygwin.
 }
 
-
-// The VirtualMemory implementation is taken from platform-win32.cc.
-// The mmap-based virtual memory implementation as it is used on most posix
-// platforms does not work well because Cygwin does not support MAP_FIXED.
-// This causes VirtualMemory::Commit to not always commit the memory region
-// specified.
-
-static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
-                                    void* hint) {
-  LPVOID base = NULL;
-
-  if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
-    // For exectutable pages try and randomize the allocation address
-    base = VirtualAlloc(hint, size, action, protection);
-  }
-
-  // After three attempts give up and let the OS find an address to use.
-  if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
-
-  return base;
-}
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-VirtualMemory::VirtualMemory(size_t size, void* hint)
-    : address_(ReserveRegion(size, hint)), size_(size) {}
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
-    : address_(NULL), size_(0) {
-  hint = AlignedAddress(hint, alignment);
-  DCHECK((alignment % OS::AllocateAlignment()) == 0);
-  size_t request_size = RoundUp(size + alignment,
-                                static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* address = ReserveRegion(request_size, hint);
-  if (address == NULL) return;
-  uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
-  // Try reducing the size by freeing and then reallocating a specific area.
-  bool result = ReleaseRegion(address, request_size);
-  USE(result);
-  DCHECK(result);
-  address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
-  if (address != NULL) {
-    request_size = size;
-    DCHECK(base == static_cast<uint8_t*>(address));
-  } else {
-    // Resizing failed, just go with a bigger area.
-    address = ReserveRegion(request_size, hint);
-    if (address == NULL) return;
-  }
-  address_ = address;
-  size_ = request_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
-  if (IsReserved()) {
-    bool result = ReleaseRegion(address_, size_);
-    DCHECK(result);
-    USE(result);
-  }
-}
-
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
-  DCHECK(IsReserved());
-  return UncommitRegion(address, size);
-}
-
-void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
-  return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS, hint);
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
-  if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
-    return false;
-  }
-  return true;
-}
-
-
-bool VirtualMemory::Guard(void* address) {
-  if (NULL == VirtualAlloc(address,
-                           OS::CommitPageSize(),
-                           MEM_COMMIT,
-                           PAGE_NOACCESS)) {
-    return false;
-  }
-  return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
-  return VirtualFree(base, size, MEM_DECOMMIT) != 0;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-  return VirtualFree(base, 0, MEM_RELEASE) != 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
-  // TODO(alph): implement for the platform.
-  return false;
-}
-
 }  // namespace base
 }  // namespace v8
diff --git a/src/base/platform/platform-freebsd.cc b/src/base/platform/platform-freebsd.cc
index 3910c50..cf8c8f5 100644
--- a/src/base/platform/platform-freebsd.cc
+++ b/src/base/platform/platform-freebsd.cc
@@ -40,23 +40,109 @@
   return new PosixDefaultTimezoneCache();
 }
 
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
 void* OS::Allocate(const size_t requested, size_t* allocated,
                    OS::MemoryPermission access, void* hint) {
   const size_t msize = RoundUp(requested, getpagesize());
   int prot = GetProtectionFromMemoryPermission(access);
-  void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+  void* mbase =
+      mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);
 
   if (mbase == MAP_FAILED) return NULL;
   *allocated = msize;
   return mbase;
 }
 
+// static
+void* OS::ReserveRegion(size_t size, void* hint) {
+  void* result = mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, kMmapFd,
+                      kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+  return result;
+}
+
+// static
+void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
+                               size_t* allocated) {
+  hint = AlignedAddress(hint, alignment);
+  DCHECK((alignment % OS::AllocateAlignment()) == 0);
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* result = ReserveRegion(request_size, hint);
+  if (result == nullptr) {
+    *allocated = 0;
+    return nullptr;
+  }
+
+  uint8_t* base = static_cast<uint8_t*>(result);
+  uint8_t* aligned_base = RoundUp(base, alignment);
+  DCHECK_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  DCHECK_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  DCHECK(aligned_size == request_size);
+
+  *allocated = aligned_size;
+  return static_cast<void*>(aligned_base);
+}
+
+// static
+bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(address, size, prot,
+                         MAP_PRIVATE | MAP_ANON | MAP_FIXED, kMmapFd,
+                         kMmapFdOffset)) {
+    return false;
+  }
+  return true;
+}
+
+// static
+bool OS::UncommitRegion(void* address, size_t size) {
+  return mmap(address, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+              kMmapFd, kMmapFdOffset) != MAP_FAILED;
+}
+
+// static
+bool OS::ReleasePartialRegion(void* address, size_t size, void* free_start,
+                              size_t free_size) {
+  return munmap(free_start, free_size) == 0;
+}
+
+// static
+bool OS::ReleaseRegion(void* address, size_t size) {
+  return munmap(address, size) == 0;
+}
+
+// static
+bool OS::HasLazyCommits() {
+  // TODO(alph): implement for the platform.
+  return false;
+}
 
 static unsigned StringToLong(char* buffer) {
   return static_cast<unsigned>(strtol(buffer, NULL, 16));  // NOLINT
 }
 
-
 std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
   std::vector<SharedLibraryAddress> result;
   static const int MAP_LENGTH = 1024;
@@ -80,8 +166,7 @@
     bytes_read = -1;
     do {
       bytes_read++;
-      if (bytes_read >= MAP_LENGTH - 1)
-        break;
+      if (bytes_read >= MAP_LENGTH - 1) break;
       bytes_read = read(fd, buffer + bytes_read, 1);
       if (bytes_read < 1) break;
     } while (buffer[bytes_read] != '\n');
@@ -98,135 +183,7 @@
   return result;
 }
 
-
-void OS::SignalCodeMovingGC() {
-}
-
-
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-VirtualMemory::VirtualMemory(size_t size, void* hint)
-    : address_(ReserveRegion(size, hint)), size_(size) {}
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
-    : address_(NULL), size_(0) {
-  hint = AlignedAddress(hint, alignment);
-  DCHECK((alignment % OS::AllocateAlignment()) == 0);
-  size_t request_size = RoundUp(size + alignment,
-                                static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* reservation = mmap(hint, request_size, PROT_NONE,
-                           MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);
-  if (reservation == MAP_FAILED) return;
-
-  uint8_t* base = static_cast<uint8_t*>(reservation);
-  uint8_t* aligned_base = RoundUp(base, alignment);
-  DCHECK_LE(base, aligned_base);
-
-  // Unmap extra memory reserved before and after the desired block.
-  if (aligned_base != base) {
-    size_t prefix_size = static_cast<size_t>(aligned_base - base);
-    OS::Free(base, prefix_size);
-    request_size -= prefix_size;
-  }
-
-  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
-  DCHECK_LE(aligned_size, request_size);
-
-  if (aligned_size != request_size) {
-    size_t suffix_size = request_size - aligned_size;
-    OS::Free(aligned_base + aligned_size, suffix_size);
-    request_size -= suffix_size;
-  }
-
-  DCHECK(aligned_size == request_size);
-
-  address_ = static_cast<void*>(aligned_base);
-  size_ = aligned_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
-  if (IsReserved()) {
-    bool result = ReleaseRegion(address(), size());
-    DCHECK(result);
-    USE(result);
-  }
-}
-
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
-  OS::Guard(address, OS::CommitPageSize());
-  return true;
-}
-
-void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
-  void* result = mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, kMmapFd,
-                      kMmapFdOffset);
-
-  if (result == MAP_FAILED) return NULL;
-
-  return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(base,
-                         size,
-                         prot,
-                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-                         kMmapFd,
-                         kMmapFdOffset)) {
-    return false;
-  }
-  return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
-  return mmap(base,
-              size,
-              PROT_NONE,
-              MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-              kMmapFd,
-              kMmapFdOffset) != MAP_FAILED;
-}
-
-bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
-                                         void* free_start, size_t free_size) {
-  return munmap(free_start, free_size) == 0;
-}
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-  return munmap(base, size) == 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
-  // TODO(alph): implement for the platform.
-  return false;
-}
+void OS::SignalCodeMovingGC() {}
 
 }  // namespace base
 }  // namespace v8
diff --git a/src/base/platform/platform-fuchsia.cc b/src/base/platform/platform-fuchsia.cc
index 904ae5f..da255ac 100644
--- a/src/base/platform/platform-fuchsia.cc
+++ b/src/base/platform/platform-fuchsia.cc
@@ -17,35 +17,45 @@
   return new PosixDefaultTimezoneCache();
 }
 
+// static
 void* OS::Allocate(const size_t requested, size_t* allocated,
                    OS::MemoryPermission access, void* hint) {
   CHECK(false);  // TODO(scottmg): Port, https://crbug.com/731217.
   return nullptr;
 }
 
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
-  CHECK(false);  // TODO(scottmg): Port, https://crbug.com/731217.
-  return std::vector<SharedLibraryAddress>();
+// static
+void OS::Guard(void* address, size_t size) {
+  CHECK_EQ(ZX_OK, zx_vmar_protect(zx_vmar_root_self(),
+                                  reinterpret_cast<uintptr_t>(address), size,
+                                  0 /*no permissions*/));
 }
 
-void OS::SignalCodeMovingGC() {
-  CHECK(false);  // TODO(scottmg): Port, https://crbug.com/731217.
+// static
+void* OS::ReserveRegion(size_t size, void* hint) {
+  zx_handle_t vmo;
+  if (zx_vmo_create(size, 0, &vmo) != ZX_OK) return nullptr;
+  uintptr_t result;
+  zx_status_t status = zx_vmar_map(zx_vmar_root_self(), 0, vmo, 0, size,
+                                   0 /*no permissions*/, &result);
+  zx_handle_close(vmo);
+  if (status != ZX_OK) return nullptr;
+  return reinterpret_cast<void*>(result);
 }
 
-VirtualMemory::VirtualMemory() : address_(nullptr), size_(0) {}
-
-VirtualMemory::VirtualMemory(size_t size, void* hint)
-    : address_(ReserveRegion(size, hint)), size_(size) {}
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
-    : address_(nullptr), size_(0) {
+// static
+void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
+                               size_t* allocated) {
   DCHECK((alignment % OS::AllocateAlignment()) == 0);
   hint = AlignedAddress(hint, alignment);
   size_t request_size =
       RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
 
   zx_handle_t vmo;
-  if (zx_vmo_create(request_size, 0, &vmo) != ZX_OK) return;
+  if (zx_vmo_create(request_size, 0, &vmo) != ZX_OK) {
+    *allocated = 0;
+    return nullptr;
+  }
   static const char kVirtualMemoryName[] = "v8-virtualmem";
   zx_object_set_property(vmo, ZX_PROP_NAME, kVirtualMemoryName,
                          strlen(kVirtualMemoryName));
@@ -55,7 +65,10 @@
   // Either the vmo is now referenced by the vmar, or we failed and are bailing,
   // so close the vmo either way.
   zx_handle_close(vmo);
-  if (status != ZX_OK) return;
+  if (status != ZX_OK) {
+    *allocated = 0;
+    return nullptr;
+  }
 
   uint8_t* base = reinterpret_cast<uint8_t*>(reservation);
   uint8_t* aligned_base = RoundUp(base, alignment);
@@ -82,83 +95,54 @@
 
   DCHECK(aligned_size == request_size);
 
-  address_ = static_cast<void*>(aligned_base);
-  size_ = aligned_size;
-}
-
-VirtualMemory::~VirtualMemory() {
-  if (IsReserved()) {
-    bool result = ReleaseRegion(address(), size());
-    DCHECK(result);
-    USE(result);
-  }
-}
-
-void VirtualMemory::Reset() {
-  address_ = nullptr;
-  size_ = 0;
-}
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  CHECK(InVM(address, size));
-  return CommitRegion(address, size, is_executable);
-}
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return UncommitRegion(address, size);
-}
-
-bool VirtualMemory::Guard(void* address) {
-  return zx_vmar_protect(zx_vmar_root_self(),
-                         reinterpret_cast<uintptr_t>(address),
-                         OS::CommitPageSize(), 0 /*no permissions*/) == ZX_OK;
+  *allocated = aligned_size;
+  return static_cast<void*>(aligned_base);
 }
 
 // static
-void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
-  zx_handle_t vmo;
-  if (zx_vmo_create(size, 0, &vmo) != ZX_OK) return nullptr;
-  uintptr_t result;
-  zx_status_t status = zx_vmar_map(zx_vmar_root_self(), 0, vmo, 0, size,
-                                   0 /*no permissions*/, &result);
-  zx_handle_close(vmo);
-  if (status != ZX_OK) return nullptr;
-  return reinterpret_cast<void*>(result);
-}
-
-// static
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
   uint32_t prot = ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE |
                   (is_executable ? ZX_VM_FLAG_PERM_EXECUTE : 0);
-  return zx_vmar_protect(zx_vmar_root_self(), reinterpret_cast<uintptr_t>(base),
-                         size, prot) == ZX_OK;
+  return zx_vmar_protect(zx_vmar_root_self(),
+                         reinterpret_cast<uintptr_t>(address), size,
+                         prot) == ZX_OK;
 }
 
 // static
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
-  return zx_vmar_protect(zx_vmar_root_self(), reinterpret_cast<uintptr_t>(base),
-                         size, 0 /*no permissions*/) == ZX_OK;
+bool OS::UncommitRegion(void* address, size_t size) {
+  return zx_vmar_protect(zx_vmar_root_self(),
+                         reinterpret_cast<uintptr_t>(address), size,
+                         0 /*no permissions*/) == ZX_OK;
 }
 
 // static
-bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
-                                         void* free_start, size_t free_size) {
+bool OS::ReleasePartialRegion(void* address, size_t size, void* free_start,
+                              size_t free_size) {
   return zx_vmar_unmap(zx_vmar_root_self(),
                        reinterpret_cast<uintptr_t>(free_start),
                        free_size) == ZX_OK;
 }
 
 // static
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-  return zx_vmar_unmap(zx_vmar_root_self(), reinterpret_cast<uintptr_t>(base),
-                       size) == ZX_OK;
+bool OS::ReleaseRegion(void* address, size_t size) {
+  return zx_vmar_unmap(zx_vmar_root_self(),
+                       reinterpret_cast<uintptr_t>(address), size) == ZX_OK;
 }
 
 // static
-bool VirtualMemory::HasLazyCommits() {
+bool OS::HasLazyCommits() {
   // TODO(scottmg): Port, https://crbug.com/731217.
   return false;
 }
 
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+  CHECK(false);  // TODO(scottmg): Port, https://crbug.com/731217.
+  return std::vector<SharedLibraryAddress>();
+}
+
+void OS::SignalCodeMovingGC() {
+  CHECK(false);  // TODO(scottmg): Port, https://crbug.com/731217.
+}
+
 }  // namespace base
 }  // namespace v8
diff --git a/src/base/platform/platform-linux.cc b/src/base/platform/platform-linux.cc
index 24e3ef2..326d7b0 100644
--- a/src/base/platform/platform-linux.cc
+++ b/src/base/platform/platform-linux.cc
@@ -97,16 +97,120 @@
   return new PosixDefaultTimezoneCache();
 }
 
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
 void* OS::Allocate(const size_t requested, size_t* allocated,
                    OS::MemoryPermission access, void* hint) {
   const size_t msize = RoundUp(requested, AllocateAlignment());
   int prot = GetProtectionFromMemoryPermission(access);
-  void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd,
+                     kMmapFdOffset);
   if (mbase == MAP_FAILED) return NULL;
   *allocated = msize;
   return mbase;
 }
 
+// static
+void* OS::ReserveRegion(size_t size, void* hint) {
+  void* result =
+      mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+           kMmapFd, kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+#if defined(LEAK_SANITIZER)
+  __lsan_register_root_region(result, size);
+#endif
+  return result;
+}
+
+// static
+void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
+                               size_t* allocated) {
+  DCHECK((alignment % OS::AllocateAlignment()) == 0);
+  hint = AlignedAddress(hint, alignment);
+  size_t request_size =
+      RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* reservation =
+      mmap(hint, request_size, PROT_NONE,
+           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, kMmapFd, kMmapFdOffset);
+  if (reservation == MAP_FAILED) {
+    *allocated = 0;
+    return nullptr;
+  }
+
+  uint8_t* base = static_cast<uint8_t*>(reservation);
+  uint8_t* aligned_base = RoundUp(base, alignment);
+  DCHECK_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  DCHECK_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  DCHECK(aligned_size == request_size);
+
+#if defined(LEAK_SANITIZER)
+  __lsan_register_root_region(static_cast<void*>(aligned_base), aligned_size);
+#endif
+
+  *allocated = aligned_size;
+  return static_cast<void*>(aligned_base);
+}
+
+// static
+bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(address, size, prot,
+                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
+                         kMmapFdOffset)) {
+    return false;
+  }
+
+  return true;
+}
+
+// static
+bool OS::UncommitRegion(void* address, size_t size) {
+  return mmap(address, size, PROT_NONE,
+              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+// static
+bool OS::ReleasePartialRegion(void* address, size_t size, void* free_start,
+                              size_t free_size) {
+#if defined(LEAK_SANITIZER)
+  __lsan_unregister_root_region(address, size);
+  __lsan_register_root_region(address, size - free_size);
+#endif
+  return munmap(free_start, free_size) == 0;
+}
+
+// static
+bool OS::ReleaseRegion(void* address, size_t size) {
+#if defined(LEAK_SANITIZER)
+  __lsan_unregister_root_region(address, size);
+#endif
+  return munmap(address, size) == 0;
+}
+
+// static
+bool OS::HasLazyCommits() { return true; }
+
 std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
   std::vector<SharedLibraryAddress> result;
   // This function assumes that the layout of the file is as follows:
@@ -190,131 +294,5 @@
   fclose(f);
 }
 
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) {}
-
-VirtualMemory::VirtualMemory(size_t size, void* hint)
-    : address_(ReserveRegion(size, hint)), size_(size) {}
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
-    : address_(NULL), size_(0) {
-  DCHECK((alignment % OS::AllocateAlignment()) == 0);
-  hint = AlignedAddress(hint, alignment);
-  size_t request_size =
-      RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* reservation =
-      mmap(hint, request_size, PROT_NONE,
-           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, kMmapFd, kMmapFdOffset);
-  if (reservation == MAP_FAILED) return;
-
-  uint8_t* base = static_cast<uint8_t*>(reservation);
-  uint8_t* aligned_base = RoundUp(base, alignment);
-  DCHECK_LE(base, aligned_base);
-
-  // Unmap extra memory reserved before and after the desired block.
-  if (aligned_base != base) {
-    size_t prefix_size = static_cast<size_t>(aligned_base - base);
-    OS::Free(base, prefix_size);
-    request_size -= prefix_size;
-  }
-
-  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
-  DCHECK_LE(aligned_size, request_size);
-
-  if (aligned_size != request_size) {
-    size_t suffix_size = request_size - aligned_size;
-    OS::Free(aligned_base + aligned_size, suffix_size);
-    request_size -= suffix_size;
-  }
-
-  DCHECK(aligned_size == request_size);
-
-  address_ = static_cast<void*>(aligned_base);
-  size_ = aligned_size;
-#if defined(LEAK_SANITIZER)
-  __lsan_register_root_region(address_, size_);
-#endif
-}
-
-VirtualMemory::~VirtualMemory() {
-  if (IsReserved()) {
-    bool result = ReleaseRegion(address(), size());
-    DCHECK(result);
-    USE(result);
-  }
-}
-
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
-}
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  CHECK(InVM(address, size));
-  return CommitRegion(address, size, is_executable);
-}
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
-  CHECK(InVM(address, size));
-  return UncommitRegion(address, size);
-}
-
-bool VirtualMemory::Guard(void* address) {
-  CHECK(InVM(address, OS::CommitPageSize()));
-  OS::Guard(address, OS::CommitPageSize());
-  return true;
-}
-
-void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
-  void* result =
-      mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
-           kMmapFd, kMmapFdOffset);
-
-  if (result == MAP_FAILED) return NULL;
-
-#if defined(LEAK_SANITIZER)
-  __lsan_register_root_region(result, size);
-#endif
-  return result;
-}
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(base, size, prot,
-                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
-                         kMmapFdOffset)) {
-    return false;
-  }
-
-  return true;
-}
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
-  return mmap(base, size, PROT_NONE,
-              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, kMmapFd,
-              kMmapFdOffset) != MAP_FAILED;
-}
-
-bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
-                                         void* free_start, size_t free_size) {
-#if defined(LEAK_SANITIZER)
-  __lsan_unregister_root_region(base, size);
-  __lsan_register_root_region(base, size - free_size);
-#endif
-  return munmap(free_start, free_size) == 0;
-}
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-#if defined(LEAK_SANITIZER)
-  __lsan_unregister_root_region(base, size);
-#endif
-  return munmap(base, size) == 0;
-}
-
-bool VirtualMemory::HasLazyCommits() { return true; }
-
 }  // namespace base
 }  // namespace v8
diff --git a/src/base/platform/platform-macos.cc b/src/base/platform/platform-macos.cc
index 076351e..a5cab97 100644
--- a/src/base/platform/platform-macos.cc
+++ b/src/base/platform/platform-macos.cc
@@ -51,6 +51,7 @@
 static const int kMmapFd = VM_MAKE_TAG(255);
 static const off_t kMmapFdOffset = 0;
 
+// static
 void* OS::Allocate(const size_t requested, size_t* allocated,
                    OS::MemoryPermission access, void* hint) {
   const size_t msize = RoundUp(requested, getpagesize());
@@ -62,58 +63,31 @@
   return mbase;
 }
 
+// static
+void* OS::ReserveRegion(size_t size, void* hint) {
+  void* result =
+      mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+           kMmapFd, kMmapFdOffset);
 
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
-  std::vector<SharedLibraryAddress> result;
-  unsigned int images_count = _dyld_image_count();
-  for (unsigned int i = 0; i < images_count; ++i) {
-    const mach_header* header = _dyld_get_image_header(i);
-    if (header == NULL) continue;
-#if V8_HOST_ARCH_X64
-    uint64_t size;
-    char* code_ptr = getsectdatafromheader_64(
-        reinterpret_cast<const mach_header_64*>(header),
-        SEG_TEXT,
-        SECT_TEXT,
-        &size);
-#else
-    unsigned int size;
-    char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
-#endif
-    if (code_ptr == NULL) continue;
-    const intptr_t slide = _dyld_get_image_vmaddr_slide(i);
-    const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
-    result.push_back(SharedLibraryAddress(_dyld_get_image_name(i), start,
-                                          start + size, slide));
-  }
+  if (result == MAP_FAILED) return nullptr;
+
   return result;
 }
 
-
-void OS::SignalCodeMovingGC() {
-}
-
-TimezoneCache* OS::CreateTimezoneCache() {
-  return new PosixDefaultTimezoneCache();
-}
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-VirtualMemory::VirtualMemory(size_t size, void* hint)
-    : address_(ReserveRegion(size, hint)), size_(size) {}
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
-    : address_(NULL), size_(0) {
+// static
+void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
+                               size_t* allocated) {
   DCHECK((alignment % OS::AllocateAlignment()) == 0);
   hint = AlignedAddress(hint, alignment);
   size_t request_size = RoundUp(size + alignment,
                                 static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* reservation =
-      mmap(hint, request_size, PROT_NONE,
-           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, kMmapFd, kMmapFdOffset);
-  if (reservation == MAP_FAILED) return;
+  void* result = ReserveRegion(request_size, hint);
+  if (result == nullptr) {
+    *allocated = 0;
+    return nullptr;
+  }
 
-  uint8_t* base = static_cast<uint8_t*>(reservation);
+  uint8_t* base = static_cast<uint8_t*>(result);
   uint8_t* aligned_base = RoundUp(base, alignment);
   DCHECK_LE(base, aligned_base);
 
@@ -135,54 +109,12 @@
 
   DCHECK(aligned_size == request_size);
 
-  address_ = static_cast<void*>(aligned_base);
-  size_ = aligned_size;
+  *allocated = aligned_size;
+  return static_cast<void*>(aligned_base);
 }
 
-
-VirtualMemory::~VirtualMemory() {
-  if (IsReserved()) {
-    bool result = ReleaseRegion(address(), size());
-    DCHECK(result);
-    USE(result);
-  }
-}
-
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
-  OS::Guard(address, OS::CommitPageSize());
-  return true;
-}
-
-void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
-  void* result =
-      mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-           kMmapFd, kMmapFdOffset);
-
-  if (result == MAP_FAILED) return NULL;
-
-  return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* address,
-                                 size_t size,
-                                 bool is_executable) {
+// static
+bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
   int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
   if (MAP_FAILED == mmap(address,
                          size,
@@ -195,8 +127,8 @@
   return true;
 }
 
-
-bool VirtualMemory::UncommitRegion(void* address, size_t size) {
+// static
+bool OS::UncommitRegion(void* address, size_t size) {
   return mmap(address,
               size,
               PROT_NONE,
@@ -205,16 +137,49 @@
               kMmapFdOffset) != MAP_FAILED;
 }
 
-bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
-                                         void* free_start, size_t free_size) {
+// static
+bool OS::ReleasePartialRegion(void* address, size_t size, void* free_start,
+                              size_t free_size) {
   return munmap(free_start, free_size) == 0;
 }
 
-bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
+// static
+bool OS::ReleaseRegion(void* address, size_t size) {
   return munmap(address, size) == 0;
 }
 
-bool VirtualMemory::HasLazyCommits() { return true; }
+// static
+bool OS::HasLazyCommits() { return true; }
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+  std::vector<SharedLibraryAddress> result;
+  unsigned int images_count = _dyld_image_count();
+  for (unsigned int i = 0; i < images_count; ++i) {
+    const mach_header* header = _dyld_get_image_header(i);
+    if (header == NULL) continue;
+#if V8_HOST_ARCH_X64
+    uint64_t size;
+    char* code_ptr = getsectdatafromheader_64(
+        reinterpret_cast<const mach_header_64*>(header), SEG_TEXT, SECT_TEXT,
+        &size);
+#else
+    unsigned int size;
+    char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
+#endif
+    if (code_ptr == NULL) continue;
+    const intptr_t slide = _dyld_get_image_vmaddr_slide(i);
+    const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
+    result.push_back(SharedLibraryAddress(_dyld_get_image_name(i), start,
+                                          start + size, slide));
+  }
+  return result;
+}
+
+void OS::SignalCodeMovingGC() {}
+
+TimezoneCache* OS::CreateTimezoneCache() {
+  return new PosixDefaultTimezoneCache();
+}
 
 }  // namespace base
 }  // namespace v8
diff --git a/src/base/platform/platform-openbsd.cc b/src/base/platform/platform-openbsd.cc
index e7f4406..b274c6b 100644
--- a/src/base/platform/platform-openbsd.cc
+++ b/src/base/platform/platform-openbsd.cc
@@ -38,16 +38,105 @@
   return new PosixDefaultTimezoneCache();
 }
 
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
 void* OS::Allocate(const size_t requested, size_t* allocated,
                    OS::MemoryPermission access, void* hint) {
   const size_t msize = RoundUp(requested, AllocateAlignment());
   int prot = GetProtectionFromMemoryPermission(access);
-  void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+  void* mbase =
+      mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);
   if (mbase == MAP_FAILED) return NULL;
   *allocated = msize;
   return mbase;
 }
 
+// static
+void* OS::ReserveRegion(size_t size, void* hint) {
+  void* result =
+      mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+           kMmapFd, kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+  return result;
+}
+
+// static
+void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
+                               size_t* allocated) {
+  DCHECK((alignment % OS::AllocateAlignment()) == 0);
+  hint = AlignedAddress(hint, alignment);
+  size_t request_size =
+      RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* result = ReserveRegion(request_size, hint);
+  if (result == nullptr) {
+    *allocated = 0;
+    return nullptr;
+  }
+
+  uint8_t* base = static_cast<uint8_t*>(result);
+  uint8_t* aligned_base = RoundUp(base, alignment);
+  DCHECK_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  DCHECK_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  DCHECK(aligned_size == request_size);
+
+  *allocated = aligned_size;
+  return static_cast<void*>(aligned_base);
+}
+
+// static
+bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(address, size, prot,
+                         MAP_PRIVATE | MAP_ANON | MAP_FIXED, kMmapFd,
+                         kMmapFdOffset)) {
+    return false;
+  }
+  return true;
+}
+
+// static
+bool OS::UncommitRegion(void* address, size_t size) {
+  return mmap(address, size, PROT_NONE,
+              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+// static
+bool OS::ReleasePartialRegion(void* address, size_t size, void* free_start,
+                              size_t free_size) {
+  return munmap(free_start, free_size) == 0;
+}
+
+// static
+bool OS::ReleaseRegion(void* address, size_t size) {
+  return munmap(address, size) == 0;
+}
+
+// static
+bool OS::HasLazyCommits() {
+  // TODO(alph): implement for the platform.
+  return false;
+}
 
 std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
   std::vector<SharedLibraryAddress> result;
@@ -132,133 +221,5 @@
   fclose(f);
 }
 
-
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-VirtualMemory::VirtualMemory(size_t size, void* hint)
-    : address_(ReserveRegion(size, hint)), size_(size) {}
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
-    : address_(NULL), size_(0) {
-  DCHECK((alignment % OS::AllocateAlignment()) == 0);
-  hint = AlignedAddress(hint, alignment);
-  size_t request_size = RoundUp(size + alignment,
-                                static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* reservation =
-      mmap(hint, request_size, PROT_NONE,
-           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, kMmapFd, kMmapFdOffset);
-  if (reservation == MAP_FAILED) return;
-
-  uint8_t* base = static_cast<uint8_t*>(reservation);
-  uint8_t* aligned_base = RoundUp(base, alignment);
-  DCHECK_LE(base, aligned_base);
-
-  // Unmap extra memory reserved before and after the desired block.
-  if (aligned_base != base) {
-    size_t prefix_size = static_cast<size_t>(aligned_base - base);
-    OS::Free(base, prefix_size);
-    request_size -= prefix_size;
-  }
-
-  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
-  DCHECK_LE(aligned_size, request_size);
-
-  if (aligned_size != request_size) {
-    size_t suffix_size = request_size - aligned_size;
-    OS::Free(aligned_base + aligned_size, suffix_size);
-    request_size -= suffix_size;
-  }
-
-  DCHECK(aligned_size == request_size);
-
-  address_ = static_cast<void*>(aligned_base);
-  size_ = aligned_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
-  if (IsReserved()) {
-    bool result = ReleaseRegion(address(), size());
-    DCHECK(result);
-    USE(result);
-  }
-}
-
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
-  OS::Guard(address, OS::CommitPageSize());
-  return true;
-}
-
-void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
-  void* result =
-      mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-           kMmapFd, kMmapFdOffset);
-
-  if (result == MAP_FAILED) return NULL;
-
-  return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(base,
-                         size,
-                         prot,
-                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-                         kMmapFd,
-                         kMmapFdOffset)) {
-    return false;
-  }
-  return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
-  return mmap(base,
-              size,
-              PROT_NONE,
-              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd,
-              kMmapFdOffset) != MAP_FAILED;
-}
-
-bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
-                                         void* free_start, size_t free_size) {
-  return munmap(free_start, free_size) == 0;
-}
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-  return munmap(base, size) == 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
-  // TODO(alph): implement for the platform.
-  return false;
-}
-
 }  // namespace base
 }  // namespace v8
diff --git a/src/base/platform/platform-posix.cc b/src/base/platform/platform-posix.cc
index 450c89b..825716d 100644
--- a/src/base/platform/platform-posix.cc
+++ b/src/base/platform/platform-posix.cc
@@ -129,6 +129,7 @@
 
 
 // Create guard pages.
+#if !V8_OS_FUCHSIA
 void OS::Guard(void* address, const size_t size) {
 #if V8_OS_CYGWIN
   DWORD oldprotect;
@@ -137,6 +138,7 @@
   mprotect(address, size, PROT_NONE);
 #endif
 }
+#endif  // !V8_OS_FUCHSIA
 
 // Make a region of memory readable and writable.
 void OS::Unprotect(void* address, const size_t size) {
diff --git a/src/base/platform/platform-qnx.cc b/src/base/platform/platform-qnx.cc
index 2e577d0..687a308 100644
--- a/src/base/platform/platform-qnx.cc
+++ b/src/base/platform/platform-qnx.cc
@@ -89,16 +89,103 @@
   return new PosixDefaultTimezoneCache();
 }
 
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
 void* OS::Allocate(const size_t requested, size_t* allocated,
                    OS::MemoryPermission access, void* hint) {
   const size_t msize = RoundUp(requested, AllocateAlignment());
   int prot = GetProtectionFromMemoryPermission(access);
-  void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd,
+                     kMmapFdOffset);
   if (mbase == MAP_FAILED) return NULL;
   *allocated = msize;
   return mbase;
 }
 
+// static
+void* OS::ReserveRegion(size_t size, void* hint) {
+  void* result =
+      mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
+           kMmapFd, kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+  return result;
+}
+
+// static
+void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
+                               size_t* allocated) {
+  DCHECK((alignment % OS::AllocateAlignment()) == 0);
+  hint = AlignedAddress(hint, alignment);
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* result = ReserveRegion(request_size, hint);
+  if (result == nullptr) {
+    *allocated = 0;
+    return nullptr;
+  }
+
+  uint8_t* base = static_cast<uint8_t*>(result);
+  uint8_t* aligned_base = RoundUp(base, alignment);
+  DCHECK_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  DCHECK_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  DCHECK(aligned_size == request_size);
+
+  *allocated = aligned_size;
+  return static_cast<void*>(aligned_base);
+}
+
+// static
+bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(address, size, prot,
+                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
+                         kMmapFdOffset)) {
+    return false;
+  }
+
+  return true;
+}
+
+// static
+bool OS::UncommitRegion(void* address, size_t size) {
+  return mmap(address, size, PROT_NONE,
+              MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_LAZY, kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+// static
+bool OS::ReleasePartialRegion(void* address, size_t size, void* free_start,
+                              size_t free_size) {
+  return munmap(free_start, free_size) == 0;
+}
+
+// static
+bool OS::ReleaseRegion(void* address, size_t size) {
+  return munmap(address, size) == 0;
+}
+
+// static
+bool OS::HasLazyCommits() { return false; }
 
 std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
   std::vector<SharedLibraryAddress> result;
@@ -124,16 +211,16 @@
     return result;
   }
 
-  mapinfos = reinterpret_cast<procfs_mapinfo *>(
-      malloc(num * sizeof(procfs_mapinfo)));
+  mapinfos =
+      reinterpret_cast<procfs_mapinfo*>(malloc(num * sizeof(procfs_mapinfo)));
   if (mapinfos == NULL) {
     close(proc_fd);
     return result;
   }
 
   /* Fill the map entries.  */
-  if (devctl(proc_fd, DCMD_PROC_PAGEDATA,
-      mapinfos, num * sizeof(procfs_mapinfo), &num) != EOK) {
+  if (devctl(proc_fd, DCMD_PROC_PAGEDATA, mapinfos,
+             num * sizeof(procfs_mapinfo), &num) != EOK) {
     free(mapinfos);
     close(proc_fd);
     return result;
@@ -146,8 +233,8 @@
       if (devctl(proc_fd, DCMD_PROC_MAPDEBUG, &map, sizeof(map), 0) != EOK) {
         continue;
       }
-      result.push_back(SharedLibraryAddress(
-          map.info.path, mapinfo->vaddr, mapinfo->vaddr + mapinfo->size));
+      result.push_back(SharedLibraryAddress(map.info.path, mapinfo->vaddr,
+                                            mapinfo->vaddr + mapinfo->size));
     }
   }
   free(mapinfos);
@@ -155,132 +242,7 @@
   return result;
 }
 
-
-void OS::SignalCodeMovingGC() {
-}
-
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-VirtualMemory::VirtualMemory(size_t size, void* hint)
-    : address_(ReserveRegion(size, hint)), size_(size) {}
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
-    : address_(NULL), size_(0) {
-  DCHECK((alignment % OS::AllocateAlignment()) == 0);
-  hint = AlignedAddress(hint, alignment);
-  size_t request_size = RoundUp(size + alignment,
-                                static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* reservation =
-      mmap(hint, request_size, PROT_NONE,
-           MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY, kMmapFd, kMmapFdOffset);
-  if (reservation == MAP_FAILED) return;
-
-  uint8_t* base = static_cast<uint8_t*>(reservation);
-  uint8_t* aligned_base = RoundUp(base, alignment);
-  DCHECK_LE(base, aligned_base);
-
-  // Unmap extra memory reserved before and after the desired block.
-  if (aligned_base != base) {
-    size_t prefix_size = static_cast<size_t>(aligned_base - base);
-    OS::Free(base, prefix_size);
-    request_size -= prefix_size;
-  }
-
-  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
-  DCHECK_LE(aligned_size, request_size);
-
-  if (aligned_size != request_size) {
-    size_t suffix_size = request_size - aligned_size;
-    OS::Free(aligned_base + aligned_size, suffix_size);
-    request_size -= suffix_size;
-  }
-
-  DCHECK(aligned_size == request_size);
-
-  address_ = static_cast<void*>(aligned_base);
-  size_ = aligned_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
-  if (IsReserved()) {
-    bool result = ReleaseRegion(address(), size());
-    DCHECK(result);
-    USE(result);
-  }
-}
-
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
-  OS::Guard(address, OS::CommitPageSize());
-  return true;
-}
-
-void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
-  void* result =
-      mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
-           kMmapFd, kMmapFdOffset);
-
-  if (result == MAP_FAILED) return NULL;
-
-  return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(base,
-                         size,
-                         prot,
-                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
-                         kMmapFd,
-                         kMmapFdOffset)) {
-    return false;
-  }
-
-  return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
-  return mmap(base,
-              size,
-              PROT_NONE,
-              MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_LAZY,
-              kMmapFd,
-              kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-  return munmap(base, size) == 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
-  return false;
-}
+void OS::SignalCodeMovingGC() {}
 
 }  // namespace base
 }  // namespace v8
diff --git a/src/base/platform/platform-solaris.cc b/src/base/platform/platform-solaris.cc
index 4b80b78..0d98ffa 100644
--- a/src/base/platform/platform-solaris.cc
+++ b/src/base/platform/platform-solaris.cc
@@ -58,49 +58,47 @@
 
 TimezoneCache* OS::CreateTimezoneCache() { return new SolarisTimezoneCache(); }
 
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
 void* OS::Allocate(const size_t requested, size_t* allocated,
                    OS::MemoryPermission access, void* hint) {
   const size_t msize = RoundUp(requested, getpagesize());
   int prot = GetProtectionFromMemoryPermission(access);
-  void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+  void* mbase =
+      mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);
 
   if (mbase == MAP_FAILED) return NULL;
   *allocated = msize;
   return mbase;
 }
 
+// static
+void* OS::ReserveRegion(size_t size, void* hint) {
+  void* result =
+      mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+           kMmapFd, kMmapFdOffset);
 
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
-  return std::vector<SharedLibraryAddress>();
+  if (result == MAP_FAILED) return NULL;
+
+  return result;
 }
 
-
-void OS::SignalCodeMovingGC() {
-}
-
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-VirtualMemory::VirtualMemory(size_t size, void* hint)
-    : address_(ReserveRegion(size, hint)), size_(size) {}
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
-    : address_(NULL), size_(0) {
+// static
+void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
+                               size_t* allocated) {
   DCHECK((alignment % OS::AllocateAlignment()) == 0);
   hint = AlignedAddress(hint, alignment);
   size_t request_size = RoundUp(size + alignment,
                                 static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* reservation =
-      mmap(hint, request_size, PROT_NONE,
-           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, kMmapFd, kMmapFdOffset);
-  if (reservation == MAP_FAILED) return;
+  void* result = ReserveRegion(request_size, hint);
+  if (result == nullptr) {
+    *allocated = 0;
+    return nullptr;
+  }
 
-  uint8_t* base = static_cast<uint8_t*>(reservation);
+  uint8_t* base = static_cast<uint8_t*>(result);
   uint8_t* aligned_base = RoundUp(base, alignment);
   DCHECK_LE(base, aligned_base);
 
@@ -122,88 +120,50 @@
 
   DCHECK(aligned_size == request_size);
 
-  address_ = static_cast<void*>(aligned_base);
-  size_ = aligned_size;
+  *allocated = aligned_size;
+  return static_cast<void*>(aligned_base);
 }
 
-
-VirtualMemory::~VirtualMemory() {
-  if (IsReserved()) {
-    bool result = ReleaseRegion(address(), size());
-    DCHECK(result);
-    USE(result);
-  }
-}
-
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
-  OS::Guard(address, OS::CommitPageSize());
-  return true;
-}
-
-void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
-  void* result =
-      mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
-           kMmapFd, kMmapFdOffset);
-
-  if (result == MAP_FAILED) return NULL;
-
-  return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+// static
+bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
   int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(base,
-                         size,
-                         prot,
-                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
-                         kMmapFd,
+  if (MAP_FAILED == mmap(address, size, prot,
+                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
                          kMmapFdOffset)) {
     return false;
   }
   return true;
 }
 
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
-  return mmap(base,
-              size,
-              PROT_NONE,
-              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd,
+// static
+bool OS::UncommitRegion(void* address, size_t size) {
+  return mmap(address, size, PROT_NONE,
+              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, kMmapFd,
               kMmapFdOffset) != MAP_FAILED;
 }
 
-bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
-                                         void* free_start, size_t free_size) {
+// static
+bool OS::ReleasePartialRegion(void* address, size_t size, void* free_start,
+                              size_t free_size) {
   return munmap(free_start, free_size) == 0;
 }
 
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-  return munmap(base, size) == 0;
+// static
+bool OS::ReleaseRegion(void* address, size_t size) {
+  return munmap(address, size) == 0;
 }
 
-
-bool VirtualMemory::HasLazyCommits() {
+// static
+bool OS::HasLazyCommits() {
   // TODO(alph): implement for the platform.
   return false;
 }
 
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+  return std::vector<SharedLibraryAddress>();
+}
+
+void OS::SignalCodeMovingGC() {}
+
 }  // namespace base
 }  // namespace v8
diff --git a/src/base/platform/platform-win32.cc b/src/base/platform/platform-win32.cc
index 385e747..f1670cb 100644
--- a/src/base/platform/platform-win32.cc
+++ b/src/base/platform/platform-win32.cc
@@ -737,6 +737,8 @@
   return reinterpret_cast<void *>(address);
 }
 
+namespace {
+
 static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
                                     void* hint) {
   LPVOID base = NULL;
@@ -762,6 +764,8 @@
   return base;
 }
 
+}  // namespace
+
 void* OS::Allocate(const size_t requested, size_t* allocated,
                    bool is_executable, void* hint) {
   return OS::Allocate(requested, allocated,
@@ -809,18 +813,15 @@
   USE(size);
 }
 
-
 intptr_t OS::CommitPageSize() {
   return 4096;
 }
 
-
 void OS::ProtectCode(void* address, const size_t size) {
   DWORD old_protect;
   VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
 }
 
-
 void OS::Guard(void* address, const size_t size) {
   DWORD oldprotect;
   VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
@@ -831,6 +832,76 @@
   USE(result);
 }
 
+// static
+void* OS::ReserveRegion(size_t size, void* hint) {
+  return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS, hint);
+}
+
+// static
+void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
+                               size_t* allocated) {
+  DCHECK((alignment % OS::AllocateAlignment()) == 0);
+  hint = AlignedAddress(hint, alignment);
+  size_t request_size =
+      RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* address = ReserveRegion(request_size, hint);
+  if (address == nullptr) {
+    *allocated = 0;
+    return nullptr;
+  }
+  uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
+  // Try reducing the size by freeing and then reallocating a specific area.
+  bool result = ReleaseRegion(address, request_size);
+  USE(result);
+  DCHECK(result);
+  address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
+  if (address != nullptr) {
+    request_size = size;
+    DCHECK(base == static_cast<uint8_t*>(address));
+  } else {
+    // Resizing failed, just go with a bigger area.
+    address = ReserveRegion(request_size, hint);
+    if (address == nullptr) {
+      *allocated = 0;
+      return nullptr;
+    }
+  }
+
+  *allocated = request_size;
+  return static_cast<void*>(address);
+}
+
+// static
+bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
+  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+  if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
+    return false;
+  }
+  return true;
+}
+
+// static
+bool OS::UncommitRegion(void* address, size_t size) {
+  return VirtualFree(address, size, MEM_DECOMMIT) != 0;
+}
+
+// static
+bool OS::ReleasePartialRegion(void* address, size_t size, void* free_start,
+                              size_t free_size) {
+  return VirtualFree(free_start, free_size, MEM_DECOMMIT) != 0;
+}
+
+// static
+bool OS::ReleaseRegion(void* address, size_t size) {
+  return VirtualFree(address, 0, MEM_RELEASE) != 0;
+}
+
+// static
+bool OS::HasLazyCommits() {
+  // TODO(alph): implement for the platform.
+  return false;
+}
+
 void OS::Sleep(TimeDelta interval) {
   ::Sleep(static_cast<DWORD>(interval.InMilliseconds()));
 }
@@ -1204,108 +1275,6 @@
 #endif
 }
 
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-VirtualMemory::VirtualMemory(size_t size, void* hint)
-    : address_(ReserveRegion(size, hint)), size_(size) {}
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
-    : address_(NULL), size_(0) {
-  DCHECK((alignment % OS::AllocateAlignment()) == 0);
-  hint = AlignedAddress(hint, alignment);
-  size_t request_size = RoundUp(size + alignment,
-                                static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* address = ReserveRegion(request_size, hint);
-  if (address == NULL) return;
-  uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
-  // Try reducing the size by freeing and then reallocating a specific area.
-  bool result = ReleaseRegion(address, request_size);
-  USE(result);
-  DCHECK(result);
-  address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
-  if (address != NULL) {
-    request_size = size;
-    DCHECK(base == static_cast<uint8_t*>(address));
-  } else {
-    // Resizing failed, just go with a bigger area.
-    address = ReserveRegion(request_size, hint);
-    if (address == NULL) return;
-  }
-  address_ = address;
-  size_ = request_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
-  if (IsReserved()) {
-    bool result = ReleaseRegion(address(), size());
-    DCHECK(result);
-    USE(result);
-  }
-}
-
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
-  DCHECK(IsReserved());
-  return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
-  if (NULL == VirtualAlloc(address,
-                           OS::CommitPageSize(),
-                           MEM_COMMIT,
-                           PAGE_NOACCESS)) {
-    return false;
-  }
-  return true;
-}
-
-void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
-  return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS, hint);
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
-  if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
-    return false;
-  }
-  return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
-  return VirtualFree(base, size, MEM_DECOMMIT) != 0;
-}
-
-bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
-                                         void* free_start, size_t free_size) {
-  return VirtualFree(free_start, free_size, MEM_DECOMMIT) != 0;
-}
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-  return VirtualFree(base, 0, MEM_RELEASE) != 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
-  // TODO(alph): implement for the platform.
-  return false;
-}
-
-
 // ----------------------------------------------------------------------------
 // Win32 thread support.
 
diff --git a/src/base/platform/platform.h b/src/base/platform/platform.h
index 7737001..6a34190 100644
--- a/src/base/platform/platform.h
+++ b/src/base/platform/platform.h
@@ -197,6 +197,22 @@
   // Get the Alignment guaranteed by Allocate().
   static size_t AllocateAlignment();
 
+  static void* ReserveRegion(size_t size, void* hint);
+
+  static void* ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
+                                    size_t* allocated);
+
+  static bool CommitRegion(void* address, size_t size, bool is_executable);
+
+  static bool UncommitRegion(void* address, size_t size);
+
+  static bool ReleasePartialRegion(void* base, size_t size, void* free_start,
+                                   size_t free_size);
+
+  static bool ReleaseRegion(void* address, size_t size);
+
+  static bool HasLazyCommits();
+
   // Sleep for a specified time interval.
   static void Sleep(TimeDelta interval);
 
@@ -285,141 +301,6 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
 };
 
-// Represents and controls an area of reserved memory.
-// Control of the reserved memory can be assigned to another VirtualMemory
-// object by calling TakeControl. This removes the reserved memory from the
-// 'from' instance.
-class V8_BASE_EXPORT VirtualMemory {
- public:
-  // Empty VirtualMemory object, controlling no reserved memory.
-  VirtualMemory();
-
-  // Reserves virtual memory with size.
-  explicit VirtualMemory(size_t size, void* hint);
-
-  // Reserves virtual memory containing an area of the given size that
-  // is aligned per alignment. This may not be at the position returned
-  // by address().
-  VirtualMemory(size_t size, size_t alignment, void* hint);
-
-  // Construct a virtual memory by assigning it some already mapped address
-  // and size.
-  VirtualMemory(void* address, size_t size) : address_(address), size_(size) {}
-
-  // Releases the reserved memory, if any, controlled by this VirtualMemory
-  // object.
-  ~VirtualMemory();
-
-  // Returns whether the memory has been reserved.
-  bool IsReserved() const { return address_ != nullptr; }
-
-  // Initialize or resets an embedded VirtualMemory object.
-  void Reset();
-
-  // Returns the start address of the reserved memory.
-  // If the memory was reserved with an alignment, this address is not
-  // necessarily aligned. The user might need to round it up to a multiple of
-  // the alignment to get the start of the aligned block.
-  void* address() const {
-    DCHECK(IsReserved());
-    return address_;
-  }
-
-  void* end() const {
-    DCHECK(IsReserved());
-    return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address_) +
-                                   size_);
-  }
-
-  // Returns the size of the reserved memory. The returned value is only
-  // meaningful when IsReserved() returns true.
-  // If the memory was reserved with an alignment, this size may be larger
-  // than the requested size.
-  size_t size() const { return size_; }
-
-  // Commits real memory. Returns whether the operation succeeded.
-  bool Commit(void* address, size_t size, bool is_executable);
-
-  // Uncommit real memory.  Returns whether the operation succeeded.
-  bool Uncommit(void* address, size_t size);
-
-  // Creates a single guard page at the given address.
-  bool Guard(void* address);
-
-  // Releases the memory after |free_start|. Returns the bytes released.
-  size_t ReleasePartial(void* free_start) {
-    DCHECK(IsReserved());
-    // Notice: Order is important here. The VirtualMemory object might live
-    // inside the allocated region.
-    const size_t size = size_ - (reinterpret_cast<size_t>(free_start) -
-                                 reinterpret_cast<size_t>(address_));
-    CHECK(InVM(free_start, size));
-    DCHECK_LT(address_, free_start);
-    DCHECK_LT(free_start, reinterpret_cast<void*>(
-                              reinterpret_cast<size_t>(address_) + size_));
-    const bool result = ReleasePartialRegion(address_, size_, free_start, size);
-    USE(result);
-    DCHECK(result);
-    size_ -= size;
-    return size;
-  }
-
-  void Release() {
-    DCHECK(IsReserved());
-    // Notice: Order is important here. The VirtualMemory object might live
-    // inside the allocated region.
-    void* address = address_;
-    size_t size = size_;
-    CHECK(InVM(address, size));
-    Reset();
-    bool result = ReleaseRegion(address, size);
-    USE(result);
-    DCHECK(result);
-  }
-
-  // Assign control of the reserved region to a different VirtualMemory object.
-  // The old object is no longer functional (IsReserved() returns false).
-  void TakeControl(VirtualMemory* from) {
-    DCHECK(!IsReserved());
-    address_ = from->address_;
-    size_ = from->size_;
-    from->Reset();
-  }
-
-  static void* ReserveRegion(size_t size, void* hint);
-
-  static bool CommitRegion(void* base, size_t size, bool is_executable);
-
-  static bool UncommitRegion(void* base, size_t size);
-
-  // Must be called with a base pointer that has been returned by ReserveRegion
-  // and the same size it was reserved with.
-  static bool ReleaseRegion(void* base, size_t size);
-
-  // Must be called with a base pointer that has been returned by ReserveRegion
-  // and the same size it was reserved with.
-  // [free_start, free_start + free_size] is the memory that will be released.
-  static bool ReleasePartialRegion(void* base, size_t size, void* free_start,
-                                   size_t free_size);
-
-  // Returns true if OS performs lazy commits, i.e. the memory allocation call
-  // defers actual physical memory allocation till the first memory access.
-  // Otherwise returns false.
-  static bool HasLazyCommits();
-
- private:
-  bool InVM(void* address, size_t size) {
-    return (reinterpret_cast<uintptr_t>(address_) <=
-            reinterpret_cast<uintptr_t>(address)) &&
-           ((reinterpret_cast<uintptr_t>(address_) + size_) >=
-            (reinterpret_cast<uintptr_t>(address) + size));
-  }
-
-  void* address_;  // Start address of the virtual memory.
-  size_t size_;  // Size of the virtual memory.
-};
-
-
 // ----------------------------------------------------------------------------
 // Thread
 //
diff --git a/src/d8.cc b/src/d8.cc
index 45f73fd..b8e8a7a 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -138,7 +138,7 @@
   void Free(void* data, size_t length) override {
 #if USE_VM
     if (RoundToPageSize(&length)) {
-      base::VirtualMemory::ReleaseRegion(data, length);
+      base::OS::ReleaseRegion(data, length);
       return;
     }
 #endif
@@ -156,9 +156,9 @@
   }
 #if USE_VM
   void* VirtualMemoryAllocate(size_t length) {
-    void* data = base::VirtualMemory::ReserveRegion(length, nullptr);
-    if (data && !base::VirtualMemory::CommitRegion(data, length, false)) {
-      base::VirtualMemory::ReleaseRegion(data, length);
+    void* data = base::OS::ReserveRegion(length, nullptr);
+    if (data && !base::OS::CommitRegion(data, length, false)) {
+      base::OS::ReleaseRegion(data, length);
       return nullptr;
     }
     MSAN_MEMORY_IS_INITIALIZED(data, length);
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index c42a1e0..ef66226 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -8,7 +8,6 @@
 
 #include "src/base/bits.h"
 #include "src/base/macros.h"
-#include "src/base/platform/platform.h"
 #include "src/base/platform/semaphore.h"
 #include "src/counters.h"
 #include "src/heap/array-buffer-tracker.h"
@@ -118,7 +117,7 @@
 
   DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
 
-  base::VirtualMemory reservation;
+  VirtualMemory reservation;
   if (!AlignedAllocVirtualMemory(
           requested,
           Max(kCodeRangeAreaAlignment,
@@ -408,16 +407,14 @@
 
 bool MemoryAllocator::CommitMemory(Address base, size_t size,
                                    Executability executable) {
-  if (!base::VirtualMemory::CommitRegion(base, size,
-                                         executable == EXECUTABLE)) {
+  if (!base::OS::CommitRegion(base, size, executable == EXECUTABLE)) {
     return false;
   }
   UpdateAllocatedSpaceLimits(base, base + size);
   return true;
 }
 
-
-void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
+void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
                                  Executability executable) {
   // TODO(gc) make code_range part of memory allocator?
   // Code which is part of the code-range does not have its own VirtualMemory.
@@ -439,7 +436,7 @@
     code_range()->FreeRawMemory(base, size);
   } else {
     DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid());
-    bool result = base::VirtualMemory::ReleaseRegion(base, size);
+    bool result = base::OS::ReleaseRegion(base, size);
     USE(result);
     DCHECK(result);
   }
@@ -447,8 +444,8 @@
 
 Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
                                               void* hint,
-                                              base::VirtualMemory* controller) {
-  base::VirtualMemory reservation;
+                                              VirtualMemory* controller) {
+  VirtualMemory reservation;
   if (!AlignedAllocVirtualMemory(size, alignment, hint, &reservation))
     return nullptr;
 
@@ -465,9 +462,9 @@
 
 Address MemoryAllocator::AllocateAlignedMemory(
     size_t reserve_size, size_t commit_size, size_t alignment,
-    Executability executable, void* hint, base::VirtualMemory* controller) {
+    Executability executable, void* hint, VirtualMemory* controller) {
   DCHECK(commit_size <= reserve_size);
-  base::VirtualMemory reservation;
+  VirtualMemory reservation;
   Address base =
       ReserveAlignedMemory(reserve_size, alignment, hint, &reservation);
   if (base == NULL) return NULL;
@@ -525,7 +522,7 @@
 MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
                                      Address area_start, Address area_end,
                                      Executability executable, Space* owner,
-                                     base::VirtualMemory* reservation) {
+                                     VirtualMemory* reservation) {
   MemoryChunk* chunk = FromAddress(base);
 
   DCHECK(base == chunk->address());
@@ -686,7 +683,7 @@
 }
 
 size_t MemoryChunk::CommittedPhysicalMemory() {
-  if (!base::VirtualMemory::HasLazyCommits() || owner()->identity() == LO_SPACE)
+  if (!base::OS::HasLazyCommits() || owner()->identity() == LO_SPACE)
     return size();
   return high_water_mark_.Value();
 }
@@ -719,7 +716,7 @@
   size_t chunk_size;
   Heap* heap = isolate_->heap();
   Address base = nullptr;
-  base::VirtualMemory reservation;
+  VirtualMemory reservation;
   Address area_start = nullptr;
   Address area_end = nullptr;
   void* address_hint = heap->GetRandomMmapAddr();
@@ -860,7 +857,7 @@
 size_t Page::ShrinkToHighWaterMark() {
   // Shrinking only makes sense outside of the CodeRange, where we don't care
   // about address space fragmentation.
-  base::VirtualMemory* reservation = reserved_memory();
+  VirtualMemory* reservation = reserved_memory();
   if (!reservation->IsReserved()) return 0;
 
   // Shrink pages to high water mark. The water mark points either to a filler
@@ -938,7 +935,7 @@
 void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
                                         size_t bytes_to_free,
                                         Address new_area_end) {
-  base::VirtualMemory* reservation = chunk->reserved_memory();
+  VirtualMemory* reservation = chunk->reserved_memory();
   DCHECK(reservation->IsReserved());
   chunk->size_ -= bytes_to_free;
   chunk->area_end_ = new_area_end;
@@ -966,7 +963,7 @@
   isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
                                          chunk->IsEvacuationCandidate());
 
-  base::VirtualMemory* reservation = chunk->reserved_memory();
+  VirtualMemory* reservation = chunk->reserved_memory();
   const size_t size =
       reservation->IsReserved() ? reservation->size() : chunk->size();
   DCHECK_GE(size_.Value(), static_cast<size_t>(size));
@@ -985,7 +982,7 @@
   DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
   chunk->ReleaseAllocatedMemory();
 
-  base::VirtualMemory* reservation = chunk->reserved_memory();
+  VirtualMemory* reservation = chunk->reserved_memory();
   if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
     UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
   } else {
@@ -1078,7 +1075,7 @@
   if (!CommitBlock(reinterpret_cast<Address>(chunk), size, NOT_EXECUTABLE)) {
     return nullptr;
   }
-  base::VirtualMemory reservation(start, size);
+  VirtualMemory reservation(start, size);
   MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
                           NOT_EXECUTABLE, owner, &reservation);
   size_.Increment(size);
@@ -1099,7 +1096,7 @@
 
 
 bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
-  if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
+  if (!base::OS::UncommitRegion(start, size)) return false;
   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
   return true;
 }
@@ -1151,9 +1148,8 @@
   }
 }
 
-
-bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
-                                             Address start, size_t commit_size,
+bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
+                                             size_t commit_size,
                                              size_t reserved_size) {
   // Commit page header (not executable).
   Address header = start;
@@ -1459,7 +1455,7 @@
 
 
 size_t PagedSpace::CommittedPhysicalMemory() {
-  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
+  if (!base::OS::HasLazyCommits()) return CommittedMemory();
   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
   size_t size = 0;
   for (Page* page : *this) {
@@ -2635,7 +2631,7 @@
 
 
 size_t NewSpace::CommittedPhysicalMemory() {
-  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
+  if (!base::OS::HasLazyCommits()) return CommittedMemory();
   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
   size_t size = to_space_.CommittedPhysicalMemory();
   if (from_space_.is_committed()) {
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index 41de4d3..2d455ad 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -354,7 +354,7 @@
       + kUIntptrSize      // uintptr_t flags_
       + kPointerSize      // Address area_start_
       + kPointerSize      // Address area_end_
-      + 2 * kPointerSize  // base::VirtualMemory reservation_
+      + 2 * kPointerSize  // VirtualMemory reservation_
       + kPointerSize      // Address owner_
       + kPointerSize      // Heap* heap_
       + kIntptrSize       // intptr_t progress_bar_
@@ -631,12 +631,12 @@
   static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
                                  Address area_start, Address area_end,
                                  Executability executable, Space* owner,
-                                 base::VirtualMemory* reservation);
+                                 VirtualMemory* reservation);
 
   // Should be called when memory chunk is about to be freed.
   void ReleaseAllocatedMemory();
 
-  base::VirtualMemory* reserved_memory() { return &reservation_; }
+  VirtualMemory* reserved_memory() { return &reservation_; }
 
   size_t size_;
   uintptr_t flags_;
@@ -646,7 +646,7 @@
   Address area_end_;
 
   // If the chunk needs to remember its memory reservation, it is stored here.
-  base::VirtualMemory reservation_;
+  VirtualMemory reservation_;
 
   // The identity of the owning space.  This is tagged as a failure pointer, but
   // no failure can be in an object, so this can be distinguished from any entry
@@ -1070,7 +1070,7 @@
   Isolate* isolate_;
 
   // The reserved range of virtual memory that all code objects are put in.
-  base::VirtualMemory virtual_memory_;
+  VirtualMemory virtual_memory_;
 
   // The global mutex guards free_list_ and allocation_list_ as GC threads may
   // access both lists concurrently to the main thread.
@@ -1338,14 +1338,14 @@
                              Executability executable, Space* space);
 
   Address ReserveAlignedMemory(size_t requested, size_t alignment, void* hint,
-                               base::VirtualMemory* controller);
+                               VirtualMemory* controller);
   Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
                                 size_t alignment, Executability executable,
-                                void* hint, base::VirtualMemory* controller);
+                                void* hint, VirtualMemory* controller);
 
   bool CommitMemory(Address addr, size_t size, Executability executable);
 
-  void FreeMemory(base::VirtualMemory* reservation, Executability executable);
+  void FreeMemory(VirtualMemory* reservation, Executability executable);
   void FreeMemory(Address addr, size_t size, Executability executable);
 
   // Partially release |bytes_to_free| bytes starting at |start_free|. Note that
@@ -1371,8 +1371,8 @@
   // filling it up with a recognizable non-NULL bit pattern.
   void ZapBlock(Address start, size_t size);
 
-  MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
-                                              Address start, size_t commit_size,
+  MUST_USE_RESULT bool CommitExecutableMemory(VirtualMemory* vm, Address start,
+                                              size_t commit_size,
                                               size_t reserved_size);
 
   CodeRange* code_range() { return code_range_; }
@@ -1435,7 +1435,7 @@
   base::AtomicValue<void*> lowest_ever_allocated_;
   base::AtomicValue<void*> highest_ever_allocated_;
 
-  base::VirtualMemory last_chunk_;
+  VirtualMemory last_chunk_;
   Unmapper unmapper_;
 
   friend class heap::TestCodeRangeScope;
@@ -2746,8 +2746,7 @@
   // The semispaces.
   SemiSpace to_space_;
   SemiSpace from_space_;
-  base::VirtualMemory reservation_;
-
+  VirtualMemory reservation_;
 
   HistogramInfo* allocated_histogram_;
   HistogramInfo* promoted_histogram_;
diff --git a/src/heap/store-buffer.cc b/src/heap/store-buffer.cc
index 981aa76..de0fe7d 100644
--- a/src/heap/store-buffer.cc
+++ b/src/heap/store-buffer.cc
@@ -32,7 +32,7 @@
   // Allocate 3x the buffer size, so that we can start the new store buffer
   // aligned to 2x the size.  This lets us use a bit test to detect the end of
   // the area.
-  base::VirtualMemory reservation;
+  VirtualMemory reservation;
   if (!AllocVirtualMemory(kStoreBufferSize * 3, heap_->GetRandomMmapAddr(),
                           &reservation)) {
     V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
diff --git a/src/heap/store-buffer.h b/src/heap/store-buffer.h
index 2c61427..75da764 100644
--- a/src/heap/store-buffer.h
+++ b/src/heap/store-buffer.h
@@ -208,7 +208,7 @@
   // IN_GC mode.
   StoreBufferMode mode_;
 
-  base::VirtualMemory virtual_memory_;
+  VirtualMemory virtual_memory_;
 
   // Callbacks are more efficient than reading out the gc state for every
   // store buffer operation.
diff --git a/test/cctest/test-allocation.cc b/test/cctest/test-allocation.cc
index c06dcc5..99c83b1 100644
--- a/test/cctest/test-allocation.cc
+++ b/test/cctest/test-allocation.cc
@@ -130,7 +130,7 @@
 TEST(AllocVirtualMemoryOOM) {
   AllocationPlatform platform;
   CHECK(!platform.oom_callback_called);
-  v8::base::VirtualMemory result;
+  v8::internal::VirtualMemory result;
   bool success =
       v8::internal::AllocVirtualMemory(GetHugeMemoryAmount(), nullptr, &result);
   // On a few systems, allocation somehow succeeds.
@@ -141,7 +141,7 @@
 TEST(AlignedAllocVirtualMemoryOOM) {
   AllocationPlatform platform;
   CHECK(!platform.oom_callback_called);
-  v8::base::VirtualMemory result;
+  v8::internal::VirtualMemory result;
   bool success = v8::internal::AlignedAllocVirtualMemory(
       GetHugeMemoryAmount(), v8::base::OS::AllocateAlignment(), nullptr,
       &result);
diff --git a/test/cctest/test-platform-linux.cc b/test/cctest/test-platform-linux.cc
index c358227..68932fc 100644
--- a/test/cctest/test-platform-linux.cc
+++ b/test/cctest/test-platform-linux.cc
@@ -25,32 +25,28 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 //
-// Tests of the TokenLock class from lock.h
-
-#include <pthread.h>
-#include <stdlib.h>
-#include <unistd.h>  // for usleep()
-
-#include "src/v8.h"
 
 #include "src/base/platform/platform.h"
 #include "test/cctest/cctest.h"
 
+using OS = v8::base::OS;
+
 namespace v8 {
 namespace internal {
 
-TEST(VirtualMemory) {
-  v8::base::VirtualMemory* vm =
-      new v8::base::VirtualMemory(1 * MB, v8::base::OS::GetRandomMmapAddr());
-  CHECK(vm->IsReserved());
-  void* block_addr = vm->address();
+TEST(OSReserveMemory) {
+  size_t mem_size = 0;
+  void* mem_addr = OS::ReserveAlignedRegion(1 * MB, OS::AllocateAlignment(),
+                                            OS::GetRandomMmapAddr(), &mem_size);
+  CHECK_NE(0, mem_size);
+  CHECK_NOT_NULL(mem_addr);
   size_t block_size = 4 * KB;
-  CHECK(vm->Commit(block_addr, block_size, false));
+  CHECK(OS::CommitRegion(mem_addr, block_size, false));
   // Check whether we can write to memory.
-  int* addr = static_cast<int*>(block_addr);
-  addr[KB-1] = 2;
-  CHECK(vm->Uncommit(block_addr, block_size));
-  delete vm;
+  int* addr = static_cast<int*>(mem_addr);
+  addr[KB - 1] = 2;
+  CHECK(OS::UncommitRegion(mem_addr, block_size));
+  OS::ReleaseRegion(mem_addr, mem_size);
 }
 
 }  // namespace internal
diff --git a/test/cctest/test-platform-win32.cc b/test/cctest/test-platform-win32.cc
index d9d56d8..68932fc 100644
--- a/test/cctest/test-platform-win32.cc
+++ b/test/cctest/test-platform-win32.cc
@@ -25,26 +25,29 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 //
-// Tests of the TokenLock class from lock.h
-
-#include <stdlib.h>
-
-#include "src/v8.h"
 
 #include "src/base/platform/platform.h"
-#include "src/base/win32-headers.h"
 #include "test/cctest/cctest.h"
 
-TEST(VirtualMemory) {
-  v8::base::VirtualMemory* vm =
-      new v8::base::VirtualMemory(1 * i::MB, v8::base::OS::GetRandomMmapAddr());
-  CHECK(vm->IsReserved());
-  void* block_addr = vm->address();
-  size_t block_size = 4 * i::KB;
-  CHECK(vm->Commit(block_addr, block_size, false));
+using OS = v8::base::OS;
+
+namespace v8 {
+namespace internal {
+
+TEST(OSReserveMemory) {
+  size_t mem_size = 0;
+  void* mem_addr = OS::ReserveAlignedRegion(1 * MB, OS::AllocateAlignment(),
+                                            OS::GetRandomMmapAddr(), &mem_size);
+  CHECK_NE(0, mem_size);
+  CHECK_NOT_NULL(mem_addr);
+  size_t block_size = 4 * KB;
+  CHECK(OS::CommitRegion(mem_addr, block_size, false));
   // Check whether we can write to memory.
-  int* addr = static_cast<int*>(block_addr);
-  addr[i::KB - 1] = 2;
-  CHECK(vm->Uncommit(block_addr, block_size));
-  delete vm;
+  int* addr = static_cast<int*>(mem_addr);
+  addr[KB - 1] = 2;
+  CHECK(OS::UncommitRegion(mem_addr, block_size));
+  OS::ReleaseRegion(mem_addr, mem_size);
 }
+
+}  // namespace internal
+}  // namespace v8