[scudo][standalone] Introduce the Secondary allocator

Summary:
The Secondary allocator wraps the platform allocation primitives. It is
meant to be used for larger sizes that the Primary can't fulfill, as
it will be slower, and sizes are multiples of the system page size.

This also changes some of the existing code, notably the opaque
platform data being passed to the platform specific functions: we can
shave a couple of syscalls on Fuchsia by storing additional data (this
addresses a TODO).

Reviewers: eugenis, vitalybuka, hctim, morehouse

Reviewed By: morehouse

Subscribers: mgorny, delcypher, jfb, #sanitizers, llvm-commits

Tags: #llvm, #sanitizers

Differential Revision: https://reviews.llvm.org/D60787

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@359097 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/scudo/standalone/CMakeLists.txt b/lib/scudo/standalone/CMakeLists.txt
index c6626c3..adcff2d 100644
--- a/lib/scudo/standalone/CMakeLists.txt
+++ b/lib/scudo/standalone/CMakeLists.txt
@@ -42,6 +42,7 @@
   fuchsia.cc
   linux.cc
   report.cc
+  secondary.cc
   string_utils.cc)
 
 # Enable the SSE 4.2 instruction set for crc32_hw.cc, if available.
@@ -61,6 +62,7 @@
   checksum.h
   flags.h
   flags_parser.h
+  fuchsia.h
   interface.h
   internal_defs.h
   linux.h
@@ -68,6 +70,7 @@
   mutex.h
   platform.h
   report.h
+  secondary.h
   stats.h
   string_utils.h
   vector.h)
diff --git a/lib/scudo/standalone/common.h b/lib/scudo/standalone/common.h
index 89b8d11..988254d 100644
--- a/lib/scudo/standalone/common.h
+++ b/lib/scudo/standalone/common.h
@@ -11,6 +11,9 @@
 
 #include "internal_defs.h"
 
+#include "fuchsia.h"
+#include "linux.h"
+
 #include <stddef.h>
 #include <string.h>
 
@@ -144,20 +147,21 @@
 // - commit memory in a previously reserved space;
 // - commit memory at a random address.
 // As such, only a subset of parameters combinations is valid, which is checked
-// by the function implementation. The Extra parameter allows to pass opaque
+// by the function implementation. The Data parameter allows passing opaque
 // platform specific data to the function.
 // Returns nullptr on error or dies if MAP_ALLOWNOMEM is not specified.
 void *map(void *Addr, uptr Size, const char *Name, uptr Flags = 0,
-          u64 *Extra = nullptr);
+          MapPlatformData *Data = nullptr);
 
 // Indicates that we are getting rid of the whole mapping, which might have
-// further consequences on Extra, depending on the platform.
+// further consequences on Data, depending on the platform.
 #define UNMAP_ALL (1U << 0)
 
-void unmap(void *Addr, uptr Size, uptr Flags = 0, u64 *Extra = nullptr);
+void unmap(void *Addr, uptr Size, uptr Flags = 0,
+           MapPlatformData *Data = nullptr);
 
 void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
-                      u64 *Extra = nullptr);
+                      MapPlatformData *Data = nullptr);
 
 // Internal map & unmap fatal error. This must not call map().
 void NORETURN dieOnMapUnmapError(bool OutOfMemory = false);
diff --git a/lib/scudo/standalone/fuchsia.cc b/lib/scudo/standalone/fuchsia.cc
index a13c220..7a07ccd 100644
--- a/lib/scudo/standalone/fuchsia.cc
+++ b/lib/scudo/standalone/fuchsia.cc
@@ -15,8 +15,7 @@
 #include "string_utils.h"
 
 #include <limits.h> // for PAGE_SIZE
-#include <stdlib.h> // for abort()
-#include <zircon/process.h>
+#include <stdlib.h> // for getenv()
 #include <zircon/sanitizer.h>
 #include <zircon/syscalls.h>
 
@@ -35,69 +34,45 @@
 // with ZX_HANDLE_INVALID.
 COMPILER_CHECK(ZX_HANDLE_INVALID == 0);
 
-struct MapInfo {
-  zx_handle_t Vmar;
-  zx_handle_t Vmo;
-};
-COMPILER_CHECK(sizeof(MapInfo) == sizeof(u64));
-
-static void *allocateVmar(uptr Size, MapInfo *Info, bool AllowNoMem) {
+static void *allocateVmar(uptr Size, MapPlatformData *Data, bool AllowNoMem) {
   // Only scenario so far.
-  DCHECK(Info);
-  DCHECK_EQ(Info->Vmar, ZX_HANDLE_INVALID);
+  DCHECK(Data);
+  DCHECK_EQ(Data->Vmar, ZX_HANDLE_INVALID);
 
-  uintptr_t P;
   const zx_status_t Status = _zx_vmar_allocate(
       _zx_vmar_root_self(),
       ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
-      Size, &Info->Vmar, &P);
+      Size, &Data->Vmar, &Data->VmarBase);
   if (Status != ZX_OK) {
     if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
       dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
     return nullptr;
   }
-  return reinterpret_cast<void *>(P);
+  return reinterpret_cast<void *>(Data->VmarBase);
 }
 
-// Returns the offset of an address range in a Vmar, while checking that said
-// range fully belongs to the Vmar. An alternative would be to keep track of
-// both the base & length to avoid calling this. The tradeoff being a system
-// call vs two extra uptr of storage.
-// TODO(kostyak): revisit the implications of both options.
-static uint64_t getOffsetInVmar(zx_handle_t Vmar, void *Addr, uintptr_t Size) {
-  zx_info_vmar_t Info;
-  const zx_status_t Status = _zx_object_get_info(
-      Vmar, ZX_INFO_VMAR, &Info, sizeof(Info), nullptr, nullptr);
-  CHECK_EQ(Status, ZX_OK);
-  const uint64_t Offset = reinterpret_cast<uintptr_t>(Addr) - Info.base;
-  CHECK_LE(Offset, Info.base + Info.len);
-  CHECK_LE(Offset + Size, Info.base + Info.len);
-  return Offset;
-}
-
-void *map(void *Addr, uptr Size, const char *Name, uptr Flags, u64 *Extra) {
+void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
+          MapPlatformData *Data) {
   DCHECK_EQ(Size % PAGE_SIZE, 0);
   const bool AllowNoMem = !!(Flags & MAP_ALLOWNOMEM);
-  MapInfo *Info = reinterpret_cast<MapInfo *>(Extra);
 
   // For MAP_NOACCESS, just allocate a Vmar and return.
   if (Flags & MAP_NOACCESS)
-    return allocateVmar(Size, Info, AllowNoMem);
+    return allocateVmar(Size, Data, AllowNoMem);
 
-  const zx_handle_t Vmar = Info ? Info->Vmar : _zx_vmar_root_self();
+  const zx_handle_t Vmar = Data ? Data->Vmar : _zx_vmar_root_self();
   CHECK_NE(Vmar, ZX_HANDLE_INVALID);
 
   zx_status_t Status;
   zx_handle_t Vmo;
   uint64_t VmoSize = 0;
-  if (Info && Info->Vmo != ZX_HANDLE_INVALID) {
+  if (Data && Data->Vmo != ZX_HANDLE_INVALID) {
     // If a Vmo was specified, it's a resize operation.
     CHECK(Addr);
     DCHECK(Flags & MAP_RESIZABLE);
-    Vmo = Info->Vmo;
-    Status = _zx_vmo_get_size(Vmo, &VmoSize);
-    if (Status == ZX_OK)
-      Status = _zx_vmo_set_size(Vmo, VmoSize + Size);
+    Vmo = Data->Vmo;
+    VmoSize = Data->VmoSize;
+    Status = _zx_vmo_set_size(Vmo, VmoSize + Size);
     if (Status != ZX_OK) {
       if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
         dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
@@ -116,15 +91,16 @@
 
   uintptr_t P;
   zx_vm_option_t MapFlags = ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
-  const uint64_t Offset = Addr ? getOffsetInVmar(Vmar, Addr, Size) : 0;
+  const uint64_t Offset =
+      Addr ? reinterpret_cast<uintptr_t>(Addr) - Data->VmarBase : 0;
   if (Offset)
     MapFlags |= ZX_VM_SPECIFIC;
   Status = _zx_vmar_map(Vmar, MapFlags, Offset, Vmo, VmoSize, Size, &P);
   // No need to track the Vmo if we don't intend on resizing it. Close it.
   if (Flags & MAP_RESIZABLE) {
-    DCHECK(Info);
-    DCHECK_EQ(Info->Vmo, ZX_HANDLE_INVALID);
-    Info->Vmo = Vmo;
+    DCHECK(Data);
+    DCHECK_EQ(Data->Vmo, ZX_HANDLE_INVALID);
+    Data->Vmo = Vmo;
   } else {
     CHECK_EQ(_zx_handle_close(Vmo), ZX_OK);
   }
@@ -133,42 +109,41 @@
       dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
     return nullptr;
   }
+  if (Data)
+    Data->VmoSize += Size;
 
   return reinterpret_cast<void *>(P);
 }
 
-void unmap(void *Addr, uptr Size, uptr Flags, u64 *Extra) {
-  MapInfo *Info = reinterpret_cast<MapInfo *>(Extra);
+void unmap(void *Addr, uptr Size, uptr Flags, MapPlatformData *Data) {
   if (Flags & UNMAP_ALL) {
-    DCHECK_NE(Info, nullptr);
-    const zx_handle_t Vmar = Info->Vmar;
+    DCHECK_NE(Data, nullptr);
+    const zx_handle_t Vmar = Data->Vmar;
     DCHECK_NE(Vmar, _zx_vmar_root_self());
     // Destroying the vmar effectively unmaps the whole mapping.
     CHECK_EQ(_zx_vmar_destroy(Vmar), ZX_OK);
     CHECK_EQ(_zx_handle_close(Vmar), ZX_OK);
   } else {
-    const zx_handle_t Vmar = Info ? Info->Vmar : _zx_vmar_root_self();
+    const zx_handle_t Vmar = Data ? Data->Vmar : _zx_vmar_root_self();
     const zx_status_t Status =
         _zx_vmar_unmap(Vmar, reinterpret_cast<uintptr_t>(Addr), Size);
     if (Status != ZX_OK)
       dieOnMapUnmapError();
   }
-  if (Info) {
-    if (Info->Vmo != ZX_HANDLE_INVALID)
-      CHECK_EQ(_zx_handle_close(Info->Vmo), ZX_OK);
-    Info->Vmo = ZX_HANDLE_INVALID;
-    Info->Vmar = ZX_HANDLE_INVALID;
+  if (Data) {
+    if (Data->Vmo != ZX_HANDLE_INVALID)
+      CHECK_EQ(_zx_handle_close(Data->Vmo), ZX_OK);
+    memset(Data, 0, sizeof(*Data));
   }
 }
 
 void releasePagesToOS(UNUSED uptr BaseAddress, uptr Offset, uptr Size,
-                      u64 *Extra) {
-  MapInfo *Info = reinterpret_cast<MapInfo *>(Extra);
-  DCHECK(Info);
-  DCHECK_NE(Info->Vmar, ZX_HANDLE_INVALID);
-  DCHECK_NE(Info->Vmo, ZX_HANDLE_INVALID);
+                      MapPlatformData *Data) {
+  DCHECK(Data);
+  DCHECK_NE(Data->Vmar, ZX_HANDLE_INVALID);
+  DCHECK_NE(Data->Vmo, ZX_HANDLE_INVALID);
   const zx_status_t Status =
-      _zx_vmo_op_range(Info->Vmo, ZX_VMO_OP_DECOMMIT, Offset, Size, NULL, 0);
+      _zx_vmo_op_range(Data->Vmo, ZX_VMO_OP_DECOMMIT, Offset, Size, NULL, 0);
   CHECK_EQ(Status, ZX_OK);
 }
 
@@ -188,7 +163,7 @@
   CHECK_EQ(Status, ZX_OK);
 }
 
-u64 getMonotonicTime() { return _zx_clock_get(ZX_CLOCK_MONOTONIC); }
+u64 getMonotonicTime() { return _zx_clock_get_monotonic(); }
 
 u32 getNumberOfCPUs() { return _zx_system_get_num_cpus(); }
 
diff --git a/lib/scudo/standalone/fuchsia.h b/lib/scudo/standalone/fuchsia.h
new file mode 100644
index 0000000..d6993f8
--- /dev/null
+++ b/lib/scudo/standalone/fuchsia.h
@@ -0,0 +1,31 @@
+//===-- fuchsia.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_FUCHSIA_H_
+#define SCUDO_FUCHSIA_H_
+
+#include "platform.h"
+
+#if SCUDO_FUCHSIA
+
+#include <zircon/process.h>
+
+namespace scudo {
+
+struct MapPlatformData {
+  zx_handle_t Vmar;
+  zx_handle_t Vmo;
+  uintptr_t VmarBase;
+  uint64_t VmoSize;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_FUCHSIA
+
+#endif // SCUDO_FUCHSIA_H_
diff --git a/lib/scudo/standalone/linux.cc b/lib/scudo/standalone/linux.cc
index f4c5fb4..5e695d7 100644
--- a/lib/scudo/standalone/linux.cc
+++ b/lib/scudo/standalone/linux.cc
@@ -44,7 +44,7 @@
 void NORETURN die() { abort(); }
 
 void *map(void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
-          UNUSED u64 *Extra) {
+          UNUSED MapPlatformData *Data) {
   int MmapFlags = MAP_PRIVATE | MAP_ANON;
   if (Flags & MAP_NOACCESS)
     MmapFlags |= MAP_NORESERVE;
@@ -68,13 +68,14 @@
   return P;
 }
 
-void unmap(void *Addr, uptr Size, UNUSED uptr Flags, UNUSED u64 *Extra) {
+void unmap(void *Addr, uptr Size, UNUSED uptr Flags,
+           UNUSED MapPlatformData *Data) {
   if (munmap(Addr, Size) != 0)
     dieOnMapUnmapError();
 }
 
 void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
-                      UNUSED u64 *Extra) {
+                      UNUSED MapPlatformData *Data) {
   void *Addr = reinterpret_cast<void *>(BaseAddress + Offset);
   while (madvise(Addr, Size, MADV_DONTNEED) == -1 && errno == EAGAIN) {
   }
diff --git a/lib/scudo/standalone/linux.h b/lib/scudo/standalone/linux.h
index 32ed871..92c9eb5 100644
--- a/lib/scudo/standalone/linux.h
+++ b/lib/scudo/standalone/linux.h
@@ -15,6 +15,9 @@
 
 namespace scudo {
 
+// MapPlatformData is unused on Linux, define it as a minimally sized structure.
+struct MapPlatformData {};
+
 #if SCUDO_ANDROID
 
 #if defined(__aarch64__)
diff --git a/lib/scudo/standalone/list.h b/lib/scudo/standalone/list.h
index 8d814dd..139e73e 100644
--- a/lib/scudo/standalone/list.h
+++ b/lib/scudo/standalone/list.h
@@ -110,9 +110,9 @@
       CHECK_EQ(Last, 0);
     } else {
       uptr count = 0;
-      for (Item *i = First;; i = i->Next) {
+      for (Item *I = First;; I = I->Next) {
         count++;
-        if (i == Last)
+        if (I == Last)
           break;
       }
       CHECK_EQ(size(), count);
diff --git a/lib/scudo/standalone/secondary.cc b/lib/scudo/standalone/secondary.cc
new file mode 100644
index 0000000..c0de268
--- /dev/null
+++ b/lib/scudo/standalone/secondary.cc
@@ -0,0 +1,136 @@
+//===-- secondary.cc --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "secondary.h"
+
+#include "string_utils.h"
+
+namespace scudo {
+
+// As with the Primary, the size passed to this function includes any desired
+// alignment, so that the frontend can align the user allocation. The hint
+// parameter allows us to unmap spurious memory when dealing with larger
+// (greater than a page) alignments on 32-bit platforms.
+// Due to the sparsity of address space available on those platforms, requesting
+// an allocation from the Secondary with a large alignment would end up wasting
+// VA space (even though we are not committing the whole thing), hence the need
+// to trim off some of the reserved space.
+// For allocations requested with an alignment greater than or equal to a page,
+// the committed memory will amount to something close to Size - AlignmentHint
+// (pending rounding and headers).
+void *MapAllocator::allocate(uptr Size, uptr AlignmentHint, uptr *BlockEnd) {
+  DCHECK_GT(Size, AlignmentHint);
+  const uptr PageSize = getPageSizeCached();
+  const uptr MapSize =
+      roundUpTo(Size + LargeBlock::getHeaderSize(), PageSize) + 2 * PageSize;
+  MapPlatformData Data = {};
+  uptr MapBase =
+      reinterpret_cast<uptr>(map(nullptr, MapSize, "scudo:secondary",
+                                 MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
+  if (!MapBase)
+    return nullptr;
+  uptr CommitBase = MapBase + PageSize;
+  uptr MapEnd = MapBase + MapSize;
+
+  // In the unlikely event of alignments larger than a page, adjust the amount
+  // of memory we want to commit, and trim the extra memory.
+  if (AlignmentHint >= PageSize) {
+    // For alignments greater than or equal to a page, the user pointer (eg: the
+    // pointer that is returned by the C or C++ allocation APIs) ends up on a
+    // page boundary, and our headers will live in the preceding page.
+    CommitBase = roundUpTo(MapBase + PageSize + 1, AlignmentHint) - PageSize;
+    const uptr NewMapBase = CommitBase - PageSize;
+    DCHECK_GE(NewMapBase, MapBase);
+    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
+    // are less constrained memory wise, and that saves us two syscalls.
+    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
+      unmap(reinterpret_cast<void *>(MapBase), NewMapBase - MapBase, 0, &Data);
+      MapBase = NewMapBase;
+    }
+    const uptr NewMapEnd = CommitBase + PageSize +
+                           roundUpTo((Size - AlignmentHint), PageSize) +
+                           PageSize;
+    DCHECK_LE(NewMapEnd, MapEnd);
+    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
+      unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
+      MapEnd = NewMapEnd;
+    }
+  }
+
+  const uptr CommitSize = MapEnd - PageSize - CommitBase;
+  const uptr Ptr =
+      reinterpret_cast<uptr>(map(reinterpret_cast<void *>(CommitBase),
+                                 CommitSize, "scudo:secondary", 0, &Data));
+  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(Ptr);
+  H->MapBase = MapBase;
+  H->MapSize = MapEnd - MapBase;
+  H->BlockEnd = CommitBase + CommitSize;
+  H->Data = Data;
+  {
+    SpinMutexLock L(&Mutex);
+    if (!Tail) {
+      Tail = H;
+    } else {
+      Tail->Next = H;
+      H->Prev = Tail;
+      Tail = H;
+    }
+    AllocatedBytes += CommitSize;
+    if (LargestSize < CommitSize)
+      LargestSize = CommitSize;
+    NumberOfAllocs++;
+    Stats.add(StatAllocated, CommitSize);
+    Stats.add(StatMapped, H->MapSize);
+  }
+  if (BlockEnd)
+    *BlockEnd = CommitBase + CommitSize;
+  return reinterpret_cast<void *>(Ptr + LargeBlock::getHeaderSize());
+}
+
+void MapAllocator::deallocate(void *Ptr) {
+  LargeBlock::Header *H = LargeBlock::getHeader(Ptr);
+  {
+    SpinMutexLock L(&Mutex);
+    LargeBlock::Header *Prev = H->Prev;
+    LargeBlock::Header *Next = H->Next;
+    if (Prev) {
+      CHECK_EQ(Prev->Next, H);
+      Prev->Next = Next;
+    }
+    if (Next) {
+      CHECK_EQ(Next->Prev, H);
+      Next->Prev = Prev;
+    }
+    if (Tail == H) {
+      CHECK(!Next);
+      Tail = Prev;
+    } else {
+      CHECK(Next);
+    }
+    const uptr CommitSize = H->BlockEnd - reinterpret_cast<uptr>(H);
+    FreedBytes += CommitSize;
+    NumberOfFrees++;
+    Stats.sub(StatAllocated, CommitSize);
+    Stats.sub(StatMapped, H->MapSize);
+  }
+  void *Addr = reinterpret_cast<void *>(H->MapBase);
+  const uptr Size = H->MapSize;
+  MapPlatformData Data;
+  Data = H->Data;
+  unmap(Addr, Size, UNMAP_ALL, &Data);
+}
+
+void MapAllocator::printStats() const {
+  Printf("Stats: MapAllocator: allocated %zd times (%zdK), freed %zd times "
+         "(%zdK), remains %zd (%zdK) max %zdM\n",
+         NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees, FreedBytes >> 10,
+         NumberOfAllocs - NumberOfFrees, (AllocatedBytes - FreedBytes) >> 10,
+         LargestSize >> 20);
+}
+
+} // namespace scudo
diff --git a/lib/scudo/standalone/secondary.h b/lib/scudo/standalone/secondary.h
new file mode 100644
index 0000000..016928c
--- /dev/null
+++ b/lib/scudo/standalone/secondary.h
@@ -0,0 +1,97 @@
+//===-- secondary.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_SECONDARY_H_
+#define SCUDO_SECONDARY_H_
+
+#include "common.h"
+#include "mutex.h"
+#include "stats.h"
+
+namespace scudo {
+
+// This allocator wraps the platform allocation primitives, and as such is on
+// the slower side and should preferably be used for larger sized allocations.
+// Blocks allocated will be preceded and followed by a guard page, and hold
+// their own header that is not checksummed: the guard pages and the Combined
+// header should be enough for our purpose.
+
+namespace LargeBlock {
+
+struct Header {
+  LargeBlock::Header *Prev;
+  LargeBlock::Header *Next;
+  uptr BlockEnd;
+  uptr MapBase;
+  uptr MapSize;
+  MapPlatformData Data;
+};
+
+constexpr uptr getHeaderSize() {
+  return roundUpTo(sizeof(Header), 1U << SCUDO_MIN_ALIGNMENT_LOG);
+}
+
+static Header *getHeader(uptr Ptr) {
+  return reinterpret_cast<Header *>(Ptr - getHeaderSize());
+}
+
+static Header *getHeader(const void *Ptr) {
+  return getHeader(reinterpret_cast<uptr>(Ptr));
+}
+
+} // namespace LargeBlock
+
+class MapAllocator {
+public:
+  void initLinkerInitialized(GlobalStats *S) {
+    Stats.initLinkerInitialized();
+    if (S)
+      S->link(&Stats);
+  }
+  void init(GlobalStats *S) {
+    memset(this, 0, sizeof(*this));
+    initLinkerInitialized(S);
+  }
+
+  void *allocate(uptr Size, uptr AlignmentHint = 0, uptr *BlockEnd = nullptr);
+
+  void deallocate(void *Ptr);
+
+  static uptr getBlockEnd(void *Ptr) {
+    return LargeBlock::getHeader(Ptr)->BlockEnd;
+  }
+
+  static uptr getBlockSize(void *Ptr) {
+    return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
+  }
+
+  void printStats() const;
+
+  void disable() { Mutex.lock(); }
+
+  void enable() { Mutex.unlock(); }
+
+  template <typename F> void iterateOverBlocks(F Callback) const {
+    for (LargeBlock::Header *H = Tail; H != nullptr; H = H->Prev)
+      Callback(reinterpret_cast<uptr>(H) + LargeBlock::getHeaderSize());
+  }
+
+private:
+  StaticSpinMutex Mutex;
+  LargeBlock::Header *Tail;
+  uptr AllocatedBytes;
+  uptr FreedBytes;
+  uptr LargestSize;
+  u32 NumberOfAllocs;
+  u32 NumberOfFrees;
+  LocalStats Stats;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_SECONDARY_H_
diff --git a/lib/scudo/standalone/tests/CMakeLists.txt b/lib/scudo/standalone/tests/CMakeLists.txt
index 41a4a28..233b652 100644
--- a/lib/scudo/standalone/tests/CMakeLists.txt
+++ b/lib/scudo/standalone/tests/CMakeLists.txt
@@ -57,6 +57,7 @@
   map_test.cc
   mutex_test.cc
   report_test.cc
+  secondary_test.cc
   stats_test.cc
   strings_test.cc
   vector_test.cc
diff --git a/lib/scudo/standalone/tests/bytemap_test.cc b/lib/scudo/standalone/tests/bytemap_test.cc
index c83ff3f..615b946 100644
--- a/lib/scudo/standalone/tests/bytemap_test.cc
+++ b/lib/scudo/standalone/tests/bytemap_test.cc
@@ -1,4 +1,4 @@
-//===-- bytemap_test.cc------------------------------------------*- C++ -*-===//
+//===-- bytemap_test.cc -----------------------------------------*- C++ -*-===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
diff --git a/lib/scudo/standalone/tests/map_test.cc b/lib/scudo/standalone/tests/map_test.cc
index ee627d6..dbf67cb 100644
--- a/lib/scudo/standalone/tests/map_test.cc
+++ b/lib/scudo/standalone/tests/map_test.cc
@@ -1,4 +1,4 @@
-//===-- map_test.cc----------------------------------------------*- C++ -*-===//
+//===-- map_test.cc ---------------------------------------------*- C++ -*-===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -16,50 +16,50 @@
 
 TEST(ScudoMapTest, MapNoAccessUnmap) {
   const scudo::uptr Size = 4 * scudo::getPageSizeCached();
-  scudo::u64 PlatformData = 0;
-  void *P = scudo::map(nullptr, Size, MappingName, MAP_NOACCESS, &PlatformData);
+  scudo::MapPlatformData Data = {};
+  void *P = scudo::map(nullptr, Size, MappingName, MAP_NOACCESS, &Data);
   EXPECT_NE(P, nullptr);
   EXPECT_DEATH(memset(P, 0xaa, Size), "");
-  scudo::unmap(P, Size, UNMAP_ALL, &PlatformData);
+  scudo::unmap(P, Size, UNMAP_ALL, &Data);
 }
 
 TEST(ScudoMapTest, MapUnmap) {
   const scudo::uptr Size = 4 * scudo::getPageSizeCached();
-  scudo::u64 PlatformData = 0;
-  void *P = scudo::map(nullptr, Size, MappingName, 0, &PlatformData);
+  scudo::MapPlatformData Data = {};
+  void *P = scudo::map(nullptr, Size, MappingName, 0, &Data);
   EXPECT_NE(P, nullptr);
   memset(P, 0xaa, Size);
-  scudo::unmap(P, Size, 0, &PlatformData);
+  scudo::unmap(P, Size, 0, &Data);
   EXPECT_DEATH(memset(P, 0xbb, Size), "");
 }
 
 TEST(ScudoMapTest, MapWithGuardUnmap) {
   const scudo::uptr PageSize = scudo::getPageSizeCached();
   const scudo::uptr Size = 4 * PageSize;
-  scudo::u64 PlatformData = 0;
+  scudo::MapPlatformData Data = {};
   void *P = scudo::map(nullptr, Size + 2 * PageSize, MappingName, MAP_NOACCESS,
-                       &PlatformData);
+                       &Data);
   EXPECT_NE(P, nullptr);
   void *Q =
       reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(P) + PageSize);
-  EXPECT_EQ(scudo::map(Q, Size, MappingName, 0, &PlatformData), Q);
+  EXPECT_EQ(scudo::map(Q, Size, MappingName, 0, &Data), Q);
   memset(Q, 0xaa, Size);
   EXPECT_DEATH(memset(Q, 0xaa, Size + 1), "");
-  scudo::unmap(P, Size + 2 * PageSize, UNMAP_ALL, &PlatformData);
+  scudo::unmap(P, Size + 2 * PageSize, UNMAP_ALL, &Data);
 }
 
 TEST(ScudoMapTest, MapGrowUnmap) {
   const scudo::uptr PageSize = scudo::getPageSizeCached();
   const scudo::uptr Size = 4 * PageSize;
-  scudo::u64 PlatformData = 0;
-  void *P = scudo::map(nullptr, Size, MappingName, MAP_NOACCESS, &PlatformData);
+  scudo::MapPlatformData Data = {};
+  void *P = scudo::map(nullptr, Size, MappingName, MAP_NOACCESS, &Data);
   EXPECT_NE(P, nullptr);
   void *Q =
       reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(P) + PageSize);
-  EXPECT_EQ(scudo::map(Q, PageSize, MappingName, 0, &PlatformData), Q);
+  EXPECT_EQ(scudo::map(Q, PageSize, MappingName, 0, &Data), Q);
   memset(Q, 0xaa, PageSize);
   Q = reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(Q) + PageSize);
-  EXPECT_EQ(scudo::map(Q, PageSize, MappingName, 0, &PlatformData), Q);
+  EXPECT_EQ(scudo::map(Q, PageSize, MappingName, 0, &Data), Q);
   memset(Q, 0xbb, PageSize);
-  scudo::unmap(P, Size, UNMAP_ALL, &PlatformData);
+  scudo::unmap(P, Size, UNMAP_ALL, &Data);
 }
diff --git a/lib/scudo/standalone/tests/mutex_test.cc b/lib/scudo/standalone/tests/mutex_test.cc
index 9e898eb..ce33db5 100644
--- a/lib/scudo/standalone/tests/mutex_test.cc
+++ b/lib/scudo/standalone/tests/mutex_test.cc
@@ -1,4 +1,4 @@
-//===-- mutex_test.cc--------------------------------------------*- C++ -*-===//
+//===-- mutex_test.cc -------------------------------------------*- C++ -*-===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
diff --git a/lib/scudo/standalone/tests/secondary_test.cc b/lib/scudo/standalone/tests/secondary_test.cc
new file mode 100644
index 0000000..8eed16e
--- /dev/null
+++ b/lib/scudo/standalone/tests/secondary_test.cc
@@ -0,0 +1,137 @@
+//===-- secondary_test.cc ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "secondary.h"
+
+#include "gtest/gtest.h"
+
+#include <stdio.h>
+
+#include <condition_variable>
+#include <mutex>
+#include <thread>
+
+TEST(ScudoSecondaryTest, SecondaryBasic) {
+  scudo::GlobalStats S;
+  S.init();
+  scudo::MapAllocator *L = new scudo::MapAllocator;
+  L->init(&S);
+  const scudo::uptr Size = 1U << 16;
+  void *P = L->allocate(Size);
+  EXPECT_NE(P, nullptr);
+  memset(P, 'A', Size);
+  EXPECT_GE(scudo::MapAllocator::getBlockSize(P), Size);
+  L->deallocate(P);
+  EXPECT_DEATH(memset(P, 'A', Size), "");
+
+  const scudo::uptr Align = 1U << 16;
+  P = L->allocate(Size + Align, Align);
+  EXPECT_NE(P, nullptr);
+  void *AlignedP = reinterpret_cast<void *>(
+      scudo::roundUpTo(reinterpret_cast<scudo::uptr>(P), Align));
+  memset(AlignedP, 'A', Size);
+  L->deallocate(P);
+
+  std::vector<void *> V;
+  for (scudo::u8 I = 0; I < 32; I++)
+    V.push_back(L->allocate(Size));
+  std::random_shuffle(V.begin(), V.end());
+  while (!V.empty()) {
+    L->deallocate(V.back());
+    V.pop_back();
+  }
+  L->printStats();
+}
+
+// This exercises a variety of combinations of size and alignment for the
+// MapAllocator. The size computations done here mimic the ones done by the
+// combined allocator.
+TEST(ScudoSecondaryTest, SecondaryCombinations) {
+  constexpr scudo::uptr MinAlign = FIRST_32_SECOND_64(8, 16);
+  constexpr scudo::uptr HeaderSize = scudo::roundUpTo(8, MinAlign);
+  scudo::MapAllocator *L = new scudo::MapAllocator;
+  L->init(nullptr);
+  for (scudo::uptr SizeLog = 0; SizeLog <= 20; SizeLog++) {
+    for (scudo::uptr AlignLog = FIRST_32_SECOND_64(3, 4); AlignLog <= 16;
+         AlignLog++) {
+      const scudo::uptr Align = 1U << AlignLog;
+      for (scudo::sptr Delta = -128; Delta <= 128; Delta += 8) {
+        if (static_cast<scudo::sptr>(1U << SizeLog) + Delta <= 0)
+          continue;
+        const scudo::uptr UserSize =
+            scudo::roundUpTo((1U << SizeLog) + Delta, MinAlign);
+        const scudo::uptr Size =
+            HeaderSize + UserSize + (Align > MinAlign ? Align - HeaderSize : 0);
+        void *P = L->allocate(Size, Align);
+        EXPECT_NE(P, nullptr);
+        void *AlignedP = reinterpret_cast<void *>(
+            scudo::roundUpTo(reinterpret_cast<scudo::uptr>(P), Align));
+        memset(AlignedP, 0xff, UserSize);
+        L->deallocate(P);
+      }
+    }
+  }
+  L->printStats();
+}
+
+TEST(ScudoSecondaryTest, SecondaryIterate) {
+  scudo::MapAllocator *L = new scudo::MapAllocator;
+  L->init(nullptr);
+  std::vector<void *> V;
+  const scudo::uptr PageSize = scudo::getPageSizeCached();
+  for (scudo::u8 I = 0; I < 32; I++)
+    V.push_back(L->allocate((std::rand() % 16) * PageSize));
+  auto Lambda = [V](scudo::uptr Block) {
+    EXPECT_NE(std::find(V.begin(), V.end(), reinterpret_cast<void *>(Block)),
+              V.end());
+  };
+  L->disable();
+  L->iterateOverBlocks(Lambda);
+  L->enable();
+  while (!V.empty()) {
+    L->deallocate(V.back());
+    V.pop_back();
+  }
+  L->printStats();
+}
+
+std::mutex Mutex;
+std::condition_variable Cv;
+bool Ready = false;
+
+static void performAllocations(scudo::MapAllocator *L) {
+  {
+    std::unique_lock<std::mutex> Lock(Mutex);
+    while (!Ready)
+      Cv.wait(Lock);
+  }
+  std::vector<void *> V;
+  const scudo::uptr PageSize = scudo::getPageSizeCached();
+  for (scudo::u8 I = 0; I < 32; I++)
+    V.push_back(L->allocate((std::rand() % 16) * PageSize));
+  while (!V.empty()) {
+    L->deallocate(V.back());
+    V.pop_back();
+  }
+}
+
+TEST(ScudoSecondaryTest, SecondaryThreadsRace) {
+  scudo::MapAllocator *L = new scudo::MapAllocator;
+  L->init(nullptr);
+  std::thread Threads[10];
+  for (scudo::uptr I = 0; I < 10; I++)
+    Threads[I] = std::thread(performAllocations, L);
+  {
+    std::unique_lock<std::mutex> Lock(Mutex);
+    Ready = true;
+    Cv.notify_all();
+  }
+  for (auto &T : Threads)
+    T.join();
+  L->printStats();
+}