Add GetAllocationSize support to ZebraBlockHeap.
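
The allocation path is factored into AllocateImpl(), which returns the
SlabInfo of the slab it used, and each slab now records the size of its
live allocation. GetAllocationSize() returns that recorded size (or
kUnknownSize for NULL, freed or unrecognized addresses), and the heap
now advertises kHeapSupportsGetAllocationSize. Block allocations record
a size of 2 * kPageSize, covering the block and its right redzone.

A rough usage sketch, mirroring the new unit test (TestZebraBlockHeap is
the heap subclass used by the unittests):

  TestZebraBlockHeap h;
  void* alloc = h.Allocate(67);
  size_t size = h.GetAllocationSize(alloc);  // Returns 67.
  h.GetAllocationSize(NULL);                 // Returns kUnknownSize.
  h.Free(alloc);
  h.GetAllocationSize(alloc);                // kUnknownSize once freed.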

BUG=
R=sebmarchand@chromium.org

Review URL: https://codereview.appspot.com/135210045

git-svn-id: http://sawbuck.googlecode.com/svn/trunk@2288 15e8cca8-e42c-11de-a347-f34a4f72eb7d
diff --git a/syzygy/agent/asan/heaps/zebra_block_heap.cc b/syzygy/agent/asan/heaps/zebra_block_heap.cc
index dbe7266..d79d0be 100644
--- a/syzygy/agent/asan/heaps/zebra_block_heap.cc
+++ b/syzygy/agent/asan/heaps/zebra_block_heap.cc
@@ -54,8 +54,9 @@
   // Initialize the metadata describing the state of our heap.
   slab_info_.resize(slab_count_);
   for (size_t i = 0; i < slab_count_; ++i) {
-    slab_info_[i].allocated_address = NULL;
     slab_info_[i].state = kFreeSlab;
+    slab_info_[i].allocated_address = NULL;
+    slab_info_[i].allocation_size = 0;
     free_slabs_.push(i);
   }
 }
@@ -68,30 +69,15 @@
 }
 
 uint32 ZebraBlockHeap::GetHeapFeatures() const {
-  return kHeapSupportsIsAllocated | kHeapReportsReservations;
+  return kHeapSupportsIsAllocated | kHeapReportsReservations |
+      kHeapSupportsGetAllocationSize;
 }
 
 void* ZebraBlockHeap::Allocate(size_t bytes) {
-  if (bytes == 0 || bytes > kPageSize)
+  SlabInfo* slab_info = AllocateImpl(bytes);
+  if (slab_info == NULL)
     return NULL;
-  common::AutoRecursiveLock lock(lock_);
-
-  if (free_slabs_.empty())
-    return NULL;
-
-  size_t slab_index = free_slabs_.front();
-  DCHECK_NE(kInvalidSlabIndex, slab_index);
-  free_slabs_.pop();
-  uint8* slab_address = GetSlabAddress(slab_index);
-  DCHECK_NE(reinterpret_cast<uint8*>(NULL), slab_address);
-
-  // Push the allocation to the end of the even page.
-  uint8* alloc = slab_address + kPageSize - bytes;
-  alloc = common::AlignDown(alloc, kShadowRatio);
-
-  slab_info_[slab_index].state = kAllocatedSlab;
-  slab_info_[slab_index].allocated_address = alloc;
-  return alloc;
+  return slab_info->allocated_address;
 }
 
 bool ZebraBlockHeap::Free(void* alloc) {
@@ -113,6 +99,7 @@
   // Make the slab available for allocations.
   slab_info_[slab_index].state = kFreeSlab;
   slab_info_[slab_index].allocated_address = NULL;
+  slab_info_[slab_index].allocation_size = 0;
   free_slabs_.push(slab_index);
   return true;
 }
@@ -124,13 +111,25 @@
   size_t slab_index = GetSlabIndex(alloc);
   if (slab_index == kInvalidSlabIndex)
     return false;
+  if (slab_info_[slab_index].state == kFreeSlab)
+    return false;
   if (slab_info_[slab_index].allocated_address != alloc)
     return false;
-  return (slab_info_[slab_index].state != kFreeSlab);
+  return true;
 }
 
 size_t ZebraBlockHeap::GetAllocationSize(void* alloc) {
-  return kUnknownSize;
+  if (alloc == NULL)
+    return kUnknownSize;
+  common::AutoRecursiveLock lock(lock_);
+  size_t slab_index = GetSlabIndex(alloc);
+  if (slab_index == kInvalidSlabIndex)
+    return kUnknownSize;
+  if (slab_info_[slab_index].state == kFreeSlab)
+    return kUnknownSize;
+  if (slab_info_[slab_index].allocated_address != alloc)
+    return kUnknownSize;
+  return slab_info_[slab_index].allocation_size;
 }
 
 void ZebraBlockHeap::Lock() {
@@ -173,9 +172,14 @@
   if (right_redzone_size - kPageSize >= kShadowRatio)
     return NULL;
 
-  // Allocate space for the block. If the allocation fails, it will
-  // return NULL and we'll simply pass it on.
-  void* alloc = Allocate(kPageSize);
+  // Allocate space for the block, and record an allocation size that covers
+  // the whole slab (the block body plus its right redzone).
+  void* alloc = NULL;
+  SlabInfo* slab_info = AllocateImpl(kPageSize);
+  if (slab_info != NULL) {
+    slab_info->allocation_size = 2 * kPageSize;
+    alloc = slab_info->allocated_address;
+  }
 
   DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(alloc) % kShadowRatio);
   return alloc;
@@ -255,6 +259,33 @@
   quarantine_ratio_ = quarantine_ratio;
 }
 
+ZebraBlockHeap::SlabInfo* ZebraBlockHeap::AllocateImpl(size_t bytes) {
+  if (bytes == 0 || bytes > kPageSize)
+    return NULL;
+  common::AutoRecursiveLock lock(lock_);
+
+  if (free_slabs_.empty())
+    return NULL;
+
+  size_t slab_index = free_slabs_.front();
+  DCHECK_NE(kInvalidSlabIndex, slab_index);
+  free_slabs_.pop();
+  uint8* slab_address = GetSlabAddress(slab_index);
+  DCHECK_NE(reinterpret_cast<uint8*>(NULL), slab_address);
+
+  // Push the allocation to the end of the even page.
+  uint8* alloc = slab_address + kPageSize - bytes;
+  alloc = common::AlignDown(alloc, kShadowRatio);
+
+  // Update the slab info.
+  SlabInfo* slab_info = &slab_info_[slab_index];
+  slab_info->state = kAllocatedSlab;
+  slab_info->allocated_address = alloc;
+  slab_info->allocation_size = bytes;
+
+  return slab_info;
+}
+
 bool ZebraBlockHeap::QuarantineInvariantIsSatisfied() {
   return quarantine_.empty() ||
          (quarantine_.size() / static_cast<float>(slab_count_) <=
diff --git a/syzygy/agent/asan/heaps/zebra_block_heap.h b/syzygy/agent/asan/heaps/zebra_block_heap.h
index 9970410..894cffc 100644
--- a/syzygy/agent/asan/heaps/zebra_block_heap.h
+++ b/syzygy/agent/asan/heaps/zebra_block_heap.h
@@ -118,6 +118,29 @@
   void set_quarantine_ratio(float quarantine_ratio);
 
  protected:
+  // The set of possible states of the slabs.
+  enum SlabState {
+    kFreeSlab,
+    kAllocatedSlab,
+    kQuarantinedSlab
+  };
+
+  // Describes the state of a slab.
+  // TODO(chrisha): Make this a bitfield; all three fields fit in 4 bytes
+  //     rather than 12 (29 bits in total):
+  // - 14 bits for the allocation size (0 - 8192).
+  // - 13 bits for the allocation offset in a slab (0 - 4096).
+  // - 2 bits for the slab state.
+  struct SlabInfo {
+    SlabState state;
+    uint8* allocated_address;
+    size_t allocation_size;
+  };
+
+  // Performs an allocation of |bytes| bytes, and returns a pointer to the
+  // SlabInfo of the slab where the allocation was made, or NULL on failure.
+  SlabInfo* AllocateImpl(size_t bytes);
+
   // Checks if the quarantine invariant is satisfied.
   // @returns true if the quarantine invariant is satisfied, false otherwise.
   bool QuarantineInvariantIsSatisfied();
@@ -136,19 +159,6 @@
   // Defines an invalid slab index.
   static const size_t kInvalidSlabIndex = -1;
 
-  // The set of possible states of the slabs.
-  enum SlabState {
-    kFreeSlab,
-    kAllocatedSlab,
-    kQuarantinedSlab
-  };
-
-  // Describes the slab state.
-  struct SlabInfo {
-    SlabState state;
-    uint8* allocated_address;
-  };
-
   // Heap memory address.
   uint8* heap_address_;
 
diff --git a/syzygy/agent/asan/heaps/zebra_block_heap_unittest.cc b/syzygy/agent/asan/heaps/zebra_block_heap_unittest.cc
index caf90cb..6203b44 100644
--- a/syzygy/agent/asan/heaps/zebra_block_heap_unittest.cc
+++ b/syzygy/agent/asan/heaps/zebra_block_heap_unittest.cc
@@ -74,7 +74,8 @@
 TEST(ZebraBlockHeapTest, FeaturesAreValid) {
   TestZebraBlockHeap h;
   EXPECT_EQ(HeapInterface::kHeapSupportsIsAllocated |
-                HeapInterface::kHeapReportsReservations,
+                HeapInterface::kHeapReportsReservations |
+                HeapInterface::kHeapSupportsGetAllocationSize,
             h.GetHeapFeatures());
 }
 
@@ -388,6 +389,14 @@
   EXPECT_FALSE(h.IsAllocated(a));
 }
 
+TEST(ZebraBlockHeapTest, GetAllocationSize) {
+  TestZebraBlockHeap h;
+
+  void* alloc = h.Allocate(67);
+  ASSERT_TRUE(alloc != NULL);
+  EXPECT_EQ(67u, h.GetAllocationSize(alloc));
+}
+
 TEST(ZebraBlockHeapTest, PushPopInvariant) {
   TestZebraBlockHeap h;
   BlockLayout layout = {};