[heap] Background allocation supports sweeping

Before actually failing the allocation, let the background thread first
help sweep all pages of that space.

As a drive-by, also rename the allocation functions so that the naming
of background and main-thread allocation is more consistent.
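
A simplified outline of the new background slow path (function names are
taken from this patch; the earlier retry steps are elided, so treat this
as an illustration rather than the exact code):

  base::Optional<std::pair<Address, size_t>>
  PagedSpace::RawRefillLabBackground(LocalHeap* local_heap, ...) {
    // First attempt: allocate directly from the free list.
    if (auto result = TryAllocationFromFreeListBackground(...))
      return result;

    // Contribute to concurrent sweeping and retry (elided).

    // New last resort before giving up: finish sweeping this space,
    // refill the free list and try one more time.
    if (collector->sweeping_in_progress()) {
      collector->DrainSweepingWorklistForSpace(identity());
      {
        ParkedMutexGuard lock(local_heap, &allocation_mutex_);
        RefillFreeList();
      }
      return TryAllocationFromFreeListBackground(...);
    }

    return {};
  }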

Bug: v8:10315
Change-Id: I26d4b622de949d4943e35071cee1df8b3d2889c2
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2297383
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#68843}
diff --git a/src/heap/concurrent-allocator.cc b/src/heap/concurrent-allocator.cc
index 2875eb6..46f6682 100644
--- a/src/heap/concurrent-allocator.cc
+++ b/src/heap/concurrent-allocator.cc
@@ -111,7 +111,7 @@
 }
 
 bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
-  auto result = space_->SlowGetLinearAllocationAreaBackground(
+  auto result = space_->RawRefillLabBackground(
       local_heap_, kLabSize, kMaxLabSize, kWordAligned, origin);
 
   if (!result) return false;
@@ -135,8 +135,8 @@
 
 AllocationResult ConcurrentAllocator::AllocateOutsideLab(
     int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
-  auto result = space_->SlowGetLinearAllocationAreaBackground(
-      local_heap_, object_size, object_size, alignment, origin);
+  auto result = space_->RawRefillLabBackground(local_heap_, object_size,
+                                               object_size, alignment, origin);
   if (!result) return AllocationResult::Retry(OLD_SPACE);
 
   HeapObject object = HeapObject::FromAddress(result->first);
diff --git a/src/heap/paged-spaces.cc b/src/heap/paged-spaces.cc
index 7de78b8..13f266f 100644
--- a/src/heap/paged-spaces.cc
+++ b/src/heap/paged-spaces.cc
@@ -512,8 +512,8 @@
       new PagedSpaceObjectIterator(heap, this));
 }
 
-bool PagedSpace::RefillLabFromFreeListMain(size_t size_in_bytes,
-                                           AllocationOrigin origin) {
+bool PagedSpace::TryAllocationFromFreeListMain(size_t size_in_bytes,
+                                               AllocationOrigin origin) {
   DCHECK(IsAligned(size_in_bytes, kTaggedSize));
   DCHECK_LE(top(), limit());
 #ifdef DEBUG
@@ -561,12 +561,9 @@
   return true;
 }
 
-base::Optional<std::pair<Address, size_t>>
-PagedSpace::SlowGetLinearAllocationAreaBackground(LocalHeap* local_heap,
-                                                  size_t min_size_in_bytes,
-                                                  size_t max_size_in_bytes,
-                                                  AllocationAlignment alignment,
-                                                  AllocationOrigin origin) {
+base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
+    LocalHeap* local_heap, size_t min_size_in_bytes, size_t max_size_in_bytes,
+    AllocationAlignment alignment, AllocationOrigin origin) {
   DCHECK(!is_local_space() && identity() == OLD_SPACE);
   DCHECK_EQ(origin, AllocationOrigin::kRuntime);
 
@@ -589,6 +586,8 @@
         local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
     if (result) return result;
 
+    // Now contribute to sweeping from the background thread and then retry
+    // the allocation.
     Sweeper::FreeSpaceMayContainInvalidatedSlots
         invalidated_slots_in_free_space =
             Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
@@ -620,7 +619,21 @@
     if (result) return result;
   }
 
-  // TODO(dinfuehr): Complete sweeping here and try allocation again.
+  if (collector->sweeping_in_progress()) {
+    // Complete sweeping for this space.
+    collector->DrainSweepingWorklistForSpace(identity());
+
+    {
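+      // Park the thread while acquiring the allocation mutex, so that
+      // waiting for the lock cannot block a safepoint.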
+      ParkedMutexGuard lock(local_heap, &allocation_mutex_);
+      RefillFreeList();
+    }
+
+    // Last attempt to acquire memory from the free list.
+    return TryAllocationFromFreeListBackground(
+        local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
+  }
 
   return {};
 }
@@ -873,13 +884,13 @@
 }
 
 bool OffThreadSpace::RefillLabMain(int size_in_bytes, AllocationOrigin origin) {
-  if (RefillLabFromFreeListMain(size_in_bytes, origin)) return true;
+  if (TryAllocationFromFreeListMain(size_in_bytes, origin)) return true;
 
   if (heap()->CanExpandOldGenerationBackground(size_in_bytes) && Expand()) {
     DCHECK((CountTotalPages() > 1) ||
            (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
-    return RefillLabFromFreeListMain(static_cast<size_t>(size_in_bytes),
-                                     origin);
+    return TryAllocationFromFreeListMain(static_cast<size_t>(size_in_bytes),
+                                         origin);
   }
 
   return false;
@@ -893,7 +904,7 @@
   DCHECK_GE(size_in_bytes, 0);
   const int kMaxPagesToSweep = 1;
 
-  if (RefillLabFromFreeListMain(size_in_bytes, origin)) return true;
+  if (TryAllocationFromFreeListMain(size_in_bytes, origin)) return true;
 
   MarkCompactCollector* collector = heap()->mark_compact_collector();
   // Sweeping is still in progress.
@@ -908,7 +919,8 @@
     RefillFreeList();
 
     // Retry the free list allocation.
-    if (RefillLabFromFreeListMain(static_cast<size_t>(size_in_bytes), origin))
+    if (TryAllocationFromFreeListMain(static_cast<size_t>(size_in_bytes),
+                                      origin))
       return true;
 
     if (ContributeToSweepingMain(size_in_bytes, kMaxPagesToSweep, size_in_bytes,
@@ -923,7 +935,8 @@
     Page* page = main_space->RemovePageSafe(size_in_bytes);
     if (page != nullptr) {
       AddPage(page);
-      if (RefillLabFromFreeListMain(static_cast<size_t>(size_in_bytes), origin))
+      if (TryAllocationFromFreeListMain(static_cast<size_t>(size_in_bytes),
+                                        origin))
         return true;
     }
   }
@@ -937,8 +950,8 @@
       }
       DCHECK((CountTotalPages() > 1) ||
              (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
-      return RefillLabFromFreeListMain(static_cast<size_t>(size_in_bytes),
-                                       origin);
+      return TryAllocationFromFreeListMain(static_cast<size_t>(size_in_bytes),
+                                           origin);
     }
   }
 
@@ -953,7 +966,7 @@
       RefillFreeList();
 
       // Last try to acquire memory from free list.
-      return RefillLabFromFreeListMain(size_in_bytes, origin);
+      return TryAllocationFromFreeListMain(size_in_bytes, origin);
     }
     return false;
   }
@@ -975,7 +988,7 @@
         invalidated_slots_in_free_space);
     RefillFreeList();
     if (max_freed >= size_in_bytes)
-      return RefillLabFromFreeListMain(size_in_bytes, origin);
+      return TryAllocationFromFreeListMain(size_in_bytes, origin);
   }
   return false;
 }
diff --git a/src/heap/paged-spaces.h b/src/heap/paged-spaces.h
index 40a74a5..3caf3c7 100644
--- a/src/heap/paged-spaces.h
+++ b/src/heap/paged-spaces.h
@@ -148,11 +148,10 @@
   // Allocate the requested number of bytes in the space from a background
   // thread.
   V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
-  SlowGetLinearAllocationAreaBackground(LocalHeap* local_heap,
-                                        size_t min_size_in_bytes,
-                                        size_t max_size_in_bytes,
-                                        AllocationAlignment alignment,
-                                        AllocationOrigin origin);
+  RawRefillLabBackground(LocalHeap* local_heap, size_t min_size_in_bytes,
+                         size_t max_size_in_bytes,
+                         AllocationAlignment alignment,
+                         AllocationOrigin origin);
 
   size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
     if (size_in_bytes == 0) return 0;
@@ -364,8 +363,8 @@
   inline AllocationResult TryAllocateLinearlyAligned(
       int* size_in_bytes, AllocationAlignment alignment);
 
-  V8_WARN_UNUSED_RESULT bool RefillLabFromFreeListMain(size_t size_in_bytes,
-                                                       AllocationOrigin origin);
+  V8_WARN_UNUSED_RESULT bool TryAllocationFromFreeListMain(
+      size_t size_in_bytes, AllocationOrigin origin);
 
   V8_WARN_UNUSED_RESULT bool ContributeToSweepingMain(int required_freed_bytes,
                                                       int max_pages,