[heap] Add global memory controller

Provide a global memory controller used to compute limits for combined
on-heap and embedder memory. The global controller uses the same
mechanism (gc speed, mutator speed) and growing factors as the regular
on-heap controller.

Rely on V8's existing mechanisms for the configured state, which stop the
limit from shrinking.

Bug: chromium:948807
Change-Id: I3283a2c28e6ab889f8d2ad85c9b67b8f234b9900
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1619762
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#61712}
diff --git a/include/v8.h b/include/v8.h
index 8b23e02..3444f78 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -7138,6 +7138,24 @@
     virtual void VisitTracedGlobalHandle(const TracedGlobal<Value>& value) = 0;
   };
 
+  /**
+   * Summary of a garbage collection cycle. See |TraceEpilogue| on how the
+   * summary is reported.
+   */
+  struct TraceSummary {
+    /**
+     * Time spent managing the retained memory in milliseconds. This can e.g.
+     * include the time tracing through objects in the embedder.
+     */
+    double time;
+
+    /**
+     * Memory retained by the embedder through the |EmbedderHeapTracer|
+     * mechanism in bytes.
+     */
+    size_t allocated_size;
+  };
+
   virtual ~EmbedderHeapTracer() = default;
 
   /**
@@ -7184,9 +7202,12 @@
   /**
    * Called at the end of a GC cycle.
    *
-   * Note that allocation is *not* allowed within |TraceEpilogue|.
+   * Note that allocation is *not* allowed within |TraceEpilogue|. Can be
+   * overridden to fill a |TraceSummary| that is used by V8 to schedule future
+   * garbage collections.
    */
   virtual void TraceEpilogue() = 0;
+  virtual void TraceEpilogue(TraceSummary* trace_summary) { TraceEpilogue(); }
 
   /**
    * Called upon entering the final marking pause. No more incremental marking
@@ -7224,6 +7245,14 @@
   void GarbageCollectionForTesting(EmbedderStackState stack_state);
 
   /*
+   * Called by the embedder to signal newly allocated memory. Not bound to
+   * tracing phases. Embedders should balance how often increments are
+   * reported, as V8 may consult global heuristics on whether to trigger
+   * garbage collection on this change.
+   */
+  void IncreaseAllocatedSize(size_t bytes);
+
+  /*
    * Returns the v8::Isolate this tracer is attached too and |nullptr| if it
    * is not attached to any v8::Isolate.
    */
diff --git a/src/api/api.cc b/src/api/api.cc
index ca0707f..dbc79e4 100644
--- a/src/api/api.cc
+++ b/src/api/api.cc
@@ -41,6 +41,7 @@
 #include "src/frames-inl.h"
 #include "src/global-handles.h"
 #include "src/globals.h"
+#include "src/heap/embedder-tracing.h"
 #include "src/heap/heap-inl.h"
 #include "src/init/bootstrapper.h"
 #include "src/init/icu_util.h"
@@ -10146,6 +10147,17 @@
                                  kGCCallbackFlagForced);
 }
 
+void EmbedderHeapTracer::IncreaseAllocatedSize(size_t bytes) {
+  if (isolate_) {
+    i::LocalEmbedderHeapTracer* const tracer =
+        reinterpret_cast<i::Isolate*>(isolate_)
+            ->heap()
+            ->local_embedder_heap_tracer();
+    DCHECK_NOT_NULL(tracer);
+    tracer->IncreaseAllocatedSize(bytes);
+  }
+}
+
 void EmbedderHeapTracer::RegisterEmbedderReference(
     const TracedGlobal<v8::Value>& ref) {
   if (ref.IsEmpty()) return;
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 1c1326d..b4b8c8d 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -747,6 +747,8 @@
             "Increase max size of the old space to 4 GB for x64 systems with"
             "the physical memory bigger than 16 GB")
 DEFINE_SIZE_T(initial_old_space_size, 0, "initial old space size (in Mbytes)")
+DEFINE_BOOL(global_gc_scheduling, false,
+            "enable GC scheduling based on global memory")
 DEFINE_BOOL(gc_global, false, "always perform global GCs")
 DEFINE_INT(random_gc_interval, 0,
            "Collect garbage after random(0, X) allocations. It overrides "
diff --git a/src/heap/embedder-tracing.cc b/src/heap/embedder-tracing.cc
index db9eb9a..058f33f 100644
--- a/src/heap/embedder-tracing.cc
+++ b/src/heap/embedder-tracing.cc
@@ -5,6 +5,7 @@
 #include "src/heap/embedder-tracing.h"
 
 #include "src/base/logging.h"
+#include "src/heap/gc-tracer.h"
 #include "src/objects/embedder-data-slot.h"
 #include "src/objects/js-objects-inl.h"
 
@@ -31,7 +32,17 @@
 void LocalEmbedderHeapTracer::TraceEpilogue() {
   if (!InUse()) return;
 
-  remote_tracer_->TraceEpilogue();
+  EmbedderHeapTracer::TraceSummary summary;
+  remote_tracer_->TraceEpilogue(&summary);
+  remote_stats_.allocated_size = summary.allocated_size;
+  // Force a check next time increased memory is reported. This allows for
+  // setting limits close to actual heap sizes.
+  remote_stats_.allocated_size_limit_for_check = 0;
+  constexpr double kMinReportingTimeMs = 0.5;
+  if (summary.time > kMinReportingTimeMs) {
+    isolate_->heap()->tracer()->RecordEmbedderSpeed(summary.allocated_size,
+                                                    summary.time);
+  }
 }
 
 void LocalEmbedderHeapTracer::EnterFinalPause() {
@@ -100,5 +111,12 @@
   FlushWrapperCacheIfFull();
 }
 
+void LocalEmbedderHeapTracer::StartIncrementalMarkingIfNeeded() {
+  Heap* heap = isolate_->heap();
+  heap->StartIncrementalMarkingIfAllocationLimitIsReached(
+      heap->GCFlagsForIncrementalMarking(),
+      kGCCallbackScheduleIdleGarbageCollection);
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/heap/embedder-tracing.h b/src/heap/embedder-tracing.h
index e2b239e..e4e4f52 100644
--- a/src/heap/embedder-tracing.h
+++ b/src/heap/embedder-tracing.h
@@ -76,7 +76,27 @@
     embedder_worklist_empty_ = is_empty;
   }
 
+  void IncreaseAllocatedSize(size_t bytes) {
+    remote_stats_.allocated_size += bytes;
+    remote_stats_.accumulated_allocated_size += bytes;
+    if (remote_stats_.allocated_size >
+        remote_stats_.allocated_size_limit_for_check) {
+      StartIncrementalMarkingIfNeeded();
+      remote_stats_.allocated_size_limit_for_check =
+          remote_stats_.allocated_size + kEmbedderAllocatedThreshold;
+    }
+  }
+
+  void StartIncrementalMarkingIfNeeded();
+
+  size_t allocated_size() const { return remote_stats_.allocated_size; }
+  size_t accumulated_allocated_size() const {
+    return remote_stats_.accumulated_allocated_size;
+  }
+
  private:
+  static constexpr size_t kEmbedderAllocatedThreshold = 128 * KB;
+
   Isolate* const isolate_;
   EmbedderHeapTracer* remote_tracer_ = nullptr;
 
@@ -88,6 +108,19 @@
   // segments of potential embedder fields to move to the main thread.
   bool embedder_worklist_empty_ = false;
 
+  struct RemoteStatistics {
+    // Allocated size of objects in bytes reported by the embedder. Updated via
+    // TraceSummary at the end of tracing and incrementally when the GC is not
+    // in progress.
+    size_t allocated_size = 0;
+    // Limit for |allocated_size| in bytes to avoid checking for starting a GC
+    // on each increment.
+    size_t allocated_size_limit_for_check = 0;
+    // Total accumulated bytes allocated by the embedder. Monotonically
+    // increasing value. Used to approximate allocation rate.
+    size_t accumulated_allocated_size = 0;
+  } remote_stats_;
+
   friend class EmbedderStackStateScope;
 };
 
diff --git a/src/heap/gc-tracer.cc b/src/heap/gc-tracer.cc
index bd25740..620eb0f 100644
--- a/src/heap/gc-tracer.cc
+++ b/src/heap/gc-tracer.cc
@@ -191,6 +191,7 @@
   recorded_incremental_mark_compacts_.Reset();
   recorded_new_generation_allocations_.Reset();
   recorded_old_generation_allocations_.Reset();
+  recorded_embedder_generation_allocations_.Reset();
   recorded_context_disposal_times_.Reset();
   recorded_survival_ratios_.Reset();
   start_counter_ = 0;
@@ -221,7 +222,8 @@
   previous_ = current_;
   double start_time = heap_->MonotonicallyIncreasingTimeInMs();
   SampleAllocation(start_time, heap_->NewSpaceAllocationCounter(),
-                   heap_->OldGenerationAllocationCounter());
+                   heap_->OldGenerationAllocationCounter(),
+                   heap_->EmbedderAllocationCounter());
 
   switch (collector) {
     case SCAVENGER:
@@ -375,15 +377,16 @@
   }
 }
 
-
 void GCTracer::SampleAllocation(double current_ms,
                                 size_t new_space_counter_bytes,
-                                size_t old_generation_counter_bytes) {
+                                size_t old_generation_counter_bytes,
+                                size_t embedder_allocation_bytes) {
   if (allocation_time_ms_ == 0) {
     // It is the first sample.
     allocation_time_ms_ = current_ms;
     new_space_allocation_counter_bytes_ = new_space_counter_bytes;
     old_generation_allocation_counter_bytes_ = old_generation_counter_bytes;
+    embedder_allocation_counter_bytes_ = embedder_allocation_bytes;
     return;
   }
   // This assumes that counters are unsigned integers so that the subtraction
@@ -392,6 +395,8 @@
       new_space_counter_bytes - new_space_allocation_counter_bytes_;
   size_t old_generation_allocated_bytes =
       old_generation_counter_bytes - old_generation_allocation_counter_bytes_;
+  size_t embedder_allocated_bytes =
+      embedder_allocation_bytes - embedder_allocation_counter_bytes_;
   double duration = current_ms - allocation_time_ms_;
   allocation_time_ms_ = current_ms;
   new_space_allocation_counter_bytes_ = new_space_counter_bytes;
@@ -400,9 +405,9 @@
   new_space_allocation_in_bytes_since_gc_ += new_space_allocated_bytes;
   old_generation_allocation_in_bytes_since_gc_ +=
       old_generation_allocated_bytes;
+  embedder_allocation_in_bytes_since_gc_ += embedder_allocated_bytes;
 }
 
-
 void GCTracer::AddAllocation(double current_ms) {
   allocation_time_ms_ = current_ms;
   if (allocation_duration_since_gc_ > 0) {
@@ -412,10 +417,13 @@
     recorded_old_generation_allocations_.Push(
         MakeBytesAndDuration(old_generation_allocation_in_bytes_since_gc_,
                              allocation_duration_since_gc_));
+    recorded_embedder_generation_allocations_.Push(MakeBytesAndDuration(
+        embedder_allocation_in_bytes_since_gc_, allocation_duration_since_gc_));
   }
   allocation_duration_since_gc_ = 0;
   new_space_allocation_in_bytes_since_gc_ = 0;
   old_generation_allocation_in_bytes_since_gc_ = 0;
+  embedder_allocation_in_bytes_since_gc_ = 0;
 }
 
 
@@ -881,6 +889,16 @@
   }
 }
 
+void GCTracer::RecordEmbedderSpeed(size_t bytes, double duration) {
+  if (duration == 0 || bytes == 0) return;
+  double current_speed = bytes / duration;
+  if (recorded_embedder_speed_ == 0.0) {
+    recorded_embedder_speed_ = current_speed;
+  } else {
+    recorded_embedder_speed_ = (recorded_embedder_speed_ + current_speed) / 2;
+  }
+}
+
 void GCTracer::RecordMutatorUtilization(double mark_compact_end_time,
                                         double mark_compact_duration) {
   if (previous_mark_compact_end_time_ == 0) {
@@ -919,7 +937,6 @@
 }
 
 double GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const {
-  const int kConservativeSpeedInBytesPerMillisecond = 128 * KB;
   if (recorded_incremental_marking_speed_ != 0) {
     return recorded_incremental_marking_speed_;
   }
@@ -929,6 +946,13 @@
   return kConservativeSpeedInBytesPerMillisecond;
 }
 
+double GCTracer::EmbedderSpeedInBytesPerMillisecond() const {
+  if (recorded_embedder_speed_ != 0.0) {
+    return recorded_embedder_speed_;
+  }
+  return kConservativeSpeedInBytesPerMillisecond;
+}
+
 double GCTracer::ScavengeSpeedInBytesPerMillisecond(
     ScavengeSpeedMode mode) const {
   if (mode == kForAllObjects) {
@@ -975,6 +999,15 @@
   return combined_mark_compact_speed_cache_;
 }
 
+double GCTracer::CombineSpeedsInBytesPerMillisecond(double default_speed,
+                                                    double optional_speed) {
+  constexpr double kMinimumSpeed = 0.5;
+  if (optional_speed < kMinimumSpeed) {
+    return default_speed;
+  }
+  return default_speed * optional_speed / (default_speed + optional_speed);
+}
+
 double GCTracer::NewSpaceAllocationThroughputInBytesPerMillisecond(
     double time_ms) const {
   size_t bytes = new_space_allocation_in_bytes_since_gc_;
@@ -991,6 +1024,14 @@
                       MakeBytesAndDuration(bytes, durations), time_ms);
 }
 
+double GCTracer::EmbedderAllocationThroughputInBytesPerMillisecond(
+    double time_ms) const {
+  size_t bytes = embedder_allocation_in_bytes_since_gc_;
+  double durations = allocation_duration_since_gc_;
+  return AverageSpeed(recorded_embedder_generation_allocations_,
+                      MakeBytesAndDuration(bytes, durations), time_ms);
+}
+
 double GCTracer::AllocationThroughputInBytesPerMillisecond(
     double time_ms) const {
   return NewSpaceAllocationThroughputInBytesPerMillisecond(time_ms) +
@@ -1007,6 +1048,12 @@
       kThroughputTimeFrameMs);
 }
 
+double GCTracer::CurrentEmbedderAllocationThroughputInBytesPerMillisecond()
+    const {
+  return EmbedderAllocationThroughputInBytesPerMillisecond(
+      kThroughputTimeFrameMs);
+}
+
 double GCTracer::ContextDisposalRateInMilliseconds() const {
   if (recorded_context_disposal_times_.Count() <
       recorded_context_disposal_times_.kSize)
diff --git a/src/heap/gc-tracer.h b/src/heap/gc-tracer.h
index 54b5dea..00fc111 100644
--- a/src/heap/gc-tracer.h
+++ b/src/heap/gc-tracer.h
@@ -200,6 +200,10 @@
   };
 
   static const int kThroughputTimeFrameMs = 5000;
+  static constexpr double kConservativeSpeedInBytesPerMillisecond = 128 * KB;
+
+  static double CombineSpeedsInBytesPerMillisecond(double default_speed,
+                                                   double optional_speed);
 
   static RuntimeCallCounterId RCSCounterFromScope(Scope::ScopeId id);
 
@@ -217,7 +221,8 @@
 
   // Sample and accumulate bytes allocated since the last GC.
   void SampleAllocation(double current_ms, size_t new_space_counter_bytes,
-                        size_t old_generation_counter_bytes);
+                        size_t old_generation_counter_bytes,
+                        size_t embedder_allocation_bytes);
 
   // Log the accumulated new space allocation bytes.
   void AddAllocation(double current_ms);
@@ -232,9 +237,13 @@
   void AddIncrementalMarkingStep(double duration, size_t bytes);
 
   // Compute the average incremental marking speed in bytes/millisecond.
-  // Returns 0 if no events have been recorded.
+  // Returns a conservative value if no events have been recorded.
   double IncrementalMarkingSpeedInBytesPerMillisecond() const;
 
+  // Compute the average embedder speed in bytes/millisecond.
+  // Returns a conservative value if no events have been recorded.
+  double EmbedderSpeedInBytesPerMillisecond() const;
+
   // Compute the average scavenge speed in bytes/millisecond.
   // Returns 0 if no events have been recorded.
   double ScavengeSpeedInBytesPerMillisecond(
@@ -268,6 +277,12 @@
   double OldGenerationAllocationThroughputInBytesPerMillisecond(
       double time_ms = 0) const;
 
+  // Allocation throughput in the embedder in bytes/millisecond in the
+  // last time_ms milliseconds. Reported through v8::EmbedderHeapTracer.
+  // Returns 0 if no allocation events have been recorded.
+  double EmbedderAllocationThroughputInBytesPerMillisecond(
+      double time_ms = 0) const;
+
   // Allocation throughput in heap in bytes/millisecond in the last time_ms
   // milliseconds.
   // Returns 0 if no allocation events have been recorded.
@@ -283,6 +298,11 @@
   // Returns 0 if no allocation events have been recorded.
   double CurrentOldGenerationAllocationThroughputInBytesPerMillisecond() const;
 
+  // Allocation throughput in the embedder in bytes/millisecond in the last
+  // kThroughputTimeFrameMs milliseconds. Reported through
+  // v8::EmbedderHeapTracer. Returns 0 if no allocation events were recorded.
+  double CurrentEmbedderAllocationThroughputInBytesPerMillisecond() const;
+
   // Computes the context disposal rate in milliseconds. It takes the time
   // frame of the first recorded context disposal to the current time and
   // divides it by the number of recorded events.
@@ -323,6 +343,8 @@
 
   void RecordGCPhasesHistograms(TimedHistogram* gc_timer);
 
+  void RecordEmbedderSpeed(size_t bytes, double duration);
+
  private:
   FRIEND_TEST(GCTracer, AverageSpeed);
   FRIEND_TEST(GCTracerTest, AllocationThroughput);
@@ -414,6 +436,8 @@
 
   double recorded_incremental_marking_speed_;
 
+  double recorded_embedder_speed_ = 0.0;
+
   // Incremental scopes carry more information than just the duration. The infos
   // here are merged back upon starting/stopping the GC tracer.
   IncrementalMarkingInfos
@@ -424,11 +448,13 @@
   double allocation_time_ms_;
   size_t new_space_allocation_counter_bytes_;
   size_t old_generation_allocation_counter_bytes_;
+  size_t embedder_allocation_counter_bytes_;
 
   // Accumulated duration and allocated bytes since the last GC.
   double allocation_duration_since_gc_;
   size_t new_space_allocation_in_bytes_since_gc_;
   size_t old_generation_allocation_in_bytes_since_gc_;
+  size_t embedder_allocation_in_bytes_since_gc_;
 
   double combined_mark_compact_speed_cache_;
 
@@ -448,6 +474,7 @@
   base::RingBuffer<BytesAndDuration> recorded_mark_compacts_;
   base::RingBuffer<BytesAndDuration> recorded_new_generation_allocations_;
   base::RingBuffer<BytesAndDuration> recorded_old_generation_allocations_;
+  base::RingBuffer<BytesAndDuration> recorded_embedder_generation_allocations_;
   base::RingBuffer<double> recorded_context_disposal_times_;
   base::RingBuffer<double> recorded_survival_ratios_;
 
diff --git a/src/heap/heap-controller.cc b/src/heap/heap-controller.cc
index 16e38e0..e89978e 100644
--- a/src/heap/heap-controller.cc
+++ b/src/heap/heap-controller.cc
@@ -49,17 +49,17 @@
 //   F * (1 - MU / (R * (1 - MU))) = 1
 //   F * (R * (1 - MU) - MU) / (R * (1 - MU)) = 1
 //   F = R * (1 - MU) / (R * (1 - MU) - MU)
-double HeapController::GrowingFactor(double gc_speed, double mutator_speed,
-                                     double max_factor) {
+double MemoryController::GrowingFactor(double gc_speed, double mutator_speed,
+                                       double max_factor) {
   DCHECK_LE(min_growing_factor_, max_factor);
   DCHECK_GE(max_growing_factor_, max_factor);
   if (gc_speed == 0 || mutator_speed == 0) return max_factor;
 
   const double speed_ratio = gc_speed / mutator_speed;
 
-  const double a = speed_ratio * (1 - kTargetMutatorUtilization);
-  const double b =
-      speed_ratio * (1 - kTargetMutatorUtilization) - kTargetMutatorUtilization;
+  const double a = speed_ratio * (1 - target_mutator_utlization_);
+  const double b = speed_ratio * (1 - target_mutator_utlization_) -
+                   target_mutator_utlization_;
 
   // The factor is a / b, but we need to check for small b first.
   double factor = (a < b * max_factor) ? a / b : max_factor;
@@ -140,6 +140,31 @@
   return factor;
 }
 
+double GlobalMemoryController::MaxGrowingFactor(size_t curr_max_size) {
+  constexpr double kMinSmallFactor = 1.3;
+  constexpr double kMaxSmallFactor = 2.0;
+  constexpr double kHighFactor = 4.0;
+
+  size_t max_size_in_mb = curr_max_size / MB;
+  max_size_in_mb = Max(max_size_in_mb, kMinSize);
+
+  // If we are on a device with lots of memory, we allow a high heap
+  // growing factor.
+  if (max_size_in_mb >= kMaxSize) {
+    return kHighFactor;
+  }
+
+  DCHECK_GE(max_size_in_mb, kMinSize);
+  DCHECK_LT(max_size_in_mb, kMaxSize);
+
+  // On smaller devices we linearly scale the factor: (X-A)/(B-A)*(D-C)+C
+  double factor = (max_size_in_mb - kMinSize) *
+                      (kMaxSmallFactor - kMinSmallFactor) /
+                      (kMaxSize - kMinSize) +
+                  kMinSmallFactor;
+  return factor;
+}
+
 size_t HeapController::CalculateAllocationLimit(
     size_t curr_size, size_t max_size, double gc_speed, double mutator_speed,
     size_t new_space_capacity, Heap::HeapGrowingMode growing_mode) {
@@ -150,7 +175,25 @@
     Isolate::FromHeap(heap_)->PrintWithTimestamp(
         "[%s] factor %.1f based on mu=%.3f, speed_ratio=%.f "
         "(gc=%.f, mutator=%.f)\n",
-        ControllerName(), factor, kTargetMutatorUtilization,
+        ControllerName(), factor, target_mutator_utlization_,
+        gc_speed / mutator_speed, gc_speed, mutator_speed);
+  }
+
+  return CalculateAllocationLimitBase(curr_size, max_size, factor,
+                                      new_space_capacity, growing_mode);
+}
+
+size_t GlobalMemoryController::CalculateAllocationLimit(
+    size_t curr_size, size_t max_size, double gc_speed, double mutator_speed,
+    size_t new_space_capacity, Heap::HeapGrowingMode growing_mode) {
+  const double max_factor = MaxGrowingFactor(max_size);
+  const double factor = GrowingFactor(gc_speed, mutator_speed, max_factor);
+
+  if (FLAG_trace_gc_verbose) {
+    Isolate::FromHeap(heap_)->PrintWithTimestamp(
+        "[%s] factor %.1f based on mu=%.3f, speed_ratio=%.f "
+        "(gc=%.f, mutator=%.f)\n",
+        ControllerName(), factor, target_mutator_utlization_,
         gc_speed / mutator_speed, gc_speed, mutator_speed);
   }
 
diff --git a/src/heap/heap-controller.h b/src/heap/heap-controller.h
index 374ef78..6fc9895 100644
--- a/src/heap/heap-controller.h
+++ b/src/heap/heap-controller.h
@@ -15,31 +15,38 @@
 
 class V8_EXPORT_PRIVATE MemoryController {
  public:
-  virtual ~MemoryController() = default;
-
   // Computes the growing step when the limit increases.
-  size_t MinimumAllocationLimitGrowingStep(Heap::HeapGrowingMode growing_mode);
+  static size_t MinimumAllocationLimitGrowingStep(
+      Heap::HeapGrowingMode growing_mode);
+
+  virtual ~MemoryController() = default;
 
  protected:
   MemoryController(Heap* heap, double min_growing_factor,
                    double max_growing_factor,
-                   double conservative_growing_factor)
+                   double conservative_growing_factor,
+                   double target_mutator_utlization)
       : heap_(heap),
         min_growing_factor_(min_growing_factor),
         max_growing_factor_(max_growing_factor),
-        conservative_growing_factor_(conservative_growing_factor) {}
+        conservative_growing_factor_(conservative_growing_factor),
+        target_mutator_utlization_(target_mutator_utlization) {}
 
   // Computes the allocation limit to trigger the next garbage collection.
   size_t CalculateAllocationLimitBase(size_t curr_size, size_t max_size,
                                       double factor, size_t additional_bytes,
                                       Heap::HeapGrowingMode growing_mode);
 
+  double GrowingFactor(double gc_speed, double mutator_speed,
+                       double max_factor);
+
   virtual const char* ControllerName() = 0;
 
   Heap* const heap_;
   const double min_growing_factor_;
   const double max_growing_factor_;
   const double conservative_growing_factor_;
+  const double target_mutator_utlization_;
 };
 
 class V8_EXPORT_PRIVATE HeapController : public MemoryController {
@@ -47,9 +54,9 @@
   // Sizes are in MB.
   static constexpr size_t kMinSize = 128 * Heap::kPointerMultiplier;
   static constexpr size_t kMaxSize = 1024 * Heap::kPointerMultiplier;
-  static constexpr double kTargetMutatorUtilization = 0.97;
 
-  explicit HeapController(Heap* heap) : MemoryController(heap, 1.1, 4.0, 1.3) {}
+  explicit HeapController(Heap* heap)
+      : MemoryController(heap, 1.1, 4.0, 1.3, 0.97) {}
 
   size_t CalculateAllocationLimit(size_t curr_size, size_t max_size,
                                   double gc_speed, double mutator_speed,
@@ -57,9 +64,6 @@
                                   Heap::HeapGrowingMode growing_mode);
 
  protected:
-  double GrowingFactor(double gc_speed, double mutator_speed,
-                       double max_factor);
-
   double MaxGrowingFactor(size_t curr_max_size);
 
   const char* ControllerName() override { return "HeapController"; }
@@ -70,6 +74,26 @@
   FRIEND_TEST(HeapControllerTest, OldGenerationAllocationLimit);
 };
 
+class V8_EXPORT_PRIVATE GlobalMemoryController : public MemoryController {
+ public:
+  // Sizes are in MB.
+  static constexpr size_t kMinSize = 128 * Heap::kPointerMultiplier;
+  static constexpr size_t kMaxSize = 1024 * Heap::kPointerMultiplier;
+
+  explicit GlobalMemoryController(Heap* heap)
+      : MemoryController(heap, 1.1, 4.0, 1.3, 0.97) {}
+
+  size_t CalculateAllocationLimit(size_t curr_size, size_t max_size,
+                                  double gc_speed, double mutator_speed,
+                                  size_t new_space_capacity,
+                                  Heap::HeapGrowingMode growing_mode);
+
+ protected:
+  double MaxGrowingFactor(size_t curr_max_size);
+
+  const char* ControllerName() override { return "GlobalMemoryController"; }
+};
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index 0a355332..71347d2 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -183,6 +183,7 @@
           Min(max_old_generation_size_, kMaxInitialOldGenerationSize)),
       memory_pressure_level_(MemoryPressureLevel::kNone),
       old_generation_allocation_limit_(initial_old_generation_size_),
+      global_allocation_limit_(initial_old_generation_size_),
       global_pretenuring_feedback_(kInitialFeedbackCapacity),
       current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
       is_current_gc_forced_(false),
@@ -1526,9 +1527,12 @@
     if (reached_limit == IncrementalMarkingLimit::kSoftLimit) {
       incremental_marking()->incremental_marking_job()->ScheduleTask(this);
     } else if (reached_limit == IncrementalMarkingLimit::kHardLimit) {
-      StartIncrementalMarking(gc_flags,
-                              GarbageCollectionReason::kAllocationLimit,
-                              gc_callback_flags);
+      StartIncrementalMarking(
+          gc_flags,
+          OldGenerationSpaceAvailable() <= new_space_->Capacity()
+              ? GarbageCollectionReason::kAllocationLimit
+              : GarbageCollectionReason::kGlobalAllocationLimit,
+          gc_callback_flags);
     }
   }
 }
@@ -1929,6 +1933,24 @@
   double mutator_speed =
       tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond();
   size_t old_gen_size = OldGenerationSizeOfObjects();
+
+  double global_mutator_speed;
+  double global_gc_speed;
+  size_t global_memory_size;
+  if (UseGlobalMemoryScheduling()) {
+    global_mutator_speed = GCTracer::CombineSpeedsInBytesPerMillisecond(
+        mutator_speed,
+        local_embedder_heap_tracer()
+            ? tracer()
+                  ->CurrentEmbedderAllocationThroughputInBytesPerMillisecond()
+            : 0.0);
+    global_gc_speed = GCTracer::CombineSpeedsInBytesPerMillisecond(
+        gc_speed, local_embedder_heap_tracer()
+                      ? tracer()->EmbedderSpeedInBytesPerMillisecond()
+                      : 0.0);
+    global_memory_size = GlobalSizeOfObjects();
+  }
+
   if (collector == MARK_COMPACTOR) {
     // Register the amount of external allocated memory.
     isolate()->isolate_data()->external_memory_at_last_mark_compact_ =
@@ -1941,7 +1963,13 @@
         heap_controller()->CalculateAllocationLimit(
             old_gen_size, max_old_generation_size_, gc_speed, mutator_speed,
             new_space()->Capacity(), CurrentHeapGrowingMode());
-
+    if (UseGlobalMemoryScheduling()) {
+      global_allocation_limit_ =
+          global_memory_controller()->CalculateAllocationLimit(
+              global_memory_size, max_global_memory_size_, global_gc_speed,
+              global_mutator_speed, new_space()->Capacity(),
+              CurrentHeapGrowingMode());
+    }
     CheckIneffectiveMarkCompact(
         old_gen_size, tracer()->AverageMarkCompactMutatorUtilization());
   } else if (HasLowYoungGenerationAllocationRate() &&
@@ -1952,6 +1980,16 @@
     if (new_limit < old_generation_allocation_limit_) {
       old_generation_allocation_limit_ = new_limit;
     }
+    if (UseGlobalMemoryScheduling()) {
+      const size_t new_global_limit =
+          global_memory_controller()->CalculateAllocationLimit(
+              global_memory_size, max_global_memory_size_, global_gc_speed,
+              global_mutator_speed, new_space()->Capacity(),
+              CurrentHeapGrowingMode());
+      if (new_global_limit < global_allocation_limit_) {
+        global_allocation_limit_ = new_global_limit;
+      }
+    }
   }
 
   {
@@ -2608,18 +2646,29 @@
 
 void Heap::ConfigureInitialOldGenerationSize() {
   if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
-    const size_t new_limit =
-        Max(OldGenerationSizeOfObjects() +
-                heap_controller()->MinimumAllocationLimitGrowingStep(
-                    CurrentHeapGrowingMode()),
+    const size_t minimum_growing_step =
+        MemoryController::MinimumAllocationLimitGrowingStep(
+            CurrentHeapGrowingMode());
+    const size_t new_old_generation_allocation_limit =
+        Max(OldGenerationSizeOfObjects() + minimum_growing_step,
             static_cast<size_t>(
                 static_cast<double>(old_generation_allocation_limit_) *
                 (tracer()->AverageSurvivalRatio() / 100)));
-    if (new_limit < old_generation_allocation_limit_) {
-      old_generation_allocation_limit_ = new_limit;
+    if (new_old_generation_allocation_limit <
+        old_generation_allocation_limit_) {
+      old_generation_allocation_limit_ = new_old_generation_allocation_limit;
     } else {
       old_generation_size_configured_ = true;
     }
+    if (UseGlobalMemoryScheduling()) {
+      const size_t new_global_memory_limit = Max(
+          GlobalSizeOfObjects() + minimum_growing_step,
+          static_cast<size_t>(static_cast<double>(global_allocation_limit_) *
+                              (tracer()->AverageSurvivalRatio() / 100)));
+      if (new_global_memory_limit < global_allocation_limit_) {
+        global_allocation_limit_ = new_global_memory_limit;
+      }
+    }
   }
 }
 
@@ -3381,7 +3430,8 @@
   double idle_time_in_ms = deadline_in_ms - start_ms;
 
   tracer()->SampleAllocation(start_ms, NewSpaceAllocationCounter(),
-                             OldGenerationAllocationCounter());
+                             OldGenerationAllocationCounter(),
+                             EmbedderAllocationCounter());
 
   GCIdleTimeHeapState heap_state = ComputeHeapState();
 
@@ -3634,6 +3684,8 @@
       return "testing";
     case GarbageCollectionReason::kExternalFinalize:
       return "external finalize";
+    case GarbageCollectionReason::kGlobalAllocationLimit:
+      return "global allocation limit";
     case GarbageCollectionReason::kUnknown:
       return "unknown";
   }
@@ -4372,6 +4424,15 @@
   return total + lo_space_->SizeOfObjects();
 }
 
+size_t Heap::GlobalSizeOfObjects() {
+  const size_t on_heap_size = OldGenerationSizeOfObjects();
+  const size_t embedder_size =
+      local_embedder_heap_tracer()
+          ? local_embedder_heap_tracer()->allocated_size()
+          : 0;
+  return on_heap_size + embedder_size;
+}
+
 uint64_t Heap::PromotedExternalMemorySize() {
   IsolateData* isolate_data = isolate()->isolate_data();
   if (isolate_data->external_memory_ <=
@@ -4431,6 +4492,14 @@
   return Heap::HeapGrowingMode::kDefault;
 }
 
+size_t Heap::GlobalMemoryAvailable() {
+  return UseGlobalMemoryScheduling()
+             ? GlobalSizeOfObjects() < global_allocation_limit_
+                   ? global_allocation_limit_ - GlobalSizeOfObjects()
+                   : 0
+             : 1;
+}
+
 // This function returns either kNoLimit, kSoftLimit, or kHardLimit.
 // The kNoLimit means that either incremental marking is disabled or it is too
 // early to start incremental marking.
@@ -4491,8 +4560,10 @@
   }
 
   size_t old_generation_space_available = OldGenerationSpaceAvailable();
+  const size_t global_memory_available = GlobalMemoryAvailable();
 
-  if (old_generation_space_available > new_space_->Capacity()) {
+  if (old_generation_space_available > new_space_->Capacity() &&
+      (global_memory_available > 0)) {
     return IncrementalMarkingLimit::kNoLimit;
   }
   if (ShouldOptimizeForMemoryUsage()) {
@@ -4504,6 +4575,9 @@
   if (old_generation_space_available == 0) {
     return IncrementalMarkingLimit::kHardLimit;
   }
+  if (global_memory_available == 0) {
+    return IncrementalMarkingLimit::kHardLimit;
+  }
   return IncrementalMarkingLimit::kSoftLimit;
 }
 
@@ -4657,6 +4731,7 @@
   store_buffer_.reset(new StoreBuffer(this));
 
   heap_controller_.reset(new HeapController(this));
+  global_memory_controller_.reset(new GlobalMemoryController(this));
 
   mark_compact_collector_.reset(new MarkCompactCollector(this));
 
@@ -4934,6 +5009,7 @@
   }
 
   heap_controller_.reset();
+  global_memory_controller_.reset();
 
   if (mark_compact_collector_) {
     mark_compact_collector_->TearDown();
@@ -5781,6 +5857,12 @@
   UNREACHABLE();
 }
 
+size_t Heap::EmbedderAllocationCounter() const {
+  return local_embedder_heap_tracer()
+             ? local_embedder_heap_tracer()->accumulated_allocated_size()
+             : 0;
+}
+
 void Heap::CreateObjectStats() {
   if (V8_LIKELY(!TracingFlags::is_gc_stats_enabled())) return;
   if (!live_object_stats_) {
diff --git a/src/heap/heap.h b/src/heap/heap.h
index 73c1d3f..98c6f8a 100644
--- a/src/heap/heap.h
+++ b/src/heap/heap.h
@@ -62,6 +62,7 @@
 class GCIdleTimeHandler;
 class GCIdleTimeHeapState;
 class GCTracer;
+class GlobalMemoryController;
 class HeapController;
 class HeapObjectAllocationTracker;
 class HeapObjectsFilter;
@@ -129,7 +130,8 @@
   kSamplingProfiler = 19,
   kSnapshotCreator = 20,
   kTesting = 21,
-  kExternalFinalize = 22
+  kExternalFinalize = 22,
+  kGlobalAllocationLimit = 23,
   // If you add new items here, then update the incremental_marking_reason,
   // mark_compact_reason, and scavenge_reason counters in counters.h.
   // Also update src/tools/metrics/histograms/histograms.xml in chromium.
@@ -1147,6 +1149,8 @@
            PromotedSinceLastGC();
   }
 
+  size_t EmbedderAllocationCounter() const;
+
   // This should be used only for testing.
   void set_old_generation_allocation_counter_at_last_gc(size_t new_value) {
     old_generation_allocation_counter_at_last_gc_ = new_value;
@@ -1178,6 +1182,8 @@
   // Excludes external memory held by those objects.
   V8_EXPORT_PRIVATE size_t OldGenerationSizeOfObjects();
 
+  V8_EXPORT_PRIVATE size_t GlobalSizeOfObjects();
+
   // ===========================================================================
   // Prologue/epilogue callback methods.========================================
   // ===========================================================================
@@ -1687,6 +1693,9 @@
   // ===========================================================================
 
   HeapController* heap_controller() { return heap_controller_.get(); }
+  GlobalMemoryController* global_memory_controller() const {
+    return global_memory_controller_.get();
+  }
   MemoryReducer* memory_reducer() { return memory_reducer_.get(); }
 
   // For some webpages RAIL mode does not switch from PERFORMANCE_LOAD.
@@ -1714,6 +1723,12 @@
   enum class IncrementalMarkingLimit { kNoLimit, kSoftLimit, kHardLimit };
   IncrementalMarkingLimit IncrementalMarkingLimitReached();
 
+  bool UseGlobalMemoryScheduling() const {
+    return FLAG_global_gc_scheduling && local_embedder_heap_tracer();
+  }
+
+  size_t GlobalMemoryAvailable();
+
   // ===========================================================================
   // Idle notification. ========================================================
   // ===========================================================================
@@ -1807,6 +1822,11 @@
   size_t max_semi_space_size_ = 8 * (kSystemPointerSize / 4) * MB;
   size_t initial_semispace_size_ = kMinSemiSpaceSizeInKB * KB;
   size_t max_old_generation_size_ = 700ul * (kSystemPointerSize / 4) * MB;
+  // TODO(mlippautz): Clarify whether this should take some
+  // embedder-configurable limit into account.
+  size_t max_global_memory_size_ =
+      Min(static_cast<uint64_t>(std::numeric_limits<size_t>::max()),
+          static_cast<uint64_t>(max_old_generation_size_) * 2);
   size_t initial_max_old_generation_size_;
   size_t initial_max_old_generation_size_threshold_;
   size_t initial_old_generation_size_;
@@ -1915,6 +1935,7 @@
   // which collector to invoke, before expanding a paged space in the old
   // generation and on every allocation in large object space.
   size_t old_generation_allocation_limit_;
+  size_t global_allocation_limit_;
 
   // Indicates that inline bump-pointer allocation has been globally disabled
   // for all spaces. This is used to disable allocations in generated code.
@@ -1965,6 +1986,7 @@
   std::unique_ptr<MemoryAllocator> memory_allocator_;
   std::unique_ptr<StoreBuffer> store_buffer_;
   std::unique_ptr<HeapController> heap_controller_;
+  std::unique_ptr<GlobalMemoryController> global_memory_controller_;
   std::unique_ptr<IncrementalMarking> incremental_marking_;
   std::unique_ptr<ConcurrentMarking> concurrent_marking_;
   std::unique_ptr<GCIdleTimeHandler> gc_idle_time_handler_;
@@ -2064,6 +2086,7 @@
   friend class ConcurrentMarking;
   friend class GCCallbacksScope;
   friend class GCTracer;
+  friend class GlobalMemoryController;
   friend class HeapController;
   friend class MemoryController;
   friend class HeapIterator;
diff --git a/src/heap/memory-reducer.cc b/src/heap/memory-reducer.cc
index 6d0dfe5..bfb91b8 100644
--- a/src/heap/memory-reducer.cc
+++ b/src/heap/memory-reducer.cc
@@ -39,7 +39,8 @@
   Event event;
   double time_ms = heap->MonotonicallyIncreasingTimeInMs();
   heap->tracer()->SampleAllocation(time_ms, heap->NewSpaceAllocationCounter(),
-                                   heap->OldGenerationAllocationCounter());
+                                   heap->OldGenerationAllocationCounter(),
+                                   heap->EmbedderAllocationCounter());
   bool low_allocation_rate = heap->HasLowAllocationRate();
   bool optimize_for_memory = heap->ShouldOptimizeForMemoryUsage();
   if (FLAG_trace_gc_verbose) {
diff --git a/src/logging/counters-definitions.h b/src/logging/counters-definitions.h
index bd3cb61..5ff754d 100644
--- a/src/logging/counters-definitions.h
+++ b/src/logging/counters-definitions.h
@@ -17,9 +17,9 @@
   HR(code_cache_reject_reason, V8.CodeCacheRejectReason, 1, 6, 6)              \
   HR(errors_thrown_per_context, V8.ErrorsThrownPerContext, 0, 200, 20)         \
   HR(debug_feature_usage, V8.DebugFeatureUsage, 1, 7, 7)                       \
-  HR(incremental_marking_reason, V8.GCIncrementalMarkingReason, 0, 21, 22)     \
+  HR(incremental_marking_reason, V8.GCIncrementalMarkingReason, 0, 22, 23)     \
   HR(incremental_marking_sum, V8.GCIncrementalMarkingSum, 0, 10000, 101)       \
-  HR(mark_compact_reason, V8.GCMarkCompactReason, 0, 21, 22)                   \
+  HR(mark_compact_reason, V8.GCMarkCompactReason, 0, 22, 23)                   \
   HR(gc_finalize_clear, V8.GCFinalizeMC.Clear, 0, 10000, 101)                  \
   HR(gc_finalize_epilogue, V8.GCFinalizeMC.Epilogue, 0, 10000, 101)            \
   HR(gc_finalize_evacuate, V8.GCFinalizeMC.Evacuate, 0, 10000, 101)            \
@@ -34,7 +34,7 @@
   /* Range and bucket matches BlinkGC.MainThreadMarkingThroughput. */          \
   HR(gc_main_thread_marking_throughput, V8.GCMainThreadMarkingThroughput, 0,   \
      100000, 50)                                                               \
-  HR(scavenge_reason, V8.GCScavengeReason, 0, 21, 22)                          \
+  HR(scavenge_reason, V8.GCScavengeReason, 0, 22, 23)                          \
   HR(young_generation_handling, V8.GCYoungGenerationHandling, 0, 2, 3)         \
   /* Asm/Wasm. */                                                              \
   HR(wasm_functions_per_asm_module, V8.WasmFunctionsPerModule.asm, 1, 1000000, \
diff --git a/test/unittests/heap/gc-tracer-unittest.cc b/test/unittests/heap/gc-tracer-unittest.cc
index eeec787..3aaca42 100644
--- a/test/unittests/heap/gc-tracer-unittest.cc
+++ b/test/unittests/heap/gc-tracer-unittest.cc
@@ -53,11 +53,13 @@
 
 namespace {
 
+constexpr size_t kNoGlobalMemory = 0;
+
 void SampleAndAddAllocaton(v8::internal::GCTracer* tracer, double time_ms,
                            size_t new_space_counter_bytes,
                            size_t old_generation_counter_bytes) {
   tracer->SampleAllocation(time_ms, new_space_counter_bytes,
-                           old_generation_counter_bytes);
+                           old_generation_counter_bytes, kNoGlobalMemory);
   tracer->AddAllocation(time_ms);
 }
 
@@ -70,7 +72,7 @@
   int time1 = 100;
   size_t counter1 = 1000;
   // First sample creates baseline but is not part of the recorded samples.
-  tracer->SampleAllocation(time1, counter1, counter1);
+  tracer->SampleAllocation(time1, counter1, counter1, kNoGlobalMemory);
   SampleAndAddAllocaton(tracer, time1, counter1, counter1);
   int time2 = 200;
   size_t counter2 = 2000;