Remove memory peak detection code.

This CL removes dead code. It has no intended behavior change.
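
For context, this deletes base::trace_event::MemoryPeakDetector (and its
unittest), the MemoryDumpType::PEAK_MEMORY_USAGE trigger type, the
is_fast_polling_supported registration option, and the
PollFastMemoryTotal() / SuspendFastMemoryPolling() hooks on
MemoryDumpProvider.

For reference, memory-infra trace configs keep the periodic trigger; a
minimal sketch of such a trigger, matching the updated expectation in
trace_config_unittest.cc:

  "memory_dump_config": {
    "triggers": [
      {
        "min_time_between_dumps_ms": 1000,
        "mode": "detailed",
        "type": "periodic_interval"
      }
    ]
  }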

Change-Id: I5f5cabb2e162da82777a168df191c14fd0f9a295
Bug: 847876
TBR: dpranke@chromium.org
Reviewed-on: https://chromium-review.googlesource.com/1076506
Commit-Queue: Erik Chen <erikchen@chromium.org>
Reviewed-by: Daniel Cheng <dcheng@chromium.org>
Reviewed-by: Siddhartha S <ssid@chromium.org>
Cr-Commit-Position: refs/heads/master@{#562836}
diff --git a/base/BUILD.gn b/base/BUILD.gn
index a3da63e..917434d 100644
--- a/base/BUILD.gn
+++ b/base/BUILD.gn
@@ -975,8 +975,6 @@
     "trace_event/memory_dump_scheduler.h",
     "trace_event/memory_infra_background_whitelist.cc",
     "trace_event/memory_infra_background_whitelist.h",
-    "trace_event/memory_peak_detector.cc",
-    "trace_event/memory_peak_detector.h",
     "trace_event/memory_usage_estimator.cc",
     "trace_event/memory_usage_estimator.h",
     "trace_event/process_memory_dump.cc",
@@ -2408,7 +2406,6 @@
     "trace_event/memory_allocator_dump_unittest.cc",
     "trace_event/memory_dump_manager_unittest.cc",
     "trace_event/memory_dump_scheduler_unittest.cc",
-    "trace_event/memory_peak_detector_unittest.cc",
     "trace_event/memory_usage_estimator_unittest.cc",
     "trace_event/process_memory_dump_unittest.cc",
     "trace_event/trace_category_unittest.cc",
diff --git a/base/trace_event/memory_dump_manager.cc b/base/trace_event/memory_dump_manager.cc
index f2ac75fc..d61528a 100644
--- a/base/trace_event/memory_dump_manager.cc
+++ b/base/trace_event/memory_dump_manager.cc
@@ -30,7 +30,6 @@
 #include "base/trace_event/memory_dump_provider.h"
 #include "base/trace_event/memory_dump_scheduler.h"
 #include "base/trace_event/memory_infra_background_whitelist.h"
-#include "base/trace_event/memory_peak_detector.h"
 #include "base/trace_event/process_memory_dump.h"
 #include "base/trace_event/trace_event.h"
 #include "base/trace_event/trace_event_argument.h"
@@ -52,9 +51,9 @@
 
 MemoryDumpManager* g_memory_dump_manager_for_testing = nullptr;
 
-// Temporary (until peak detector and scheduler are moved outside of here)
+// Temporary (until the scheduler is moved outside of here)
 // trampoline function to match the |request_dump_function| passed to Initialize
-// to the callback expected by MemoryPeakDetector and MemoryDumpScheduler.
+// to the callback expected by MemoryDumpScheduler.
 // TODO(primiano): remove this.
 void DoGlobalDumpWithoutCallback(
     MemoryDumpManager::RequestGlobalDumpFunction global_dump_fn,
@@ -194,11 +193,6 @@
       new MemoryDumpProviderInfo(mdp, name, std::move(task_runner), options,
                                  whitelisted_for_background_mode);
 
-  if (options.is_fast_polling_supported) {
-    DCHECK(!mdpinfo->task_runner) << "MemoryDumpProviders capable of fast "
-                                     "polling must NOT be thread bound.";
-  }
-
   {
     AutoLock lock(lock_);
     bool already_registered = !dump_providers_.insert(mdpinfo).second;
@@ -206,9 +200,6 @@
     // path for RenderThreadImpl::Init().
     if (already_registered)
       return;
-
-    if (options.is_fast_polling_supported)
-      MemoryPeakDetector::GetInstance()->NotifyMemoryDumpProvidersChanged();
   }
 }
 
@@ -244,7 +235,6 @@
     // - At the end of this function, if no dump is in progress.
     // - In ContinueAsyncProcessDump() when MDPInfo is removed from
     //   |pending_dump_providers|.
-    // - When the provider is removed from other clients (MemoryPeakDetector).
     DCHECK(!(*mdp_iter)->owned_dump_provider);
     (*mdp_iter)->owned_dump_provider = std::move(owned_mdp);
   } else {
@@ -263,11 +253,6 @@
         << "unregister itself in a racy way. Please file a crbug.";
   }
 
-  if ((*mdp_iter)->options.is_fast_polling_supported) {
-    DCHECK(take_mdp_ownership_and_delete_async);
-    MemoryPeakDetector::GetInstance()->NotifyMemoryDumpProvidersChanged();
-  }
-
   // The MDPInfo instance can still be referenced by the
   // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason
   // the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump()
@@ -277,16 +262,6 @@
   dump_providers_.erase(mdp_iter);
 }
 
-void MemoryDumpManager::GetDumpProvidersForPolling(
-    std::vector<scoped_refptr<MemoryDumpProviderInfo>>* providers) {
-  DCHECK(providers->empty());
-  AutoLock lock(lock_);
-  for (const scoped_refptr<MemoryDumpProviderInfo>& mdp : dump_providers_) {
-    if (mdp->options.is_fast_polling_supported)
-      providers->push_back(mdp);
-  }
-}
-
 bool MemoryDumpManager::IsDumpProviderRegisteredForTesting(
     MemoryDumpProvider* provider) {
   AutoLock lock(lock_);
@@ -337,9 +312,6 @@
 
     pmd_async_state.reset(new ProcessMemoryDumpAsyncState(
         args, dump_providers_, callback, GetOrCreateBgTaskRunnerLocked()));
-
-    // If enabled, holds back the peak detector resetting its estimation window.
-    MemoryPeakDetector::GetInstance()->Throttle();
   }
 
   // Start the process dump. This involves task runner hops as specified by the
@@ -523,7 +495,6 @@
   DCHECK(can_request_global_dumps());
 
   MemoryDumpScheduler::Config periodic_config;
-  bool peak_detector_configured = false;
   for (const auto& trigger : memory_dump_config.triggers) {
     if (trigger.trigger_type == MemoryDumpType::PERIODIC_INTERVAL) {
       if (periodic_config.triggers.empty()) {
@@ -533,34 +504,6 @@
       }
       periodic_config.triggers.push_back(
           {trigger.level_of_detail, trigger.min_time_between_dumps_ms});
-    } else if (trigger.trigger_type == MemoryDumpType::PEAK_MEMORY_USAGE) {
-      // At most one peak trigger is allowed.
-      CHECK(!peak_detector_configured);
-      peak_detector_configured = true;
-      MemoryPeakDetector::GetInstance()->Setup(
-          BindRepeating(&MemoryDumpManager::GetDumpProvidersForPolling,
-                        Unretained(this)),
-          GetOrCreateBgTaskRunnerLocked(),
-          BindRepeating(&DoGlobalDumpWithoutCallback, request_dump_function_,
-                        MemoryDumpType::PEAK_MEMORY_USAGE,
-                        trigger.level_of_detail));
-
-      MemoryPeakDetector::Config peak_config;
-      peak_config.polling_interval_ms = 10;
-      peak_config.min_time_between_peaks_ms = trigger.min_time_between_dumps_ms;
-      peak_config.enable_verbose_poll_tracing =
-          trigger.level_of_detail == MemoryDumpLevelOfDetail::DETAILED;
-      MemoryPeakDetector::GetInstance()->Start(peak_config);
-
-      // When peak detection is enabled, trigger a dump straight away as it
-      // gives a good reference point for analyzing the trace.
-      if (is_coordinator_) {
-        GetOrCreateBgTaskRunnerLocked()->PostTask(
-            FROM_HERE,
-            BindOnce(&DoGlobalDumpWithoutCallback, request_dump_function_,
-                     MemoryDumpType::PEAK_MEMORY_USAGE,
-                     trigger.level_of_detail));
-      }
     }
   }
 
@@ -578,7 +521,6 @@
   AutoLock lock(lock_);
 
   MemoryDumpScheduler::GetInstance()->Stop();
-  MemoryPeakDetector::GetInstance()->TearDown();
 }
 
 MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
diff --git a/base/trace_event/memory_dump_manager.h b/base/trace_event/memory_dump_manager.h
index 1393bee..6033cfb 100644
--- a/base/trace_event/memory_dump_manager.h
+++ b/base/trace_event/memory_dump_manager.h
@@ -62,8 +62,7 @@
   //  request_dump_function: Function to invoke a global dump. Global dump
   //      involves embedder-specific behaviors like multiprocess handshaking.
   //      TODO(primiano): this is only required to trigger global dumps from
-  //      the scheduler and the peak detector. Should be removed once they are
-  //      both moved out of base.
+  //      the scheduler. Should be removed once it is moved out of base.
   void Initialize(RequestGlobalDumpFunction request_dump_function,
                   bool is_coordinator);
 
@@ -99,14 +98,13 @@
   // This method takes ownership of the dump provider and guarantees that:
   //  - The |mdp| will be deleted at some point in the near future.
   //  - Its deletion will not happen concurrently with the OnMemoryDump() call.
-  // Note that OnMemoryDump() and PollFastMemoryTotal() calls can still happen
-  // after this method returns.
+  // Note that OnMemoryDump() calls can still happen after this method returns.
   void UnregisterAndDeleteDumpProviderSoon(
       std::unique_ptr<MemoryDumpProvider> mdp);
 
   // Prepare MemoryDumpManager for CreateProcessDump() calls for tracing-related
   // modes (i.e. |level_of_detail| != SUMMARY_ONLY).
-  // Also initializes the peak detector and scheduler with the given config.
+  // Also initializes the scheduler with the given config.
   void SetupForTracing(const TraceConfig::MemoryDumpConfig&);
 
   // Tear-down tracing related state.
@@ -231,11 +229,6 @@
   void UnregisterDumpProviderInternal(MemoryDumpProvider* mdp,
                                       bool take_mdp_ownership_and_delete_async);
 
-  // Fills the passed vector with the subset of dump providers which were
-  // registered with is_fast_polling_supported == true.
-  void GetDumpProvidersForPolling(
-      std::vector<scoped_refptr<MemoryDumpProviderInfo>>*);
-
   bool can_request_global_dumps() const {
     return !request_dump_function_.is_null();
   }
diff --git a/base/trace_event/memory_dump_manager_unittest.cc b/base/trace_event/memory_dump_manager_unittest.cc
index 428c80e..706df2d 100644
--- a/base/trace_event/memory_dump_manager_unittest.cc
+++ b/base/trace_event/memory_dump_manager_unittest.cc
@@ -108,8 +108,6 @@
   MOCK_METHOD0(Destructor, void());
   MOCK_METHOD2(OnMemoryDump,
                bool(const MemoryDumpArgs& args, ProcessMemoryDump* pmd));
-  MOCK_METHOD1(PollFastMemoryTotal, void(uint64_t* memory_total));
-  MOCK_METHOD0(SuspendFastMemoryPolling, void());
 
   MockMemoryDumpProvider() : enable_mock_destructor(false) {
     ON_CALL(*this, OnMemoryDump(_, _))
@@ -117,10 +115,6 @@
             Invoke([](const MemoryDumpArgs&, ProcessMemoryDump* pmd) -> bool {
               return true;
             }));
-
-    ON_CALL(*this, PollFastMemoryTotal(_))
-        .WillByDefault(
-            Invoke([](uint64_t* memory_total) -> void { NOTREACHED(); }));
   }
   ~MockMemoryDumpProvider() override {
     if (enable_mock_destructor)
diff --git a/base/trace_event/memory_dump_provider.h b/base/trace_event/memory_dump_provider.h
index f3c766e5..f55e2cf 100644
--- a/base/trace_event/memory_dump_provider.h
+++ b/base/trace_event/memory_dump_provider.h
@@ -20,19 +20,12 @@
  public:
   // Optional arguments for MemoryDumpManager::RegisterDumpProvider().
   struct Options {
-    Options()
-        : dumps_on_single_thread_task_runner(false),
-          is_fast_polling_supported(false) {}
+    Options() : dumps_on_single_thread_task_runner(false) {}
 
     // |dumps_on_single_thread_task_runner| is true if the dump provider runs on
     // a SingleThreadTaskRunner, which is usually the case. It is faster to run
     // all providers that run on the same thread together without thread hops.
     bool dumps_on_single_thread_task_runner;
-
-    // Set to true if the dump provider implementation supports high frequency
-    // polling. Only providers running without task runner affinity are
-    // supported.
-    bool is_fast_polling_supported;
   };
 
   virtual ~MemoryDumpProvider() = default;
@@ -47,18 +40,6 @@
   virtual bool OnMemoryDump(const MemoryDumpArgs& args,
                             ProcessMemoryDump* pmd) = 0;
 
-  // Quickly record the total memory usage in |memory_total|. This method will
-  // be called only when the dump provider registration has
-  // |is_fast_polling_supported| set to true. This method is used for polling at
-  // high frequency for detecting peaks. See comment on
-  // |is_fast_polling_supported| option if you need to override this method.
-  virtual void PollFastMemoryTotal(uint64_t* memory_total) {}
-
-  // Indicates that fast memory polling is not going to be used in the near
-  // future and the MDP can tear down any resource kept around for fast memory
-  // polling.
-  virtual void SuspendFastMemoryPolling() {}
-
  protected:
   MemoryDumpProvider() = default;
 
diff --git a/base/trace_event/memory_dump_request_args.cc b/base/trace_event/memory_dump_request_args.cc
index 3cb9cabae..8be3c32 100644
--- a/base/trace_event/memory_dump_request_args.cc
+++ b/base/trace_event/memory_dump_request_args.cc
@@ -16,8 +16,6 @@
       return "periodic_interval";
     case MemoryDumpType::EXPLICITLY_TRIGGERED:
       return "explicitly_triggered";
-    case MemoryDumpType::PEAK_MEMORY_USAGE:
-      return "peak_memory_usage";
     case MemoryDumpType::SUMMARY_ONLY:
       return "summary_only";
   }
@@ -30,8 +28,6 @@
     return MemoryDumpType::PERIODIC_INTERVAL;
   if (str == "explicitly_triggered")
     return MemoryDumpType::EXPLICITLY_TRIGGERED;
-  if (str == "peak_memory_usage")
-    return MemoryDumpType::PEAK_MEMORY_USAGE;
   if (str == "summary_only")
     return MemoryDumpType::SUMMARY_ONLY;
   NOTREACHED();
diff --git a/base/trace_event/memory_dump_request_args.h b/base/trace_event/memory_dump_request_args.h
index 41bc99bc..f854a4b 100644
--- a/base/trace_event/memory_dump_request_args.h
+++ b/base/trace_event/memory_dump_request_args.h
@@ -30,7 +30,6 @@
 enum class MemoryDumpType {
   PERIODIC_INTERVAL,     // Dumping memory at periodic intervals.
   EXPLICITLY_TRIGGERED,  // Non maskable dump request.
-  PEAK_MEMORY_USAGE,     // Dumping memory at detected peak total memory usage.
   SUMMARY_ONLY,          // Calculate just the summary & don't add to the trace.
   LAST = SUMMARY_ONLY
 };
diff --git a/base/trace_event/memory_peak_detector.cc b/base/trace_event/memory_peak_detector.cc
deleted file mode 100644
index 54195940..0000000
--- a/base/trace_event/memory_peak_detector.cc
+++ /dev/null
@@ -1,288 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/memory_peak_detector.h"
-
-#include <algorithm>
-
-#include "base/bind.h"
-#include "base/logging.h"
-#include "base/sys_info.h"
-#include "base/threading/sequenced_task_runner_handle.h"
-#include "base/time/time.h"
-#include "base/trace_event/memory_dump_manager.h"
-#include "base/trace_event/memory_dump_provider_info.h"
-#include "base/trace_event/trace_event.h"
-#include "build/build_config.h"
-
-namespace base {
-namespace trace_event {
-
-// static
-MemoryPeakDetector* MemoryPeakDetector::GetInstance() {
-  static MemoryPeakDetector* instance = new MemoryPeakDetector();
-  return instance;
-}
-
-MemoryPeakDetector::MemoryPeakDetector()
-    : generation_(0),
-      state_(NOT_INITIALIZED),
-      poll_tasks_count_for_testing_(0) {}
-
-MemoryPeakDetector::~MemoryPeakDetector() {
-  // This is hit only in tests, in which case the test is expected to TearDown()
-  // cleanly and not leave the peak detector running.
-  DCHECK_EQ(NOT_INITIALIZED, state_);
-}
-
-void MemoryPeakDetector::Setup(
-    const GetDumpProvidersFunction& get_dump_providers_function,
-    const scoped_refptr<SequencedTaskRunner>& task_runner,
-    const OnPeakDetectedCallback& on_peak_detected_callback) {
-  DCHECK(!get_dump_providers_function.is_null());
-  DCHECK(task_runner);
-  DCHECK(!on_peak_detected_callback.is_null());
-  DCHECK(state_ == NOT_INITIALIZED || state_ == DISABLED);
-  DCHECK(dump_providers_.empty());
-  get_dump_providers_function_ = get_dump_providers_function;
-  task_runner_ = task_runner;
-  on_peak_detected_callback_ = on_peak_detected_callback;
-  state_ = DISABLED;
-  config_ = {};
-  ResetPollHistory();
-
-  static_threshold_bytes_ = 0;
-#if !defined(OS_NACL)
-  // Set threshold to 1% of total system memory.
-  static_threshold_bytes_ =
-      static_cast<uint64_t>(SysInfo::AmountOfPhysicalMemory()) / 100;
-#endif
-  // Fallback, mostly for test environments where AmountOfPhysicalMemory() is
-  // broken.
-  static_threshold_bytes_ =
-      std::max(static_threshold_bytes_, static_cast<uint64_t>(5 * 1024 * 1024));
-}
-
-void MemoryPeakDetector::TearDown() {
-  if (task_runner_) {
-    task_runner_->PostTask(
-        FROM_HERE,
-        BindOnce(&MemoryPeakDetector::TearDownInternal, Unretained(this)));
-  }
-  task_runner_ = nullptr;
-}
-
-void MemoryPeakDetector::Start(MemoryPeakDetector::Config config) {
-  if (!config.polling_interval_ms) {
-    NOTREACHED();
-    return;
-  }
-  task_runner_->PostTask(FROM_HERE, BindOnce(&MemoryPeakDetector::StartInternal,
-                                             Unretained(this), config));
-}
-
-void MemoryPeakDetector::Stop() {
-  task_runner_->PostTask(
-      FROM_HERE, BindOnce(&MemoryPeakDetector::StopInternal, Unretained(this)));
-}
-
-void MemoryPeakDetector::Throttle() {
-  if (!task_runner_)
-    return;  // Can be called before Setup().
-  task_runner_->PostTask(
-      FROM_HERE, BindOnce(&MemoryPeakDetector::ResetPollHistory,
-                          Unretained(this), true /* keep_last_sample */));
-}
-
-void MemoryPeakDetector::NotifyMemoryDumpProvidersChanged() {
-  if (!task_runner_)
-    return;  // Can be called before Setup().
-  task_runner_->PostTask(
-      FROM_HERE,
-      BindOnce(&MemoryPeakDetector::ReloadDumpProvidersAndStartPollingIfNeeded,
-               Unretained(this)));
-}
-
-void MemoryPeakDetector::StartInternal(MemoryPeakDetector::Config config) {
-  DCHECK_EQ(DISABLED, state_);
-  state_ = ENABLED;
-  config_ = config;
-  ResetPollHistory();
-
-  // If there are any dump providers available,
-  // NotifyMemoryDumpProvidersChanged will fetch them and start the polling.
-  // Otherwise this will remain in the ENABLED state and the actual polling
-  // will start on the next call to
-  // ReloadDumpProvidersAndStartPollingIfNeeded().
-  // Depending on the sandbox model, it is possible that no polling-capable
-  // dump providers will be ever available.
-  ReloadDumpProvidersAndStartPollingIfNeeded();
-}
-
-void MemoryPeakDetector::StopInternal() {
-  DCHECK_NE(NOT_INITIALIZED, state_);
-  state_ = DISABLED;
-  ++generation_;
-  for (const scoped_refptr<MemoryDumpProviderInfo>& mdp_info : dump_providers_)
-    mdp_info->dump_provider->SuspendFastMemoryPolling();
-  dump_providers_.clear();
-}
-
-void MemoryPeakDetector::TearDownInternal() {
-  StopInternal();
-  get_dump_providers_function_.Reset();
-  on_peak_detected_callback_.Reset();
-  state_ = NOT_INITIALIZED;
-}
-
-void MemoryPeakDetector::ReloadDumpProvidersAndStartPollingIfNeeded() {
-  if (state_ == DISABLED || state_ == NOT_INITIALIZED)
-    return;  // Start() will re-fetch the MDP list later.
-
-  DCHECK((state_ == RUNNING && !dump_providers_.empty()) ||
-         (state_ == ENABLED && dump_providers_.empty()));
-
-  dump_providers_.clear();
-
-  // This is really MemoryDumpManager::GetDumpProvidersForPolling, % testing.
-  get_dump_providers_function_.Run(&dump_providers_);
-
-  if (state_ == ENABLED && !dump_providers_.empty()) {
-    // It's now time to start polling for realz.
-    state_ = RUNNING;
-    task_runner_->PostTask(
-        FROM_HERE, BindOnce(&MemoryPeakDetector::PollMemoryAndDetectPeak,
-                            Unretained(this), ++generation_));
-  } else if (state_ == RUNNING && dump_providers_.empty()) {
-    // Will cause the next PollMemoryAndDetectPeak() task to early return.
-    state_ = ENABLED;
-    ++generation_;
-  }
-}
-
-void MemoryPeakDetector::PollMemoryAndDetectPeak(uint32_t expected_generation) {
-  if (state_ != RUNNING || generation_ != expected_generation)
-    return;
-
-  // We should never end up in a situation where state_ == RUNNING but all dump
-  // providers are gone.
-  DCHECK(!dump_providers_.empty());
-
-  poll_tasks_count_for_testing_++;
-  uint64_t polled_mem_bytes = 0;
-  for (const scoped_refptr<MemoryDumpProviderInfo>& mdp_info :
-       dump_providers_) {
-    DCHECK(mdp_info->options.is_fast_polling_supported);
-    uint64_t value = 0;
-    mdp_info->dump_provider->PollFastMemoryTotal(&value);
-    polled_mem_bytes += value;
-  }
-  if (config_.enable_verbose_poll_tracing) {
-    TRACE_COUNTER1(MemoryDumpManager::kTraceCategory, "PolledMemoryMB",
-                   polled_mem_bytes / 1024 / 1024);
-  }
-
-  // Peak detection logic. Design doc: https://goo.gl/0kOU4A .
-  bool is_peak = false;
-  if (skip_polls_ > 0) {
-    skip_polls_--;
-  } else if (last_dump_memory_total_ == 0) {
-    last_dump_memory_total_ = polled_mem_bytes;
-  } else if (polled_mem_bytes > 0) {
-    int64_t diff_from_last_dump = polled_mem_bytes - last_dump_memory_total_;
-
-    DCHECK_GT(static_threshold_bytes_, 0u);
-    is_peak =
-        diff_from_last_dump > static_cast<int64_t>(static_threshold_bytes_);
-
-    if (!is_peak)
-      is_peak = DetectPeakUsingSlidingWindowStddev(polled_mem_bytes);
-  }
-
-  DCHECK_GT(config_.polling_interval_ms, 0u);
-  SequencedTaskRunnerHandle::Get()->PostDelayedTask(
-      FROM_HERE,
-      BindOnce(&MemoryPeakDetector::PollMemoryAndDetectPeak, Unretained(this),
-               expected_generation),
-      TimeDelta::FromMilliseconds(config_.polling_interval_ms));
-
-  if (!is_peak)
-    return;
-  TRACE_EVENT_INSTANT1(MemoryDumpManager::kTraceCategory,
-                       "Peak memory detected", TRACE_EVENT_SCOPE_PROCESS,
-                       "PolledMemoryMB", polled_mem_bytes / 1024 / 1024);
-  ResetPollHistory(true /* keep_last_sample */);
-  last_dump_memory_total_ = polled_mem_bytes;
-  on_peak_detected_callback_.Run();
-}
-
-bool MemoryPeakDetector::DetectPeakUsingSlidingWindowStddev(
-    uint64_t polled_mem_bytes) {
-  DCHECK(polled_mem_bytes);
-  samples_bytes_[samples_index_] = polled_mem_bytes;
-  samples_index_ = (samples_index_ + 1) % kSlidingWindowNumSamples;
-  float mean = 0;
-  for (uint32_t i = 0; i < kSlidingWindowNumSamples; ++i) {
-    if (samples_bytes_[i] == 0)
-      return false;  // Not enough samples to detect peaks.
-    mean += samples_bytes_[i];
-  }
-  mean /= kSlidingWindowNumSamples;
-  float variance = 0;
-  for (uint32_t i = 0; i < kSlidingWindowNumSamples; ++i) {
-    const float deviation = samples_bytes_[i] - mean;
-    variance += deviation * deviation;
-  }
-  variance /= kSlidingWindowNumSamples;
-
-  // If stddev is less than 0.2% then we consider that the process is inactive.
-  if (variance < (mean / 500) * (mean / 500))
-    return false;
-
-  // (mean + 3.69 * stddev) corresponds to a value that is higher than current
-  // sample with 99.99% probability.
-  const float cur_sample_deviation = polled_mem_bytes - mean;
-  return cur_sample_deviation * cur_sample_deviation > (3.69 * 3.69 * variance);
-}
-
-void MemoryPeakDetector::ResetPollHistory(bool keep_last_sample) {
-  // TODO(primiano,ssid): this logic should probably be revisited. In the case
-  // of Android, the browser process sees the total of all processes memory in
-  // the same peak detector instance. Perhaps the best thing to do here is to
-  // keep the window of samples around and just bump the skip_polls_.
-  last_dump_memory_total_ = 0;
-  if (keep_last_sample) {
-    const uint32_t prev_index =
-        samples_index_ > 0 ? samples_index_ - 1 : kSlidingWindowNumSamples - 1;
-    last_dump_memory_total_ = samples_bytes_[prev_index];
-  }
-  memset(samples_bytes_, 0, sizeof(samples_bytes_));
-  samples_index_ = 0;
-  skip_polls_ = 0;
-  if (config_.polling_interval_ms > 0) {
-    skip_polls_ =
-        (config_.min_time_between_peaks_ms + config_.polling_interval_ms - 1) /
-        config_.polling_interval_ms;
-  }
-}
-
-void MemoryPeakDetector::SetStaticThresholdForTesting(
-    uint64_t static_threshold_bytes) {
-  DCHECK_EQ(DISABLED, state_);
-  static_threshold_bytes_ = static_threshold_bytes;
-}
-
-MemoryPeakDetector::MemoryPeakDetector::Config::Config()
-    : Config(0, 0, false) {}
-
-MemoryPeakDetector::MemoryPeakDetector::Config::Config(
-    uint32_t polling_interval_ms,
-    uint32_t min_time_between_peaks_ms,
-    bool enable_verbose_poll_tracing)
-    : polling_interval_ms(polling_interval_ms),
-      min_time_between_peaks_ms(min_time_between_peaks_ms),
-      enable_verbose_poll_tracing(enable_verbose_poll_tracing) {}
-
-}  // namespace trace_event
-}  // namespace base
diff --git a/base/trace_event/memory_peak_detector.h b/base/trace_event/memory_peak_detector.h
deleted file mode 100644
index bbe205ba..0000000
--- a/base/trace_event/memory_peak_detector.h
+++ /dev/null
@@ -1,184 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TRACE_EVENT_MEMORY_PEAK_DETECTOR_H_
-#define BASE_TRACE_EVENT_MEMORY_PEAK_DETECTOR_H_
-
-#include <stdint.h>
-
-#include <memory>
-#include <vector>
-
-#include "base/base_export.h"
-#include "base/callback.h"
-#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-
-namespace base {
-
-class SequencedTaskRunner;
-
-namespace trace_event {
-
-struct MemoryDumpProviderInfo;
-
-// Detects temporally local memory peaks. Peak detection is based on
-// continuously querying memory usage using MemoryDumpprovider(s) that support
-// fast polling (e.g., ProcessMetricsDumpProvider which under the hoods reads
-// /proc/PID/statm on Linux) and using a combination of:
-// - An static threshold (currently 1% of total system memory).
-// - Sliding window stddev analysis.
-// Design doc: https://goo.gl/0kOU4A .
-// This class is NOT thread-safe, the caller has to ensure linearization of
-// the calls to the public methods. In any case, the public methods do NOT have
-// to be called from the |task_runner| on which the polling tasks run.
-class BASE_EXPORT MemoryPeakDetector {
- public:
-  using OnPeakDetectedCallback = RepeatingClosure;
-  using DumpProvidersList = std::vector<scoped_refptr<MemoryDumpProviderInfo>>;
-  using GetDumpProvidersFunction = RepeatingCallback<void(DumpProvidersList*)>;
-
-  enum State {
-    NOT_INITIALIZED = 0,  // Before Setup()
-    DISABLED,             // Before Start() or after Stop().
-    ENABLED,              // After Start() but no dump_providers_ are available.
-    RUNNING  // After Start(). The PollMemoryAndDetectPeak() task is scheduled.
-  };
-
-  // Peak detector configuration, passed to Start().
-  struct BASE_EXPORT Config {
-    Config();
-    Config(uint32_t polling_interval_ms,
-           uint32_t min_time_between_peaks_ms,
-           bool enable_verbose_poll_tracing);
-
-    // The rate at which memory will be polled. Polls will happen on the task
-    // runner passed to Setup().
-    uint32_t polling_interval_ms;
-
-    // Two consecutive peak detection callbacks will happen at least
-    // |min_time_between_peaks_ms| apart from each other.
-    uint32_t min_time_between_peaks_ms;
-
-    // When enabled causes a TRACE_COUNTER event to be injected in the trace
-    // for each poll (if tracing is enabled).
-    bool enable_verbose_poll_tracing;
-  };
-
-  static MemoryPeakDetector* GetInstance();
-
-  // Configures the peak detector, binding the polling tasks on the given
-  // thread. Setup() can be called several times, provided that: (1) Stop()
-  // is called; (2a) the previous task_runner is flushed or (2b) the task_runner
-  // remains the same.
-  // GetDumpProvidersFunction: is the function that will be invoked to get
-  //   an updated list of polling-capable dump providers. This is really just
-  //   MemoryDumpManager::GetDumpProvidersForPolling, but this extra level of
-  //   indirection allows easier testing.
-  // SequencedTaskRunner: the task runner where PollMemoryAndDetectPeak() will
-  //  be periodically called.
-  // OnPeakDetectedCallback: a callback that will be invoked on the
-  //   given task runner when a memory peak is detected.
-  void Setup(const GetDumpProvidersFunction&,
-             const scoped_refptr<SequencedTaskRunner>&,
-             const OnPeakDetectedCallback&);
-
-  // Releases the |task_runner_| and the bound callbacks.
-  void TearDown();
-
-  // This posts a task onto the passed task runner which refreshes the list of
-  // dump providers via the GetDumpProvidersFunction. If at least one dump
-  // provider is available, this starts immediately polling on the task runner.
-  // If not, the detector remains in the ENABLED state and will start polling
-  // automatically (i.e. without requiring another call to Start()) on the
-  // next call to NotifyMemoryDumpProvidersChanged().
-  void Start(Config);
-
-  // Stops the polling on the task runner (if was active at all). This doesn't
-  // wait for the task runner to drain pending tasks, so it is possible that
-  // a polling will happen concurrently (or in the immediate future) with the
-  // Stop() call. It is responsibility of the caller to drain or synchronize
-  // with the task runner.
-  void Stop();
-
-  // If Start()-ed, prevents that a peak callback is triggered before the next
-  // |min_time_between_peaks_ms|. No-op if the peak detector is not enabled.
-  void Throttle();
-
-  // Used by MemoryDumpManager to notify that the list of polling-capable dump
-  // providers has changed. The peak detector will reload the list on the next
-  // polling task. This function can be called before Setup(), in which
-  // case will be just a no-op.
-  void NotifyMemoryDumpProvidersChanged();
-
-  void SetStaticThresholdForTesting(uint64_t static_threshold_bytes);
-
- private:
-  friend class MemoryPeakDetectorTest;
-
-  static constexpr uint32_t kSlidingWindowNumSamples = 50;
-
-  MemoryPeakDetector();
-  ~MemoryPeakDetector();
-
-  // All these methods are always called on the |task_runner_|.
-  void StartInternal(Config);
-  void StopInternal();
-  void TearDownInternal();
-  void ReloadDumpProvidersAndStartPollingIfNeeded();
-  void PollMemoryAndDetectPeak(uint32_t expected_generation);
-  bool DetectPeakUsingSlidingWindowStddev(uint64_t last_sample_bytes);
-  void ResetPollHistory(bool keep_last_sample = false);
-
-  // It is safe to call these testing methods only on the |task_runner_|.
-  State state_for_testing() const { return state_; }
-  uint32_t poll_tasks_count_for_testing() const {
-    return poll_tasks_count_for_testing_;
-  }
-
-  // The task runner where all the internal calls are posted onto. This field
-  // must be NOT be accessed by the tasks posted on the |task_runner_| because
-  // there might still be outstanding tasks on the |task_runner_| while this
-  // refptr is reset. This can only be safely accessed by the public methods
-  // above, which the client of this class is supposed to call sequentially.
-  scoped_refptr<SequencedTaskRunner> task_runner_;
-
-  // After the Setup() call, the fields below, must be accessed only from
-  // the |task_runner_|.
-
-  // Bound function to get an updated list of polling-capable dump providers.
-  GetDumpProvidersFunction get_dump_providers_function_;
-
-  // The callback to invoke when peaks are detected.
-  OnPeakDetectedCallback on_peak_detected_callback_;
-
-  // List of polling-aware dump providers to invoke upon each poll.
-  DumpProvidersList dump_providers_;
-
-  // The generation is incremented every time the |state_| is changed and causes
-  // PollMemoryAndDetectPeak() to early out if the posted task doesn't match the
-  // most recent |generation_|. This allows to drop on the floor outstanding
-  // PostDelayedTask that refer to an old sequence that was later Stop()-ed or
-  // disabled because of NotifyMemoryDumpProvidersChanged().
-  uint32_t generation_;
-
-  State state_;
-
-  // Config passed to Start(), only valid when |state_| = {ENABLED, RUNNING}.
-  Config config_;
-
-  uint64_t static_threshold_bytes_;
-  uint32_t skip_polls_;
-  uint64_t last_dump_memory_total_;
-  uint64_t samples_bytes_[kSlidingWindowNumSamples];
-  uint32_t samples_index_;
-  uint32_t poll_tasks_count_for_testing_;
-
-  DISALLOW_COPY_AND_ASSIGN(MemoryPeakDetector);
-};
-
-}  // namespace trace_event
-}  // namespace base
-
-#endif  // BASE_TRACE_EVENT_MEMORY_PEAK_DETECTOR_H_
diff --git a/base/trace_event/memory_peak_detector_unittest.cc b/base/trace_event/memory_peak_detector_unittest.cc
deleted file mode 100644
index bc10c80..0000000
--- a/base/trace_event/memory_peak_detector_unittest.cc
+++ /dev/null
@@ -1,564 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/memory_peak_detector.h"
-
-#include <memory>
-
-#include "base/bind.h"
-#include "base/logging.h"
-#include "base/run_loop.h"
-#include "base/synchronization/waitable_event.h"
-#include "base/threading/platform_thread.h"
-#include "base/threading/thread.h"
-#include "base/threading/thread_task_runner_handle.h"
-#include "base/trace_event/memory_dump_provider.h"
-#include "base/trace_event/memory_dump_provider_info.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using ::testing::_;
-using ::testing::Invoke;
-using ::testing::Return;
-
-namespace base {
-namespace trace_event {
-
-namespace {
-
-const TimeDelta kMs = TimeDelta::FromMilliseconds(1);
-const MemoryPeakDetector::Config kConfigNoCallbacks(
-    1 /* polling_interval_ms */,
-    60000 /* min_time_between_peaks_ms */,
-    false /* enable_verbose_poll_tracing */
-    );
-
-class MockMemoryDumpProvider : public MemoryDumpProvider {
- public:
-  bool OnMemoryDump(const MemoryDumpArgs&, ProcessMemoryDump*) override {
-    NOTREACHED();
-    return true;
-  }
-
-  MOCK_METHOD1(PollFastMemoryTotal, void(uint64_t*));
-};
-
-// Wrapper to use gmock on a callback.
-struct OnPeakDetectedWrapper {
-  MOCK_METHOD0(OnPeak, void());
-};
-
-}  // namespace
-
-class MemoryPeakDetectorTest : public testing::Test {
- public:
-  struct FriendDeleter {
-    void operator()(MemoryPeakDetector* inst) { delete inst; }
-  };
-
-  MemoryPeakDetectorTest() : testing::Test() {}
-  static const uint64_t kSlidingWindowNumSamples =
-      MemoryPeakDetector::kSlidingWindowNumSamples;
-
-  std::unique_ptr<MemoryPeakDetector, FriendDeleter> NewInstance() {
-    return std::unique_ptr<MemoryPeakDetector, FriendDeleter>(
-        new MemoryPeakDetector());
-  }
-
-  void RestartThreadAndReinitializePeakDetector() {
-    bg_thread_.reset(new Thread("Peak Detector Test Thread"));
-    bg_thread_->Start();
-    peak_detector_ = NewInstance();
-    peak_detector_->Setup(
-        Bind(&MemoryPeakDetectorTest::MockGetDumpProviders, Unretained(this)),
-        bg_thread_->task_runner(),
-        Bind(&OnPeakDetectedWrapper::OnPeak, Unretained(&on_peak_callback_)));
-  }
-
-  void SetUp() override {
-    get_mdp_call_count_ = 0;
-    RestartThreadAndReinitializePeakDetector();
-  }
-
-  void TearDown() override {
-    peak_detector_->TearDown();
-    bg_thread_->FlushForTesting();
-    EXPECT_EQ(MemoryPeakDetector::NOT_INITIALIZED, GetPeakDetectorState());
-    bg_thread_.reset();
-    dump_providers_.clear();
-  }
-
-  // Calls MemoryPeakDetector::state_for_testing() on the bg thread and returns
-  // the result on the current thread.
-  MemoryPeakDetector::State GetPeakDetectorState() {
-    WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
-                      WaitableEvent::InitialState::NOT_SIGNALED);
-    MemoryPeakDetector::State res = MemoryPeakDetector::NOT_INITIALIZED;
-    auto get_fn = [](MemoryPeakDetector* peak_detector, WaitableEvent* evt,
-                     MemoryPeakDetector::State* res) {
-      *res = peak_detector->state_for_testing();
-      evt->Signal();
-    };
-    bg_thread_->task_runner()->PostTask(
-        FROM_HERE, BindOnce(get_fn, Unretained(&*peak_detector_),
-                            Unretained(&evt), Unretained(&res)));
-    evt.Wait();
-    return res;
-  }
-
-  // Calls MemoryPeakDetector::poll_tasks_count_for_testing() on the bg thread
-  // and returns the result on the current thread.
-  uint32_t GetNumPollingTasksRan() {
-    uint32_t res = 0;
-    auto get_fn = [](MemoryPeakDetector* peak_detector, WaitableEvent* evt,
-                     uint32_t* res) {
-      *res = peak_detector->poll_tasks_count_for_testing();
-      evt->Signal();
-    };
-
-    WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
-                      WaitableEvent::InitialState::NOT_SIGNALED);
-    bg_thread_->task_runner()->PostTask(
-        FROM_HERE, BindOnce(get_fn, Unretained(&*peak_detector_),
-                            Unretained(&evt), Unretained(&res)));
-    evt.Wait();
-    return res;
-  }
-
-  // Runs the peak detector with a mock MDP with the given
-  // |config|. The mock MDP will invoke the |poll_function| on any call to
-  // PollFastMemoryTotal(), until |num_samples| have been polled.
-  // It returns the number of peaks detected.
-  uint32_t RunWithCustomPollFunction(
-      MemoryPeakDetector::Config config,
-      uint32_t num_samples,
-      RepeatingCallback<uint64_t(uint32_t)> poll_function) {
-    WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
-                      WaitableEvent::InitialState::NOT_SIGNALED);
-    scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
-    dump_providers_.push_back(mdp);
-    uint32_t cur_sample_idx = 0;
-    EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
-        .WillRepeatedly(Invoke(
-            [&cur_sample_idx, &evt, poll_function, num_samples](uint64_t* mem) {
-              if (cur_sample_idx >= num_samples) {
-                *mem = 1;
-                evt.Signal();
-              } else {
-                *mem = poll_function.Run(cur_sample_idx++);
-              }
-            }));
-
-    uint32_t num_peaks = 0;
-    EXPECT_CALL(on_peak_callback_, OnPeak())
-        .WillRepeatedly(Invoke([&num_peaks] { num_peaks++; }));
-    peak_detector_->Start(config);
-    evt.Wait();  // Wait for |num_samples| invocations of PollFastMemoryTotal().
-    peak_detector_->Stop();
-    EXPECT_EQ(num_samples, cur_sample_idx);
-    EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
-    return num_peaks;
-  }
-
-  // Called on the |bg_thread_|.
-  void MockGetDumpProviders(MemoryPeakDetector::DumpProvidersList* mdps) {
-    get_mdp_call_count_++;
-    *mdps = dump_providers_;
-  }
-
-  uint32_t GetNumGetDumpProvidersCalls() {
-    bg_thread_->FlushForTesting();
-    return get_mdp_call_count_;
-  }
-
-  scoped_refptr<MemoryDumpProviderInfo> CreateMockDumpProvider() {
-    std::unique_ptr<MockMemoryDumpProvider> mdp(new MockMemoryDumpProvider());
-    MemoryDumpProvider::Options opt;
-    opt.is_fast_polling_supported = true;
-    scoped_refptr<MemoryDumpProviderInfo> mdp_info(new MemoryDumpProviderInfo(
-        mdp.get(), "Mock MDP", nullptr, opt,
-        false /* whitelisted_for_background_mode */));
-
-    // The |mdp| instance will be destroyed together with the |mdp_info|.
-    mdp_info->owned_dump_provider = std::move(mdp);
-    return mdp_info;
-  }
-
-  static MockMemoryDumpProvider& GetMockMDP(
-      const scoped_refptr<MemoryDumpProviderInfo>& mdp_info) {
-    return *static_cast<MockMemoryDumpProvider*>(mdp_info->dump_provider);
-  }
-
-  static uint64_t PollFunctionThatCausesPeakViaStdDev(uint32_t sample_idx) {
-    // Start with a baseline of 50 MB.
-    if (sample_idx < kSlidingWindowNumSamples)
-      return 50000 + (sample_idx % 3) * 100;
-
-    // Then 10 samples around 80 MB
-    if (sample_idx < 10 + kSlidingWindowNumSamples)
-      return 80000 + (sample_idx % 3) * 200;
-
-    // Than back to 60 MB.
-    if (sample_idx < 2 * kSlidingWindowNumSamples)
-      return 60000 + (sample_idx % 3) * 100;
-
-    // Then 20 samples around 120 MB.
-    if (sample_idx < 20 + 2 * kSlidingWindowNumSamples)
-      return 120000 + (sample_idx % 3) * 200;
-
-    // Then back to idle to around 50 MB until the end.
-    return 50000 + (sample_idx % 3) * 100;
-  }
-
- protected:
-  MemoryPeakDetector::DumpProvidersList dump_providers_;
-  uint32_t get_mdp_call_count_;
-  std::unique_ptr<MemoryPeakDetector, FriendDeleter> peak_detector_;
-  std::unique_ptr<Thread> bg_thread_;
-  OnPeakDetectedWrapper on_peak_callback_;
-};
-
-const uint64_t MemoryPeakDetectorTest::kSlidingWindowNumSamples;
-
-TEST_F(MemoryPeakDetectorTest, GetDumpProvidersFunctionCalled) {
-  EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
-  peak_detector_->Start(kConfigNoCallbacks);
-  EXPECT_EQ(1u, GetNumGetDumpProvidersCalls());
-  EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
-
-  peak_detector_->Stop();
-  EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
-  EXPECT_EQ(0u, GetNumPollingTasksRan());
-}
-
-TEST_F(MemoryPeakDetectorTest, ThrottleAndNotifyBeforeInitialize) {
-  peak_detector_->TearDown();
-
-  WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
-                    WaitableEvent::InitialState::NOT_SIGNALED);
-  scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
-  EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
-      .WillRepeatedly(Invoke([&evt](uint64_t*) { evt.Signal(); }));
-  dump_providers_.push_back(mdp);
-  peak_detector_->Throttle();
-  peak_detector_->NotifyMemoryDumpProvidersChanged();
-  EXPECT_EQ(MemoryPeakDetector::NOT_INITIALIZED, GetPeakDetectorState());
-  RestartThreadAndReinitializePeakDetector();
-
-  peak_detector_->Start(kConfigNoCallbacks);
-  EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
-  evt.Wait();  // Wait for a PollFastMemoryTotal() call.
-
-  peak_detector_->Stop();
-  EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
-  EXPECT_EQ(1u, GetNumGetDumpProvidersCalls());
-  EXPECT_GE(GetNumPollingTasksRan(), 1u);
-}
-
-TEST_F(MemoryPeakDetectorTest, DoubleStop) {
-  peak_detector_->Start(kConfigNoCallbacks);
-  EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
-
-  peak_detector_->Stop();
-  EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
-
-  peak_detector_->Stop();
-  EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
-
-  EXPECT_EQ(1u, GetNumGetDumpProvidersCalls());
-  EXPECT_EQ(0u, GetNumPollingTasksRan());
-}
-
-TEST_F(MemoryPeakDetectorTest, OneDumpProviderRegisteredBeforeStart) {
-  WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
-                    WaitableEvent::InitialState::NOT_SIGNALED);
-  scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
-  EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
-      .WillRepeatedly(Invoke([&evt](uint64_t*) { evt.Signal(); }));
-  dump_providers_.push_back(mdp);
-
-  peak_detector_->Start(kConfigNoCallbacks);
-  evt.Wait();  // Signaled when PollFastMemoryTotal() is called on the MockMDP.
-  EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
-
-  peak_detector_->Stop();
-  EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
-  EXPECT_EQ(1u, GetNumGetDumpProvidersCalls());
-  EXPECT_GT(GetNumPollingTasksRan(), 0u);
-}
-
-TEST_F(MemoryPeakDetectorTest, ReInitializeAndRebindToNewThread) {
-  WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
-                    WaitableEvent::InitialState::NOT_SIGNALED);
-  scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
-  EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
-      .WillRepeatedly(Invoke([&evt](uint64_t*) { evt.Signal(); }));
-  dump_providers_.push_back(mdp);
-
-  for (int i = 0; i < 5; ++i) {
-    evt.Reset();
-    peak_detector_->Start(kConfigNoCallbacks);
-    evt.Wait();  // Wait for a PollFastMemoryTotal() call.
-    // Check that calling TearDown implicitly does a Stop().
-    peak_detector_->TearDown();
-
-    // Reinitialize and re-bind to a new task runner.
-    RestartThreadAndReinitializePeakDetector();
-  }
-}
-
-TEST_F(MemoryPeakDetectorTest, OneDumpProviderRegisteredOutOfBand) {
-  peak_detector_->Start(kConfigNoCallbacks);
-  EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
-  EXPECT_EQ(1u, GetNumGetDumpProvidersCalls());
-
-  // Check that no poll tasks are posted before any dump provider is registered.
-  PlatformThread::Sleep(5 * kConfigNoCallbacks.polling_interval_ms * kMs);
-  EXPECT_EQ(0u, GetNumPollingTasksRan());
-
-  // Registed the MDP After Start() has been issued and expect that the
-  // PeakDetector transitions ENABLED -> RUNNING on the next
-  // NotifyMemoryDumpProvidersChanged() call.
-  WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
-                    WaitableEvent::InitialState::NOT_SIGNALED);
-  scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
-  EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
-      .WillRepeatedly(Invoke([&evt](uint64_t*) { evt.Signal(); }));
-  dump_providers_.push_back(mdp);
-  peak_detector_->NotifyMemoryDumpProvidersChanged();
-
-  evt.Wait();  // Signaled when PollFastMemoryTotal() is called on the MockMDP.
-  EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
-  EXPECT_EQ(2u, GetNumGetDumpProvidersCalls());
-
-  // Now simulate the unregisration and expect that the PeakDetector transitions
-  // back to ENABLED.
-  dump_providers_.clear();
-  peak_detector_->NotifyMemoryDumpProvidersChanged();
-  EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
-  EXPECT_EQ(3u, GetNumGetDumpProvidersCalls());
-  uint32_t num_poll_tasks = GetNumPollingTasksRan();
-  EXPECT_GT(num_poll_tasks, 0u);
-
-  // At this point, no more polling tasks should be posted.
-  PlatformThread::Sleep(5 * kConfigNoCallbacks.polling_interval_ms * kMs);
-  peak_detector_->Stop();
-  EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
-  EXPECT_EQ(num_poll_tasks, GetNumPollingTasksRan());
-}
-
-// Test that a sequence of Start()/Stop() back-to-back doesn't end up creating
-// several outstanding timer tasks and instead respects the polling_interval_ms.
-TEST_F(MemoryPeakDetectorTest, StartStopQuickly) {
-  WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
-                    WaitableEvent::InitialState::NOT_SIGNALED);
-  scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
-  dump_providers_.push_back(mdp);
-  const uint32_t kNumPolls = 20;
-  uint32_t polls_done = 0;
-  EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
-      .WillRepeatedly(Invoke([&polls_done, &evt, kNumPolls](uint64_t*) {
-        if (++polls_done == kNumPolls)
-          evt.Signal();
-      }));
-
-  const TimeTicks tstart = TimeTicks::Now();
-  for (int i = 0; i < 5; i++) {
-    peak_detector_->Start(kConfigNoCallbacks);
-    peak_detector_->Stop();
-  }
-
-  bg_thread_->task_runner()->PostTask(
-      FROM_HERE, base::BindOnce([](uint32_t* polls_done) { *polls_done = 0; },
-                                &polls_done));
-
-  peak_detector_->Start(kConfigNoCallbacks);
-  EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
-  evt.Wait();  // Wait for kNumPolls.
-  const double time_ms = (TimeTicks::Now() - tstart).InMillisecondsF();
-
-  EXPECT_GE(time_ms, (kNumPolls - 1) * kConfigNoCallbacks.polling_interval_ms);
-  peak_detector_->Stop();
-}
-
-TEST_F(MemoryPeakDetectorTest, RegisterAndUnregisterTwoDumpProviders) {
-  WaitableEvent evt1(WaitableEvent::ResetPolicy::MANUAL,
-                     WaitableEvent::InitialState::NOT_SIGNALED);
-  WaitableEvent evt2(WaitableEvent::ResetPolicy::MANUAL,
-                     WaitableEvent::InitialState::NOT_SIGNALED);
-  scoped_refptr<MemoryDumpProviderInfo> mdp1 = CreateMockDumpProvider();
-  scoped_refptr<MemoryDumpProviderInfo> mdp2 = CreateMockDumpProvider();
-  EXPECT_CALL(GetMockMDP(mdp1), PollFastMemoryTotal(_))
-      .WillRepeatedly(Invoke([&evt1](uint64_t*) { evt1.Signal(); }));
-  EXPECT_CALL(GetMockMDP(mdp2), PollFastMemoryTotal(_))
-      .WillRepeatedly(Invoke([&evt2](uint64_t*) { evt2.Signal(); }));
-
-  // Register only one MDP and start the detector.
-  dump_providers_.push_back(mdp1);
-  peak_detector_->Start(kConfigNoCallbacks);
-  EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
-
-  // Wait for one poll task and then register also the other one.
-  evt1.Wait();
-  dump_providers_.push_back(mdp2);
-  peak_detector_->NotifyMemoryDumpProvidersChanged();
-  evt2.Wait();
-  EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
-
-  // Now unregister the first MDP and check that everything is still running.
-  dump_providers_.erase(dump_providers_.begin());
-  peak_detector_->NotifyMemoryDumpProvidersChanged();
-  EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
-
-  // Now unregister both and check that the detector goes to idle.
-  dump_providers_.clear();
-  peak_detector_->NotifyMemoryDumpProvidersChanged();
-  EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
-
-  // Now re-register both and check that the detector re-activates posting
-  // new polling tasks.
-  uint32_t num_poll_tasks = GetNumPollingTasksRan();
-  evt1.Reset();
-  evt2.Reset();
-  dump_providers_.push_back(mdp1);
-  dump_providers_.push_back(mdp2);
-  peak_detector_->NotifyMemoryDumpProvidersChanged();
-  evt1.Wait();
-  evt2.Wait();
-  EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
-  EXPECT_GT(GetNumPollingTasksRan(), num_poll_tasks);
-
-  // Stop everything, tear down the MDPs, restart the detector and check that
-  // it detector doesn't accidentally try to re-access them.
-  peak_detector_->Stop();
-  EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
-  dump_providers_.clear();
-  mdp1 = nullptr;
-  mdp2 = nullptr;
-
-  num_poll_tasks = GetNumPollingTasksRan();
-  peak_detector_->Start(kConfigNoCallbacks);
-  EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
-  PlatformThread::Sleep(5 * kConfigNoCallbacks.polling_interval_ms * kMs);
-
-  peak_detector_->Stop();
-  EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
-  EXPECT_EQ(num_poll_tasks, GetNumPollingTasksRan());
-
-  EXPECT_EQ(6u, GetNumGetDumpProvidersCalls());
-}
-
-// Tests the behavior of the static threshold detector, which is supposed to
-// detect a peak whenever an increase >= threshold is detected.
-TEST_F(MemoryPeakDetectorTest, StaticThreshold) {
-  const uint32_t kNumSamples = 2 * kSlidingWindowNumSamples;
-  constexpr uint32_t kNumSamplesPerStep = 10;
-  constexpr uint64_t kThreshold = 1000000;
-  peak_detector_->SetStaticThresholdForTesting(kThreshold);
-  const MemoryPeakDetector::Config kConfig(
-      1 /* polling_interval_ms */, 0 /* min_time_between_peaks_ms */,
-      false /* enable_verbose_poll_tracing */
-      );
-
-  // The mocked PollFastMemoryTotal() will return a step function,
-  // e.g. (1, 1, 1, 5, 5, 5, ...) where the steps are 2x threshold, in order to
-  // trigger only the static threshold logic.
-  auto poll_fn = Bind(
-      [](const uint32_t kNumSamplesPerStep, const uint64_t kThreshold,
-         uint32_t sample_idx) -> uint64_t {
-        return (1 + sample_idx / kNumSamplesPerStep) * 2 * kThreshold;
-      },
-      kNumSamplesPerStep, kThreshold);
-  uint32_t num_peaks = RunWithCustomPollFunction(kConfig, kNumSamples, poll_fn);
-  EXPECT_EQ(kNumSamples / kNumSamplesPerStep - 1, num_peaks);
-}
-
-// Checks the throttling logic of Config's |min_time_between_peaks_ms|.
-TEST_F(MemoryPeakDetectorTest, PeakCallbackThrottling) {
-  const size_t kNumSamples = 2 * kSlidingWindowNumSamples;
-  constexpr uint64_t kThreshold = 1000000;
-  peak_detector_->SetStaticThresholdForTesting(kThreshold);
-  const MemoryPeakDetector::Config kConfig(
-      1 /* polling_interval_ms */, 4 /* min_time_between_peaks_ms */,
-      false /* enable_verbose_poll_tracing */
-      );
-
-  // Each mock value returned is N * 2 * threshold, so all of them would be
-  // eligible to be a peak if throttling wasn't enabled.
-  auto poll_fn = Bind(
-      [](uint64_t kThreshold, uint32_t sample_idx) -> uint64_t {
-        return (sample_idx + 1) * 2 * kThreshold;
-      },
-      kThreshold);
-  uint32_t num_peaks = RunWithCustomPollFunction(kConfig, kNumSamples, poll_fn);
-  const uint32_t kExpectedThrottlingRate =
-      kConfig.min_time_between_peaks_ms / kConfig.polling_interval_ms;
-  EXPECT_LT(num_peaks, kNumSamples / kExpectedThrottlingRate);
-}
-
-TEST_F(MemoryPeakDetectorTest, StdDev) {
-  // Set the threshold to some arbitrarily high value, so that the static
-  // threshold logic is not hit in this test.
-  constexpr uint64_t kThreshold = 1024 * 1024 * 1024;
-  peak_detector_->SetStaticThresholdForTesting(kThreshold);
-  const size_t kNumSamples = 3 * kSlidingWindowNumSamples;
-  const MemoryPeakDetector::Config kConfig(
-      1 /* polling_interval_ms */, 0 /* min_time_between_peaks_ms */,
-      false /* enable_verbose_poll_tracing */
-      );
-
-  auto poll_fn = Bind(&PollFunctionThatCausesPeakViaStdDev);
-  uint32_t num_peaks = RunWithCustomPollFunction(kConfig, kNumSamples, poll_fn);
-  EXPECT_EQ(2u, num_peaks);  // 80 MB, 120 MB.
-}
-
-// Tests that Throttle() actually holds back peak notifications.
-TEST_F(MemoryPeakDetectorTest, Throttle) {
-  constexpr uint64_t kThreshold = 1024 * 1024 * 1024;
-  const uint32_t kNumSamples = 3 * kSlidingWindowNumSamples;
-  peak_detector_->SetStaticThresholdForTesting(kThreshold);
-  const MemoryPeakDetector::Config kConfig(
-      1 /* polling_interval_ms */, 0 /* min_time_between_peaks_ms */,
-      false /* enable_verbose_poll_tracing */
-      );
-
-  auto poll_fn = Bind(
-      [](MemoryPeakDetector* peak_detector, uint32_t sample_idx) -> uint64_t {
-        if (sample_idx % 20 == 20 - 1)
-          peak_detector->Throttle();
-        return PollFunctionThatCausesPeakViaStdDev(sample_idx);
-      },
-      Unretained(&*peak_detector_));
-  uint32_t num_peaks = RunWithCustomPollFunction(kConfig, kNumSamples, poll_fn);
-  EXPECT_EQ(0u, num_peaks);
-}
-
-// Tests that the windows stddev state is not carried over through
-// Stop() -> Start() sequences.
-TEST_F(MemoryPeakDetectorTest, RestartClearsState) {
-  constexpr uint64_t kThreshold = 1024 * 1024 * 1024;
-  peak_detector_->SetStaticThresholdForTesting(kThreshold);
-  const size_t kNumSamples = 3 * kSlidingWindowNumSamples;
-  const MemoryPeakDetector::Config kConfig(
-      1 /* polling_interval_ms */, 0 /* min_time_between_peaks_ms */,
-      false /* enable_verbose_poll_tracing */
-      );
-  auto poll_fn = Bind(
-      [](MemoryPeakDetector* peak_detector,
-         const uint32_t kSlidingWindowNumSamples,
-         MemoryPeakDetector::Config kConfig, uint32_t sample_idx) -> uint64_t {
-        if (sample_idx % kSlidingWindowNumSamples ==
-            kSlidingWindowNumSamples - 1) {
-          peak_detector->Stop();
-          peak_detector->Start(kConfig);
-        }
-        return PollFunctionThatCausesPeakViaStdDev(sample_idx);
-      },
-      Unretained(&*peak_detector_), kSlidingWindowNumSamples, kConfig);
-  uint32_t num_peaks = RunWithCustomPollFunction(kConfig, kNumSamples, poll_fn);
-  EXPECT_EQ(0u, num_peaks);
-}
-
-}  // namespace trace_event
-}  // namespace base
diff --git a/base/trace_event/trace_config_memory_test_util.h b/base/trace_event/trace_config_memory_test_util.h
index 57608fd6..cc49a65c 100644
--- a/base/trace_event/trace_config_memory_test_util.h
+++ b/base/trace_event/trace_config_memory_test_util.h
@@ -144,32 +144,6 @@
         "}",
         MemoryDumpManager::kTraceCategory, period_ms);
   }
-
-  static std::string GetTraceConfig_PeakDetectionTrigger(int heavy_period) {
-    return StringPrintf(
-        "{"
-        "\"enable_argument_filter\":false,"
-        "\"enable_systrace\":false,"
-        "\"excluded_categories\":["
-        "\"*\""
-        "],"
-        "\"included_categories\":["
-        "\"%s\""
-        "],"
-        "\"memory_dump_config\":{"
-        "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
-        "\"triggers\":["
-        "{"
-        "\"min_time_between_dumps_ms\":%d,"
-        "\"mode\":\"detailed\","
-        "\"type\":\"peak_memory_usage\""
-        "}"
-        "]"
-        "},"
-        "\"record_mode\":\"record-until-full\""
-        "}",
-        MemoryDumpManager::kTraceCategory, heavy_period);
-  }
 };
 
 }  // namespace trace_event
diff --git a/base/trace_event/trace_config_unittest.cc b/base/trace_event/trace_config_unittest.cc
index 3cb6d61b..efdbffb 100644
--- a/base/trace_event/trace_config_unittest.cc
+++ b/base/trace_event/trace_config_unittest.cc
@@ -56,7 +56,7 @@
     "{"
     "\"min_time_between_dumps_ms\":1000,"
     "\"mode\":\"detailed\","
-    "\"type\":\"peak_memory_usage\""
+    "\"type\":\"periodic_interval\""
     "}"
     "]"
     "},"
@@ -634,16 +634,6 @@
   EXPECT_EQ(1u, tc3.memory_dump_config().triggers[0].min_time_between_dumps_ms);
   EXPECT_EQ(MemoryDumpLevelOfDetail::BACKGROUND,
             tc3.memory_dump_config().triggers[0].level_of_detail);
-
-  std::string tc_str4 =
-      TraceConfigMemoryTestUtil::GetTraceConfig_PeakDetectionTrigger(
-          1 /* heavy_period */);
-  TraceConfig tc4(tc_str4);
-  EXPECT_EQ(tc_str4, tc4.ToString());
-  ASSERT_EQ(1u, tc4.memory_dump_config().triggers.size());
-  EXPECT_EQ(1u, tc4.memory_dump_config().triggers[0].min_time_between_dumps_ms);
-  EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
-            tc4.memory_dump_config().triggers[0].level_of_detail);
 }
 
 TEST(TraceConfigTest, EmptyMemoryDumpConfigTest) {
diff --git a/services/resource_coordinator/memory_instrumentation/coordinator_impl.cc b/services/resource_coordinator/memory_instrumentation/coordinator_impl.cc
index a83d51e..7ea0aab 100644
--- a/services/resource_coordinator/memory_instrumentation/coordinator_impl.cc
+++ b/services/resource_coordinator/memory_instrumentation/coordinator_impl.cc
@@ -277,8 +277,7 @@
   // another request in the queue with the same level of detail, there's no
   // point in enqueuing this request.
   if (another_dump_is_queued &&
-      (args.dump_type == MemoryDumpType::PERIODIC_INTERVAL ||
-       args.dump_type == MemoryDumpType::PEAK_MEMORY_USAGE)) {
+      args.dump_type == MemoryDumpType::PERIODIC_INTERVAL) {
     for (const auto& request : queued_memory_dump_requests_) {
       if (request.args.level_of_detail == args.level_of_detail) {
         VLOG(1) << "RequestGlobalMemoryDump("
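(Sketch, not part of this CL.) The coalescing rule that survives above: a new PERIODIC_INTERVAL request is dropped when a queued request already has the same level of detail, and PEAK_MEMORY_USAGE no longer takes part. A minimal sketch with simplified stand-in types; the real code walks queued_memory_dump_requests_ in CoordinatorImpl:

#include <deque>

// Stand-in enums; the real ones live in base/trace_event.
enum class MemoryDumpType { PERIODIC_INTERVAL, EXPLICITLY_TRIGGERED, SUMMARY_ONLY };
enum class MemoryDumpLevelOfDetail { BACKGROUND, LIGHT, DETAILED };

struct QueuedRequest {
  MemoryDumpType dump_type;
  MemoryDumpLevelOfDetail level_of_detail;
};

// Returns true when |request| may be dropped because an equivalent dump is
// already pending. Only periodic dumps are coalesced now.
bool ShouldSkipEnqueue(const std::deque<QueuedRequest>& queued,
                       const QueuedRequest& request) {
  if (queued.empty() || request.dump_type != MemoryDumpType::PERIODIC_INTERVAL)
    return false;
  for (const auto& pending : queued) {
    if (pending.level_of_detail == request.level_of_detail)
      return true;
  }
  return false;
}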
diff --git a/services/resource_coordinator/public/cpp/memory_instrumentation/client_process_impl.h b/services/resource_coordinator/public/cpp/memory_instrumentation/client_process_impl.h
index 3c5c8f6..4a311711 100644
--- a/services/resource_coordinator/public/cpp/memory_instrumentation/client_process_impl.h
+++ b/services/resource_coordinator/public/cpp/memory_instrumentation/client_process_impl.h
@@ -54,8 +54,7 @@
   ~ClientProcessImpl() override;
 
   // Implements base::trace_event::MemoryDumpManager::RequestGlobalDumpCallback.
-  // This function will be called by the MemoryDumpScheduler::OnTick and
-  // MemoryPeakDetector.
+  // This function will be called by the MemoryDumpScheduler::OnTick.
   void RequestGlobalMemoryDump_NoCallback(
       base::trace_event::MemoryDumpType type,
       base::trace_event::MemoryDumpLevelOfDetail level_of_detail);
diff --git a/services/resource_coordinator/public/cpp/memory_instrumentation/memory_instrumentation_mojom_traits.cc b/services/resource_coordinator/public/cpp/memory_instrumentation/memory_instrumentation_mojom_traits.cc
index 16dc11b8..a20880a 100644
--- a/services/resource_coordinator/public/cpp/memory_instrumentation/memory_instrumentation_mojom_traits.cc
+++ b/services/resource_coordinator/public/cpp/memory_instrumentation/memory_instrumentation_mojom_traits.cc
@@ -16,14 +16,12 @@
       return memory_instrumentation::mojom::DumpType::PERIODIC_INTERVAL;
     case base::trace_event::MemoryDumpType::EXPLICITLY_TRIGGERED:
       return memory_instrumentation::mojom::DumpType::EXPLICITLY_TRIGGERED;
-    case base::trace_event::MemoryDumpType::PEAK_MEMORY_USAGE:
-      return memory_instrumentation::mojom::DumpType::PEAK_MEMORY_USAGE;
     case base::trace_event::MemoryDumpType::SUMMARY_ONLY:
       return memory_instrumentation::mojom::DumpType::SUMMARY_ONLY;
     default:
       CHECK(false) << "Invalid type: " << static_cast<uint8_t>(type);
       // This should not be reached. Just return a random value.
-      return memory_instrumentation::mojom::DumpType::PEAK_MEMORY_USAGE;
+      return memory_instrumentation::mojom::DumpType::PERIODIC_INTERVAL;
   }
 }
 
@@ -39,9 +37,6 @@
     case memory_instrumentation::mojom::DumpType::EXPLICITLY_TRIGGERED:
       *out = base::trace_event::MemoryDumpType::EXPLICITLY_TRIGGERED;
       break;
-    case memory_instrumentation::mojom::DumpType::PEAK_MEMORY_USAGE:
-      *out = base::trace_event::MemoryDumpType::PEAK_MEMORY_USAGE;
-      break;
     case memory_instrumentation::mojom::DumpType::SUMMARY_ONLY:
       *out = base::trace_event::MemoryDumpType::SUMMARY_ONLY;
       break;
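(Sketch, not part of this CL.) After dropping PEAK_MEMORY_USAGE, the traits above only need to map the three remaining values in each direction. A minimal sketch using simplified stand-in enums rather than the generated mojom bindings:

// Stand-ins for base::trace_event::MemoryDumpType and mojom::DumpType.
enum class BaseDumpType { PERIODIC_INTERVAL, EXPLICITLY_TRIGGERED, SUMMARY_ONLY };
enum class MojoDumpType { PERIODIC_INTERVAL, EXPLICITLY_TRIGGERED, SUMMARY_ONLY };

MojoDumpType ToMojo(BaseDumpType type) {
  switch (type) {
    case BaseDumpType::PERIODIC_INTERVAL:
      return MojoDumpType::PERIODIC_INTERVAL;
    case BaseDumpType::EXPLICITLY_TRIGGERED:
      return MojoDumpType::EXPLICITLY_TRIGGERED;
    case BaseDumpType::SUMMARY_ONLY:
      return MojoDumpType::SUMMARY_ONLY;
  }
  // Unreachable for valid input; mirrors the CHECK(false) fallback above by
  // returning the first value.
  return MojoDumpType::PERIODIC_INTERVAL;
}

bool FromMojo(MojoDumpType in, BaseDumpType* out) {
  switch (in) {
    case MojoDumpType::PERIODIC_INTERVAL:
      *out = BaseDumpType::PERIODIC_INTERVAL;
      return true;
    case MojoDumpType::EXPLICITLY_TRIGGERED:
      *out = BaseDumpType::EXPLICITLY_TRIGGERED;
      return true;
    case MojoDumpType::SUMMARY_ONLY:
      *out = BaseDumpType::SUMMARY_ONLY;
      return true;
  }
  return false;  // Unknown value from the wire.
}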
diff --git a/services/resource_coordinator/public/cpp/memory_instrumentation/tracing_integration_unittest.cc b/services/resource_coordinator/public/cpp/memory_instrumentation/tracing_integration_unittest.cc
index 9b5d2dc5..6759ae5f 100644
--- a/services/resource_coordinator/public/cpp/memory_instrumentation/tracing_integration_unittest.cc
+++ b/services/resource_coordinator/public/cpp/memory_instrumentation/tracing_integration_unittest.cc
@@ -76,8 +76,6 @@
   MOCK_METHOD0(Destructor, void());
   MOCK_METHOD2(OnMemoryDump,
                bool(const MemoryDumpArgs& args, ProcessMemoryDump* pmd));
-  MOCK_METHOD1(PollFastMemoryTotal, void(uint64_t* memory_total));
-  MOCK_METHOD0(SuspendFastMemoryPolling, void());
 
   MockMemoryDumpProvider() : enable_mock_destructor(false) {
     ON_CALL(*this, OnMemoryDump(_, _))
@@ -85,10 +83,6 @@
             Invoke([](const MemoryDumpArgs&, ProcessMemoryDump* pmd) -> bool {
               return true;
             }));
-
-    ON_CALL(*this, PollFastMemoryTotal(_))
-        .WillByDefault(
-            Invoke([](uint64_t* memory_total) -> void { NOTREACHED(); }));
   }
 
   ~MockMemoryDumpProvider() override {
@@ -455,57 +449,6 @@
   DisableTracing();
 }
 
-TEST_F(MemoryTracingIntegrationTest, TestPollingOnDumpThread) {
-  InitializeClientProcess(mojom::ProcessType::RENDERER);
-  std::unique_ptr<MockMemoryDumpProvider> mdp1(new MockMemoryDumpProvider());
-  std::unique_ptr<MockMemoryDumpProvider> mdp2(new MockMemoryDumpProvider());
-  mdp1->enable_mock_destructor = true;
-  mdp2->enable_mock_destructor = true;
-  EXPECT_CALL(*mdp1, Destructor());
-  EXPECT_CALL(*mdp2, Destructor());
-
-  MemoryDumpProvider::Options options;
-  options.is_fast_polling_supported = true;
-  RegisterDumpProvider(mdp1.get(), nullptr, options);
-
-  base::RunLoop run_loop;
-  auto test_task_runner = base::ThreadTaskRunnerHandle::Get();
-  auto quit_closure = run_loop.QuitClosure();
-  MemoryDumpManager* mdm = mdm_.get();
-
-  EXPECT_CALL(*mdp1, PollFastMemoryTotal(_))
-      .WillOnce(Invoke([&mdp2, options, this](uint64_t*) {
-        RegisterDumpProvider(mdp2.get(), nullptr, options);
-      }))
-      .WillOnce(Return())
-      .WillOnce(Invoke([mdm, &mdp2](uint64_t*) {
-        mdm->UnregisterAndDeleteDumpProviderSoon(std::move(mdp2));
-      }))
-      .WillOnce(Invoke([test_task_runner, quit_closure](uint64_t*) {
-        test_task_runner->PostTask(FROM_HERE, quit_closure);
-      }))
-      .WillRepeatedly(Return());
-
-  // We expect a call to |mdp1| because it is still registered at the time the
-  // peak detector is Stop()-ed (upon OnTraceLogDisabled()). We do NOT expect a
-  // call for |mdp2|, because that gets unregistered before the Stop().
-  EXPECT_CALL(*mdp1, SuspendFastMemoryPolling()).Times(1);
-  EXPECT_CALL(*mdp2, SuspendFastMemoryPolling()).Times(0);
-
-  // |mdp2| should be invoked exactly twice:
-  // - once after the registration, when |mdp1| hits the first Return()
-  // - the 2nd time, when |mdp1| unregisters |mdp2|. The unregistration is
-  //   posted and will necessarily happen after the polling task.
-  EXPECT_CALL(*mdp2, PollFastMemoryTotal(_)).Times(2).WillRepeatedly(Return());
-
-  EnableMemoryInfraTracingWithTraceConfig(
-      base::trace_event::TraceConfigMemoryTestUtil::
-          GetTraceConfig_PeakDetectionTrigger(1));
-  run_loop.Run();
-  DisableTracing();
-  mdm_->UnregisterAndDeleteDumpProviderSoon(std::move(mdp1));
-}
-
 // Regression test for https://crbug.com/766274 .
 TEST_F(MemoryTracingIntegrationTest, GenerationChangeDoesntReenterMDM) {
   InitializeClientProcess(mojom::ProcessType::RENDERER);
diff --git a/services/resource_coordinator/public/mojom/memory_instrumentation/memory_instrumentation.mojom b/services/resource_coordinator/public/mojom/memory_instrumentation/memory_instrumentation.mojom
index 7ef67e6..2782e877 100644
--- a/services/resource_coordinator/public/mojom/memory_instrumentation/memory_instrumentation.mojom
+++ b/services/resource_coordinator/public/mojom/memory_instrumentation/memory_instrumentation.mojom
@@ -11,7 +11,6 @@
 enum DumpType {
   PERIODIC_INTERVAL,
   EXPLICITLY_TRIGGERED,
-  PEAK_MEMORY_USAGE,
   SUMMARY_ONLY
 };
 
diff --git a/tools/gn/bootstrap/bootstrap.py b/tools/gn/bootstrap/bootstrap.py
index 300abac..7ae06c3 100755
--- a/tools/gn/bootstrap/bootstrap.py
+++ b/tools/gn/bootstrap/bootstrap.py
@@ -646,7 +646,6 @@
       'base/trace_event/memory_dump_request_args.cc',
       'base/trace_event/memory_dump_scheduler.cc',
       'base/trace_event/memory_infra_background_whitelist.cc',
-      'base/trace_event/memory_peak_detector.cc',
       'base/trace_event/memory_usage_estimator.cc',
       'base/trace_event/process_memory_dump.cc',
       'base/trace_event/trace_buffer.cc',