ui: Add FrameMetrics stream analyzer helpers.

This adds the StreamAnalyzer class, which is responsible
for calculating the mean, RMS, SMR, standard deviation,
variance of roots, and threshold percentiles of a
continuous stream of values.

It owns a Histogram and a WindowedAnalyzer, to which it
delegates percentile estimation and the tracking of the
windows of time where the metrics are worst, respectively.
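
A rough usage sketch (the client, shared client, thresholds, and
histogram names below stand in for whatever the eventual caller
provides; they are illustrative only):

  StreamAnalyzer analyzer(&client, &shared_client, thresholds,
                          std::move(histogram));
  analyzer.AddSample(value, weight);  // weight: duration in us, or 1.
  double mean = analyzer.ComputeMean();
  PercentileResults percentiles = analyzer.ComputePercentiles();
  std::vector<ThresholdResult> ge_fractions =
      analyzer.ComputeThresholds();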

Bug: 807463
Change-Id: I718b06778582d0628b964747f61352ae291452f0
Reviewed-on: https://chromium-review.googlesource.com/972568
Commit-Queue: Brian Anderson <brianderson@chromium.org>
Reviewed-by: Sadrul Chowdhury <sadrul@chromium.org>
Reviewed-by: Timothy Dresser <tdresser@chromium.org>
Cr-Commit-Position: refs/heads/master@{#546168}
diff --git a/ui/latency/BUILD.gn b/ui/latency/BUILD.gn
index 49055f2..2aeebd9 100644
--- a/ui/latency/BUILD.gn
+++ b/ui/latency/BUILD.gn
@@ -16,6 +16,8 @@
     "latency_info.h",
     "latency_tracker.cc",
     "latency_tracker.h",
+    "stream_analyzer.cc",
+    "stream_analyzer.h",
     "windowed_analyzer.cc",
     "windowed_analyzer.h",
   ]
@@ -44,10 +46,11 @@
 test("latency_unittests") {
   sources = [
     "fixed_point_unittest.cc",
-    "histograms_test_common.cc",
-    "histograms_test_common.h",
+    "frame_metrics_test_common.cc",
+    "frame_metrics_test_common.h",
     "histograms_unittest.cc",
     "latency_info_unittest.cc",
+    "stream_analyzer_unittest.cc",
     "windowed_analyzer_unittest.cc",
   ]
 
@@ -77,9 +80,9 @@
 
 test("latency_perftests") {
   sources = [
+    "frame_metrics_test_common.cc",
+    "frame_metrics_test_common.h",
     "histograms_perftest.cc",
-    "histograms_test_common.cc",
-    "histograms_test_common.h",
   ]
 
   deps = [
diff --git a/ui/latency/fixed_point.h b/ui/latency/fixed_point.h
index 7e36bc9..11b4191 100644
--- a/ui/latency/fixed_point.h
+++ b/ui/latency/fixed_point.h
@@ -23,6 +23,8 @@
 // root and undoing that shift after squaring in the SMR calculation.
 constexpr int kFixedPointRootShift = 32;
 constexpr int64_t kFixedPointRootMultiplier{1LL << kFixedPointRootShift};
+constexpr int64_t kFixedPointRootMultiplierSqrt{1LL
+                                                << (kFixedPointRootShift / 2)};
 
 // We need a huge range to accumulate values for RMS calculations, which
 // need double the range internally compared to the range we are targeting
diff --git a/ui/latency/fixed_point_unittest.cc b/ui/latency/fixed_point_unittest.cc
index 20d1e4d..02548fd03d 100644
--- a/ui/latency/fixed_point_unittest.cc
+++ b/ui/latency/fixed_point_unittest.cc
@@ -54,6 +54,15 @@
   EXPECT_LE(error1, 1);
 }
 
+TEST(FrameMetricsFixedPointTest, kFixedPointRootMultiplierSqrt) {
+  EXPECT_EQ(kFixedPointRootMultiplierSqrt,
+            std::sqrt(kFixedPointRootMultiplier));
+}
+
+TEST(FrameMetricsFixedPointTest, kFixedPointRootShift) {
+  EXPECT_EQ(kFixedPointRootMultiplier, 1LL << kFixedPointRootShift);
+}
+
 // Verify Accumulator96b's squared weight constructor.
 TEST(FrameMetricsFixedPointTest, Accumulator96bConstructor) {
   // A small value that fits in 32 bits.
diff --git a/ui/latency/histograms_test_common.cc b/ui/latency/frame_metrics_test_common.cc
similarity index 63%
rename from ui/latency/histograms_test_common.cc
rename to ui/latency/frame_metrics_test_common.cc
index 5fd2d5df..835cc00c 100644
--- a/ui/latency/histograms_test_common.cc
+++ b/ui/latency/frame_metrics_test_common.cc
@@ -2,13 +2,29 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "ui/latency/histograms_test_common.h"
+#include "ui/latency/frame_metrics_test_common.h"
 
 #include "base/logging.h"
 
 namespace ui {
 namespace frame_metrics {
 
+double TestStreamAnalyzerClient::TransformResult(double result) const {
+  return result * result_scale;
+}
+
+template <>
+void AddSamplesHelper(StreamAnalyzer* analyzer,
+                      uint64_t value,
+                      uint64_t weight,
+                      size_t iterations) {
+  DCHECK_LE(value, std::numeric_limits<uint32_t>::max());
+  DCHECK_LE(weight, std::numeric_limits<uint32_t>::max());
+  for (size_t i = 0; i < iterations; i++) {
+    analyzer->AddSample(value, weight);
+  }
+}
+
 TestRatioBoundaries::TestRatioBoundaries() {
   const uint32_t one = kFixedPointMultiplier;
   const uint32_t half = one / 2;
@@ -52,5 +68,25 @@
   DCHECK_EQ(112, i);
 }
 
+TestHistogram::TestHistogram() = default;
+TestHistogram::~TestHistogram() = default;
+
+void TestHistogram::AddSample(uint32_t value, uint32_t weight) {
+  added_samples_.push_back({value, weight});
+}
+
+PercentileResults TestHistogram::CalculatePercentiles() const {
+  return results_;
+}
+
+std::vector<TestHistogram::ValueWeightPair>
+TestHistogram::GetAndResetAllAddedSamples() {
+  return std::move(added_samples_);
+}
+
+void TestHistogram::SetResults(PercentileResults results) {
+  results_ = results;
+}
+
 }  // namespace frame_metrics
 }  // namespace ui
diff --git a/ui/latency/frame_metrics_test_common.h b/ui/latency/frame_metrics_test_common.h
new file mode 100644
index 0000000..170b0144
--- /dev/null
+++ b/ui/latency/frame_metrics_test_common.h
@@ -0,0 +1,174 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef UI_LATENCY_FRAME_METRICS_TEST_COMMON_H_
+#define UI_LATENCY_FRAME_METRICS_TEST_COMMON_H_
+
+#include "ui/latency/fixed_point.h"
+#include "ui/latency/histograms.h"
+#include "ui/latency/stream_analyzer.h"
+#include "ui/latency/windowed_analyzer.h"
+
+#include <array>
+
+// Some convenience macros for checking expected error.
+#define EXPECT_ABS_LT(a, b) EXPECT_LT(std::abs(a), std::abs(b))
+#define EXPECT_ABS_LE(a, b) EXPECT_LE(std::abs(a), std::abs(b))
+#define EXPECT_NEAR_SMR(expected, actual, weight) \
+  EXPECT_NEAR(expected, actual, MaxErrorSMR(expected, weight))
+#define EXPECT_NEAR_VARIANCE_OF_ROOT(expected, actual, mean, weight) \
+  EXPECT_NEAR(expected, actual, MaxErrorSMR(mean, weight))
+
+namespace ui {
+namespace frame_metrics {
+
+// A simple client to verify it is actually used.
+class TestStreamAnalyzerClient : public StreamAnalyzerClient {
+ public:
+  double TransformResult(double result) const override;
+  static constexpr double result_scale = 2.0;
+};
+
+using TestWindowedAnalyzerClient = TestStreamAnalyzerClient;
+
+// The WindowedAnalyzer expects the caller to give it some precomputed values,
+// even though they are redundant. Precompute them with a helper function to
+// remove boilerplate.
+// A specialized version of this for StreamAnalyzer that doesn't precompute
+// the weighted values is defined in the implementation file.
+template <typename AnalyzerType>
+void AddSamplesHelper(AnalyzerType* analyzer,
+                      uint64_t value,
+                      uint64_t weight,
+                      size_t iterations) {
+  DCHECK_LE(value, std::numeric_limits<uint32_t>::max());
+  DCHECK_LE(weight, std::numeric_limits<uint32_t>::max());
+  uint64_t weighted_value = weight * value;
+  uint64_t weighted_root = weight * std::sqrt(value << kFixedPointRootShift);
+  Accumulator96b weighted_square(value, weight);
+  for (size_t i = 0; i < iterations; i++) {
+    analyzer->AddSample(value, weight, weighted_value, weighted_root,
+                        weighted_square);
+  }
+}
+
+// A specialization of the templatized AddSamplesHelper above for the
+// StreamAnalyzer, which doesn't need to have its weighted values
+// precomputed.
+template <>
+void AddSamplesHelper(StreamAnalyzer* analyzer,
+                      uint64_t value,
+                      uint64_t weight,
+                      size_t iterations);
+
+// For each element in |values|, moves the |shared_client|'s window forward by
+// 1 microsecond and adds the element multiplied by kFixedPointMultiplier.
+template <typename AnalyzerType>
+void AddPatternHelper(SharedWindowedAnalyzerClient* shared_client,
+                      AnalyzerType* analyzer,
+                      const std::vector<uint32_t>& values,
+                      const uint32_t weight) {
+  for (auto i : values) {
+    shared_client->window_begin += base::TimeDelta::FromMicroseconds(1);
+    shared_client->window_end += base::TimeDelta::FromMicroseconds(1);
+    AddSamplesHelper(analyzer, i * kFixedPointMultiplier, weight, 1);
+  }
+}
+
+// Same as AddPatternHelper, but uses each value (+1) as its own weight.
+// The "Cubed" name comes from the fact that the squared_accumulator
+// for the RMS will effectively be a "cubed accumulator".
+template <typename AnalyzerType>
+void AddCubedPatternHelper(SharedWindowedAnalyzerClient* shared_client,
+                           AnalyzerType* analyzer,
+                           const std::vector<uint32_t>& values) {
+  for (auto i : values) {
+    shared_client->window_begin += base::TimeDelta::FromMicroseconds(1);
+    shared_client->window_end += base::TimeDelta::FromMicroseconds(1);
+    // weight is i+1 to avoid divide by zero.
+    AddSamplesHelper(analyzer, i, i + 1, 1);
+  }
+}
+
+// Mean and RMS can be exact for most values, however SMR loses a bit of
+// precision internally when accumulating the roots. Make sure the SMR
+// precision is at least within .5 (i.e. rounded to the nearest integer
+// properly), or 8 decimal places if that is less precise.
+// When used with kFixedPointMultiplier, this gives us a total precision of
+// between ~5 and ~13 decimal places.
+// The precision should be even better when the sample's |weight| > 1 since
+// the implementation should only do any rounding after scaling by weight.
+inline double MaxErrorSMR(double expected_value, uint64_t weight) {
+  return std::max(.5, 1e-8 * expected_value / weight);
+}
+
+// This class initializes the ratio boundaries on construction in a way that
+// is easier to follow than the procedural code in the RatioHistogram
+// implementation.
+class TestRatioBoundaries {
+ public:
+  TestRatioBoundaries();
+  uint64_t operator[](size_t i) const { return boundaries[i]; }
+  size_t size() const { return boundaries.size(); }
+
+ public:
+  // uint64_t since the last boundary needs 33 bits.
+  std::array<uint64_t, 112> boundaries;
+};
+
+// An explicit list of VSync boundaries to verify the procedurally generated
+// ones in the implementation.
+static constexpr std::array<uint32_t, 99> kTestVSyncBoundries = {
+    {// C0: [0,1) (1 bucket).
+     0,
+     // C1: Powers of two from 1 to 2048 us @ 50% precision (12 buckets)
+     1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048,
+     // C2: Every 8 Hz from 256 Hz to 128 Hz @ 3-6% precision (16 buckets)
+     3906, 4032, 4167, 4310, 4464, 4630, 4808, 5000, 5208, 5435, 5682, 5952,
+     6250, 6579, 6944, 7353,
+     // C3: Every 4 Hz from 128 Hz to 64 Hz @ 3-6% precision (16 buckets)
+     7813, 8065, 8333, 8621, 8929, 9259, 9615, 10000, 10417, 10870, 11364,
+     11905, 12500, 13158, 13889, 14706,
+     // C4: Every 2 Hz from 64 Hz to 32 Hz @ 3-6% precision (16 buckets)
+     15625, 16129, 16667, 17241, 17857, 18519, 19231, 20000, 20833, 21739,
+     22727, 23810, 25000, 26316, 27778, 29412,
+     // C5: Every 1 Hz from 32 Hz to 1 Hz @ 3-33% precision (31 buckets)
+     31250, 32258, 33333, 34483, 35714, 37037, 38462, 40000, 41667, 43478,
+     45455, 47619, 50000, 52632, 55556, 58824, 62500, 66667, 71429, 76923,
+     83333, 90909, 100000, 111111, 125000, 142857, 166667, 200000, 250000,
+     333333, 500000,
+     // C6: Powers of two from 1s to 32s @ 50% precision (6 buckets)
+     1000000, 2000000, 4000000, 8000000, 16000000, 32000000,
+     // C7: Extra value to simplify estimate in Percentiles().
+     64000000}};
+
+// A histogram that can be used for dependency injection in tests.
+class TestHistogram : public Histogram {
+ public:
+  struct ValueWeightPair {
+    uint32_t value;
+    uint32_t weight;
+  };
+
+  TestHistogram();
+  ~TestHistogram() override;
+
+  // Histogram interface.
+  void AddSample(uint32_t value, uint32_t weight) override;
+  PercentileResults CalculatePercentiles() const override;
+  void Reset() override {}
+
+  // Test interface.
+  std::vector<ValueWeightPair> GetAndResetAllAddedSamples();
+  void SetResults(PercentileResults results);
+
+ private:
+  PercentileResults results_;
+  std::vector<ValueWeightPair> added_samples_;
+};
+
+}  // namespace frame_metrics
+}  // namespace ui
+
+#endif  // UI_LATENCY_FRAME_METRICS_TEST_COMMON_H_
diff --git a/ui/latency/histograms_perftest.cc b/ui/latency/histograms_perftest.cc
index a44e0331..6173cbb 100644
--- a/ui/latency/histograms_perftest.cc
+++ b/ui/latency/histograms_perftest.cc
@@ -12,7 +12,7 @@
 #include "testing/gtest/include/gtest/gtest.h"
 #include "testing/perf/perf_test.h"
 #include "ui/latency/fixed_point.h"
-#include "ui/latency/histograms_test_common.h"
+#include "ui/latency/frame_metrics_test_common.h"
 
 namespace ui {
 namespace frame_metrics {
@@ -147,7 +147,7 @@
     for (const auto& b : kTestVSyncBoundries) {
       bucket_ranges_.set_range(i++, b);
     }
-    // BucketRanges needs the last elemet set to INT_MAX.
+    // BucketRanges needs the last element set to INT_MAX.
     bucket_ranges_.set_range(i++, INT_MAX);
   }
 
diff --git a/ui/latency/histograms_test_common.h b/ui/latency/histograms_test_common.h
deleted file mode 100644
index 5b5657f..0000000
--- a/ui/latency/histograms_test_common.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef UI_LATENCY_HISTOGRAMS_TEST_COMMON_H_
-#define UI_LATENCY_HISTOGRAMS_TEST_COMMON_H_
-
-#include "ui/latency/fixed_point.h"
-
-#include <array>
-
-namespace ui {
-namespace frame_metrics {
-
-// This class initializes the ratio boundaries on construction in a way that
-// is easier to follow than the procedural code in the RatioHistogram
-// implementation.
-class TestRatioBoundaries {
- public:
-  TestRatioBoundaries();
-  uint64_t operator[](size_t i) const { return boundaries[i]; }
-  size_t size() const { return boundaries.size(); }
-
- public:
-  // uint64_t since the last boundary needs 33 bits.
-  std::array<uint64_t, 112> boundaries;
-};
-
-// An explicit list of VSync boundaries to verify the procedurally generated
-// ones in the implementation.
-static constexpr std::array<uint32_t, 99> kTestVSyncBoundries = {
-    {// C0: [0,1) (1 bucket).
-     0,
-     // C1: Powers of two from 1 to 2048 us @ 50% precision (12 buckets)
-     1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048,
-     // C2: Every 8 Hz from 256 Hz to 128 Hz @ 3-6% precision (16 buckets)
-     3906, 4032, 4167, 4310, 4464, 4630, 4808, 5000, 5208, 5435, 5682, 5952,
-     6250, 6579, 6944, 7353,
-     // C3: Every 4 Hz from 128 Hz to 64 Hz @ 3-6% precision (16 buckets)
-     7813, 8065, 8333, 8621, 8929, 9259, 9615, 10000, 10417, 10870, 11364,
-     11905, 12500, 13158, 13889, 14706,
-     // C4: Every 2 Hz from 64 Hz to 32 Hz @ 3-6% precision (16 buckets)
-     15625, 16129, 16667, 17241, 17857, 18519, 19231, 20000, 20833, 21739,
-     22727, 23810, 25000, 26316, 27778, 29412,
-     // C5: Every 1 Hz from 32 Hz to 1 Hz @ 3-33% precision (31 buckets)
-     31250, 32258, 33333, 34483, 35714, 37037, 38462, 40000, 41667, 43478,
-     45455, 47619, 50000, 52632, 55556, 58824, 62500, 66667, 71429, 76923,
-     83333, 90909, 100000, 111111, 125000, 142857, 166667, 200000, 250000,
-     333333, 500000,
-     // C6: Powers of two from 1s to 32s @ 50% precision (6 buckets)
-     1000000, 2000000, 4000000, 8000000, 16000000, 32000000,
-     // C7: Extra value to simplify estimate in Percentiles().
-     64000000}};
-
-}  // namespace frame_metrics
-}  // namespace ui
-
-#endif  // UI_LATENCY_HISTOGRAMS_TEST_COMMON_H_
diff --git a/ui/latency/histograms_unittest.cc b/ui/latency/histograms_unittest.cc
index 4687c18..45f62a5 100644
--- a/ui/latency/histograms_unittest.cc
+++ b/ui/latency/histograms_unittest.cc
@@ -11,7 +11,7 @@
 #include "base/time/time.h"
 #include "testing/gtest/include/gtest/gtest.h"
 #include "ui/latency/fixed_point.h"
-#include "ui/latency/histograms_test_common.h"
+#include "ui/latency/frame_metrics_test_common.h"
 
 namespace ui {
 namespace frame_metrics {
diff --git a/ui/latency/stream_analyzer.cc b/ui/latency/stream_analyzer.cc
new file mode 100644
index 0000000..93458abd
--- /dev/null
+++ b/ui/latency/stream_analyzer.cc
@@ -0,0 +1,186 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "ui/latency/stream_analyzer.h"
+
+namespace ui {
+namespace frame_metrics {
+
+StreamAnalyzer::StreamAnalyzer(
+    const StreamAnalyzerClient* client,
+    const SharedWindowedAnalyzerClient* shared_client,
+    std::vector<uint32_t> thresholds,
+    std::unique_ptr<Histogram> histogram)
+    : client_(client),
+      histogram_(std::move(histogram)),
+      windowed_analyzer_(client, shared_client) {
+  thresholds_.reserve(thresholds.size());
+  for (const uint32_t& t : thresholds)
+    thresholds_.emplace_back(t);
+}
+
+StreamAnalyzer::~StreamAnalyzer() = default;
+
+void StreamAnalyzer::Reset() {
+  StartNewReportPeriod();
+  windowed_analyzer_.ResetHistory();
+}
+
+void StreamAnalyzer::StartNewReportPeriod() {
+  histogram_->Reset();
+  windowed_analyzer_.ResetWorstValues();
+  for (auto& t : thresholds_)
+    t.ResetAccumulators();
+
+  total_weight_ = 0;
+  accumulator_ = 0;
+  root_accumulator_ = 0;
+  square_accumulator_ = Accumulator96b();
+}
+
+void StreamAnalyzer::AddSample(const uint32_t value, const uint32_t weight) {
+  DCHECK_GT(weight, 0u);
+
+  uint64_t weighted_value = static_cast<uint64_t>(weight) * value;
+  uint64_t weighted_root = weight * std::sqrt(static_cast<double>(value) *
+                                              kFixedPointRootMultiplier);
+  Accumulator96b weighted_square(value, weight);
+
+  // Verify overflow isn't an issue.
+  // square_accumulator_ has DCHECKs internally, so we don't worry about
+  // checking that here.
+  DCHECK_LT(weighted_value,
+            std::numeric_limits<decltype(accumulator_)>::max() - accumulator_);
+  DCHECK_LT(weighted_root,
+            std::numeric_limits<decltype(root_accumulator_)>::max() -
+                root_accumulator_);
+  DCHECK_LT(weight, std::numeric_limits<decltype(total_weight_)>::max() -
+                        total_weight_);
+
+  histogram_->AddSample(value, weight);
+  windowed_analyzer_.AddSample(value, weight, weighted_value, weighted_root,
+                               weighted_square);
+
+  for (auto& t : thresholds_) {
+    if (value >= t.threshold)
+      t.ge_weight += weight;
+    else
+      t.lt_weight += weight;
+  }
+
+  total_weight_ += weight;
+  accumulator_ += weighted_value;
+  root_accumulator_ += weighted_root;
+  square_accumulator_.Add(weighted_square);
+}
+
+double StreamAnalyzer::ComputeMean() const {
+  double result = static_cast<double>(accumulator_) / total_weight_;
+  return client_->TransformResult(result);
+}
+
+double StreamAnalyzer::ComputeRMS() const {
+  double mean_square = square_accumulator_.ToDouble() / total_weight_;
+  double result = std::sqrt(mean_square);
+  return client_->TransformResult(result);
+}
+
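+// The accumulated roots in root_accumulator_ carry an extra factor of
+// sqrt(kFixedPointRootMultiplier) (see AddSample), so squaring the mean root
+// scales the result by kFixedPointRootMultiplier; divide that back out to
+// recover the squared-mean-root in the original units.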
+double StreamAnalyzer::ComputeSMR() const {
+  double mean_root = static_cast<double>(root_accumulator_) / total_weight_;
+  double result = (mean_root * mean_root) / kFixedPointRootMultiplier;
+  return client_->TransformResult(result);
+}
+
+double StreamAnalyzer::VarianceHelper(double accum, double square_accum) const {
+  double mean = accum / total_weight_;
+  double mean_squared = mean * mean;
+  double mean_square = square_accum / total_weight_;
+  double variance = mean_square - mean_squared;
+  // Computing the variance as the mean of squares minus the squared mean is
+  // not numerically stable when the variance is tiny relative to the mean;
+  // rounding can even make the result negative. Clamp it to 0.
+  return std::max(0.0, variance);
+}
+
+double StreamAnalyzer::ComputeStdDev() const {
+  double variance =
+      VarianceHelper(accumulator_, square_accumulator_.ToDouble());
+  double std_dev = std::sqrt(variance);
+  return client_->TransformResult(std_dev);
+}
+
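+// Dividing by kFixedPointRootMultiplierSqrt removes the fixed-point scale
+// from the accumulated roots, so VarianceHelper effectively sees
+// Sum(weight * sqrt(value)) alongside Sum(weight * value) and computes
+// E[value] - E[sqrt(value)]^2.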
+double StreamAnalyzer::ComputeVarianceOfRoots() const {
+  double normalized_root =
+      static_cast<double>(root_accumulator_) / kFixedPointRootMultiplierSqrt;
+  double variance = VarianceHelper(normalized_root, accumulator_);
+  return client_->TransformResult(variance);
+}
+
+void StreamAnalyzer::ThresholdState::ResetAccumulators() {
+  ge_weight = 0;
+  lt_weight = 0;
+}
+
+std::vector<ThresholdResult> StreamAnalyzer::ComputeThresholds() const {
+  std::vector<ThresholdResult> results;
+  results.reserve(thresholds_.size());
+  for (const auto& t : thresholds_) {
+    double threshold = client_->TransformResult(t.threshold);
+    double ge_fraction =
+        static_cast<double>(t.ge_weight) / (t.ge_weight + t.lt_weight);
+    results.push_back({threshold, ge_fraction});
+  }
+  return results;
+}
+
+PercentileResults StreamAnalyzer::ComputePercentiles() const {
+  PercentileResults result;
+  result = histogram_->CalculatePercentiles();
+  for (size_t i = 0; i < PercentileResults::kCount; i++) {
+    result.values[i] = client_->TransformResult(result.values[i]);
+  }
+  return result;
+}
+
+std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
+StreamAnalyzer::AsValue() const {
+  auto state = std::make_unique<base::trace_event::TracedValue>();
+  AsValueInto(state.get());
+  return std::move(state);
+}
+
+void StreamAnalyzer::AsValueInto(base::trace_event::TracedValue* state) const {
+  state->SetDouble("mean", ComputeMean());
+
+  state->SetDouble("rms", ComputeRMS());
+  state->SetDouble("smr", ComputeSMR());
+
+  state->SetDouble("std_dev", ComputeStdDev());
+  state->SetDouble("variance_of_roots", ComputeVarianceOfRoots());
+
+  state->BeginArray("percentiles");
+  PercentileResults result = ComputePercentiles();
+  for (size_t i = 0; i < PercentileResults::kCount; i++) {
+    state->BeginArray();
+    state->AppendDouble(PercentileResults::kPercentiles[i]);
+    state->AppendDouble(result.values[i]);
+    state->EndArray();
+  }
+  state->EndArray();
+
+  state->BeginArray("thresholds");
+  std::vector<ThresholdResult> thresholds(ComputeThresholds());
+  for (const auto& t : thresholds) {
+    state->BeginArray();
+    state->AppendDouble(t.threshold);
+    state->AppendDouble(t.ge_fraction);
+    state->EndArray();
+  }
+  state->EndArray();
+
+  windowed_analyzer_.AsValueInto(state);
+}
+
+}  // namespace frame_metrics
+}  // namespace ui
diff --git a/ui/latency/stream_analyzer.h b/ui/latency/stream_analyzer.h
new file mode 100644
index 0000000..45982fb
--- /dev/null
+++ b/ui/latency/stream_analyzer.h
@@ -0,0 +1,128 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef UI_LATENCY_STREAM_ANALYZER_H_
+#define UI_LATENCY_STREAM_ANALYZER_H_
+
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/trace_event/trace_event_argument.h"
+#include "ui/latency/fixed_point.h"
+#include "ui/latency/histograms.h"
+#include "ui/latency/windowed_analyzer.h"
+
+namespace ui {
+
+// Used to communicate fraction of time the value of a metric was greater than
+// or equal to the threshold.
+struct ThresholdResult {
+  double threshold = 0.0;
+  double ge_fraction = 0.0;
+};
+
+namespace frame_metrics {
+
+// The StreamAnalyzerClient interface is currently the same as
+// WindowedAnalyzerClient and can rely on the same implementation.
+using StreamAnalyzerClient = WindowedAnalyzerClient;
+
+// Tracks the overall mean, RMS, and SMR for a metric and also owns
+// the Histogram and WindowedAnalyzer.
+class StreamAnalyzer {
+ public:
+  StreamAnalyzer(const StreamAnalyzerClient* client,
+                 const SharedWindowedAnalyzerClient* shared_client,
+                 std::vector<uint32_t> thresholds,
+                 std::unique_ptr<Histogram> histogram);
+  ~StreamAnalyzer();
+
+  // Resets all statistics and history.
+  void Reset();
+
+  // Resets the statistics without throwing away recent sample history in the
+  // WindowedAnalyzer.
+  void StartNewReportPeriod();
+
+  // To play well with the histogram range, |value| should be within the
+  // range [0,64000000]. If the units are microseconds, that's 64 seconds.
+  // Otherwise, the histogram will clip the result.
+  // |weight| may be the duration the frame was active in microseconds
+  //          or it may be 1 in case every frame is to be weighed equally.
+  void AddSample(const uint32_t value, const uint32_t weight);
+
+  // The mean, root-mean-squared, and squared-mean-root of all samples
+  // received since the last call to StartNewReportPeriod().
+  // The units are the same as the values added in AddSample().
+  double ComputeMean() const;
+  double ComputeRMS() const;
+  double ComputeSMR() const;
+
+  // StdDev calculates the standard deviation of all values in the stream.
+  // The units are the same as the values added in AddSample().
+  // The work to track this is the same as RMS, so we effectively get this for
+  // free. Given two of the Mean, RMS, and StdDev, we can calculate the third.
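+  // (In untransformed units, StdDev^2 = RMS^2 - Mean^2.)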
+  double ComputeStdDev() const;
+
+  // VarianceOfRoots calculates the variance of all square roots of values.
+  // The units end up being the same as the values added in AddSample().
+  // The work to track this is the same as SMR.
+  // Given two of the Mean, SMR, and VarianceOfRoots, we can calculate the
+  // third. Note: We don't track something like RootStdDevOfSquares since it
+  // would be difficult to track values raised to the fourth power.
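+  // (In untransformed units, VarianceOfRoots = Mean - SMR.)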
+  // TODO(brianderson): Remove VarianceOfRoots if it's not useful.
+  double ComputeVarianceOfRoots() const;
+
+  // Thresholds returns a percentile for threshold values given to the
+  // constructor. This is useful for tracking improvements in really good
+  // sources, but its dynamic range is limited, which prevents it from
+  // detecting improvements in sources where most of the frames are "bad".
+  std::vector<ThresholdResult> ComputeThresholds() const;
+
+  // ComputePercentiles returns a value for certain percentiles.
+  // It is only an estimate, since the values are calculated from a histogram
+  // rather than from the entire history of actual values.
+  // This is useful for tracking improvements even in really bad sources
+  // since its dynamic range includes all possible values.
+  PercentileResults ComputePercentiles() const;
+
+  // Expose the WindowedAnalyzer as const to make its accessors
+  // available directly.
+  const WindowedAnalyzer& window() const { return windowed_analyzer_; }
+
+  std::unique_ptr<base::trace_event::ConvertableToTraceFormat> AsValue() const;
+  void AsValueInto(base::trace_event::TracedValue* state) const;
+
+ protected:
+  double VarianceHelper(double accum, double square_accum) const;
+
+  struct ThresholdState {
+    explicit ThresholdState(uint32_t value) : threshold(value) {}
+    void ResetAccumulators();
+
+    uint32_t threshold;
+    uint32_t ge_weight = 0;
+    uint32_t lt_weight = 0;
+  };
+
+  const StreamAnalyzerClient* const client_;
+
+  std::vector<ThresholdState> thresholds_;
+  std::unique_ptr<Histogram> histogram_;
+  WindowedAnalyzer windowed_analyzer_;
+
+  uint64_t total_weight_ = 0;
+  uint64_t accumulator_ = 0;
+  uint64_t root_accumulator_ = 0;
+  Accumulator96b square_accumulator_;
+
+  DISALLOW_COPY_AND_ASSIGN(StreamAnalyzer);
+};
+
+}  // namespace frame_metrics
+}  // namespace ui
+
+#endif  // UI_LATENCY_STREAM_ANALYZER_H_
diff --git a/ui/latency/stream_analyzer_unittest.cc b/ui/latency/stream_analyzer_unittest.cc
new file mode 100644
index 0000000..20af1ba
--- /dev/null
+++ b/ui/latency/stream_analyzer_unittest.cc
@@ -0,0 +1,312 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "ui/latency/stream_analyzer.h"
+
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#include "ui/latency/frame_metrics_test_common.h"
+
+namespace ui {
+namespace frame_metrics {
+namespace {
+
+class StreamAnalyzerTest : public testing::Test {
+ public:
+  StreamAnalyzerTest() { NewAnalyzer(10, {2, 7, 10}); }
+
+  void SetUp() override {}
+
+  StreamAnalyzer* analyzer() { return analyzer_.get(); }
+
+  void NewAnalyzer(size_t window_size, std::vector<uint32_t> thresholds) {
+    shared_client_.max_window_size = window_size;
+    for (auto& t : thresholds) {
+      t *= kFixedPointMultiplier;
+    }
+    thresholds_ = std::move(thresholds);
+    std::unique_ptr<TestHistogram> histogram =
+        std::make_unique<TestHistogram>();
+    histogram_ = histogram.get();
+    analyzer_ = std::make_unique<StreamAnalyzer>(
+        &client_, &shared_client_, thresholds_, std::move(histogram));
+  }
+
+ protected:
+  size_t window_size;
+  TestStreamAnalyzerClient client_;
+  SharedWindowedAnalyzerClient shared_client_;
+  std::vector<uint32_t> thresholds_;
+  TestHistogram* histogram_;
+  std::unique_ptr<StreamAnalyzer> analyzer_;
+};
+
+TEST_F(StreamAnalyzerTest, AllResultsTheSame) {
+  // Try adding a single sample vs. multiple samples.
+  for (size_t samples : {1u, 100u}) {
+    // A power of 2 sweep for both the value and weight dimensions.
+    for (uint64_t value = 1; value < 0x100000000ULL; value *= 2) {
+      // Adding too many samples can result in overflow when multiplied by the
+      // weight. Divide by samples to avoid overflow.
+      for (uint64_t weight = 1; weight < 0x100000000ULL / samples;
+           weight *= 2) {
+        analyzer()->Reset();
+        AddSamplesHelper(analyzer(), value, weight, samples);
+        uint64_t expected_value =
+            value * TestStreamAnalyzerClient::result_scale;
+        EXPECT_EQ(expected_value, analyzer_->ComputeMean());
+        EXPECT_EQ(expected_value, analyzer_->ComputeRMS());
+        EXPECT_NEAR_SMR(analyzer_->ComputeSMR(), expected_value, weight);
+        EXPECT_DOUBLE_EQ(0, analyzer_->ComputeStdDev());
+        EXPECT_NEAR_VARIANCE_OF_ROOT(0, analyzer_->ComputeVarianceOfRoots(),
+                                     expected_value, weight);
+
+        // Verify values are forwarded to the WindowedAnalyzer.
+        EXPECT_EQ(expected_value, analyzer_->window().WorstMean().value);
+        EXPECT_EQ(expected_value, analyzer_->window().WorstRMS().value);
+        EXPECT_NEAR_SMR(expected_value, analyzer_->window().WorstSMR().value,
+                        weight);
+      }
+    }
+  }
+
+  // All min/max combinations of value and weight.
+  for (uint64_t value : {0u, 0xFFFFFFFFu}) {
+    for (uint64_t weight : {1u, 0xFFFFFFFFu}) {
+      const size_t kSamplesToAdd = weight == 1 ? 100 : 1;
+      analyzer()->Reset();
+      AddSamplesHelper(analyzer(), value, weight, kSamplesToAdd);
+
+      // TestWindowedAnalyzerClient scales the result by 2.
+      uint64_t expected_value = value * TestStreamAnalyzerClient::result_scale;
+      // Makes sure our precision is good enough.
+      EXPECT_EQ(expected_value, analyzer_->ComputeMean());
+      EXPECT_EQ(expected_value, analyzer_->ComputeRMS());
+      EXPECT_NEAR_SMR(expected_value, analyzer_->ComputeSMR(), weight);
+      EXPECT_DOUBLE_EQ(0, analyzer_->ComputeStdDev());
+      EXPECT_NEAR_VARIANCE_OF_ROOT(0, analyzer_->ComputeVarianceOfRoots(),
+                                   expected_value, weight);
+
+      // Verify values are forwarded to the WindowedAnalyzer.
+      EXPECT_EQ(expected_value, analyzer_->window().WorstMean().value);
+      EXPECT_EQ(expected_value, analyzer_->window().WorstRMS().value);
+      EXPECT_NEAR_SMR(expected_value, analyzer_->window().WorstSMR().value,
+                      weight);
+    }
+  }
+}
+
+// This applies a pattern of 2 values that are easy to calculate the expected
+// results for. It verifies the mean, rms, smr, standard deviation,
+// variance of the roots, and thresholds are calculated properly.
+// This doesn't check histogram or windowed analyzer related values since they
+// are tested separately and other unit tests verify their interactions
+// with StreamAnalyzer.
+TEST_F(StreamAnalyzerTest, AllResultsDifferent) {
+  const uint32_t kSampleWeight = 100;
+
+  const std::vector<uint32_t> pattern49 = {4, 9, 4, 9, 4, 9};
+  const std::vector<uint32_t> pattern4 = {4, 4, 4, 4, 4, 4};
+  const std::vector<uint32_t> pattern9 = {9, 9, 9, 9, 9, 9};
+
+  // Calculate the expected values for an equal number of 4's and 9's.
+  const double expected_mean = (4 + 9) * .5 * kFixedPointMultiplier *
+                               TestStreamAnalyzerClient::result_scale;
+  const double expected_rms = std::sqrt((16 + 81) * .5) *
+                              kFixedPointMultiplier *
+                              TestStreamAnalyzerClient::result_scale;
+  const double mean_root = (2 + 3) * .5;
+  const double expected_smr = mean_root * mean_root * kFixedPointMultiplier *
+                              TestStreamAnalyzerClient::result_scale;
+  const double expected_std_dev = (9 - 4) * .5 * kFixedPointMultiplier *
+                                  TestStreamAnalyzerClient::result_scale;
+  const double std_dev_of_roots = (3 - 2) * .5;
+  const double expected_variance_of_roots =
+      std_dev_of_roots * std_dev_of_roots * kFixedPointMultiplier *
+      TestStreamAnalyzerClient::result_scale;
+
+  std::vector<ThresholdResult> thresholds;
+
+  // Alternate 4 and 9.
+  for (size_t i = 0; i < 1000; i++) {
+    AddPatternHelper(&shared_client_, analyzer(), pattern49, kSampleWeight);
+    EXPECT_DOUBLE_EQ(expected_mean, analyzer_->ComputeMean());
+    EXPECT_NEAR_SMR(expected_smr, analyzer_->ComputeSMR(), kSampleWeight);
+    EXPECT_DOUBLE_EQ(expected_rms, analyzer_->ComputeRMS());
+    EXPECT_DOUBLE_EQ(expected_std_dev, analyzer_->ComputeStdDev());
+    EXPECT_DOUBLE_EQ(expected_variance_of_roots,
+                     analyzer_->ComputeVarianceOfRoots());
+  }
+  thresholds = analyzer_->ComputeThresholds();
+  ASSERT_EQ(3u, thresholds.size());
+  EXPECT_EQ(client_.TransformResult(thresholds_[0]), thresholds[0].threshold);
+  EXPECT_EQ(client_.TransformResult(thresholds_[1]), thresholds[1].threshold);
+  EXPECT_EQ(client_.TransformResult(thresholds_[2]), thresholds[2].threshold);
+  EXPECT_EQ(1.0, thresholds[0].ge_fraction);
+  EXPECT_EQ(0.5, thresholds[1].ge_fraction);
+  EXPECT_EQ(0.0, thresholds[2].ge_fraction);
+
+  // 4's then 9's.
+  analyzer()->Reset();
+  for (size_t i = 0; i < 500; i++) {
+    AddPatternHelper(&shared_client_, analyzer(), pattern4, kSampleWeight);
+  }
+  for (size_t i = 0; i < 500; i++) {
+    AddPatternHelper(&shared_client_, analyzer(), pattern9, kSampleWeight);
+  }
+  thresholds = analyzer_->ComputeThresholds();
+  EXPECT_DOUBLE_EQ(expected_mean, analyzer_->ComputeMean());
+  EXPECT_NEAR_SMR(expected_smr, analyzer_->ComputeSMR(), kSampleWeight);
+  EXPECT_DOUBLE_EQ(expected_rms, analyzer_->ComputeRMS());
+  EXPECT_DOUBLE_EQ(expected_std_dev, analyzer_->ComputeStdDev());
+  EXPECT_DOUBLE_EQ(expected_variance_of_roots,
+                   analyzer_->ComputeVarianceOfRoots());
+  EXPECT_EQ(client_.TransformResult(thresholds_[0]), thresholds[0].threshold);
+  EXPECT_EQ(client_.TransformResult(thresholds_[1]), thresholds[1].threshold);
+  EXPECT_EQ(client_.TransformResult(thresholds_[2]), thresholds[2].threshold);
+  EXPECT_EQ(1.0, thresholds[0].ge_fraction);
+  EXPECT_EQ(0.5, thresholds[1].ge_fraction);
+  EXPECT_EQ(0.0, thresholds[2].ge_fraction);
+
+  // 9's then 4's.
+  analyzer()->Reset();
+  for (size_t i = 0; i < 500; i++) {
+    AddPatternHelper(&shared_client_, analyzer(), pattern9, kSampleWeight);
+  }
+  for (size_t i = 0; i < 500; i++) {
+    AddPatternHelper(&shared_client_, analyzer(), pattern4, kSampleWeight);
+  }
+  thresholds = analyzer_->ComputeThresholds();
+  EXPECT_DOUBLE_EQ(expected_mean, analyzer_->ComputeMean());
+  EXPECT_NEAR_SMR(expected_smr, analyzer_->ComputeSMR(), kSampleWeight);
+  EXPECT_DOUBLE_EQ(expected_rms, analyzer_->ComputeRMS());
+  EXPECT_DOUBLE_EQ(expected_std_dev, analyzer_->ComputeStdDev());
+  EXPECT_DOUBLE_EQ(expected_variance_of_roots,
+                   analyzer_->ComputeVarianceOfRoots());
+  EXPECT_EQ(client_.TransformResult(thresholds_[0]), thresholds[0].threshold);
+  EXPECT_EQ(client_.TransformResult(thresholds_[1]), thresholds[1].threshold);
+  EXPECT_EQ(client_.TransformResult(thresholds_[2]), thresholds[2].threshold);
+  EXPECT_EQ(1.0, thresholds[0].ge_fraction);
+  EXPECT_EQ(0.5, thresholds[1].ge_fraction);
+  EXPECT_EQ(0.0, thresholds[2].ge_fraction);
+}
+
+TEST_F(StreamAnalyzerTest, SamplesForwardedToHistogram) {
+  const uint32_t kSampleWeight = 123;
+  const std::vector<uint32_t> pattern = {4, 9, 16, 25, 36, 49};
+  AddPatternHelper(&shared_client_, analyzer(), pattern, kSampleWeight);
+  std::vector<TestHistogram::ValueWeightPair> samples(
+      histogram_->GetAndResetAllAddedSamples());
+  ASSERT_EQ(pattern.size(), samples.size());
+  for (size_t i = 0; i < samples.size(); i++) {
+    EXPECT_EQ(pattern[i] * kFixedPointMultiplier, samples[i].value);
+    EXPECT_EQ(kSampleWeight, samples[i].weight);
+  }
+}
+
+TEST_F(StreamAnalyzerTest, PercentilesModifiedByClient) {
+  double result0 = 7;
+  double result1 = 11;
+  histogram_->SetResults({{result0, result1}});
+  PercentileResults results = analyzer()->ComputePercentiles();
+  EXPECT_EQ(client_.TransformResult(result0), results.values[0]);
+  EXPECT_EQ(client_.TransformResult(result1), results.values[1]);
+}
+
+// StreamAnalyzerNaive is a subset of StreamAnalyzer that uses only single
+// precision floating point accumulators and can therefore accumulate error.
+// It is used to find patterns that accumulate error here, so we can then
+// verify those patterns don't accumulate error in the actual implementation.
+struct StreamAnalyzerNaive {
+  void AddSample(uint32_t value,
+                 uint32_t weight,
+                 uint64_t weighted_value,
+                 uint64_t weighted_root,
+                 const Accumulator96b& weighted_square) {
+    accumulator_ += static_cast<double>(weight) * value;
+    root_accumulator_ += static_cast<double>(weight) * std::sqrt(value);
+    square_accumulator_ += static_cast<double>(weight) * value * value;
+    total_weight_ += weight;
+  }
+
+  double ComputeMean() {
+    return client_.TransformResult(accumulator_ / total_weight_);
+  }
+  double ComputeRMS() {
+    return client_.TransformResult(
+        std::sqrt(square_accumulator_ / total_weight_));
+  }
+  double ComputeSMR() {
+    double mean_root = root_accumulator_ / total_weight_;
+    return client_.TransformResult(mean_root * mean_root);
+  }
+
+  float total_weight_ = 0;
+  float accumulator_ = 0;
+  float root_accumulator_ = 0;
+  float square_accumulator_ = 0;
+
+  TestStreamAnalyzerClient client_;
+};
+
+// Unlike the WindowedAnalyzer, there aren't patterns of input that would
+// affect the precision of our results very much with double precision floating
+// point accumulators. This is because we aren't subtracting values like the
+// WindowedAnalyzer does. Nevertheless, there can be issues if the accumulators
+// are only single precision.
+TEST_F(StreamAnalyzerTest, Precision) {
+  StreamAnalyzerNaive naive_analyzer;
+
+  uint32_t large_value = 20 * base::TimeTicks::kMicrosecondsPerSecond;
+  uint32_t large_weight = large_value;
+  size_t large_sample_count = 1;
+  AddSamplesHelper(&naive_analyzer, large_value, large_weight,
+                   large_sample_count);
+  AddSamplesHelper(analyzer(), large_value, large_weight, large_sample_count);
+
+  uint32_t small_value = 1 * base::TimeTicks::kMicrosecondsPerMillisecond;
+  uint32_t small_weight = small_value;
+  size_t small_sample_count = 60 * 60 * 60;  // 1hr of 60Hz frames.
+  AddSamplesHelper(&naive_analyzer, small_value, small_weight,
+                   small_sample_count);
+  AddSamplesHelper(analyzer(), small_value, small_weight, small_sample_count);
+
+  double total_weight = static_cast<double>(large_sample_count) * large_weight +
+                        static_cast<double>(small_sample_count) * small_weight;
+
+  double large_value_f = large_value;
+  double small_value_f = small_value;
+
+  double expected_mean = client_.TransformResult(
+      (large_value_f * large_weight +
+       small_sample_count * small_value_f * small_weight) /
+      total_weight);
+  EXPECT_ABS_LT(expected_mean * .001,
+                expected_mean - naive_analyzer.ComputeMean());
+  EXPECT_DOUBLE_EQ(expected_mean, analyzer_->ComputeMean());
+
+  double large_value_squared = large_value_f * large_value_f * large_weight;
+  double small_value_squared = small_value_f * small_value_f * small_weight;
+  double mean_square =
+      (large_value_squared + small_sample_count * small_value_squared) /
+      total_weight;
+  double expected_rms = client_.TransformResult(std::sqrt(mean_square));
+  EXPECT_ABS_LT(expected_rms * .001,
+                expected_rms - naive_analyzer.ComputeRMS());
+  EXPECT_DOUBLE_EQ(expected_rms, analyzer_->ComputeRMS());
+
+  double large_value_root = std::sqrt(large_value_f) * large_weight;
+  double small_value_root = std::sqrt(small_value_f) * small_weight;
+  double mean_root =
+      (large_value_root + small_sample_count * small_value_root) / total_weight;
+  double expected_smr = client_.TransformResult(mean_root * mean_root);
+  EXPECT_ABS_LT(expected_smr * .001,
+                expected_smr - naive_analyzer.ComputeSMR());
+  EXPECT_NEAR_SMR(expected_smr, analyzer_->ComputeSMR(), 1);
+}
+
+}  // namespace
+}  // namespace frame_metrics
+}  // namespace ui
diff --git a/ui/latency/windowed_analyzer.cc b/ui/latency/windowed_analyzer.cc
index 9473f57..50271cdd 100644
--- a/ui/latency/windowed_analyzer.cc
+++ b/ui/latency/windowed_analyzer.cc
@@ -24,6 +24,18 @@
 
 WindowedAnalyzer::~WindowedAnalyzer() = default;
 
+void WindowedAnalyzer::ResetWorstValues() {
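+  // Reset the worst windows, but not the current window history, so that we
+  // don't lose coverage at the reset boundaries.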
+  results_.reset();
+}
+
+void WindowedAnalyzer::ResetHistory() {
+  total_weight_ = 0;
+  accumulator_ = 0;
+  root_accumulator_ = 0;
+  square_accumulator_ = Accumulator96b();
+  window_queue_.resize(0);
+}
+
 void WindowedAnalyzer::AddSample(uint32_t value,
                                  uint32_t weight,
                                  uint64_t weighted_value,
@@ -130,11 +142,5 @@
   state->EndDictionary();
 }
 
-void WindowedAnalyzer::ResetWorstValues() {
-  // Reset the worst windows, but not the current window history so that we
-  // don't lose coverage at the reset boundaries.
-  results_.reset();
-}
-
 }  // namespace frame_metrics
 }  // namespace ui
diff --git a/ui/latency/windowed_analyzer.h b/ui/latency/windowed_analyzer.h
index 1c0786a..cf03264 100644
--- a/ui/latency/windowed_analyzer.h
+++ b/ui/latency/windowed_analyzer.h
@@ -69,6 +69,14 @@
                    const SharedWindowedAnalyzerClient* shared_client);
   virtual ~WindowedAnalyzer();
 
+  // ResetWorstValues only resets the memory of worst values encountered,
+  // without resetting recent sample history.
+  void ResetWorstValues();
+
+  // ResetHistory only resets recent sample history without resetting memory
+  // of the worst values ecnountered.
+  void ResetHistory();
+
   // Callers of AddSample will already have calculated weighted values to
   // track cumulative results, so just let them pass in the values here
   // rather than re-calculating them.
@@ -85,8 +93,6 @@
 
   void AsValueInto(base::trace_event::TracedValue* state) const;
 
-  void ResetWorstValues();
-
  protected:
   struct QueueEntry {
     uint32_t value = 0;
diff --git a/ui/latency/windowed_analyzer_unittest.cc b/ui/latency/windowed_analyzer_unittest.cc
index 01e32a9..7ae681d5 100644
--- a/ui/latency/windowed_analyzer_unittest.cc
+++ b/ui/latency/windowed_analyzer_unittest.cc
@@ -6,87 +6,12 @@
 
 #include "base/time/time.h"
 #include "testing/gtest/include/gtest/gtest.h"
-
-// Some convenience macros for checking expected error.
-#define EXPECT_ABS_LT(a, b) EXPECT_LT(std::abs(a), std::abs(b))
-#define EXPECT_ABS_LE(a, b) EXPECT_LE(std::abs(a), std::abs(b))
-#define EXPECT_NEAR_SMR(expected, actual, weight) \
-  EXPECT_NEAR(expected, actual, MaxErrorSMR(expected, weight))
+#include "ui/latency/frame_metrics_test_common.h"
 
 namespace ui {
 namespace frame_metrics {
 namespace {
 
-// A simple client to verify it is actually used.
-class TestWindowedAnalyzerClient : public WindowedAnalyzerClient {
- public:
-  double TransformResult(double result) const override {
-    return result * result_scale;
-  }
-
-  static constexpr double result_scale = 2.0;
-};
-
-// The WindowedAnalyzer expects the caller to give it some precomputed values,
-// even though they are redundant. Precompute them with a helper function to
-// remove boilerplate.
-template <typename AnalyzerType>
-void AddSamplesHelper(AnalyzerType* analyzer,
-                      uint64_t value,
-                      uint64_t weight,
-                      size_t iterations) {
-  DCHECK_LE(value, std::numeric_limits<uint32_t>::max());
-  DCHECK_LE(weight, std::numeric_limits<uint32_t>::max());
-  uint64_t weighted_value = weight * value;
-  uint64_t weighted_root = weight * std::sqrt(value << kFixedPointRootShift);
-  Accumulator96b weighted_square(value, weight);
-  for (size_t i = 0; i < iterations; i++) {
-    analyzer->AddSample(value, weight, weighted_value, weighted_root,
-                        weighted_square);
-  }
-}
-
-// Moves the |shared_client|'s window forward in time by 1 microsecond and
-// adds all of the elements in |values| multipled by kFixedPointMultiplier.
-template <typename AnalyzerType>
-void AddPatternHelper(SharedWindowedAnalyzerClient* shared_client,
-                      AnalyzerType* analyzer,
-                      const std::vector<uint32_t>& values,
-                      const uint32_t weight) {
-  for (auto i : values) {
-    shared_client->window_begin += base::TimeDelta::FromMicroseconds(1);
-    shared_client->window_end += base::TimeDelta::FromMicroseconds(1);
-    AddSamplesHelper(analyzer, i * kFixedPointMultiplier, weight, 1);
-  }
-}
-
-// Same as AddPatternHelper, but uses each value (+1) as its own weight.
-// The "Cubed" name comes from the fact that the squared_accumulator
-// for the RMS will effectively be a "cubed accumulator".
-template <typename AnalyzerType>
-void AddCubedPatternHelper(SharedWindowedAnalyzerClient* shared_client,
-                           AnalyzerType* analyzer,
-                           const std::vector<uint32_t>& values) {
-  for (auto i : values) {
-    shared_client->window_begin += base::TimeDelta::FromMicroseconds(1);
-    shared_client->window_end += base::TimeDelta::FromMicroseconds(1);
-    // weight is i+1 to avoid divide by zero.
-    AddSamplesHelper(analyzer, i, i + 1, 1);
-  }
-}
-
-// Mean and RMS can be exact for most values, however SMR loses a bit of
-// precision internally when accumulating the roots. Make sure the SMR
-// precision is at least within .5 (i.e. rounded to the nearest integer
-// properly), or 8 decimal places if that is less precise.
-// When used with kFixedPointMultiplier, this gives us a total precision of
-// between ~5 and ~13 decimal places.
-// The precicion should be even better when the sample's |weight| > 1 since
-// the implementation should only do any rounding after scaling by weight.
-double MaxErrorSMR(double expected_value, uint64_t weight) {
-  return std::max(.5, 1e-8 * expected_value / weight);
-}
-
 // Verify that the worst values for Mean, SMR, and RMS are all the same if
 // every value added is the same. Makes for a nice sanity check.
 TEST(FrameMetricsWindowedAnalyzerTest, AllResultsTheSame) {
@@ -393,9 +318,10 @@
 }
 
 // WindowedAnalyzerNaive is a version of WindowedAnalyzer that doesn't use
-// fixed point math and can accumulate error. This is used to verify patterns
-// that accumulate error without fixed point math, so we can then verify those
-// patterns don't result in acculated error in the actual implementation.
+// fixed point math and can accumulate error, even with double precision
+// accumulators. This is used to verify patterns that accumulate error without
+// fixed point math, so we can then verify those patterns don't result in
+// accumulated error in the actual implementation.
 class WindowedAnalyzerNaive {
  public:
   WindowedAnalyzerNaive(size_t max_window_size)