Automated commit: libchrome r1001756 uprev

Merge with upstream commit r28e048a54f729d1896ab0896c8578a2afe2ffeeb.
* Set USE_RUNTIME_VLOG to 1 in base/logging_buildflags.h.
  r1000043 (crrev.com/c/3334864) added the buildflag. If it is set to 0,
  the vlog level cannot be changed at runtime, which breaks unit tests
  (e.g. in authpolicy and vpn-manager) that exercise behavior dependent
  on changing the vlog level.
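
  A sketch of what the buildflag gates, using the flag name above and
  Chromium's BUILDFLAG machinery (illustrative only; the actual VLOG
  plumbing lives in base/logging.h):

    #include "base/logging_buildflags.h"

    #if BUILDFLAG(USE_RUNTIME_VLOG)
    // Verbosity (--v / --vmodule) may be changed while the process runs;
    // the authpolicy and vpn-manager unit tests rely on this.
    #else
    // The vlog level is fixed at build time; runtime updates are ignored.
    #endif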

BUG=b:238297537
TEST=CQ

Change-Id: I5ad057dbe75f2cd7b630bd0e27e941b1d1077a20
diff --git a/BASE_VER b/BASE_VER
index 4d3434d..01502f5 100644
--- a/BASE_VER
+++ b/BASE_VER
@@ -1 +1 @@
-999946
+1001756
diff --git a/base/allocator/allocator_shim_default_dispatch_to_partition_alloc.cc b/base/allocator/allocator_shim_default_dispatch_to_partition_alloc.cc
index 719c393..74767d1 100644
--- a/base/allocator/allocator_shim_default_dispatch_to_partition_alloc.cc
+++ b/base/allocator/allocator_shim_default_dispatch_to_partition_alloc.cc
@@ -95,7 +95,17 @@
   T* GetSlowPath();
 
   std::atomic<T*> instance_;
+  // Before C++20, having an initializer here causes a "variable does not have a
+  // constant initializer" error.  In C++20, omitting it causes a similar error.
+  // Presumably this is due to the C++20 changes that made atomic
+  // initialization (of the other members of this class) sane, hence the
+  // feature-test guard.
+#if !defined(__cpp_lib_atomic_value_initialization) || \
+    __cpp_lib_atomic_value_initialization < 201911L
   alignas(T) uint8_t instance_buffer_[sizeof(T)];
+#else
+  alignas(T) uint8_t instance_buffer_[sizeof(T)] = {0};
+#endif
   std::atomic<bool> initialization_lock_;
 };
 
@@ -241,13 +251,13 @@
   // Note that all "AlignedFree()" variants (_aligned_free() on Windows for
   // instance) directly call PartitionFree(), so there is no risk of
   // mismatch. (see below the default_dispatch definition).
-  if (alignment <= base::kAlignment) {
+  if (alignment <= partition_alloc::internal::kAlignment) {
     // This is mandated by |posix_memalign()| and friends, so should never fire.
     PA_CHECK(base::bits::IsPowerOfTwo(alignment));
     // TODO(bartekn): See if the compiler optimizes branches down the stack on
     // Mac, where PartitionPageSize() isn't constexpr.
-    return Allocator()->AllocWithFlagsNoHooks(0, size,
-                                              base::PartitionPageSize());
+    return Allocator()->AllocWithFlagsNoHooks(
+        0, size, partition_alloc::PartitionPageSize());
   }
 
   return AlignedAllocator()->AlignedAllocWithFlags(
@@ -286,7 +296,8 @@
 void* PartitionMalloc(const AllocatorDispatch*, size_t size, void* context) {
   ScopedDisallowAllocations guard{};
   return Allocator()->AllocWithFlagsNoHooks(
-      0 | g_alloc_flags, MaybeAdjustSize(size), PartitionPageSize());
+      0 | g_alloc_flags, MaybeAdjustSize(size),
+      partition_alloc::PartitionPageSize());
 }
 
 void* PartitionMallocUnchecked(const AllocatorDispatch*,
@@ -295,7 +306,7 @@
   ScopedDisallowAllocations guard{};
   return Allocator()->AllocWithFlagsNoHooks(
       partition_alloc::AllocFlags::kReturnNull | g_alloc_flags,
-      MaybeAdjustSize(size), PartitionPageSize());
+      MaybeAdjustSize(size), partition_alloc::PartitionPageSize());
 }
 
 void* PartitionCalloc(const AllocatorDispatch*,
@@ -306,7 +317,7 @@
   const size_t total = base::CheckMul(n, MaybeAdjustSize(size)).ValueOrDie();
   return Allocator()->AllocWithFlagsNoHooks(
       partition_alloc::AllocFlags::kZeroFill | g_alloc_flags, total,
-      PartitionPageSize());
+      partition_alloc::PartitionPageSize());
 }
 
 void* PartitionMemalign(const AllocatorDispatch*,
@@ -575,7 +586,7 @@
     }
     PA_DCHECK(!enable_brp);
     PA_DCHECK(!use_dedicated_aligned_partition);
-    PA_DCHECK(!current_root->with_thread_cache);
+    PA_DCHECK(!current_root->flags.with_thread_cache);
     return;
   }
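The feature-test guard above keys off C++20's atomic value-initialization
change (P0883, __cpp_lib_atomic_value_initialization == 201911L). A
stripped-down sketch of the same pattern, with a stand-in class:

    #include <atomic>
    #include <cstdint>

    template <typename T>
    struct SingletonSlot {  // stand-in for the class in the hunk above
      std::atomic<T*> instance_{nullptr};
    #if !defined(__cpp_lib_atomic_value_initialization) || \
        __cpp_lib_atomic_value_initialization < 201911L
      // Pre-C++20: an initializer here fails constant-initializer checks.
      alignas(T) uint8_t instance_buffer_[sizeof(T)];
    #else
      // C++20: omitting the initializer triggers the symmetric error.
      alignas(T) uint8_t instance_buffer_[sizeof(T)] = {0};
    #endif
      std::atomic<bool> initialization_lock_{false};
    };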
 
diff --git a/base/allocator/allocator_shim_unittest.cc b/base/allocator/allocator_shim_unittest.cc
index 270cdd3..0db78d9 100644
--- a/base/allocator/allocator_shim_unittest.cc
+++ b/base/allocator/allocator_shim_unittest.cc
@@ -263,9 +263,9 @@
     // TODO(crbug.com/1077271): 64-bit iOS uses a page size that is larger than
     // SystemPageSize(), causing this test to make larger allocations, relative
     // to SystemPageSize().
-    return 6 * base::SystemPageSize();
+    return 6 * partition_alloc::internal::SystemPageSize();
 #else
-    return 2 * base::SystemPageSize();
+    return 2 * partition_alloc::internal::SystemPageSize();
 #endif
   }
 
diff --git a/base/allocator/partition_allocator/PartitionAlloc.md b/base/allocator/partition_allocator/PartitionAlloc.md
index df65931..a531f4f 100644
--- a/base/allocator/partition_allocator/PartitionAlloc.md
+++ b/base/allocator/partition_allocator/PartitionAlloc.md
@@ -25,7 +25,7 @@
 geometrically-spaced, and go all the way up to `kMaxBucketed`, which is a tad
 under 1MiB (so called *normal buckets*). There are tens of buckets, 4 between
 each power of two (except for lower sizes where buckets that aren't a multiple
-of `base::kAlignment` simply don't exist).
+of `partition_alloc::internal::kAlignment` simply don't exist).
 
 Larger allocations (>`kMaxBucketed`) are realized by direct memory mapping
 (*direct map*).
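
A hedged sketch of the spacing just described, with four geometrically-spaced
buckets per power-of-two interval (bucket generation in the real code differs
in its details):

    #include <cstddef>
    #include <cstdio>

    int main() {
      // Prints 1024, 1280, 1536, 1792, 2048, 2560, 3072, 3584.
      for (std::size_t base = 1024; base < 4096; base <<= 1)
        for (std::size_t i = 0; i < 4; ++i)
          std::printf("%zu\n", base + i * (base / 4));
      return 0;
    }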
@@ -85,7 +85,8 @@
 ### Alignment
 
 PartitionAlloc guarantees that returned pointers are aligned on
-`base::kAlignment` boundary (typically 16B on 64-bit systems, and 8B on 32-bit).
+`partition_alloc::internal::kAlignment` boundary (typically 16B on
+64-bit systems, and 8B on 32-bit).
 
 PartitionAlloc also supports higher levels of alignment, that can be requested
 via `PartitionAlloc::AlignedAllocWithFlags()` or platform-specific APIs (such as
@@ -94,7 +95,8 @@
 up the requested size to the nearest power of two, greater than or equal to the
 requested alignment. This may be wasteful, but allows taking advantage of
 natural PartitionAlloc alignment guarantees. Allocations with an alignment
-requirement greater than `base::kAlignment` are expected to be very rare.
+requirement greater than `partition_alloc::internal::kAlignment` are expected
+to be very rare.
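
A worked sketch of that round-up rule (simplified; the real computation lives
behind AlignedAllocWithFlags()):

    #include <cstddef>

    // Nearest power of two >= size, never below the requested alignment.
    constexpr std::size_t RoundUpForAlignedAlloc(std::size_t size,
                                                 std::size_t alignment) {
      std::size_t s = alignment;  // alignment is a power of two
      while (s < size)
        s <<= 1;
      return s;
    }
    static_assert(RoundUpForAlignedAlloc(48, 64) == 64, "fits one slot");
    static_assert(RoundUpForAlignedAlloc(100, 64) == 128, "rounded up");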
 
 ## PartitionAlloc-Everywhere
 
diff --git a/base/allocator/partition_allocator/extended_api.cc b/base/allocator/partition_allocator/extended_api.cc
index 8b12add..444c371 100644
--- a/base/allocator/partition_allocator/extended_api.cc
+++ b/base/allocator/partition_allocator/extended_api.cc
@@ -17,11 +17,11 @@
 void DisableThreadCacheForRootIfEnabled(ThreadSafePartitionRoot* root) {
   // Some platforms don't have a thread cache, or it could already have been
   // disabled.
-  if (!root || !root->with_thread_cache)
+  if (!root || !root->flags.with_thread_cache)
     return;
 
   ThreadCacheRegistry::Instance().PurgeAll();
-  root->with_thread_cache = false;
+  root->flags.with_thread_cache = false;
   // Doesn't destroy the thread cache object(s). For background threads, they
   // will be collected (and free cached memory) at thread destruction
   // time. For the main thread, we leak it.
@@ -31,7 +31,7 @@
     ThreadSafePartitionRoot* root) {
   if (!root)
     return;
-  root->with_thread_cache = true;
+  root->flags.with_thread_cache = true;
 }
 
 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
diff --git a/base/allocator/partition_allocator/page_allocator_constants.h b/base/allocator/partition_allocator/page_allocator_constants.h
index 0a996cf..2ab8fc1 100644
--- a/base/allocator/partition_allocator/page_allocator_constants.h
+++ b/base/allocator/partition_allocator/page_allocator_constants.h
@@ -163,21 +163,4 @@
 
 }  // namespace partition_alloc::internal
 
-namespace base {
-
-// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
-// the migration to the new namespaces gets done.
-using ::partition_alloc::internal::kPageMetadataShift;
-using ::partition_alloc::internal::kPageMetadataSize;
-using ::partition_alloc::internal::PageAllocationGranularity;
-using ::partition_alloc::internal::PageAllocationGranularityBaseMask;
-using ::partition_alloc::internal::PageAllocationGranularityOffsetMask;
-using ::partition_alloc::internal::PageAllocationGranularityShift;
-using ::partition_alloc::internal::SystemPageBaseMask;
-using ::partition_alloc::internal::SystemPageOffsetMask;
-using ::partition_alloc::internal::SystemPageShift;
-using ::partition_alloc::internal::SystemPageSize;
-
-}  // namespace base
-
 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_
diff --git a/base/allocator/partition_allocator/page_allocator_internals_posix.h b/base/allocator/partition_allocator/page_allocator_internals_posix.h
index 364b46e..2a72e81 100644
--- a/base/allocator/partition_allocator/page_allocator_internals_posix.h
+++ b/base/allocator/partition_allocator/page_allocator_internals_posix.h
@@ -14,9 +14,9 @@
 
 #include "base/allocator/partition_allocator/oom.h"
 #include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/allocator/partition_allocator/partition_alloc_base/posix/eintr_wrapper.h"
 #include "base/allocator/partition_allocator/partition_alloc_check.h"
 #include "base/dcheck_is_on.h"
-#include "base/posix/eintr_wrapper.h"
 #include "build/build_config.h"
 
 #if BUILDFLAG(IS_APPLE)
@@ -197,8 +197,8 @@
     uintptr_t address,
     size_t length,
     PageAccessibilityConfiguration accessibility) {
-  return 0 == HANDLE_EINTR(mprotect(reinterpret_cast<void*>(address), length,
-                                    GetAccessFlags(accessibility)));
+  return 0 == PA_HANDLE_EINTR(mprotect(reinterpret_cast<void*>(address), length,
+                                       GetAccessFlags(accessibility)));
 }
 
 void SetSystemPagesAccessInternal(
@@ -206,7 +206,7 @@
     size_t length,
     PageAccessibilityConfiguration accessibility) {
   int access_flags = GetAccessFlags(accessibility);
-  const int ret = HANDLE_EINTR(
+  const int ret = PA_HANDLE_EINTR(
       mprotect(reinterpret_cast<void*>(address), length, access_flags));
 
   // On Linux, man mprotect(2) states that ENOMEM is returned when (1) internal
diff --git a/base/allocator/partition_allocator/partition_alloc_base/cxx17_backports.h b/base/allocator/partition_allocator/partition_alloc_base/cxx17_backports.h
new file mode 100644
index 0000000..ecaca01
--- /dev/null
+++ b/base/allocator/partition_allocator/partition_alloc_base/cxx17_backports.h
@@ -0,0 +1,35 @@
+// Copyright 2021 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CXX17_BACKPORTS_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CXX17_BACKPORTS_H_
+
+#include <functional>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
+
+namespace partition_alloc::internal::base {
+
+// C++14 implementation of C++17's std::clamp():
+// https://en.cppreference.com/w/cpp/algorithm/clamp
+// Please note that the C++ spec makes it undefined behavior to call std::clamp
+// with a value of `lo` that compares greater than the value of `hi`. This
+// implementation uses a CHECK to enforce this as a hard restriction.
+template <typename T, typename Compare>
+constexpr const T& clamp(const T& v, const T& lo, const T& hi, Compare comp) {
+  PA_CHECK(!comp(hi, lo));
+  return comp(v, lo) ? lo : comp(hi, v) ? hi : v;
+}
+
+template <typename T>
+constexpr const T& clamp(const T& v, const T& lo, const T& hi) {
+  return base::clamp(v, lo, hi, std::less<T>{});
+}
+
+}  // namespace partition_alloc::internal::base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CXX17_BACKPORTS_H_
diff --git a/base/allocator/partition_allocator/partition_alloc_base/cxx17_backports_pa_unittest.cc b/base/allocator/partition_allocator/partition_alloc_base/cxx17_backports_pa_unittest.cc
new file mode 100644
index 0000000..16a822c
--- /dev/null
+++ b/base/allocator/partition_allocator/partition_alloc_base/cxx17_backports_pa_unittest.cc
@@ -0,0 +1,101 @@
+// Copyright 2021 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/partition_alloc_base/cxx17_backports.h"
+
+#include <array>
+#include <memory>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "base/test/gtest_util.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace partition_alloc::internal::base {
+namespace {
+
+struct OneType {
+  int some_int;
+};
+
+bool operator<(const OneType& lhs, const OneType& rhs) {
+  return lhs.some_int < rhs.some_int;
+}
+
+bool operator==(const OneType& lhs, const OneType& rhs) {
+  return lhs.some_int == rhs.some_int;
+}
+
+struct AnotherType {
+  int some_other_int;
+};
+
+bool operator==(const AnotherType& lhs, const AnotherType& rhs) {
+  return lhs.some_other_int == rhs.some_other_int;
+}
+
+TEST(Cxx17BackportTest, Clamp) {
+  EXPECT_EQ(0, base::clamp(-5, 0, 10));
+  EXPECT_EQ(0, base::clamp(0, 0, 10));
+  EXPECT_EQ(3, base::clamp(3, 0, 10));
+  EXPECT_EQ(10, base::clamp(10, 0, 10));
+  EXPECT_EQ(10, base::clamp(15, 0, 10));
+
+  EXPECT_EQ(0.0, base::clamp(-5.0, 0.0, 10.0));
+  EXPECT_EQ(0.0, base::clamp(0.0, 0.0, 10.0));
+  EXPECT_EQ(3.0, base::clamp(3.0, 0.0, 10.0));
+  EXPECT_EQ(10.0, base::clamp(10.0, 0.0, 10.0));
+  EXPECT_EQ(10.0, base::clamp(15.0, 0.0, 10.0));
+
+  EXPECT_EQ(0, base::clamp(-5, 0, 0));
+  EXPECT_EQ(0, base::clamp(0, 0, 0));
+  EXPECT_EQ(0, base::clamp(3, 0, 0));
+
+  OneType one_type_neg5{-5};
+  OneType one_type_0{0};
+  OneType one_type_3{3};
+  OneType one_type_10{10};
+  OneType one_type_15{15};
+
+  EXPECT_EQ(one_type_0, base::clamp(one_type_neg5, one_type_0, one_type_10));
+  EXPECT_EQ(one_type_0, base::clamp(one_type_0, one_type_0, one_type_10));
+  EXPECT_EQ(one_type_3, base::clamp(one_type_3, one_type_0, one_type_10));
+  EXPECT_EQ(one_type_10, base::clamp(one_type_10, one_type_0, one_type_10));
+  EXPECT_EQ(one_type_10, base::clamp(one_type_15, one_type_0, one_type_10));
+
+  AnotherType another_type_neg5{-5};
+  AnotherType another_type_0{0};
+  AnotherType another_type_3{3};
+  AnotherType another_type_10{10};
+  AnotherType another_type_15{15};
+
+  auto compare_another_type = [](const auto& lhs, const auto& rhs) {
+    return lhs.some_other_int < rhs.some_other_int;
+  };
+
+  EXPECT_EQ(another_type_0, base::clamp(another_type_neg5, another_type_0,
+                                        another_type_10, compare_another_type));
+  EXPECT_EQ(another_type_0, base::clamp(another_type_0, another_type_0,
+                                        another_type_10, compare_another_type));
+  EXPECT_EQ(another_type_3, base::clamp(another_type_3, another_type_0,
+                                        another_type_10, compare_another_type));
+  EXPECT_EQ(another_type_10,
+            base::clamp(another_type_10, another_type_0, another_type_10,
+                        compare_another_type));
+  EXPECT_EQ(another_type_10,
+            base::clamp(another_type_15, another_type_0, another_type_10,
+                        compare_another_type));
+
+  EXPECT_CHECK_DEATH(base::clamp(3, 10, 0));
+  EXPECT_CHECK_DEATH(base::clamp(3.0, 10.0, 0.0));
+  EXPECT_CHECK_DEATH(base::clamp(one_type_3, one_type_10, one_type_0));
+  EXPECT_CHECK_DEATH(base::clamp(another_type_3, another_type_10,
+                                 another_type_0, compare_another_type));
+}
+
+}  // namespace
+}  // namespace partition_alloc::internal::base
diff --git a/base/allocator/partition_allocator/partition_alloc_base/files/file_util_posix.cc b/base/allocator/partition_allocator/partition_alloc_base/files/file_util_posix.cc
index 9659202..2d002e7 100644
--- a/base/allocator/partition_allocator/partition_alloc_base/files/file_util_posix.cc
+++ b/base/allocator/partition_allocator/partition_alloc_base/files/file_util_posix.cc
@@ -4,7 +4,7 @@
 
 #include "base/allocator/partition_allocator/partition_alloc_base/files/file_util.h"
 
-#include "base/posix/eintr_wrapper.h"
+#include "base/allocator/partition_allocator/partition_alloc_base/posix/eintr_wrapper.h"
 
 namespace partition_alloc::internal::base {
 
@@ -12,7 +12,7 @@
   size_t total_read = 0;
   while (total_read < bytes) {
     ssize_t bytes_read =
-        HANDLE_EINTR(read(fd, buffer + total_read, bytes - total_read));
+        PA_HANDLE_EINTR(read(fd, buffer + total_read, bytes - total_read));
     if (bytes_read <= 0)
       break;
     total_read += bytes_read;
diff --git a/base/allocator/partition_allocator/partition_alloc_base/logging.cc b/base/allocator/partition_allocator/partition_alloc_base/logging.cc
index 9170e6e..ef6cb48 100644
--- a/base/allocator/partition_allocator/partition_alloc_base/logging.cc
+++ b/base/allocator/partition_allocator/partition_alloc_base/logging.cc
@@ -61,7 +61,7 @@
 #include <string>
 
 #include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
-#include "base/posix/eintr_wrapper.h"
+#include "base/allocator/partition_allocator/partition_alloc_base/posix/eintr_wrapper.h"
 
 #if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
 #include "base/allocator/partition_allocator/partition_alloc_base/posix/safe_strerror.h"
@@ -90,7 +90,8 @@
   size_t bytes_written = 0;
   int rv;
   while (bytes_written < length) {
-    rv = HANDLE_EINTR(write(fd, data + bytes_written, length - bytes_written));
+    rv = PA_HANDLE_EINTR(
+        write(fd, data + bytes_written, length - bytes_written));
     if (rv < 0) {
       // Give up, nothing we can do now.
       break;
@@ -266,7 +267,7 @@
     if (message_len > 0 && message[message_len - 1] != '\n') {
       int rv;
       do {
-        rv = HANDLE_EINTR(write(STDERR_FILENO, "\n", 1));
+        rv = PA_HANDLE_EINTR(write(STDERR_FILENO, "\n", 1));
         if (rv < 0) {
           // Give up, nothing we can do now.
           break;
diff --git a/base/allocator/partition_allocator/partition_alloc_base/posix/eintr_wrapper.h b/base/allocator/partition_allocator/partition_alloc_base/posix/eintr_wrapper.h
new file mode 100644
index 0000000..f15d06d
--- /dev/null
+++ b/base/allocator/partition_allocator/partition_alloc_base/posix/eintr_wrapper.h
@@ -0,0 +1,58 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This provides a wrapper around system calls which may be interrupted by a
+// signal and return EINTR. See man 7 signal.
+// To prevent long-lasting loops (which would likely be a bug, such as a signal
+// that should be masked) from going unnoticed, there is a limit after which
+// the caller will nonetheless see an EINTR in Debug builds.
+//
+// On Windows and Fuchsia, this wrapper macro does nothing because there are no
+// signals.
+//
+// Don't wrap close calls in PA_HANDLE_EINTR. Use base's IGNORE_EINTR if the
+// return value of close is significant. See http://crbug.com/269623.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_POSIX_EINTR_WRAPPER_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_POSIX_EINTR_WRAPPER_H_
+
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_POSIX)
+
+#include <errno.h>
+
+#if defined(NDEBUG)
+
+#define PA_HANDLE_EINTR(x)                                  \
+  ({                                                        \
+    decltype(x) eintr_wrapper_result;                       \
+    do {                                                    \
+      eintr_wrapper_result = (x);                           \
+    } while (eintr_wrapper_result == -1 && errno == EINTR); \
+    eintr_wrapper_result;                                   \
+  })
+
+#else
+
+#define PA_HANDLE_EINTR(x)                                   \
+  ({                                                         \
+    int eintr_wrapper_counter = 0;                           \
+    decltype(x) eintr_wrapper_result;                        \
+    do {                                                     \
+      eintr_wrapper_result = (x);                            \
+    } while (eintr_wrapper_result == -1 && errno == EINTR && \
+             eintr_wrapper_counter++ < 100);                 \
+    eintr_wrapper_result;                                    \
+  })
+
+#endif  // NDEBUG
+
+#else  // !BUILDFLAG(IS_POSIX)
+
+#define PA_HANDLE_EINTR(x) (x)
+
+#endif  // !BUILDFLAG(IS_POSIX)
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_POSIX_EINTR_WRAPPER_H_
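
Usage mirrors the call sites converted in this CL; a minimal sketch, assuming
POSIX (essentially the ReadFully loop from file_util_posix.cc above):

    #include <unistd.h>

    #include "base/allocator/partition_allocator/partition_alloc_base/posix/eintr_wrapper.h"

    // Reads |bytes| bytes unless EOF or a non-EINTR error occurs; the macro
    // transparently retries reads interrupted by a signal.
    ssize_t ReadFully(int fd, char* buffer, size_t bytes) {
      size_t total_read = 0;
      while (total_read < bytes) {
        ssize_t bytes_read =
            PA_HANDLE_EINTR(read(fd, buffer + total_read, bytes - total_read));
        if (bytes_read <= 0)
          break;
        total_read += bytes_read;
      }
      return static_cast<ssize_t>(total_read);
    }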
diff --git a/base/allocator/partition_allocator/partition_alloc_base/rand_util_posix.cc b/base/allocator/partition_allocator/partition_alloc_base/rand_util_posix.cc
index 975d701..ea8dde4 100644
--- a/base/allocator/partition_allocator/partition_alloc_base/rand_util_posix.cc
+++ b/base/allocator/partition_allocator/partition_alloc_base/rand_util_posix.cc
@@ -13,9 +13,9 @@
 
 #include "base/allocator/partition_allocator/partition_alloc_base/files/file_util.h"
 #include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"
+#include "base/allocator/partition_allocator/partition_alloc_base/posix/eintr_wrapper.h"
 #include "base/check.h"
 #include "base/compiler_specific.h"
-#include "base/posix/eintr_wrapper.h"
 #include "build/build_config.h"
 
 #if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)) && !BUILDFLAG(IS_NACL)
@@ -41,7 +41,7 @@
 // we can use a static-local variable to handle opening it on the first access.
 class URandomFd {
  public:
-  URandomFd() : fd_(HANDLE_EINTR(open("/dev/urandom", kOpenFlags))) {
+  URandomFd() : fd_(PA_HANDLE_EINTR(open("/dev/urandom", kOpenFlags))) {
     CHECK(fd_ >= 0) << "Cannot open /dev/urandom";
   }
 
@@ -72,7 +72,7 @@
   // We have to call `getrandom` via Linux Syscall Support, rather than through
   // the libc wrapper, because we might not have an up-to-date libc (e.g. on
   // some bots).
-  const ssize_t r = HANDLE_EINTR(sys_getrandom(output, output_length, 0));
+  const ssize_t r = PA_HANDLE_EINTR(sys_getrandom(output, output_length, 0));
 
   // Return success only on total success. In case errno == ENOSYS (or any other
   // error), we'll fall through to reading from urandom below.
diff --git a/base/allocator/partition_allocator/partition_alloc_constants.h b/base/allocator/partition_allocator/partition_alloc_constants.h
index c029d0c..5f374c4 100644
--- a/base/allocator/partition_allocator/partition_alloc_constants.h
+++ b/base/allocator/partition_allocator/partition_alloc_constants.h
@@ -23,7 +23,7 @@
 
 namespace partition_alloc {
 
-// Bit flag constants used at `flag` argument of PartitionRoot::AllocWithFlags,
+// Bit flag constants used as the `flag` argument of PartitionRoot::AllocWithFlags,
 // AlignedAllocWithFlags, etc.
 struct AllocFlags {
   // In order to support bit operations like `flag_a | flag_b`, the old-
@@ -31,12 +31,16 @@
   enum : int {
     kReturnNull = 1 << 0,
     kZeroFill = 1 << 1,
-    kNoHooks = 1 << 2,  // Internal only.
+    // Don't allow allocation override hooks. Override hooks are expected to
+    // check for the presence of this flag and return false if it is active.
+    kNoOverrideHooks = 1 << 2,
+    // Don't allow any hooks (override or observers).
+    kNoHooks = 1 << 3,  // Internal only.
     // If the allocation requires a "slow path" (such as allocating/committing a
     // new slot span), return nullptr instead. Note this makes all large
     // allocations return nullptr, such as direct-mapped ones, and even for
     // smaller ones, a nullptr value is common.
-    kFastPathOrReturnNull = 1 << 3,  // Internal only.
+    kFastPathOrReturnNull = 1 << 4,  // Internal only.
 
     kLastFlag = kFastPathOrReturnNull
   };
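The contract for the new kNoOverrideHooks bit, written as a hypothetical hook
(the signature here is invented for illustration; the real hook types live in
the allocator shim):

    #include <cstddef>

    #include "base/allocator/partition_allocator/partition_alloc_constants.h"

    // Hypothetical override hook: per the comment above, it must check for
    // kNoOverrideHooks and decline (return false) when the flag is set.
    bool MyAllocOverrideHook(void** out, std::size_t size, int flags) {
      if (flags & partition_alloc::AllocFlags::kNoOverrideHooks)
        return false;  // fall through to the regular allocation path
      // ... otherwise the hook may service the request and return true ...
      return false;
    }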
@@ -425,6 +429,10 @@
 
 }  // namespace internal
 
+// This function is used often enough to be worth publicizing outside the
+// `internal` namespace.
+using ::partition_alloc::internal::PartitionPageSize;
+
 }  // namespace partition_alloc
 
 namespace base {
@@ -482,7 +490,6 @@
 using ::partition_alloc::internal::PartitionPageBaseMask;
 using ::partition_alloc::internal::PartitionPageOffsetMask;
 using ::partition_alloc::internal::PartitionPageShift;
-using ::partition_alloc::internal::PartitionPageSize;
 
 }  // namespace base
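
With the using-declaration in place, callers outside PartitionAlloc can drop
the internal:: qualifier; a one-line usage sketch:

    #include <cstddef>

    #include "base/allocator/partition_allocator/partition_alloc_constants.h"

    // Publicized by the alias added above.
    const std::size_t kPaPageSize = partition_alloc::PartitionPageSize();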
 
diff --git a/base/allocator/partition_allocator/partition_alloc_forward.h b/base/allocator/partition_allocator/partition_alloc_forward.h
index f591b11..d848c6e 100644
--- a/base/allocator/partition_allocator/partition_alloc_forward.h
+++ b/base/allocator/partition_allocator/partition_alloc_forward.h
@@ -67,7 +67,6 @@
 using ::partition_alloc::PartitionRoot;
 using ::partition_alloc::PartitionStatsDumper;
 using ::partition_alloc::ThreadSafePartitionRoot;
-using ::partition_alloc::internal::kAlignment;
 
 }  // namespace base
 
diff --git a/base/allocator/partition_allocator/partition_alloc_unittest.cc b/base/allocator/partition_allocator/partition_alloc_unittest.cc
index 1ade33e..3658087 100644
--- a/base/allocator/partition_allocator/partition_alloc_unittest.cc
+++ b/base/allocator/partition_allocator/partition_alloc_unittest.cc
@@ -908,13 +908,13 @@
 
   // To make both alloc(x + 1) and alloc(x + kSmallestBucket) to allocate from
   // the same bucket, partition_alloc::internal::base::bits::AlignUp(1 + x +
-  // kExtraAllocSize, base::kAlignment)
+  // kExtraAllocSize, kAlignment)
   // == partition_alloc::internal::base::bits::AlignUp(kSmallestBucket + x +
-  // kExtraAllocSize, base::kAlignment), because slot_size is multiples of
-  // base::kAlignment. So (x + kExtraAllocSize) must be multiples of
-  // base::kAlignment. x =
+  // kExtraAllocSize, kAlignment), because slot_size is a multiple of
+  // kAlignment. So (x + kExtraAllocSize) must be a multiple of
+  // kAlignment. x =
   // partition_alloc::internal::base::bits::AlignUp(kExtraAllocSize,
-  // base::kAlignment) - kExtraAllocSize;
+  // kAlignment) - kExtraAllocSize;
   size_t base_size = partition_alloc::internal::base::bits::AlignUp(
                          kExtraAllocSize, kAlignment) -
                      kExtraAllocSize;
@@ -3221,7 +3221,7 @@
 
 #if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
     // The capacity(C) is slot size - kExtraAllocSize.
-    // Since slot size is multiples of base::kAlignment,
+    // Since slot size is a multiple of kAlignment,
     // C % kAlignment == (slot_size - kExtraAllocSize) % kAlignment.
     // C % kAlignment == (-kExtraAllocSize) % kAlignment.
     // Since kCookieSize is a multiple of kAlignment,
diff --git a/base/allocator/partition_allocator/partition_page.h b/base/allocator/partition_allocator/partition_page.h
index 7bb6be1..9a5c339 100644
--- a/base/allocator/partition_allocator/partition_page.h
+++ b/base/allocator/partition_allocator/partition_page.h
@@ -117,8 +117,9 @@
 //   booted out of the active list. If there are no suitable active slot spans
 //   found, an empty or decommitted slot spans (if one exists) will be pulled
 //   from the empty/decommitted list on to the active list.
+#pragma pack(push, 1)
 template <bool thread_safe>
-struct __attribute__((packed)) SlotSpanMetadata {
+struct SlotSpanMetadata {
  private:
   PartitionFreelistEntry* freelist_head = nullptr;
 
@@ -302,6 +303,7 @@
         empty_cache_index_(0),
         unused2_(0) {}
 };
+#pragma pack(pop)
 static_assert(sizeof(SlotSpanMetadata<ThreadSafe>) <= kPageMetadataSize,
               "SlotSpanMetadata must fit into a Page Metadata slot.");
 
@@ -324,11 +326,12 @@
 // first page of a slot span, describes that slot span. If a slot span spans
 // more than 1 page, the page metadata may contain rudimentary additional
 // information.
+// "Pack" the union so that common page metadata still fits within
+// kPageMetadataSize. (SlotSpanMetadata is also "packed".)
+#pragma pack(push, 1)
 template <bool thread_safe>
-struct __attribute__((packed)) PartitionPage {
-  // "Pack" the union so that common page metadata still fits within
-  // kPageMetadataSize. (SlotSpanMetadata is also "packed".)
-  union __attribute__((packed)) {
+struct PartitionPage {
+  union {
     SlotSpanMetadata<thread_safe> slot_span_metadata;
 
     SubsequentPageMetadata subsequent_page_metadata;
@@ -366,7 +369,7 @@
 
   ALWAYS_INLINE static PartitionPage* FromAddr(uintptr_t address);
 };
-
+#pragma pack(pop)
 static_assert(sizeof(PartitionPage<ThreadSafe>) == kPageMetadataSize,
               "PartitionPage must be able to fit in a metadata slot");
 
@@ -617,7 +620,7 @@
   PA_DCHECK((::partition_alloc::internal::UnmaskPtr(object_addr) -
              ::partition_alloc::internal::UnmaskPtr(slot_span_start)) %
                 slot_span->bucket->slot_size ==
-            root->extras_offset);
+            root->flags.extras_offset);
 #endif  // DCHECK_IS_ON()
   return slot_span;
 }
@@ -637,10 +640,10 @@
   auto* root = PartitionRoot<thread_safe>::FromSlotSpan(slot_span);
   uintptr_t shift_from_slot_start =
       (address - slot_span_start) % slot_span->bucket->slot_size;
-  PA_DCHECK(shift_from_slot_start >= root->extras_offset);
+  PA_DCHECK(shift_from_slot_start >= root->flags.extras_offset);
   // Use <= to allow an address immediately past the object.
   PA_DCHECK(shift_from_slot_start <=
-            root->extras_offset + slot_span->GetUsableSize(root));
+            root->flags.extras_offset + slot_span->GetUsableSize(root));
 #endif  // DCHECK_IS_ON()
   return slot_span;
 }
diff --git a/base/allocator/partition_allocator/partition_root.cc b/base/allocator/partition_allocator/partition_root.cc
index c15402c..97e11e8 100644
--- a/base/allocator/partition_allocator/partition_root.cc
+++ b/base/allocator/partition_allocator/partition_root.cc
@@ -631,7 +631,7 @@
   // We need to destruct the thread cache before we unreserve any of the super
   // pages below, which we currently are not doing. So, we should only call
   // this function on PartitionRoots without a thread cache.
-  PA_CHECK(!with_thread_cache);
+  PA_CHECK(!flags.with_thread_cache);
   auto pool_handle = ChoosePool();
   auto* curr = first_extent;
   while (curr != nullptr) {
@@ -674,24 +674,24 @@
     internal::PartitionAddressSpace::Init();
 #endif
 
-    allow_aligned_alloc =
+    flags.allow_aligned_alloc =
         opts.aligned_alloc == PartitionOptions::AlignedAlloc::kAllowed;
-    allow_cookie = opts.cookie == PartitionOptions::Cookie::kAllowed;
+    flags.allow_cookie = opts.cookie == PartitionOptions::Cookie::kAllowed;
 #if BUILDFLAG(USE_BACKUP_REF_PTR)
-    brp_enabled_ =
+    flags.brp_enabled_ =
         opts.backup_ref_ptr == PartitionOptions::BackupRefPtr::kEnabled;
 #else
     PA_CHECK(opts.backup_ref_ptr == PartitionOptions::BackupRefPtr::kDisabled);
 #endif
-    use_configurable_pool =
+    flags.use_configurable_pool =
         (opts.use_configurable_pool ==
          PartitionOptions::UseConfigurablePool::kIfAvailable) &&
         IsConfigurablePoolAvailable();
-    PA_DCHECK(!use_configurable_pool || IsConfigurablePoolAvailable());
+    PA_DCHECK(!flags.use_configurable_pool || IsConfigurablePoolAvailable());
 
     // brp_enabled_() is not supported in the configurable pool because
     // BRP requires objects to be in a different Pool.
-    PA_CHECK(!(use_configurable_pool && brp_enabled()));
+    PA_CHECK(!(flags.use_configurable_pool && brp_enabled()));
 
     // Ref-count messes up alignment needed for AlignedAlloc, making this
     // option incompatible. However, except in the
@@ -701,28 +701,28 @@
 #endif
 
 #if defined(PA_EXTRAS_REQUIRED)
-    extras_size = 0;
-    extras_offset = 0;
+    flags.extras_size = 0;
+    flags.extras_offset = 0;
 
-    if (allow_cookie) {
-      extras_size += internal::kPartitionCookieSizeAdjustment;
+    if (flags.allow_cookie) {
+      flags.extras_size += internal::kPartitionCookieSizeAdjustment;
     }
 
     if (brp_enabled()) {
       // TODO(tasak): In the PUT_REF_COUNT_IN_PREVIOUS_SLOT case, ref-count is
       // stored out-of-line for single-slot slot spans, so no need to
       // add/subtract its size in this case.
-      extras_size += internal::kPartitionRefCountSizeAdjustment;
-      extras_offset += internal::kPartitionRefCountOffsetAdjustment;
+      flags.extras_size += internal::kPartitionRefCountSizeAdjustment;
+      flags.extras_offset += internal::kPartitionRefCountOffsetAdjustment;
     }
 #endif  //  defined(PA_EXTRAS_REQUIRED)
 
     // Re-confirm the above PA_CHECKs, by making sure there are no
     // pre-allocation extras when AlignedAlloc is allowed. Post-allocation
     // extras are ok.
-    PA_CHECK(!allow_aligned_alloc || !extras_offset);
+    PA_CHECK(!flags.allow_aligned_alloc || !flags.extras_offset);
 
-    quarantine_mode =
+    flags.quarantine_mode =
 #if defined(PA_ALLOW_PCSCAN)
         (opts.quarantine == PartitionOptions::Quarantine::kDisallowed
              ? QuarantineMode::kAlwaysDisabled
@@ -763,10 +763,10 @@
-    with_thread_cache = false;
+    flags.with_thread_cache = false;
 #else
     ThreadCache::EnsureThreadSpecificDataInitialized();
-    with_thread_cache =
+    flags.with_thread_cache =
         (opts.thread_cache == PartitionOptions::ThreadCache::kEnabled);
 
-    if (with_thread_cache)
+    if (flags.with_thread_cache)
       ThreadCache::Init(this);
 #endif  // !defined(PA_THREAD_CACHE_SUPPORTED)
 
@@ -786,7 +786,7 @@
 template <bool thread_safe>
 PartitionRoot<thread_safe>::~PartitionRoot() {
 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-  PA_CHECK(!with_thread_cache)
+  PA_CHECK(!flags.with_thread_cache)
       << "Must not destroy a partition with a thread cache";
 #endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
 
@@ -800,7 +800,7 @@
 void PartitionRoot<thread_safe>::EnableThreadCacheIfSupported() {
 #if defined(PA_THREAD_CACHE_SUPPORTED)
   ::partition_alloc::internal::ScopedGuard guard{lock_};
-  PA_CHECK(!with_thread_cache);
+  PA_CHECK(!flags.with_thread_cache);
   // By the time we get there, there may be multiple threads created in the
   // process. Since `with_thread_cache` is accessed without a lock, it can
   // become visible to another thread before the effects of
@@ -815,7 +815,7 @@
   PA_CHECK(before == 0);
   ThreadCache::Init(this);
   thread_caches_being_constructed_.fetch_sub(1, std::memory_order_release);
-  with_thread_cache = true;
+  flags.with_thread_cache = true;
 #endif  // defined(PA_THREAD_CACHE_SUPPORTED)
 }
 
@@ -916,7 +916,7 @@
 
 #if DCHECK_IS_ON()
   // Write a new trailing cookie.
-  if (allow_cookie) {
+  if (flags.allow_cookie) {
     auto* object =
         reinterpret_cast<unsigned char*>(SlotStartToObject(slot_start));
     internal::PartitionCookieWriteValue(object +
@@ -965,7 +965,7 @@
 #if DCHECK_IS_ON()
     // Write a new trailing cookie only when it is possible to keep track
     // raw size (otherwise we wouldn't know where to look for it later).
-    if (allow_cookie) {
+    if (flags.allow_cookie) {
       internal::PartitionCookieWriteValue(
           reinterpret_cast<unsigned char*>(address) +
           slot_span->GetUsableSize(this));
@@ -1204,7 +1204,7 @@
     stats.total_active_bytes += direct_mapped_allocations_total_size;
     stats.total_active_count += num_direct_mapped_allocations;
 
-    stats.has_thread_cache = with_thread_cache;
+    stats.has_thread_cache = flags.with_thread_cache;
     if (stats.has_thread_cache) {
       ThreadCacheRegistry::Instance().DumpStats(
           true, &stats.current_thread_cache_stats);
@@ -1241,9 +1241,9 @@
 template <bool thread_safe>
 void PartitionRoot<thread_safe>::DeleteForTesting(
     PartitionRoot* partition_root) {
-  if (partition_root->with_thread_cache) {
+  if (partition_root->flags.with_thread_cache) {
     ThreadCache::SwapForTesting(nullptr);
-    partition_root->with_thread_cache = false;
+    partition_root->flags.with_thread_cache = false;
   }
 
   delete partition_root;
diff --git a/base/allocator/partition_allocator/partition_root.h b/base/allocator/partition_allocator/partition_root.h
index e6a2768..f099676 100644
--- a/base/allocator/partition_allocator/partition_root.h
+++ b/base/allocator/partition_allocator/partition_root.h
@@ -138,7 +138,7 @@
 // Options struct used to configure PartitionRoot and PartitionAllocator.
 struct PartitionOptions {
   enum class AlignedAlloc : uint8_t {
-    // By default all allocations will be aligned to `base::kAlignment`,
+    // By default all allocations will be aligned to `kAlignment`,
     // likely to be 8B or 16B depending on platforms and toolchains.
     // AlignedAlloc() allows enforcing a higher alignment.
     // This option determines whether it is supported for the partition.
@@ -219,42 +219,42 @@
     kEnabled,
   };
 
-#if !defined(PA_EXTRAS_REQUIRED)
-  // Teach the compiler that code can be optimized in builds that use no
-  // extras.
-  static inline constexpr uint32_t extras_size = 0;
-  static inline constexpr uint32_t extras_offset = 0;
-#endif  // !defined(PA_EXTRAS_REQUIRED)
+  // Flags accessed on fast paths.
+  //
+  // Careful! PartitionAlloc's performance is sensitive to its layout.  Please
+  // put the fast-path objects in the struct below, and the other ones after
+  // the union.
+  struct Flags {
+    // Defines whether objects should be quarantined for this root.
+    QuarantineMode quarantine_mode;
+
+    // Defines whether the root should be scanned.
+    ScanMode scan_mode;
+
+    bool with_thread_cache = false;
+    bool with_denser_bucket_distribution = false;
+
+    bool allow_aligned_alloc;
+    bool allow_cookie;
+#if BUILDFLAG(USE_BACKUP_REF_PTR)
+    bool brp_enabled_;
+#endif
+    bool use_configurable_pool;
+
+#if defined(PA_EXTRAS_REQUIRED)
+    uint32_t extras_size;
+    uint32_t extras_offset;
+#else
+    // Teach the compiler that code can be optimized in builds that use no
+    // extras.
+    static inline constexpr uint32_t extras_size = 0;
+    static inline constexpr uint32_t extras_offset = 0;
+#endif  // defined(PA_EXTRAS_REQUIRED)
+  };
 
   // Read-mostly flags.
   union {
-    // Flags accessed on fast paths.
-    //
-    // Careful! PartitionAlloc's performance is sensitive to its layout.  Please
-    // put the fast-path objects in the struct below, and the other ones after
-    // the union..
-    struct {
-      // Defines whether objects should be quarantined for this root.
-      QuarantineMode quarantine_mode;
-
-      // Defines whether the root should be scanned.
-      ScanMode scan_mode;
-
-      bool with_thread_cache = false;
-      bool with_denser_bucket_distribution = false;
-
-      bool allow_aligned_alloc;
-      bool allow_cookie;
-#if BUILDFLAG(USE_BACKUP_REF_PTR)
-      bool brp_enabled_;
-#endif
-      bool use_configurable_pool;
-
-#if defined(PA_EXTRAS_REQUIRED)
-      uint32_t extras_size;
-      uint32_t extras_offset;
-#endif  // defined(PA_EXTRAS_REQUIRED)
-    };
+    Flags flags;
 
     // The flags above are accessed for all (de)allocations, and are mostly
     // read-only. They should not share a cacheline with the data below, which
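
A minimal sketch of the cacheline separation the named Flags struct preserves,
assuming 64-byte lines (the real union pads with a cacheline-sized member;
names here are illustrative):

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    struct alignas(64) RootLayoutSketch {
      struct Flags {
        bool with_thread_cache = false;
        // ... other read-mostly, fast-path flags ...
      };
      union {
        Flags flags;
        uint8_t one_cacheline[64];  // keeps hot mutable data off this line
      };
      // Frequently-written data starts on the next cacheline.
      std::atomic<std::size_t> total_size_of_committed_pages{0};

      RootLayoutSketch() : flags{} {}
    };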
@@ -343,8 +343,8 @@
 #endif  // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
 
   PartitionRoot()
-      : quarantine_mode(QuarantineMode::kAlwaysDisabled),
-        scan_mode(ScanMode::kDisabled) {}
+      : flags{.quarantine_mode = QuarantineMode::kAlwaysDisabled,
+              .scan_mode = ScanMode::kDisabled} {}
   explicit PartitionRoot(PartitionOptions opts) { Init(opts); }
   ~PartitionRoot();
 
@@ -532,18 +532,18 @@
   // guaranteed to have a bucket under the new distribution when they are
   // eventually deallocated. We do not need synchronization here or below.
   void SwitchToDenserBucketDistribution() {
-    with_denser_bucket_distribution = true;
+    flags.with_denser_bucket_distribution = true;
   }
   // Switching back to the less dense bucket distribution is ok during tests.
   // At worst, we end up with deallocations that are sent to a bucket that we
   // cannot allocate from, which will not cause problems besides wasting
   // memory.
   void ResetBucketDistributionForTesting() {
-    with_denser_bucket_distribution = false;
+    flags.with_denser_bucket_distribution = false;
   }
 
   ThreadCache* thread_cache_for_testing() const {
-    return with_thread_cache ? ThreadCache::Get() : nullptr;
+    return flags.with_thread_cache ? ThreadCache::Get() : nullptr;
   }
   size_t get_total_size_of_committed_pages() const {
     return total_size_of_committed_pages.load(std::memory_order_relaxed);
@@ -565,7 +565,7 @@
   }
 
   internal::pool_handle ChoosePool() const {
-    if (use_configurable_pool) {
+    if (flags.use_configurable_pool) {
       return internal::GetConfigurablePool();
     }
 #if BUILDFLAG(USE_BACKUP_REF_PTR)
@@ -576,15 +576,15 @@
   }
 
   ALWAYS_INLINE bool IsQuarantineAllowed() const {
-    return quarantine_mode != QuarantineMode::kAlwaysDisabled;
+    return flags.quarantine_mode != QuarantineMode::kAlwaysDisabled;
   }
 
   ALWAYS_INLINE bool IsQuarantineEnabled() const {
-    return quarantine_mode == QuarantineMode::kEnabled;
+    return flags.quarantine_mode == QuarantineMode::kEnabled;
   }
 
   ALWAYS_INLINE bool ShouldQuarantine(void* object) const {
-    if (UNLIKELY(quarantine_mode != QuarantineMode::kEnabled))
+    if (UNLIKELY(flags.quarantine_mode != QuarantineMode::kEnabled))
       return false;
 #if defined(PA_HAS_MEMORY_TAGGING)
     if (UNLIKELY(quarantine_always_for_testing))
@@ -604,8 +604,8 @@
 
   ALWAYS_INLINE bool IsScanEnabled() const {
     // Enabled scan implies enabled quarantine.
-    PA_DCHECK(scan_mode != ScanMode::kEnabled || IsQuarantineEnabled());
-    return scan_mode == ScanMode::kEnabled;
+    PA_DCHECK(flags.scan_mode != ScanMode::kEnabled || IsQuarantineEnabled());
+    return flags.scan_mode == ScanMode::kEnabled;
   }
 
   static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
@@ -674,8 +674,8 @@
   // needed.
   ALWAYS_INLINE size_t AdjustSizeForExtrasAdd(size_t size) const {
     size = AdjustSize0IfNeeded(size);
-    PA_DCHECK(size + extras_size >= size);
-    return size + extras_size;
+    PA_DCHECK(size + flags.extras_size >= size);
+    return size + flags.extras_size;
   }
 
   // Adjusts the size by subtracing extras. Doesn't include the 0->1 adjustment,
@@ -683,18 +683,18 @@
   // AdjustSizeForExtrasSubtract either expect the adjustment to be included, or
   // are indifferent.
   ALWAYS_INLINE size_t AdjustSizeForExtrasSubtract(size_t size) const {
-    return size - extras_size;
+    return size - flags.extras_size;
   }
 
   ALWAYS_INLINE void* SlotStartToObject(uintptr_t slot_start) const {
     // TODO(bartekn): Move MTE tagging here.
     // TODO(bartekn): Check that |slot_start| is indeed a slot start.
-    return reinterpret_cast<void*>(slot_start + extras_offset);
+    return reinterpret_cast<void*>(slot_start + flags.extras_offset);
   }
 
   ALWAYS_INLINE uintptr_t ObjectToSlotStart(void* object) const {
     // TODO(bartekn): Move MTE untagging here.
-    return reinterpret_cast<uintptr_t>(object) - extras_offset;
+    return reinterpret_cast<uintptr_t>(object) - flags.extras_offset;
     // TODO(bartekn): Check that the result is indeed a slot start.
   }
 
@@ -714,14 +714,14 @@
 
   bool brp_enabled() const {
 #if BUILDFLAG(USE_BACKUP_REF_PTR)
-    return brp_enabled_;
+    return flags.brp_enabled_;
 #else
     return false;
 #endif
   }
 
   ALWAYS_INLINE bool uses_configurable_pool() const {
-    return use_configurable_pool;
+    return flags.use_configurable_pool;
   }
 
   // To make tests deterministic, it is necessary to uncap the amount of memory
@@ -1212,7 +1212,7 @@
   // AllocWithFlagsNoHooks().
 
 #if DCHECK_IS_ON()
-  if (allow_cookie) {
+  if (flags.allow_cookie) {
     // Verify the cookie after the allocated region.
     // If this assert fires, you probably corrupted memory.
     internal::PartitionCookieCheckValue(
@@ -1360,7 +1360,8 @@
   //
   // LIKELY: performance-sensitive partitions have a thread cache, direct-mapped
   // allocations are uncommon.
-  if (LIKELY(with_thread_cache && !IsDirectMappedBucket(slot_span->bucket))) {
+  if (LIKELY(flags.with_thread_cache &&
+             !IsDirectMappedBucket(slot_span->bucket))) {
     size_t bucket_index = slot_span->bucket - this->buckets;
     auto* thread_cache = ThreadCache::Get();
     if (LIKELY(ThreadCache::IsValid(thread_cache) &&
@@ -1670,7 +1671,7 @@
   // underneath us (between calls to |SizeToBucketIndex| during the same call),
   // which would result in an inconsistent state.
   uint16_t bucket_index =
-      SizeToBucketIndex(raw_size, with_denser_bucket_distribution);
+      SizeToBucketIndex(raw_size, this->flags.with_denser_bucket_distribution);
   size_t usable_size;
   bool is_already_zeroed = false;
   uintptr_t slot_start = 0;
@@ -1688,7 +1689,7 @@
   // thread cache will not be able to satisfy it.
   //
   // LIKELY: performance-sensitive partitions use the thread cache.
-  if (LIKELY(with_thread_cache &&
+  if (LIKELY(this->flags.with_thread_cache &&
              slot_span_alignment <= internal::PartitionPageSize())) {
     auto* tcache = ThreadCache::Get();
     // LIKELY: Typically always true, except for the very first allocation of
@@ -1782,7 +1783,7 @@
 
 #if DCHECK_IS_ON()
   // Add the cookie after the allocation.
-  if (allow_cookie) {
+  if (this->flags.allow_cookie) {
     internal::PartitionCookieWriteValue(static_cast<unsigned char*>(object) +
                                         usable_size);
   }
@@ -1871,8 +1872,8 @@
   // allocation from the beginning of the slot, thus messing up alignment.
   // Extras after the allocation are acceptable, but they have to be taken into
   // account in the request size calculation to avoid crbug.com/1185484.
-  PA_DCHECK(allow_aligned_alloc);
-  PA_DCHECK(!extras_offset);
+  PA_DCHECK(this->flags.allow_aligned_alloc);
+  PA_DCHECK(!this->flags.extras_offset);
   // This is mandated by |posix_memalign()|, so should never fire.
   PA_CHECK(partition_alloc::internal::base::bits::IsPowerOfTwo(alignment));
   // Catch unsupported alignment requests early.
@@ -1968,7 +1969,7 @@
   PA_DCHECK(PartitionRoot<thread_safe>::initialized);
   size = AdjustSizeForExtrasAdd(size);
   auto& bucket =
-      bucket_at(SizeToBucketIndex(size, with_denser_bucket_distribution));
+      bucket_at(SizeToBucketIndex(size, flags.with_denser_bucket_distribution));
   PA_DCHECK(!bucket.slot_size || bucket.slot_size >= size);
   PA_DCHECK(!(bucket.slot_size % internal::kSmallestBucket));
 
diff --git a/base/allocator/partition_allocator/starscan/pcscan_internal.cc b/base/allocator/partition_allocator/starscan/pcscan_internal.cc
index 9309f55..3b7be4e 100644
--- a/base/allocator/partition_allocator/starscan/pcscan_internal.cc
+++ b/base/allocator/partition_allocator/starscan/pcscan_internal.cc
@@ -1440,8 +1440,8 @@
       return;
     PA_CHECK(!root->IsQuarantineEnabled());
     super_pages = GetSuperPagesAndCommitStateBitmaps(*root);
-    root->scan_mode = Root::ScanMode::kEnabled;
-    root->quarantine_mode = Root::QuarantineMode::kEnabled;
+    root->flags.scan_mode = Root::ScanMode::kEnabled;
+    root->flags.quarantine_mode = Root::QuarantineMode::kEnabled;
   }
   std::lock_guard<std::mutex> lock(roots_mutex_);
   PA_DCHECK(!scannable_roots_.count(root));
@@ -1462,7 +1462,7 @@
     if (root->IsQuarantineEnabled())
       return;
     super_pages = GetSuperPagesAndCommitStateBitmaps(*root);
-    root->quarantine_mode = Root::QuarantineMode::kEnabled;
+    root->flags.quarantine_mode = Root::QuarantineMode::kEnabled;
   }
   std::lock_guard<std::mutex> lock(roots_mutex_);
   PA_DCHECK(!nonscannable_roots_.count(root));
@@ -1582,12 +1582,12 @@
   // Set all roots as non-scannable and non-quarantinable.
   for (auto& pair : scannable_roots_) {
     Root* root = pair.first;
-    root->scan_mode = Root::ScanMode::kDisabled;
-    root->quarantine_mode = Root::QuarantineMode::kDisabledByDefault;
+    root->flags.scan_mode = Root::ScanMode::kDisabled;
+    root->flags.quarantine_mode = Root::QuarantineMode::kDisabledByDefault;
   }
   for (auto& pair : nonscannable_roots_) {
     Root* root = pair.first;
-    root->quarantine_mode = Root::QuarantineMode::kDisabledByDefault;
+    root->flags.quarantine_mode = Root::QuarantineMode::kDisabledByDefault;
   }
   // Make sure to destroy maps so that on the following ReinitForTesting() call
   // the maps don't attempt to destroy the backing.
diff --git a/base/allocator/partition_allocator/starscan/pcscan_unittest.cc b/base/allocator/partition_allocator/starscan/pcscan_unittest.cc
index 92b83da..7e68ff9 100644
--- a/base/allocator/partition_allocator/starscan/pcscan_unittest.cc
+++ b/base/allocator/partition_allocator/starscan/pcscan_unittest.cc
@@ -127,8 +127,8 @@
   CHECK_EQ(0u, root.get_total_size_of_committed_pages());
 
   const size_t raw_size = root.AdjustSizeForExtrasAdd(object_size);
-  const size_t bucket_index =
-      root.SizeToBucketIndex(raw_size, root.with_denser_bucket_distribution);
+  const size_t bucket_index = root.SizeToBucketIndex(
+      raw_size, root.flags.with_denser_bucket_distribution);
   ThreadSafePartitionRoot::Bucket& bucket = root.buckets[bucket_index];
   const size_t num_slots = (bucket.get_bytes_per_span()) / bucket.slot_size;
 
diff --git a/base/allocator/partition_allocator/starscan/write_protector.cc b/base/allocator/partition_allocator/starscan/write_protector.cc
index 9d8cda5..c485f43 100644
--- a/base/allocator/partition_allocator/starscan/write_protector.cc
+++ b/base/allocator/partition_allocator/starscan/write_protector.cc
@@ -10,8 +10,8 @@
 #include "base/allocator/partition_allocator/address_pool_manager.h"
 #include "base/allocator/partition_allocator/partition_address_space.h"
 #include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
+#include "base/allocator/partition_allocator/partition_alloc_base/posix/eintr_wrapper.h"
 #include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/posix/eintr_wrapper.h"
 #include "base/threading/platform_thread.h"
 #include "build/build_config.h"
 
@@ -43,12 +43,12 @@
   while (true) {
     // Poll on the uffd descriptor for page fault events.
     pollfd pollfd{.fd = uffd, .events = POLLIN};
-    const int nready = HANDLE_EINTR(poll(&pollfd, 1, -1));
+    const int nready = PA_HANDLE_EINTR(poll(&pollfd, 1, -1));
     PA_CHECK(-1 != nready);
 
     // Get page fault info.
     uffd_msg msg;
-    const int nread = HANDLE_EINTR(read(uffd, &msg, sizeof(msg)));
+    const int nread = PA_HANDLE_EINTR(read(uffd, &msg, sizeof(msg)));
     PA_CHECK(0 != nread);
 
     // We only expect page faults.
diff --git a/base/allocator/partition_allocator/thread_cache.cc b/base/allocator/partition_allocator/thread_cache.cc
index 8ed537a..eca37a3 100644
--- a/base/allocator/partition_allocator/thread_cache.cc
+++ b/base/allocator/partition_allocator/thread_cache.cc
@@ -10,6 +10,7 @@
 #include <atomic>
 #include <cstdint>
 
+#include "base/allocator/partition_allocator/partition_alloc_base/cxx17_backports.h"
 #include "base/allocator/partition_allocator/partition_alloc_check.h"
 #include "base/allocator/partition_allocator/partition_alloc_config.h"
 #include "base/allocator/partition_allocator/partition_alloc_constants.h"
@@ -17,7 +18,6 @@
 #include "base/base_export.h"
 #include "base/callback.h"
 #include "base/compiler_specific.h"
-#include "base/cxx17_backports.h"
 #include "base/dcheck_is_on.h"
 #include "build/build_config.h"
 
@@ -61,7 +61,7 @@
   // mitigated inside the thread cache (since getting to it requires querying
   // TLS), but the PartitionRoot associated with the thread cache can be made to
   // not use the thread cache anymore.
-  g_thread_cache_root.load(std::memory_order_relaxed)->with_thread_cache =
+  g_thread_cache_root.load(std::memory_order_relaxed)->flags.with_thread_cache =
       false;
 }
 #endif
@@ -294,7 +294,7 @@
   // of cached memory cannot change between calls (since we do not purge
   // background threads, but only ask them to purge their own cache at the next
   // allocation).
-  periodic_purge_next_interval_ = base::clamp(
+  periodic_purge_next_interval_ = internal::base::clamp(
       periodic_purge_next_interval_, kMinPurgeInterval, kMaxPurgeInterval);
 
   PurgeAll();
@@ -411,8 +411,8 @@
     constexpr size_t kMinLimit = 1;
     // |PutInBucket()| is called on a full bucket, which should not overflow.
     constexpr size_t kMaxLimit = std::numeric_limits<uint8_t>::max() - 1;
-    global_limits_[index] =
-        static_cast<uint8_t>(base::clamp(value, kMinLimit, kMaxLimit));
+    global_limits_[index] = static_cast<uint8_t>(
+        internal::base::clamp(value, kMinLimit, kMaxLimit));
     PA_DCHECK(global_limits_[index] >= kMinLimit);
     PA_DCHECK(global_limits_[index] <= kMaxLimit);
   }
@@ -446,9 +446,9 @@
   size_t usable_size;
   bool already_zeroed;
 
-  auto* bucket =
-      root->buckets + PartitionRoot<internal::ThreadSafe>::SizeToBucketIndex(
-                          raw_size, root->with_denser_bucket_distribution);
+  auto* bucket = root->buckets +
+                 PartitionRoot<internal::ThreadSafe>::SizeToBucketIndex(
+                     raw_size, root->flags.with_denser_bucket_distribution);
   uintptr_t buffer = root->RawAlloc(bucket, AllocFlags::kZeroFill, raw_size,
                                     internal::PartitionPageSize(), &usable_size,
                                     &already_zeroed);
diff --git a/base/allocator/partition_allocator/thread_cache.h b/base/allocator/partition_allocator/thread_cache.h
index 4241d57..47c8507 100644
--- a/base/allocator/partition_allocator/thread_cache.h
+++ b/base/allocator/partition_allocator/thread_cache.h
@@ -506,12 +506,13 @@
   }
 
   PA_DCHECK(bucket.count != 0);
-  auto* result = bucket.freelist_head;
+  internal::PartitionFreelistEntry* result = bucket.freelist_head;
   // Passes the bucket size to |GetNext()|, so that in case of freelist
   // corruption, we know the bucket size that led to the crash, helping to
   // narrow the search for the culprit. |bucket| was touched just now, so this
   // does not introduce another cache miss.
-  auto* next = result->GetNextForThreadCache<true>(bucket.slot_size);
+  internal::PartitionFreelistEntry* next =
+      result->GetNextForThreadCache<true>(bucket.slot_size);
   PA_DCHECK(result != next);
   bucket.count--;
   PA_DCHECK(bucket.count != 0 || !next);
diff --git a/base/android/callback_android.cc b/base/android/callback_android.cc
index ea03b30..1a41ad5 100644
--- a/base/android/callback_android.cc
+++ b/base/android/callback_android.cc
@@ -27,6 +27,10 @@
   Java_Helper_onIntResultFromNative(AttachCurrentThread(), callback, arg);
 }
 
+void RunLongCallbackAndroid(const JavaRef<jobject>& callback, int64_t arg) {
+  Java_Helper_onLongResultFromNative(AttachCurrentThread(), callback, arg);
+}
+
 void RunTimeCallbackAndroid(const JavaRef<jobject>& callback, base::Time time) {
   Java_Helper_onTimeResultFromNative(AttachCurrentThread(), callback,
                                      time.ToJavaTime());
diff --git a/base/android/callback_android.h b/base/android/callback_android.h
index cf3cd58..048b8ed 100644
--- a/base/android/callback_android.h
+++ b/base/android/callback_android.h
@@ -27,6 +27,9 @@
 void BASE_EXPORT RunIntCallbackAndroid(const JavaRef<jobject>& callback,
                                        int arg);
 
+void BASE_EXPORT RunLongCallbackAndroid(const JavaRef<jobject>& callback,
+                                        int64_t arg);
+
 void BASE_EXPORT RunTimeCallbackAndroid(const JavaRef<jobject>& callback,
                                         base::Time time);
 
diff --git a/base/android/java/src/org/chromium/base/Callback.java b/base/android/java/src/org/chromium/base/Callback.java
index fde220b..cae4db5 100644
--- a/base/android/java/src/org/chromium/base/Callback.java
+++ b/base/android/java/src/org/chromium/base/Callback.java
@@ -55,6 +55,12 @@
 
         @SuppressWarnings("unchecked")
         @CalledByNative("Helper")
+        static void onLongResultFromNative(Callback callback, long result) {
+            callback.onResult(Long.valueOf(result));
+        }
+
+        @SuppressWarnings("unchecked")
+        @CalledByNative("Helper")
         static void onTimeResultFromNative(Callback callback, long result) {
             callback.onResult(Long.valueOf(result));
         }
diff --git a/base/android/java/src/org/chromium/base/metrics/TimingMetric.java b/base/android/java/src/org/chromium/base/metrics/TimingMetric.java
new file mode 100644
index 0000000..eac9e2a
--- /dev/null
+++ b/base/android/java/src/org/chromium/base/metrics/TimingMetric.java
@@ -0,0 +1,125 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.metrics;
+
+import android.os.Debug;
+import android.os.SystemClock;
+
+import androidx.annotation.IntDef;
+
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+
+/**
+ * A class to be used with try-with-resources to record the elapsed time within the try
+ * block. Measures the time elapsed between instantiation and the call to close() using the
+ * supplied time source.
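+ *
+ * <p>A minimal usage sketch ("MyFeature.Duration" and doExpensiveWork() are hypothetical
+ * placeholders):
+ * <pre>
+ * try (TimingMetric metric = TimingMetric.mediumWallTime("MyFeature.Duration")) {
+ *     doExpensiveWork();
+ * }
+ * </pre>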
+ */
+public class TimingMetric implements AutoCloseable {
+    @IntDef({TimeSource.WALL, TimeSource.THREAD})
+    @Retention(RetentionPolicy.SOURCE)
+    @interface TimeSource {
+        int WALL = 0;
+        int THREAD = 1;
+    }
+
+    @IntDef({TimeDuration.SHORT, TimeDuration.MEDIUM, TimeDuration.LONG})
+    @Retention(RetentionPolicy.SOURCE)
+    @interface TimeDuration {
+        int SHORT = 0;
+        int MEDIUM = 1;
+        int LONG = 2;
+    }
+
+    private final String mMetric;
+    private final @TimeSource int mTimeSource;
+    private final @TimeDuration int mTimeDuration;
+
+    /**
+     * When non-zero, holds the timestamp taken when this object was instantiated. A value of 0
+     * indicates a canceled or already-reported metric.
+     */
+    private long mStartMillis;
+
+    /**
+     * Create a new TimingMetric measuring wall time (i.e. time experienced by the user) of up to
+     * 3 minutes.
+     *
+     * @param name The name of the histogram to record.
+     */
+    public static TimingMetric mediumWallTime(String name) {
+        return new TimingMetric(name, TimeSource.WALL, TimeDuration.MEDIUM);
+    }
+
+    /**
+     * Create a new TimingMetric measuring thread time (i.e. actual time spent executing the code)
+     * of up to 10 seconds.
+     *
+     * @param name The name of the histogram to record.
+     */
+    public static TimingMetric shortThreadTime(String name) {
+        return new TimingMetric(name, TimeSource.THREAD, TimeDuration.SHORT);
+    }
+
+    /**
+     * Construct a new AutoCloseable time measuring metric.
+     * In most cases the user should defer to one of the static constructors to instantiate this
+     * class.
+     *
+     * @param metric The name of the histogram to record.
+     * @param timeSource The time source to use.
+     * @param timeDuration The anticipated duration for this metric.
+     */
+    /* package */ TimingMetric(
+            String metric, @TimeSource int timeSource, @TimeDuration int timeDuration) {
+        mMetric = metric;
+        mTimeSource = timeSource;
+        mTimeDuration = timeDuration;
+        mStartMillis = getCurrentTimeMillis();
+    }
+
+    @Override
+    public void close() {
+        // If the measurement has been canceled, do not record the histogram.
+        if (mStartMillis == 0) return;
+        final long measuredTime = getCurrentTimeMillis() - mStartMillis;
+        mStartMillis = 0;
+
+        switch (mTimeDuration) {
+            case TimeDuration.SHORT:
+                RecordHistogram.recordTimesHistogram(mMetric, measuredTime);
+                break;
+            case TimeDuration.MEDIUM:
+                RecordHistogram.recordMediumTimesHistogram(mMetric, measuredTime);
+                break;
+            case TimeDuration.LONG:
+                RecordHistogram.recordLongTimesHistogram(mMetric, measuredTime);
+                break;
+        }
+    }
+
+    /**
+     * Cancel the measurement.
+     */
+    public void cancel() {
+        mStartMillis = 0;
+    }
+
+    /**
+     * Query the time source associated with this metric for the current time.
+     *
+     * @return The current time expressed in milliseconds.
+     */
+    private long getCurrentTimeMillis() {
+        switch (mTimeSource) {
+            case TimeSource.WALL:
+                return SystemClock.uptimeMillis();
+            case TimeSource.THREAD:
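+                // Convert thread CPU time from nanoseconds to milliseconds.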
+                return Debug.threadCpuTimeNanos() / 1000000;
+        }
+        assert false : "unknown time source requested";
+        return 0;
+    }
+}
diff --git a/base/android/java/src/org/chromium/base/time/BaseTimerImpl.java b/base/android/java/src/org/chromium/base/time/BaseTimerImpl.java
new file mode 100644
index 0000000..93bec73
--- /dev/null
+++ b/base/android/java/src/org/chromium/base/time/BaseTimerImpl.java
@@ -0,0 +1,73 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.time;
+
+import androidx.annotation.NonNull;
+
+import org.chromium.base.supplier.Supplier;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Base implementation of {@link Timer} that uses a provided time source to track elapsed real time.
+ */
+class BaseTimerImpl implements Timer {
+    /** The granularity of the time source this timer uses. */
+    private final TimeUnit mSourceTimeUnit;
+    /** The provider of the current time, which should be monotonically non-decreasing. */
+    protected Supplier<Long> mTimeSource;
+
+    /**
+     * Starting time of the timer. This is equal to 0 if the timer has never been started, and
+     * otherwise is equal to the time at which the timer was most recently started.
+     */
+    private long mStartTime;
+    /**
+     * Stop time of the timer. This is equal to 0 if the timer is currently running or has never
+     * been stopped, and otherwise is equal to the time at which the timer was most recently
+     * stopped.
+     */
+    private long mStopTime;
+    /**
+     * Whether the timer is currently running, i.e. if start() has been called without a
+     * corresponding call to stop().
+     */
+    private boolean mIsRunning;
+
+    /**
+     * Constructs a new BaseTimerImpl.
+     * @param timeSource Provider of monotonically non-decreasing time.
+     * @param timeUnit Granularity of the provided time source.
+     */
+    public BaseTimerImpl(@NonNull Supplier<Long> timeSource, @NonNull TimeUnit timeUnit) {
+        mSourceTimeUnit = timeUnit;
+        mTimeSource = timeSource;
+    }
+
+    @Override
+    public void start() {
+        assert !mIsRunning;
+        mStopTime = 0;
+        mStartTime = mTimeSource.get();
+        mIsRunning = true;
+    }
+
+    @Override
+    public void stop() {
+        assert mIsRunning;
+        mStopTime = mTimeSource.get();
+        mIsRunning = false;
+    }
+
+    @Override
+    public long getElapsedTime(@NonNull TimeUnit timeUnit) {
+        long duration = isRunning() ? mTimeSource.get() - mStartTime : mStopTime - mStartTime;
+        return timeUnit.convert(duration, mSourceTimeUnit);
+    }
+
+    @Override
+    public boolean isRunning() {
+        return mIsRunning;
+    }
+}
diff --git a/base/android/java/src/org/chromium/base/time/CPUTimeTimer.java b/base/android/java/src/org/chromium/base/time/CPUTimeTimer.java
new file mode 100644
index 0000000..a093e46
--- /dev/null
+++ b/base/android/java/src/org/chromium/base/time/CPUTimeTimer.java
@@ -0,0 +1,20 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.time;
+
+import android.os.SystemClock;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Implementation of {@link Timer} that uses the Android SystemClock's currentThreadTimeMillis to
+ * track CPU thread time. This is typically *not* what you want for interval timing, and should only
+ * be used if you're sure what you're measuring is CPU-bound.
+ */
+public class CPUTimeTimer extends BaseTimerImpl {
+    public CPUTimeTimer() {
+        super(SystemClock::currentThreadTimeMillis, TimeUnit.MILLISECONDS);
+    }
+}
diff --git a/base/android/java/src/org/chromium/base/time/ElapsedRealTimeTimer.java b/base/android/java/src/org/chromium/base/time/ElapsedRealTimeTimer.java
new file mode 100644
index 0000000..0b9949d
--- /dev/null
+++ b/base/android/java/src/org/chromium/base/time/ElapsedRealTimeTimer.java
@@ -0,0 +1,19 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.time;
+
+import android.os.SystemClock;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Implementation of {@link Timer} that uses Android's SystemClock to track elapsed real time at
+ * an internal resolution of nanoseconds. This is a good default choice for interval timing.
+ */
+public class ElapsedRealTimeTimer extends BaseTimerImpl {
+    public ElapsedRealTimeTimer() {
+        super(SystemClock::elapsedRealtimeNanos, TimeUnit.NANOSECONDS);
+    }
+}
diff --git a/base/android/java/src/org/chromium/base/time/README.md b/base/android/java/src/org/chromium/base/time/README.md
new file mode 100644
index 0000000..067a483
--- /dev/null
+++ b/base/android/java/src/org/chromium/base/time/README.md
@@ -0,0 +1,49 @@
+# Timing and Clocks on Android
+There are several ways to measure time on Android. First, note the distinction between wall time and elapsed time.
+
+Wall time is the user-visible presentation of the current moment, typically measured relative to some fixed epoch. This value can shift, jumping backwards or forwards unpredictably, and should only be used when correspondence with real-world dates/times is crucial. If you do need this, just call System.currentTimeMillis().
+
+Elapsed time, in contrast, is a measurement of the passage of time that is guaranteed to be monotonically non-decreasing. There is one important subdivision within elapsed time, dividing the timers into categories depending on which slices of "true" time count:
+* elapsedRealTime and elapsedRealTimeNanos count everything, including deep sleep.
+* uptimeMillis excludes deep sleep, e.g. when the device's CPU is off.
+* currentThreadTimeMillis and threadCpuTimeNanos count only active CPU time in the current thread, ignoring e.g. blocking I/O.
+
+This package contains some utility classes for measuring elapsed time with these different time sources. See below for more detailed guidance on when you should use each, but ElapsedRealTimeTimer is a good default.
+
+# Scenarios
+> "I want to record a metric that measures user visible time, including things like I/O and thread pre-emption"
+
+Use ElapsedRealTimeTimer.
+> "I want to record a metric that measures user visible time but not deep sleep."
+
+Use UptimeTimer.
+> "I just want to spot check how long a single thing takes locally; I don't need telemetry"
+
+You can log the result of ElapsedRealTimeTimer.
+> "I want to measure the performance of multiple methods that call eacher other."
+
+You want TraceEvent.
+
+> "I want to measure only elapsed CPU time for some potentially expensive operation, ignoring things like I/O and thread pre-emption. I'm positive that this operation is CPU-bound."
+
+If you're absolutely sure about this, use CPUTimeTimer.
+
+# Usage
+
+```
+Timer myTimer = new ElapsedRealTimeTimer();
+myTimer.start();
+// Measure elapsed time without stopping myTimer
+long elapsedTimeMillis = myTimer.getElapsedTime(TimeUnit.MILLISECONDS);
+
+// Get a finer or coarser granularity of elapsed time.
+long elapsedTimeNanos = myTimer.getElapsedTime(TimeUnit.NANOSECONDS);
+long elapsedTimeSeconds = myTimer.getElapsedTime(TimeUnit.SECONDS);
+
+// Measure elapsed time after stopping myTimer. Repeated calls to getElapsedTime will return the same result.
+myTimer.stop();
+elapsedTimeMillis = myTimer.getElapsedTime(TimeUnit.MILLISECONDS);
+Thread.sleep(1000);
+assert elapsedTimeMillis == myTimer.getElapsedTime(TimeUnit.MILLISECONDS);
+
+// Restart myTimer, resetting its start and stop times.
+myTimer.start();
+```
diff --git a/base/android/java/src/org/chromium/base/time/Timer.java b/base/android/java/src/org/chromium/base/time/Timer.java
new file mode 100644
index 0000000..93f593b
--- /dev/null
+++ b/base/android/java/src/org/chromium/base/time/Timer.java
@@ -0,0 +1,31 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.time;
+
+import androidx.annotation.NonNull;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Interface for a provider of elapsed time, useful for general purpose interval timing.
+ * Implementations should guarantee that returned values are monotonically non-decreasing.
+ */
+public interface Timer {
+    /** Starts the timer. This resets the stop time to 0 if it had been previously set. */
+    void start();
+
+    /** Stops the timer. */
+    void stop();
+
+    /** Returns whether the timer is currently running. */
+    boolean isRunning();
+
+    /**
+     * Returns the elapsed time. This is either the delta between start and stop, or the difference
+     * between start and when this method is called (if the timer is still running). The result is
+     * in terms of the desired TimeUnit, rounding down.
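+     * For example, an elapsed 1999 nanoseconds converts to 1 microsecond.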
+     */
+    long getElapsedTime(@NonNull TimeUnit timeUnit);
+}
diff --git a/base/android/java/src/org/chromium/base/time/UptimeTimer.java b/base/android/java/src/org/chromium/base/time/UptimeTimer.java
new file mode 100644
index 0000000..a0ca4c5
--- /dev/null
+++ b/base/android/java/src/org/chromium/base/time/UptimeTimer.java
@@ -0,0 +1,19 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.time;
+
+import android.os.SystemClock;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Implementation of {@link Timer} that uses the Android SystemClock's uptimeMillis to track elapsed
+ * time excluding deep sleep.
+ */
+public class UptimeTimer extends BaseTimerImpl {
+    public UptimeTimer() {
+        super(SystemClock::uptimeMillis, TimeUnit.MILLISECONDS);
+    }
+}
diff --git a/base/android/junit/src/org/chromium/base/time/BaseTimerImplTest.java b/base/android/junit/src/org/chromium/base/time/BaseTimerImplTest.java
new file mode 100644
index 0000000..a64c49b
--- /dev/null
+++ b/base/android/junit/src/org/chromium/base/time/BaseTimerImplTest.java
@@ -0,0 +1,96 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.time;
+
+import static junit.framework.Assert.assertEquals;
+import static junit.framework.Assert.assertFalse;
+import static junit.framework.Assert.assertTrue;
+
+import androidx.test.filters.SmallTest;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.robolectric.annotation.Config;
+
+import org.chromium.base.test.BaseRobolectricTestRunner;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Unit tests for {@link BaseTimerImpl}.
+ */
+@RunWith(BaseRobolectricTestRunner.class)
+@Config(manifest = Config.NONE)
+public class BaseTimerImplTest {
+    private long mCurrentTime;
+    private BaseTimerImpl mBaseTimer;
+
+    @Before
+    public void setUp() {
+        mBaseTimer = new BaseTimerImpl(() -> mCurrentTime, TimeUnit.NANOSECONDS);
+    }
+
+    @Test
+    @SmallTest
+    public void testStartStopRunning() {
+        assertFalse(mBaseTimer.isRunning());
+
+        for (int i = 0; i < 5; i++) {
+            mBaseTimer.start();
+            assertTrue(mBaseTimer.isRunning());
+
+            mBaseTimer.stop();
+            assertFalse(mBaseTimer.isRunning());
+        }
+    }
+
+    @Test
+    @SmallTest
+    public void testGetElapsedTime() {
+        assertEquals(mBaseTimer.getElapsedTime(TimeUnit.NANOSECONDS), 0L);
+        mBaseTimer.start();
+        assertEquals(mBaseTimer.getElapsedTime(TimeUnit.NANOSECONDS), 0L);
+
+        mCurrentTime = 1000L;
+        assertEquals(mBaseTimer.getElapsedTime(TimeUnit.NANOSECONDS), 1000L);
+        assertEquals(mBaseTimer.getElapsedTime(TimeUnit.MICROSECONDS), 1L);
+
+        mBaseTimer.stop();
+        assertEquals(mBaseTimer.getElapsedTime(TimeUnit.NANOSECONDS), 1000L);
+        mCurrentTime = 2000L;
+        assertEquals(mBaseTimer.getElapsedTime(TimeUnit.NANOSECONDS), 1000L);
+    }
+
+    @Test
+    @SmallTest
+    public void testRounding() {
+        mBaseTimer.start();
+        assertEquals(mBaseTimer.getElapsedTime(TimeUnit.NANOSECONDS), 0L);
+
+        mCurrentTime = 1500L;
+        assertEquals(mBaseTimer.getElapsedTime(TimeUnit.MICROSECONDS), 1L);
+        mCurrentTime = 1501L;
+        assertEquals(mBaseTimer.getElapsedTime(TimeUnit.MICROSECONDS), 1L);
+        mCurrentTime = 1999L;
+        assertEquals(mBaseTimer.getElapsedTime(TimeUnit.MICROSECONDS), 1L);
+        mCurrentTime = 2000L;
+        assertEquals(mBaseTimer.getElapsedTime(TimeUnit.MICROSECONDS), 2L);
+    }
+
+    @Test
+    @SmallTest
+    public void testLowerGranularitySource() {
+        BaseTimerImpl lowerGranularityTimer =
+                new BaseTimerImpl(() -> mCurrentTime, TimeUnit.MILLISECONDS);
+        lowerGranularityTimer.start();
+
+        mCurrentTime = 1001L;
+        assertEquals(lowerGranularityTimer.getElapsedTime(TimeUnit.SECONDS), 1L);
+        assertEquals(lowerGranularityTimer.getElapsedTime(TimeUnit.MILLISECONDS), 1001L);
+        assertEquals(lowerGranularityTimer.getElapsedTime(TimeUnit.MICROSECONDS), 1001000L);
+        assertEquals(lowerGranularityTimer.getElapsedTime(TimeUnit.NANOSECONDS), 1001000000L);
+    }
+}
\ No newline at end of file
diff --git a/base/bits.h b/base/bits.h
index f71bc3f..c22a8cd 100644
--- a/base/bits.h
+++ b/base/bits.h
@@ -25,6 +25,8 @@
 namespace bits {
 
 // Returns true iff |value| is a power of 2.
+//
+// TODO(pkasting): When C++20 is available, replace with std::has_single_bit().
 template <typename T, typename = std::enable_if_t<std::is_integral<T>::value>>
 constexpr bool IsPowerOfTwo(T value) {
   // From "Hacker's Delight": Section 2.1 Manipulating Rightmost Bits.
@@ -79,6 +81,9 @@
 //
 // Prefer the clang path on Windows, as _BitScanReverse() and friends are not
 // constexpr.
+//
+// TODO(pkasting): When C++20 is available, replace with std::countl_zero() and
+// similar.
 #if defined(COMPILER_MSVC) && !defined(__clang__)
 
 template <typename T, unsigned bits = sizeof(T) * 8>
@@ -153,13 +158,9 @@
 #endif
 }
 
-ALWAYS_INLINE uint32_t CountLeadingZeroBits32(uint32_t x) {
-  return CountLeadingZeroBits(x);
-}
-
-ALWAYS_INLINE uint64_t CountLeadingZeroBits64(uint64_t x) {
-  return CountLeadingZeroBits(x);
-}
+// Used in place of "constexpr" below for things which are conditionally
+// constexpr depending on whether the functions above are constexpr.
+#define BASE_BITOPS_CONSTEXPR
 
 #elif defined(COMPILER_GCC) || defined(__clang__)
 
@@ -191,29 +192,38 @@
                        : bits;
 }
 
-ALWAYS_INLINE constexpr uint32_t CountLeadingZeroBits32(uint32_t x) {
-  return CountLeadingZeroBits(x);
-}
-
-ALWAYS_INLINE constexpr uint64_t CountLeadingZeroBits64(uint64_t x) {
-  return CountLeadingZeroBits(x);
-}
+#define BASE_BITOPS_CONSTEXPR constexpr
 
 #endif
 
-ALWAYS_INLINE constexpr size_t CountLeadingZeroBitsSizeT(size_t x) {
+ALWAYS_INLINE BASE_BITOPS_CONSTEXPR uint32_t
+CountLeadingZeroBits32(uint32_t x) {
   return CountLeadingZeroBits(x);
 }
 
-ALWAYS_INLINE constexpr size_t CountTrailingZeroBitsSizeT(size_t x) {
+ALWAYS_INLINE BASE_BITOPS_CONSTEXPR uint64_t
+CountLeadingZeroBits64(uint64_t x) {
+  return CountLeadingZeroBits(x);
+}
+
+ALWAYS_INLINE BASE_BITOPS_CONSTEXPR size_t CountLeadingZeroBitsSizeT(size_t x) {
+  return CountLeadingZeroBits(x);
+}
+
+ALWAYS_INLINE BASE_BITOPS_CONSTEXPR size_t
+CountTrailingZeroBitsSizeT(size_t x) {
   return CountTrailingZeroBits(x);
 }
 
+#undef BASE_BITOPS_CONSTEXPR
+
 // Returns the integer i such that 2^i <= n < 2^(i+1).
 //
 // There is a common `BitLength` function, which returns the number of bits
 // required to represent a value. Rather than implement that function,
 // use `Log2Floor` and add 1 to the result.
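+// For example, Log2Floor(32) == 5, so the bit length of 32 is 6.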
+//
+// TODO(pkasting): When C++20 is available, replace with std::bit_xxx().
 constexpr int Log2Floor(uint32_t n) {
   return 31 - CountLeadingZeroBits(n);
 }
diff --git a/base/compiler_specific.h b/base/compiler_specific.h
index 8d80abf..2c5a6b2 100644
--- a/base/compiler_specific.h
+++ b/base/compiler_specific.h
@@ -29,6 +29,13 @@
 #define HAS_CPP_ATTRIBUTE(x) 0
 #endif
 
+// A wrapper around `__has_attribute`, similar to HAS_CPP_ATTRIBUTE.
+#if defined(__has_attribute)
+#define HAS_ATTRIBUTE(x) __has_attribute(x)
+#else
+#define HAS_ATTRIBUTE(x) 0
+#endif
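+//
+// Use like (mirroring the NOT_TAIL_CALLED check below):
+//   #if defined(__clang__) && HAS_ATTRIBUTE(not_tail_called)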
+
 // A wrapper around `__has_builtin`, similar to HAS_CPP_ATTRIBUTE.
 #if defined(__has_builtin)
 #define HAS_BUILTIN(x) __has_builtin(x)
@@ -63,7 +70,7 @@
 // prevent code folding, see NO_CODE_FOLDING() in base/debug/alias.h.
 // Use like:
 //   void NOT_TAIL_CALLED FooBar();
-#if defined(__clang__) && __has_attribute(not_tail_called)
+#if defined(__clang__) && HAS_ATTRIBUTE(not_tail_called)
 #define NOT_TAIL_CALLED __attribute__((not_tail_called))
 #else
 #define NOT_TAIL_CALLED
@@ -131,11 +138,9 @@
 //   __attribute__((format(wprintf, format_param, dots_param)))
 
 // Sanitizers annotations.
-#if defined(__has_attribute)
-#if __has_attribute(no_sanitize)
+#if HAS_ATTRIBUTE(no_sanitize)
 #define NO_SANITIZE(what) __attribute__((no_sanitize(what)))
 #endif
-#endif
 #if !defined(NO_SANITIZE)
 #define NO_SANITIZE(what)
 #endif
@@ -243,7 +248,7 @@
 #endif
 #endif
 
-#if defined(__clang__) && __has_attribute(uninitialized)
+#if defined(__clang__) && HAS_ATTRIBUTE(uninitialized)
 // Attribute "uninitialized" disables -ftrivial-auto-var-init=pattern for
 // the specified variable.
 // Library-wide alternative is
@@ -290,13 +295,9 @@
 // In some cases it's desirable to remove this, e.g. on hot functions, or if
 // we have purposely changed the reference canary.
 #if defined(COMPILER_GCC) || defined(__clang__)
-#if defined(__has_attribute)
-#if __has_attribute(__no_stack_protector__)
+#if HAS_ATTRIBUTE(__no_stack_protector__)
 #define NO_STACK_PROTECTOR __attribute__((__no_stack_protector__))
-#else  // __has_attribute(__no_stack_protector__)
-#define NO_STACK_PROTECTOR __attribute__((__optimize__("-fno-stack-protector")))
-#endif
-#else  // defined(__has_attribute)
+#else
 #define NO_STACK_PROTECTOR __attribute__((__optimize__("-fno-stack-protector")))
 #endif
 #else
@@ -333,7 +334,7 @@
 #endif  // defined(__clang_analyzer__)
 
 // Use nomerge attribute to disable optimization of merging multiple same calls.
-#if defined(__clang__) && __has_attribute(nomerge)
+#if defined(__clang__) && HAS_ATTRIBUTE(nomerge)
 #define NOMERGE [[clang::nomerge]]
 #else
 #define NOMERGE
@@ -360,7 +361,7 @@
 // See also:
 //   https://clang.llvm.org/docs/AttributeReference.html#trivial-abi
 //   https://libcxx.llvm.org/docs/DesignDocs/UniquePtrTrivialAbi.html
-#if defined(__clang__) && __has_attribute(trivial_abi)
+#if defined(__clang__) && HAS_ATTRIBUTE(trivial_abi)
 #define TRIVIAL_ABI [[clang::trivial_abi]]
 #else
 #define TRIVIAL_ABI
@@ -369,7 +370,7 @@
 // Marks a member function as reinitializing a moved-from variable.
 // See also
 // https://clang.llvm.org/extra/clang-tidy/checks/bugprone-use-after-move.html#reinitialization
-#if defined(__clang__) && __has_attribute(reinitializes)
+#if defined(__clang__) && HAS_ATTRIBUTE(reinitializes)
 #define REINITIALIZES_AFTER_MOVE [[clang::reinitializes]]
 #else
 #define REINITIALIZES_AFTER_MOVE
@@ -378,11 +379,9 @@
 // Requires constant initialization. See constinit in C++20. Allows to rely on a
 // variable being initialized before execution, and not requiring a global
 // constructor.
-#if defined(__has_attribute)
-#if __has_attribute(require_constant_initialization)
+#if HAS_ATTRIBUTE(require_constant_initialization)
 #define CONSTINIT __attribute__((require_constant_initialization))
 #endif
-#endif
 #if !defined(CONSTINIT)
 #define CONSTINIT
 #endif
diff --git a/base/containers/contiguous_iterator_unittest.cc b/base/containers/contiguous_iterator_unittest.cc
index ad2b4fa..59eed6c 100644
--- a/base/containers/contiguous_iterator_unittest.cc
+++ b/base/containers/contiguous_iterator_unittest.cc
@@ -40,10 +40,6 @@
   static_assert(!IsContiguousIterator<const ForwardIterator&>::value,
                 "Error: const ForwardIterator& should not be considered a "
                 "contiguous iterator.");
-  static_assert(
-      !IsContiguousIterator<std::reverse_iterator<ForwardIterator>>::value,
-      "Error: A reverse ForwardIterator should not be considered a "
-      "contiguous iterator.");
 }
 
 TEST(ContiguousIteratorTest, BidirectionalIterator) {
diff --git a/base/containers/id_map.h b/base/containers/id_map.h
index b782a38..8fb4d5b 100644
--- a/base/containers/id_map.h
+++ b/base/containers/id_map.h
@@ -217,7 +217,13 @@
  private:
   // Transforms a map iterator to an iterator on the keys of the map.
   // Used by Clear() to populate |removed_ids_| in bulk.
-  struct KeyIterator : std::iterator<std::forward_iterator_tag, KeyType> {
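+  // Explicit member typedefs replace std::iterator, which C++17 deprecates.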
+  struct KeyIterator {
+    using iterator_category = std::forward_iterator_tag;
+    using value_type = KeyType;
+    using difference_type = std::ptrdiff_t;
+    using pointer = KeyType*;
+    using reference = KeyType&;
+
     using inner_iterator = typename HashTable::iterator;
     inner_iterator iter_;
 
diff --git a/base/containers/small_map.h b/base/containers/small_map.h
index fd25bb5..071f87c 100644
--- a/base/containers/small_map.h
+++ b/base/containers/small_map.h
@@ -243,9 +243,6 @@
       return !(*this == other);
     }
 
-    bool operator==(const const_iterator& other) const;
-    bool operator!=(const const_iterator& other) const;
-
    private:
     friend class small_map;
     friend class const_iterator;
@@ -603,24 +600,6 @@
   }
 };
 
-template <typename NormalMap,
-          size_t kArraySize,
-          typename EqualKey,
-          typename Functor>
-inline bool small_map<NormalMap, kArraySize, EqualKey, Functor>::iterator::
-operator==(const const_iterator& other) const {
-  return other == *this;
-}
-
-template <typename NormalMap,
-          size_t kArraySize,
-          typename EqualKey,
-          typename Functor>
-inline bool small_map<NormalMap, kArraySize, EqualKey, Functor>::iterator::
-operator!=(const const_iterator& other) const {
-  return other != *this;
-}
-
 }  // namespace base
 
 #endif  // BASE_CONTAINERS_SMALL_MAP_H_
diff --git a/base/cxx17_backports_unittest.cc b/base/cxx17_backports_unittest.cc
index 5a788bb..7367091 100644
--- a/base/cxx17_backports_unittest.cc
+++ b/base/cxx17_backports_unittest.cc
@@ -18,138 +18,6 @@
 namespace base {
 namespace {
 
-TEST(Cxx17BackportTest, Size) {
-  {
-    std::vector<int> vector = {1, 2, 3, 4, 5};
-    static_assert(
-        std::is_same<decltype(std::size(vector)),
-                     decltype(vector.size())>::value,
-        "std::size(vector) should have the same type as vector.size()");
-    EXPECT_EQ(vector.size(), std::size(vector));
-  }
-
-  {
-    std::string empty_str;
-    static_assert(
-        std::is_same<decltype(std::size(empty_str)),
-                     decltype(empty_str.size())>::value,
-        "std::size(empty_str) should have the same type as empty_str.size()");
-    EXPECT_EQ(0u, std::size(empty_str));
-  }
-
-  {
-    std::array<int, 4> array = {{1, 2, 3, 4}};
-    static_assert(
-        std::is_same<decltype(std::size(array)), decltype(array.size())>::value,
-        "std::size(array) should have the same type as array.size()");
-    static_assert(std::size(array) == array.size(),
-                  "std::size(array) should be equal to array.size()");
-  }
-
-  {
-    int array[] = {1, 2, 3};
-    static_assert(std::is_same<size_t, decltype(std::size(array))>::value,
-                  "std::size(array) should be of type size_t");
-    static_assert(3u == std::size(array), "std::size(array) should be 3");
-  }
-}
-
-TEST(Cxx17BackportTest, Empty) {
-  {
-    std::vector<int> vector;
-    static_assert(
-        std::is_same<decltype(std::empty(vector)),
-                     decltype(vector.empty())>::value,
-        "std::empty(vector) should have the same type as vector.empty()");
-    EXPECT_EQ(vector.empty(), std::empty(vector));
-  }
-
-  {
-    std::array<int, 4> array = {{1, 2, 3, 4}};
-    static_assert(
-        std::is_same<decltype(std::empty(array)),
-                     decltype(array.empty())>::value,
-        "std::empty(array) should have the same type as array.empty()");
-    static_assert(std::empty(array) == array.empty(),
-                  "std::empty(array) should be equal to array.empty()");
-  }
-
-  {
-    int array[] = {1, 2, 3};
-    static_assert(std::is_same<bool, decltype(std::empty(array))>::value,
-                  "std::empty(array) should be of type bool");
-    static_assert(!std::empty(array), "std::empty(array) should be false");
-  }
-
-  {
-    constexpr std::initializer_list<int> il;
-    static_assert(std::is_same<bool, decltype(std::empty(il))>::value,
-                  "std::empty(il) should be of type bool");
-    static_assert(std::empty(il), "std::empty(il) should be true");
-  }
-}
-
-TEST(Cxx17BackportTest, Data) {
-  {
-    std::vector<int> vector = {1, 2, 3, 4, 5};
-    static_assert(
-        std::is_same<decltype(std::data(vector)),
-                     decltype(vector.data())>::value,
-        "std::data(vector) should have the same type as vector.data()");
-    EXPECT_EQ(vector.data(), std::data(vector));
-  }
-
-  {
-    const std::string cstr = "const string";
-    static_assert(
-        std::is_same<decltype(std::data(cstr)), decltype(cstr.data())>::value,
-        "std::data(cstr) should have the same type as cstr.data()");
-
-    EXPECT_EQ(cstr.data(), std::data(cstr));
-  }
-
-  {
-    std::string str = "mutable string";
-    static_assert(std::is_same<decltype(std::data(str)), char*>::value,
-                  "std::data(str) should be of type char*");
-    EXPECT_EQ(str.data(), std::data(str));
-  }
-
-  {
-    std::string empty_str;
-    static_assert(std::is_same<decltype(std::data(empty_str)), char*>::value,
-                  "std::data(empty_str) should be of type char*");
-    EXPECT_EQ(empty_str.data(), std::data(empty_str));
-  }
-
-  {
-    std::array<int, 4> array = {{1, 2, 3, 4}};
-    static_assert(
-        std::is_same<decltype(std::data(array)), decltype(array.data())>::value,
-        "std::data(array) should have the same type as array.data()");
-    // std::array::data() is not constexpr prior to C++17, hence the runtime
-    // check.
-    EXPECT_EQ(array.data(), std::data(array));
-  }
-
-  {
-    constexpr int array[] = {1, 2, 3};
-    static_assert(std::is_same<const int*, decltype(std::data(array))>::value,
-                  "std::data(array) should be of type const int*");
-    static_assert(array == std::data(array),
-                  "std::data(array) should be array");
-  }
-
-  {
-    constexpr std::initializer_list<int> il;
-    static_assert(
-        std::is_same<decltype(il.begin()), decltype(std::data(il))>::value,
-        "std::data(il) should have the same type as il.begin()");
-    static_assert(il.begin() == std::data(il),
-                  "std::data(il) should be equal to il.begin()");
-  }
-}
-
 struct OneType {
   int some_int;
 };
diff --git a/base/files/file_util.cc b/base/files/file_util.cc
index dbc12d4..c3ffff8 100644
--- a/base/files/file_util.cc
+++ b/base/files/file_util.cc
@@ -35,13 +35,13 @@
 namespace base {
 
 #if !BUILDFLAG(IS_WIN)
-OnceCallback<void(const FilePath&)> GetDeleteFileCallback() {
-  return BindOnce(IgnoreResult(&DeleteFile));
+OnceClosure GetDeleteFileCallback(const FilePath& path) {
+  return BindOnce(IgnoreResult(&DeleteFile), path);
 }
 #endif  // !BUILDFLAG(IS_WIN)
 
-OnceCallback<void(const FilePath&)> GetDeletePathRecursivelyCallback() {
-  return BindOnce(IgnoreResult(&DeletePathRecursively));
+OnceClosure GetDeletePathRecursivelyCallback(const FilePath& path) {
+  return BindOnce(IgnoreResult(&DeletePathRecursively), path);
 }
 
 int64_t ComputeDirectorySize(const FilePath& root_path) {
diff --git a/base/files/file_util.h b/base/files/file_util.h
index df747d6..099a236 100644
--- a/base/files/file_util.h
+++ b/base/files/file_util.h
@@ -78,12 +78,11 @@
 // Simplified way to get a callback to do DeleteFile(path) and ignore the
 // DeleteFile() result. On Windows, this will retry the delete via delayed tasks
 // for up to 2 seconds before giving up, to deal with AV S/W locking the file.
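+// For example (illustrative; |task_runner| is a hypothetical task runner):
+//   task_runner->PostTask(FROM_HERE, GetDeleteFileCallback(path));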
-BASE_EXPORT OnceCallback<void(const FilePath&)> GetDeleteFileCallback();
+BASE_EXPORT OnceClosure GetDeleteFileCallback(const FilePath& path);
 
 // Simplified way to get a callback to do DeletePathRecursively(path) and ignore
 // the DeletePathRecursively() result.
-BASE_EXPORT OnceCallback<void(const FilePath&)>
-GetDeletePathRecursivelyCallback();
+BASE_EXPORT OnceClosure GetDeletePathRecursivelyCallback(const FilePath& path);
 
 #if BUILDFLAG(IS_WIN)
 // Schedules to delete the given path, whether it's a file or a directory, until
diff --git a/base/files/important_file_writer.cc b/base/files/important_file_writer.cc
index 0344539..c7b5037 100644
--- a/base/files/important_file_writer.cc
+++ b/base/files/important_file_writer.cc
@@ -147,6 +147,7 @@
                                                   StringPiece data,
                                                   StringPiece histogram_suffix,
                                                   bool from_instance) {
+  const TimeTicks write_start = TimeTicks::Now();
   if (!from_instance)
     ImportantFileWriterCleaner::AddDirectory(path.DirName());
 
@@ -259,6 +260,10 @@
     DeleteTmpFileWithRetry(File(), tmp_file_path);
   }
 
+  const TimeDelta write_duration = TimeTicks::Now() - write_start;
+  UmaHistogramTimesWithSuffix("ImportantFile.WriteDuration", histogram_suffix,
+                              write_duration);
+
   return result;
 }
 
diff --git a/base/files/important_file_writer_unittest.cc b/base/files/important_file_writer_unittest.cc
index b4cb431..9dd772c 100644
--- a/base/files/important_file_writer_unittest.cc
+++ b/base/files/important_file_writer_unittest.cc
@@ -358,6 +358,7 @@
   EXPECT_FALSE(PathExists(writer.path()));
   // We don't record metrics in case the serialization fails.
   histogram_tester.ExpectTotalCount("ImportantFile.SerializationDuration", 0);
+  histogram_tester.ExpectTotalCount("ImportantFile.WriteDuration", 0);
 }
 
 TEST_F(ImportantFileWriterTest, ScheduleWriteWithBackgroundDataSerializer) {
@@ -393,6 +394,7 @@
   ASSERT_TRUE(PathExists(writer.path()));
   EXPECT_EQ("foo", GetFileContent(writer.path()));
   histogram_tester.ExpectTotalCount("ImportantFile.SerializationDuration", 1);
+  histogram_tester.ExpectTotalCount("ImportantFile.WriteDuration", 1);
 }
 
 TEST_F(ImportantFileWriterTest,
@@ -428,6 +430,7 @@
   // We record the foreground serialization metric despite later failure in
   // background sequence.
   histogram_tester.ExpectTotalCount("ImportantFile.SerializationDuration", 1);
+  histogram_tester.ExpectTotalCount("ImportantFile.WriteDuration", 0);
 }
 
 // Test that the chunking to avoid very large writes works.
@@ -450,6 +453,7 @@
   writer.DoScheduledWrite();
   RunLoop().RunUntilIdle();
   histogram_tester.ExpectTotalCount("ImportantFile.SerializationDuration", 1);
+  histogram_tester.ExpectTotalCount("ImportantFile.WriteDuration", 1);
 }
 
 // Verify that a UMA metric for the serialization duration is recorded if the
@@ -463,6 +467,7 @@
   RunLoop().RunUntilIdle();
   histogram_tester.ExpectTotalCount("ImportantFile.SerializationDuration.Foo",
                                     1);
+  histogram_tester.ExpectTotalCount("ImportantFile.WriteDuration.Foo", 1);
 }
 
 }  // namespace base
diff --git a/base/i18n/number_formatting_unittest.cc b/base/i18n/number_formatting_unittest.cc
index cb50e25..6466ff1 100644
--- a/base/i18n/number_formatting_unittest.cc
+++ b/base/i18n/number_formatting_unittest.cc
@@ -107,11 +107,11 @@
     const char* expected_arabic;
     const char* expected_arabic_egypt;
   } cases[] = {
-      {0, "0%", u8"0\u00a0%", u8"\u06f0\u066a", u8"0\u200e%\u200e",
-       u8"\u0660\u066a\u061c"},
-      {42, "42%", "42\u00a0%", u8"\u06f4\u06f2\u066a", u8"42\u200e%\u200e",
+      {0, "0%", "0\u00a0%", "\u06f0\u066a", "0\u200e%\u200e",
+       "\u0660\u066a\u061c"},
+      {42, "42%", "42\u00a0%", "\u06f4\u06f2\u066a", "42\u200e%\u200e",
        "\u0664\u0662\u066a\u061c"},
-      {1024, "1,024%", "1.024\u00a0%", u8"\u06f1\u066c\u06f0\u06f2\u06f4\u066a",
+      {1024, "1,024%", "1.024\u00a0%", "\u06f1\u066c\u06f0\u06f2\u06f4\u066a",
        "1,024\u200e%\u200e", "\u0661\u066c\u0660\u0662\u0664\u066a\u061c"},
   };
 
diff --git a/base/logging.cc b/base/logging.cc
index 46484b9..26f8e04 100644
--- a/base/logging.cc
+++ b/base/logging.cc
@@ -140,8 +140,10 @@
 
 namespace {
 
+#if BUILDFLAG(USE_RUNTIME_VLOG)
 VlogInfo* g_vlog_info = nullptr;
 VlogInfo* g_vlog_info_prev = nullptr;
+#endif  // BUILDFLAG(USE_RUNTIME_VLOG)
 
 const char* const log_severity_names[] = {"INFO", "WARNING", "ERROR", "FATAL"};
 static_assert(LOGGING_NUM_SEVERITIES == std::size(log_severity_names),
@@ -415,6 +417,7 @@
   g_log_format = settings.log_format;
 #endif
 
+#if BUILDFLAG(USE_RUNTIME_VLOG)
   if (base::CommandLine::InitializedForCurrentProcess()) {
     base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
     // Don't bother initializing |g_vlog_info| unless we use one of the
@@ -433,6 +436,7 @@
                        &g_min_log_level);
     }
   }
+#endif  // BUILDFLAG(USE_RUNTIME_VLOG)
 
   g_logging_destination = settings.logging_dest;
 
@@ -511,12 +515,17 @@
 
 int GetVlogLevelHelper(const char* file, size_t N) {
   DCHECK_GT(N, 0U);
+
+#if BUILDFLAG(USE_RUNTIME_VLOG)
   // Note: |g_vlog_info| may change on a different thread during startup
   // (but will always be valid or nullptr).
   VlogInfo* vlog_info = g_vlog_info;
   return vlog_info ?
       vlog_info->GetVlogLevel(base::StringPiece(file, N - 1)) :
       GetVlogVerbosity();
+#else
+  return GetVlogVerbosity();
+#endif  // BUILDFLAG(USE_RUNTIME_VLOG)
 }
 
 void SetLogItems(bool enable_process_id, bool enable_thread_id,
@@ -1158,6 +1167,12 @@
 }
 #endif
 
+#if !BUILDFLAG(USE_RUNTIME_VLOG)
+int GetDisableAllVLogLevel() {
+  return -1;
+}
+#endif  // !BUILDFLAG(USE_RUNTIME_VLOG)
+
 }  // namespace logging
 
 std::ostream& std::operator<<(std::ostream& out, const wchar_t* wstr) {
diff --git a/base/logging.h b/base/logging.h
index a3ff92f..30d05d7 100644
--- a/base/logging.h
+++ b/base/logging.h
@@ -16,6 +16,7 @@
 #include "base/callback_forward.h"
 #include "base/compiler_specific.h"
 #include "base/dcheck_is_on.h"
+#include "base/logging_buildflags.h"
 #include "base/scoped_clear_last_error.h"
 #include "base/strings/string_piece_forward.h"
 #include "build/build_config.h"
@@ -88,7 +89,17 @@
 //   VLOG(2) << "I'm printed when you run the program with --v=2 or more";
 //
 // These always log at the INFO log level (when they log at all).
-// The verbose logging can also be turned on module-by-module.  For instance,
+//
+// There is a build flag USE_RUNTIME_VLOG that controls whether verbose
+// logging is processed at runtime or at build time.
+//
+// When USE_RUNTIME_VLOG is not set, verbose logging is processed at build
+// time. VLOG(n) is only included and compiled when `n` is less than or equal
+// to the verbose level defined by the ENABLED_VLOG_LEVEL macro. The command
+// line switches --v and --vmodule are ignored in this mode.
+//
+// When USE_RUNTIME_VLOG is set, verbose logging is controlled at runtime and
+// can be turned on module-by-module.  For instance,
 //    --vmodule=profile=2,icon_loader=1,browser_*=3,*/chromeos/*=4 --v=0
 // will cause:
 //   a. VLOG(2) and lower messages to be printed from profile.{h,cc}
@@ -428,13 +439,55 @@
 #define LOG_IS_ON(severity) \
   (::logging::ShouldCreateLogMessage(::logging::LOGGING_##severity))
 
+#if !BUILDFLAG(USE_RUNTIME_VLOG)
+
+// When USE_RUNTIME_VLOG is not set, --vmodule is completely ignored and the
+// ENABLED_VLOG_LEVEL macro is used to determine the enabled VLOG levels at
+// build time.
+//
+// Files that need VLOG must redefine ENABLED_VLOG_LEVEL to the desired VLOG
+// level number. For example, to enable VLOG(1) output:
+//
+//   For a source cc file:
+//
+//     #undef ENABLED_VLOG_LEVEL
+//     #define ENABLED_VLOG_LEVEL 1
+//
+//   For all cc files in a build target of a BUILD.gn:
+//
+//     source_set("build_target") {
+//       ...
+//
+//       defines = ["ENABLED_VLOG_LEVEL=1"]
+//     }
+
+// Returns a vlog level that suppresses all vlogs. Using a function rather
+// than a constant prevents the compiler from evaluating VLOG_IS_ON() at
+// compile time and generating unreachable-code warnings.
+BASE_EXPORT int GetDisableAllVLogLevel();
+
+// Define the default ENABLED_VLOG_LEVEL if it is not already defined. This
+// allows ENABLED_VLOG_LEVEL to be overridden via defines in cc flags.
+#if !defined(ENABLED_VLOG_LEVEL)
+#define ENABLED_VLOG_LEVEL (logging::GetDisableAllVLogLevel())
+#endif  // !defined(ENABLED_VLOG_LEVEL)
+
+#define VLOG_IS_ON(verboselevel) ((verboselevel) <= (ENABLED_VLOG_LEVEL))
+
+#else
+
 // We don't do any caching tricks with VLOG_IS_ON() like the
 // google-glog version since it increases binary size.  This means
 // that using the v-logging functions in conjunction with --vmodule
 // may be slow.
+
 #define VLOG_IS_ON(verboselevel) \
   ((verboselevel) <= ::logging::GetVlogLevel(__FILE__))
 
+#endif  // !BUILDFLAG(USE_RUNTIME_VLOG)
+
 // Helper macro which avoids evaluating the arguments to a stream if
 // the condition doesn't hold. Condition is evaluated once and only once.
 #define LAZY_STREAM(stream, condition)                                  \
diff --git a/base/logging_buildflags.h b/base/logging_buildflags.h
index cb182df..f3897ee 100644
--- a/base/logging_buildflags.h
+++ b/base/logging_buildflags.h
@@ -7,4 +7,5 @@
 #else
 #define BUILDFLAG_INTERNAL_ENABLE_LOG_ERROR_NOT_REACHED() (0)
 #endif
+#define BUILDFLAG_INTERNAL_USE_RUNTIME_VLOG() (1)
 #endif  // BASE_LOGGING_BUILDFLAGS_H_
diff --git a/base/logging_unittest.cc b/base/logging_unittest.cc
index 911340d..c4fa743 100644
--- a/base/logging_unittest.cc
+++ b/base/logging_unittest.cc
@@ -86,15 +86,37 @@
 
 TEST_F(LoggingTest, BasicLogging) {
   MockLogSource mock_log_source;
+
+  // 4 base logs: LOG, LOG_IF, PLOG, and PLOG_IF.
+  int expected_logs = 4;
+
+  // 4 verbose logs: VLOG, VLOG_IF, VPLOG, VPLOG_IF.
+  if (VLOG_IS_ON(0))
+    expected_logs += 4;
+
+  // 4 debug logs: DLOG, DLOG_IF, DPLOG, DPLOG_IF.
+  if (DCHECK_IS_ON())
+    expected_logs += 4;
+
+  // 4 verbose debug logs: DVLOG, DVLOG_IF, DVPLOG, DVPLOG_IF.
+  if (VLOG_IS_ON(0) && DCHECK_IS_ON())
+    expected_logs += 4;
+
   EXPECT_CALL(mock_log_source, Log())
-      .Times(DCHECK_IS_ON() ? 16 : 8)
+      .Times(expected_logs)
       .WillRepeatedly(Return("log message"));
 
   SetMinLogLevel(LOGGING_INFO);
 
   EXPECT_TRUE(LOG_IS_ON(INFO));
   EXPECT_EQ(DCHECK_IS_ON(), DLOG_IS_ON(INFO));
+
+#if BUILDFLAG(USE_RUNTIME_VLOG)
   EXPECT_TRUE(VLOG_IS_ON(0));
+#else
+  // VLOG defaults to off when USE_RUNTIME_VLOG is not set.
+  EXPECT_FALSE(VLOG_IS_ON(0));
+#endif  // BUILDFLAG(USE_RUNTIME_VLOG)
 
   LOG(INFO) << mock_log_source.Log();
   LOG_IF(INFO, true) << mock_log_source.Log();
@@ -840,6 +862,40 @@
   }
 }
 
+#if !BUILDFLAG(USE_RUNTIME_VLOG)
+TEST_F(LoggingTest, BuildTimeVLOG) {
+  // Use a static because only captureless lambdas can be converted to a
+  // function pointer for SetLogMessageHandler().
+  static base::NoDestructor<std::string> log_string;
+  SetLogMessageHandler([](int severity, const char* file, int line,
+                          size_t start, const std::string& str) -> bool {
+    *log_string = str;
+    return true;
+  });
+
+  // No VLOG by default.
+  EXPECT_FALSE(VLOG_IS_ON(0));
+  VLOG(1) << "Expect not logged";
+  EXPECT_TRUE(log_string->empty());
+
+  // Re-define ENABLED_VLOG_LEVEL to enable VLOG(1).
+  // Note that ENABLED_VLOG_LEVEL affects all the code after it, so please
+  // keep this test case the last one in this file.
+#undef ENABLED_VLOG_LEVEL
+#define ENABLED_VLOG_LEVEL 1
+
+  EXPECT_TRUE(VLOG_IS_ON(1));
+  EXPECT_FALSE(VLOG_IS_ON(2));
+
+  VLOG(1) << "Expect logged";
+  EXPECT_THAT(*log_string, ::testing::MatchesRegex(".* Expect logged\n"));
+
+  log_string->clear();
+  VLOG(2) << "Expect not logged";
+  EXPECT_TRUE(log_string->empty());
+}
+#endif  // !BUILDFLAG(USE_RUNTIME_VLOG)
+
 }  // namespace
 
 }  // namespace logging
diff --git a/base/memory/nonscannable_memory.cc b/base/memory/nonscannable_memory.cc
index e79112b..146b453 100644
--- a/base/memory/nonscannable_memory.cc
+++ b/base/memory/nonscannable_memory.cc
@@ -38,12 +38,12 @@
   // TODO(bikineev): Change to LIKELY once PCScan is enabled by default.
   if (UNLIKELY(pcscan_enabled_.load(std::memory_order_acquire))) {
     PA_DCHECK(allocator_.get());
-    return allocator_->root()->AllocWithFlagsNoHooks(0, size,
-                                                     PartitionPageSize());
+    return allocator_->root()->AllocWithFlagsNoHooks(
+        0, size, partition_alloc::PartitionPageSize());
   }
   // Otherwise, dispatch to default partition.
   return PartitionAllocMalloc::Allocator()->AllocWithFlagsNoHooks(
-      0, size, PartitionPageSize());
+      0, size, partition_alloc::PartitionPageSize());
 }
 
 template <bool Quarantinable>
diff --git a/base/memory/platform_shared_memory_region.cc b/base/memory/platform_shared_memory_region.cc
index 9277273..0f7d28e 100644
--- a/base/memory/platform_shared_memory_region.cc
+++ b/base/memory/platform_shared_memory_region.cc
@@ -4,11 +4,13 @@
 
 #include "base/memory/platform_shared_memory_region.h"
 
+#include "base/bits.h"
 #include "base/memory/aligned_memory.h"
 #include "base/memory/shared_memory_mapping.h"
 #include "base/memory/shared_memory_security_policy.h"
 #include "base/metrics/histogram_functions.h"
 #include "base/numerics/checked_math.h"
+#include "base/system/sys_info.h"
 
 namespace base {
 namespace subtle {
@@ -60,6 +62,9 @@
     return absl::nullopt;
   }
 
+  // TODO(dcheng): Presumably the actual size of the mapping is rounded to
+  // `SysInfo::VMAllocationGranularity()`. Should this accounting be done with
+  // that in mind?
   if (!SharedMemorySecurityPolicy::AcquireReservationForMapping(size)) {
     RecordMappingWasBlockedHistogram(/*blocked=*/true);
     return absl::nullopt;
@@ -70,11 +75,23 @@
   if (!mapper)
     mapper = SharedMemoryMapper::GetDefaultInstance();
 
+  // The backing mapper expects offset to be aligned to
+  // `SysInfo::VMAllocationGranularity()`.
+  size_t aligned_offset =
+      bits::AlignDown(offset, SysInfo::VMAllocationGranularity());
+  size_t adjustment_for_alignment = offset - aligned_offset;
+
   bool write_allowed = mode_ != Mode::kReadOnly;
-  auto result = mapper->Map(GetPlatformHandle(), write_allowed, offset, size);
+  auto result = mapper->Map(GetPlatformHandle(), write_allowed, aligned_offset,
+                            size + adjustment_for_alignment);
 
   if (result.has_value()) {
     DCHECK(IsAligned(result.value().data(), kMapMinimumAlignment));
+    if (offset != 0) {
+      // Undo the previous adjustment so the returned mapping respects the exact
+      // requested `offset` and `size`.
+      result = result->subspan(adjustment_for_alignment);
+    }
   } else {
     SharedMemorySecurityPolicy::ReleaseReservationForMapping(size);
   }
diff --git a/base/memory/raw_ptr.cc b/base/memory/raw_ptr.cc
index 8e097e8..bc81fdb 100644
--- a/base/memory/raw_ptr.cc
+++ b/base/memory/raw_ptr.cc
@@ -71,10 +71,10 @@
 void CheckThatAddressIsntWithinFirstPartitionPage(uintptr_t address) {
   if (IsManagedByDirectMap(address)) {
     uintptr_t reservation_start = GetDirectMapReservationStart(address);
-    CHECK(address - reservation_start >= PartitionPageSize());
+    CHECK(address - reservation_start >= partition_alloc::PartitionPageSize());
   } else {
     CHECK(IsManagedByNormalBuckets(address));
-    CHECK(address % kSuperPageSize >= PartitionPageSize());
+    CHECK(address % kSuperPageSize >= partition_alloc::PartitionPageSize());
   }
 }
 #endif  // DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
diff --git a/base/memory/read_only_shared_memory_region.h b/base/memory/read_only_shared_memory_region.h
index cbabdcd..bf311a3 100644
--- a/base/memory/read_only_shared_memory_region.h
+++ b/base/memory/read_only_shared_memory_region.h
@@ -83,10 +83,16 @@
   // the region can be provided using the optional |mapper| parameter.
   ReadOnlySharedMemoryMapping Map(SharedMemoryMapper* mapper = nullptr) const;
 
-  // Same as above, but maps only |size| bytes of the shared memory region
-  // starting with the given |offset|. |offset| must be aligned to value of
-  // |SysInfo::VMAllocationGranularity()|. Returns an invalid mapping if
-  // requested bytes are out of the region limits.
+  // Similar to `Map()`, but maps only `size` bytes of the shared memory block
+  // at byte `offset`. Returns an invalid mapping if requested bytes are out of
+  // the region limits.
+  //
+  // `offset` does not need to be aligned; if `offset` is not a multiple of
+  // `subtle::PlatformSharedMemoryRegion::kMapMinimumAlignment`, then the
+  // returned mapping will not respect alignment either. Internally, `offset`
+  // and `size` are still first adjusted to respect alignment when mapping in
+  // the shared memory region, but the returned mapping will be "unadjusted" to
+  // match the exact `offset` and `size` requested.
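+  //
+  // For example, with a 4096-byte allocation granularity, MapAt(10, 100)
+  // internally maps 110 bytes starting at offset 0, then returns a mapping
+  // covering bytes [10, 110) of the region.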
   ReadOnlySharedMemoryMapping MapAt(uint64_t offset,
                                     size_t size,
                                     SharedMemoryMapper* mapper = nullptr) const;
diff --git a/base/memory/shared_memory_mapping.cc b/base/memory/shared_memory_mapping.cc
index 0ece5bb..6afcf2f 100644
--- a/base/memory/shared_memory_mapping.cc
+++ b/base/memory/shared_memory_mapping.cc
@@ -4,11 +4,14 @@
 
 #include "base/memory/shared_memory_mapping.h"
 
+#include <cstdint>
 #include <utility>
 
+#include "base/bits.h"
 #include "base/logging.h"
 #include "base/memory/shared_memory_security_policy.h"
 #include "base/memory/shared_memory_tracker.h"
+#include "base/system/sys_info.h"
 #include "base/unguessable_token.h"
 #include "build/build_config.h"
 
@@ -41,6 +44,7 @@
                                          const UnguessableToken& guid,
                                          SharedMemoryMapper* mapper)
     : mapped_span_(mapped_span), size_(size), guid_(guid), mapper_(mapper) {
+  // Note: except on Windows, `mapped_span_.size() == size_`.
   SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
 }
 
@@ -54,7 +58,16 @@
   SharedMemoryMapper* mapper = mapper_;
   if (!mapper)
     mapper = SharedMemoryMapper::GetDefaultInstance();
-  mapper->Unmap(mapped_span_);
+
+  // The backing mapper expects offset to be aligned to
+  // `SysInfo::VMAllocationGranularity()`, so replicate the alignment that was
+  // done when originally mapping in the region.
+  uint8_t* aligned_data =
+      bits::AlignDown(mapped_span_.data(), SysInfo::VMAllocationGranularity());
+  size_t adjusted_size =
+      mapped_span_.size() + (mapped_span_.data() - aligned_data);
+  span<uint8_t> span_to_unmap = make_span(aligned_data, adjusted_size);
+  mapper->Unmap(span_to_unmap);
 }
 
 ReadOnlySharedMemoryMapping::ReadOnlySharedMemoryMapping() = default;
diff --git a/base/memory/shared_memory_mapping.h b/base/memory/shared_memory_mapping.h
index ca25a01..93c7bd3 100644
--- a/base/memory/shared_memory_mapping.h
+++ b/base/memory/shared_memory_mapping.h
@@ -105,8 +105,8 @@
   ReadOnlySharedMemoryMapping& operator=(
       ReadOnlySharedMemoryMapping&&) noexcept;
 
-  // Returns the base address of the mapping. This is read-only memory. This is
-  // page-aligned. This is nullptr for invalid instances.
+  // Returns the base address of the read-only mapping. Returns nullptr for
+  // invalid instances.
   const void* memory() const { return raw_memory_ptr(); }
 
   // Returns a pointer to a page-aligned const T if the mapping is valid and
@@ -180,8 +180,8 @@
   WritableSharedMemoryMapping& operator=(
       WritableSharedMemoryMapping&&) noexcept;
 
-  // Returns the base address of the mapping. This is writable memory. This is
-  // page-aligned. This is nullptr for invalid instances.
+  // Returns the base address of the writable mapping. Returns nullptr for
+  // invalid instances.
   void* memory() const { return raw_memory_ptr(); }
 
   // Returns a pointer to a page-aligned T if the mapping is valid and large
diff --git a/base/memory/shared_memory_region_unittest.cc b/base/memory/shared_memory_region_unittest.cc
index 0a09b03..3ffecc3 100644
--- a/base/memory/shared_memory_region_unittest.cc
+++ b/base/memory/shared_memory_region_unittest.cc
@@ -166,30 +166,21 @@
     ptr[i] = i;
 
   rw_mapping = WritableSharedMemoryMapping();
-  size_t bytes_offset = kPageSize;
-  typename TypeParam::MappingType mapping =
-      region.MapAt(bytes_offset, kDataSize - bytes_offset);
-  ASSERT_TRUE(mapping.IsValid());
 
-  size_t int_offset = bytes_offset / sizeof(uint32_t);
-  const uint32_t* ptr2 = static_cast<const uint32_t*>(mapping.memory());
-  for (size_t i = int_offset; i < kCount; ++i) {
-    EXPECT_EQ(ptr2[i - int_offset], i);
+  for (size_t bytes_offset = sizeof(uint32_t); bytes_offset <= kPageSize;
+       bytes_offset += sizeof(uint32_t)) {
+    typename TypeParam::MappingType mapping =
+        region.MapAt(bytes_offset, kDataSize - bytes_offset);
+    ASSERT_TRUE(mapping.IsValid());
+
+    size_t int_offset = bytes_offset / sizeof(uint32_t);
+    const uint32_t* ptr2 = static_cast<const uint32_t*>(mapping.memory());
+    for (size_t i = int_offset; i < kCount; ++i) {
+      EXPECT_EQ(ptr2[i - int_offset], i);
+    }
   }
 }
 
-TYPED_TEST(SharedMemoryRegionTest, MapAtNotAlignedOffsetFails) {
-  const size_t kDataSize = SysInfo::VMAllocationGranularity();
-
-  auto [region, rw_mapping] = CreateMappedRegion<TypeParam>(kDataSize);
-  ASSERT_TRUE(region.IsValid());
-  ASSERT_TRUE(rw_mapping.IsValid());
-  size_t offset = kDataSize / 2;
-  typename TypeParam::MappingType mapping =
-      region.MapAt(offset, kDataSize - offset);
-  EXPECT_FALSE(mapping.IsValid());
-}
-
 TYPED_TEST(SharedMemoryRegionTest, MapZeroBytesFails) {
   typename TypeParam::MappingType mapping = this->region_.MapAt(0, 0);
   EXPECT_FALSE(mapping.IsValid());
diff --git a/base/memory/unsafe_shared_memory_region.h b/base/memory/unsafe_shared_memory_region.h
index 1f0458f..fc1927b 100644
--- a/base/memory/unsafe_shared_memory_region.h
+++ b/base/memory/unsafe_shared_memory_region.h
@@ -80,10 +80,16 @@
   // the region can be provided using the optional |mapper| parameter.
   WritableSharedMemoryMapping Map(SharedMemoryMapper* mapper = nullptr) const;
 
-  // Same as above, but maps only |size| bytes of the shared memory region
-  // starting with the given |offset|. |offset| must be aligned to value of
-  // |SysInfo::VMAllocationGranularity()|. Returns an invalid mapping if
-  // requested bytes are out of the region limits.
+  // Similar to `Map()`, but maps only `size` bytes of the shared memory block
+  // at byte `offset`. Returns an invalid mapping if requested bytes are out of
+  // the region limits.
+  //
+  // `offset` does not need to be aligned; if `offset` is not a multiple of
+  // `subtle::PlatformSharedMemoryRegion::kMapMinimumAlignment`, then the
+  // returned mapping will not respect alignment either. Internally, `offset`
+  // and `size` are still first adjusted to respect alignment when mapping in
+  // the shared memory region, but the returned mapping will be "unadjusted" to
+  // match the exact `offset` and `size` requested.
   WritableSharedMemoryMapping MapAt(uint64_t offset,
                                     size_t size,
                                     SharedMemoryMapper* mapper = nullptr) const;
diff --git a/base/memory/writable_shared_memory_region.h b/base/memory/writable_shared_memory_region.h
index 02f56e3..bec7a81 100644
--- a/base/memory/writable_shared_memory_region.h
+++ b/base/memory/writable_shared_memory_region.h
@@ -88,10 +88,16 @@
   // the region can be provided using the optional |mapper| parameter.
   WritableSharedMemoryMapping Map(SharedMemoryMapper* mapper = nullptr) const;
 
-  // Same as above, but maps only |size| bytes of the shared memory block
-  // starting with the given |offset|. |offset| must be aligned to value of
-  // |SysInfo::VMAllocationGranularity()|. Returns an invalid mapping if
-  // requested bytes are out of the region limits.
+  // Similar to `Map()`, but maps only `size` bytes of the shared memory block
+  // at byte `offset`. Returns an invalid mapping if requested bytes are out of
+  // the region limits.
+  //
+  // `offset` does not need to be aligned; if `offset` is not a multiple of
+  // `subtle::PlatformSharedMemoryRegion::kMapMinimumAlignment`, then the
+  // returned mapping will not respect alignment either. Internally, `offset`
+  // and `size` are still first adjusted to respect alignment when mapping in
+  // the shared memory region, but the returned mapping will be "unadjusted" to
+  // match the exact `offset` and `size` requested.
   WritableSharedMemoryMapping MapAt(uint64_t offset,
                                     size_t size,
                                     SharedMemoryMapper* mapper = nullptr) const;
diff --git a/base/metrics/statistics_recorder_unittest.cc b/base/metrics/statistics_recorder_unittest.cc
index f9304d8..d6fa89c 100644
--- a/base/metrics/statistics_recorder_unittest.cc
+++ b/base/metrics/statistics_recorder_unittest.cc
@@ -703,6 +703,14 @@
   EXPECT_EQ(callback_callcount, 1u);
 }
 
+#if BUILDFLAG(USE_RUNTIME_VLOG)
+// The following tests check that
+// StatisticsRecorder::InitLogOnShutdownWhileLocked dumps the histogram graph
+// to vlog if VLOG_IS_ON(1) at runtime. When USE_RUNTIME_VLOG is not set, all
+// vlog levels are determined at build time and default to off. Since we do
+// not want StatisticsRecorder to dump all the time, VLOG in its code stays
+// off. As a result, the following tests would fail.
+
 TEST_P(StatisticsRecorderTest, LogOnShutdownNotInitialized) {
   ResetVLogInitialized();
   logging::SetMinLogLevel(logging::LOG_WARNING);
@@ -732,6 +740,7 @@
   EXPECT_TRUE(VLOG_IS_ON(1));
   EXPECT_TRUE(IsVLogInitialized());
 }
+#endif  // BUILDFLAG(USE_RUNTIME_VLOG)
 
 class TestHistogramProvider : public StatisticsRecorder::HistogramProvider {
  public:
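
Why the guard above matters: when USE_RUNTIME_VLOG is off, the vlog level is
baked in at compile time, so these tests cannot flip vlogging on at runtime.
A simplified, self-contained illustration (hypothetical names; the real
machinery lives in base/logging.h):

    constexpr bool kUseRuntimeVlog = true;  // Stands in for the buildflag.
    constexpr int kBuildTimeVlogLevel = 0;  // 0 == vlogging off.
    int g_runtime_vlog_level = 0;           // Mutable by test setup.

    bool VlogIsOn(int verbose_level) {
      // Under the buildflag the level comes from mutable runtime state, so a
      // test can raise it and observe VLOG output; otherwise the comparison
      // is against a constant and VLOG(1) can never fire.
      return kUseRuntimeVlog ? verbose_level <= g_runtime_vlog_level
                             : verbose_level <= kBuildTimeVlogLevel;
    }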
diff --git a/base/observer_list_perftest.cc b/base/observer_list_perftest.cc
index 9a6ee7e..619ec20 100644
--- a/base/observer_list_perftest.cc
+++ b/base/observer_list_perftest.cc
@@ -37,7 +37,9 @@
   ObserverInterface(const ObserverInterface&) = delete;
   ObserverInterface& operator=(const ObserverInterface&) = delete;
   virtual ~ObserverInterface() = default;
-  virtual void Observe() const { ++g_observer_list_perf_test_counter; }
+  virtual void Observe() const {
+    g_observer_list_perf_test_counter = g_observer_list_perf_test_counter + 1;
+  }
 };
 
 class UnsafeObserver : public ObserverInterface {};
diff --git a/base/process/launch_posix.cc b/base/process/launch_posix.cc
index 57bf0f7..c5eb76e 100644
--- a/base/process/launch_posix.cc
+++ b/base/process/launch_posix.cc
@@ -706,7 +706,7 @@
   alignas(16) char stack_buf[PTHREAD_STACK_MIN];
 #if defined(ARCH_CPU_X86_FAMILY) || defined(ARCH_CPU_ARM_FAMILY) ||   \
     defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_S390_FAMILY) || \
-    defined(ARCH_CPU_PPC64_FAMILY)
+    defined(ARCH_CPU_PPC64_FAMILY) || defined(ARCH_CPU_LOONG_FAMILY)
   // The stack grows downward.
   void* stack = stack_buf + sizeof(stack_buf);
 #else
diff --git a/base/profiler/stack_sampler_impl_unittest.cc b/base/profiler/stack_sampler_impl_unittest.cc
index 4c402bf..276098c 100644
--- a/base/profiler/stack_sampler_impl_unittest.cc
+++ b/base/profiler/stack_sampler_impl_unittest.cc
@@ -70,13 +70,14 @@
                  TimeTicks* timestamp,
                  RegisterContext* thread_context,
                  Delegate* delegate) override {
-    std::memcpy(stack_buffer->buffer(), &fake_stack_[0], fake_stack_.size());
-    *stack_top =
-        reinterpret_cast<uintptr_t>(&fake_stack_[0] + fake_stack_.size());
-    // Set the stack pointer to be consistent with the provided fake stack.
+    std::memcpy(stack_buffer->buffer(), &fake_stack_[0],
+                fake_stack_.size() * sizeof(fake_stack_[0]));
+    *stack_top = reinterpret_cast<uintptr_t>(stack_buffer->buffer() +
+                                             fake_stack_.size());
+    // Set the stack pointer to be consistent with the copied stack.
     *thread_context = {};
     RegisterContextStackPointer(thread_context) =
-        reinterpret_cast<uintptr_t>(&fake_stack_[0]);
+        reinterpret_cast<uintptr_t>(stack_buffer->buffer());
 
     *timestamp = timestamp_;
 
@@ -107,37 +108,23 @@
 // Trivial unwinder implementation for testing.
 class TestUnwinder : public Unwinder {
  public:
-  TestUnwinder(size_t stack_size = 0,
-               std::vector<uintptr_t>* stack_copy = nullptr,
-               // Variable to fill in with the bottom address of the
-               // copied stack. This will be different than
-               // &(*stack_copy)[0] because |stack_copy| is a copy of the
-               // copy so does not share memory with the actual copy.
-               uintptr_t* stack_copy_bottom = nullptr)
-      : stack_size_(stack_size),
-        stack_copy_(stack_copy),
-        stack_copy_bottom_(stack_copy_bottom) {}
+  explicit TestUnwinder(std::vector<uintptr_t>* stack_copy)
+      : stack_copy_(stack_copy) {}
 
   bool CanUnwindFrom(const Frame& current_frame) const override { return true; }
 
   UnwindResult TryUnwind(RegisterContext* thread_context,
                          uintptr_t stack_top,
                          std::vector<Frame>* stack) const override {
-    if (stack_copy_) {
-      auto* bottom = reinterpret_cast<uintptr_t*>(
-          RegisterContextStackPointer(thread_context));
-      auto* top = bottom + stack_size_;
-      *stack_copy_ = std::vector<uintptr_t>(bottom, top);
-    }
-    if (stack_copy_bottom_)
-      *stack_copy_bottom_ = RegisterContextStackPointer(thread_context);
+    auto* bottom = reinterpret_cast<uintptr_t*>(
+        RegisterContextStackPointer(thread_context));
+    *stack_copy_ =
+        std::vector<uintptr_t>(bottom, reinterpret_cast<uintptr_t*>(stack_top));
     return UnwindResult::kCompleted;
   }
 
  private:
-  size_t stack_size_;
   raw_ptr<std::vector<uintptr_t>> stack_copy_;
-  raw_ptr<uintptr_t> stack_copy_bottom_;
 };
 
 // Records invocations of calls to OnStackCapture()/UpdateModules().
@@ -279,8 +266,7 @@
   std::vector<uintptr_t> stack_copy;
   StackSamplerImpl stack_sampler_impl(
       std::make_unique<TestStackCopier>(stack),
-      MakeUnwindersFactory(
-          std::make_unique<TestUnwinder>(stack.size(), &stack_copy)),
+      MakeUnwindersFactory(std::make_unique<TestUnwinder>(&stack_copy)),
       &module_cache);
 
   stack_sampler_impl.Initialize();
@@ -302,8 +288,7 @@
   TimeTicks timestamp = TimeTicks::UnixEpoch();
   StackSamplerImpl stack_sampler_impl(
       std::make_unique<TestStackCopier>(stack, timestamp),
-      MakeUnwindersFactory(
-          std::make_unique<TestUnwinder>(stack.size(), &stack_copy)),
+      MakeUnwindersFactory(std::make_unique<TestUnwinder>(&stack_copy)),
       &module_cache);
 
   stack_sampler_impl.Initialize();
diff --git a/base/security_unittest.cc b/base/security_unittest.cc
index 776bca5..fa0f8b9 100644
--- a/base/security_unittest.cc
+++ b/base/security_unittest.cc
@@ -36,10 +36,10 @@
 // We also use it so that the compiler doesn't discard certain return values
 // as something we don't need (see the comment with calloc below).
 template <typename Type>
-NOINLINE Type HideValueFromCompiler(volatile Type value) {
+NOINLINE Type HideValueFromCompiler(Type value) {
 #if defined(__GNUC__)
   // In a GCC compatible compiler (GCC or Clang), make this compiler barrier
-  // more robust than merely using "volatile".
+  // more robust.
   __asm__ volatile ("" : "+r" (value));
 #endif  // __GNUC__
   return value;
diff --git a/base/sys_byteorder.h b/base/sys_byteorder.h
index 5516be7..df3b1e5 100644
--- a/base/sys_byteorder.h
+++ b/base/sys_byteorder.h
@@ -19,10 +19,20 @@
 #include <stdlib.h>
 #endif
 
+#if defined(COMPILER_MSVC) && !defined(__clang__)
+// TODO(pkasting): See
+// https://developercommunity.visualstudio.com/t/Mark-some-built-in-functions-as-constexp/362558
+// https://developercommunity.visualstudio.com/t/constexpr-byte-swapping-optimization/983963
+#define BASE_BYTESWAPS_CONSTEXPR
+#else
+#define BASE_BYTESWAPS_CONSTEXPR constexpr
+#endif
+
 namespace base {
 
 // Returns a value with all bytes in |x| swapped, i.e. reverses the endianness.
-inline uint16_t ByteSwap(uint16_t x) {
+// TODO(pkasting): Once C++23 is available, replace with std::byteswap.
+inline BASE_BYTESWAPS_CONSTEXPR uint16_t ByteSwap(uint16_t x) {
 #if defined(COMPILER_MSVC) && !defined(__clang__)
   return _byteswap_ushort(x);
 #else
@@ -30,7 +40,7 @@
 #endif
 }
 
-inline constexpr uint32_t ByteSwap(uint32_t x) {
+inline BASE_BYTESWAPS_CONSTEXPR uint32_t ByteSwap(uint32_t x) {
 #if defined(COMPILER_MSVC) && !defined(__clang__)
   return _byteswap_ulong(x);
 #else
@@ -38,7 +48,7 @@
 #endif
 }
 
-inline constexpr uint64_t ByteSwap(uint64_t x) {
+inline BASE_BYTESWAPS_CONSTEXPR uint64_t ByteSwap(uint64_t x) {
   // Per build/build_config.h, clang masquerades as MSVC on Windows. If we are
   // actually using clang, we can rely on the builtin.
   //
@@ -53,7 +63,7 @@
 #endif
 }
 
-inline constexpr uintptr_t ByteSwapUintPtrT(uintptr_t x) {
+inline BASE_BYTESWAPS_CONSTEXPR uintptr_t ByteSwapUintPtrT(uintptr_t x) {
   // We do it this way because some build configurations are ILP32 even when
   // defined(ARCH_CPU_64_BITS). Unfortunately, we can't use sizeof in #ifs. But,
   // because these conditionals are constexprs, the irrelevant branches will
@@ -68,21 +78,21 @@
 
 // Converts the bytes in |x| from host order (endianness) to little endian, and
 // returns the result.
-inline uint16_t ByteSwapToLE16(uint16_t x) {
+inline BASE_BYTESWAPS_CONSTEXPR uint16_t ByteSwapToLE16(uint16_t x) {
 #if defined(ARCH_CPU_LITTLE_ENDIAN)
   return x;
 #else
   return ByteSwap(x);
 #endif
 }
-inline uint32_t ByteSwapToLE32(uint32_t x) {
+inline BASE_BYTESWAPS_CONSTEXPR uint32_t ByteSwapToLE32(uint32_t x) {
 #if defined(ARCH_CPU_LITTLE_ENDIAN)
   return x;
 #else
   return ByteSwap(x);
 #endif
 }
-inline uint64_t ByteSwapToLE64(uint64_t x) {
+inline BASE_BYTESWAPS_CONSTEXPR uint64_t ByteSwapToLE64(uint64_t x) {
 #if defined(ARCH_CPU_LITTLE_ENDIAN)
   return x;
 #else
@@ -92,21 +102,21 @@
 
 // Converts the bytes in |x| from network to host order (endianness), and
 // returns the result.
-inline uint16_t NetToHost16(uint16_t x) {
+inline BASE_BYTESWAPS_CONSTEXPR uint16_t NetToHost16(uint16_t x) {
 #if defined(ARCH_CPU_LITTLE_ENDIAN)
   return ByteSwap(x);
 #else
   return x;
 #endif
 }
-inline uint32_t NetToHost32(uint32_t x) {
+inline BASE_BYTESWAPS_CONSTEXPR uint32_t NetToHost32(uint32_t x) {
 #if defined(ARCH_CPU_LITTLE_ENDIAN)
   return ByteSwap(x);
 #else
   return x;
 #endif
 }
-inline uint64_t NetToHost64(uint64_t x) {
+inline BASE_BYTESWAPS_CONSTEXPR uint64_t NetToHost64(uint64_t x) {
 #if defined(ARCH_CPU_LITTLE_ENDIAN)
   return ByteSwap(x);
 #else
@@ -116,21 +126,21 @@
 
 // Converts the bytes in |x| from host to network order (endianness), and
 // returns the result.
-inline uint16_t HostToNet16(uint16_t x) {
+inline BASE_BYTESWAPS_CONSTEXPR uint16_t HostToNet16(uint16_t x) {
 #if defined(ARCH_CPU_LITTLE_ENDIAN)
   return ByteSwap(x);
 #else
   return x;
 #endif
 }
-inline uint32_t HostToNet32(uint32_t x) {
+inline BASE_BYTESWAPS_CONSTEXPR uint32_t HostToNet32(uint32_t x) {
 #if defined(ARCH_CPU_LITTLE_ENDIAN)
   return ByteSwap(x);
 #else
   return x;
 #endif
 }
-inline uint64_t HostToNet64(uint64_t x) {
+inline BASE_BYTESWAPS_CONSTEXPR uint64_t HostToNet64(uint64_t x) {
 #if defined(ARCH_CPU_LITTLE_ENDIAN)
   return ByteSwap(x);
 #else
@@ -140,4 +150,6 @@
 
 }  // namespace base
 
+#undef BASE_BYTESWAPS_CONSTEXPR
+
 #endif  // BASE_SYS_BYTEORDER_H_
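
As a sanity check on the swap semantics these wrappers delegate to, here is a
freestanding constexpr equivalent of the 32-bit swap (illustrative only; the
real implementations call _byteswap_ulong or __builtin_bswap32):

    #include <cstdint>

    constexpr uint32_t ByteSwap32(uint32_t x) {
      return ((x & 0x000000FFu) << 24) | ((x & 0x0000FF00u) << 8) |
             ((x & 0x00FF0000u) >> 8) | ((x & 0xFF000000u) >> 24);
    }
    // This static_assert compiles only because the function is constexpr,
    // which is the property BASE_BYTESWAPS_CONSTEXPR preserves everywhere
    // except MSVC.
    static_assert(ByteSwap32(0x12345678u) == 0x78563412u,
                  "byte order is fully reversed");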
diff --git a/base/test/android/javatests/src/org/chromium/base/time/TestTimer.java b/base/test/android/javatests/src/org/chromium/base/time/TestTimer.java
new file mode 100644
index 0000000..789596e
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/time/TestTimer.java
@@ -0,0 +1,48 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.time;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Test implementation of {@link Timer} whose elapsed time can be advanced manually.
+ */
+public class TestTimer implements Timer {
+    private final BaseTimerImpl mBaseTimer;
+    private long mTime;
+
+    public TestTimer(long startTime) {
+        mTime = startTime;
+        mBaseTimer = new BaseTimerImpl(this::getTime, TimeUnit.NANOSECONDS);
+    }
+
+    public void advanceBy(TimeUnit timeUnit, long increment) {
+        mTime += TimeUnit.NANOSECONDS.convert(increment, timeUnit);
+    }
+
+    private long getTime() {
+        return mTime;
+    }
+
+    @Override
+    public void start() {
+        mBaseTimer.start();
+    }
+
+    @Override
+    public void stop() {
+        mBaseTimer.stop();
+    }
+
+    @Override
+    public long getElapsedTime(TimeUnit timeUnit) {
+        return mBaseTimer.getElapsedTime(timeUnit);
+    }
+
+    @Override
+    public boolean isRunning() {
+        return mBaseTimer.isRunning();
+    }
+}
diff --git a/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedRunnerDelegateCommonTest.java b/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedRunnerDelegateCommonTest.java
index 6d854c5..f4c71fa 100644
--- a/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedRunnerDelegateCommonTest.java
+++ b/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedRunnerDelegateCommonTest.java
@@ -39,7 +39,7 @@
         public BadTestClassWithTwoArgumentConstructor(int a, int b) {}
     }
 
-    static abstract class BadTestClassAbstract {
+    abstract static class BadTestClassAbstract {
         public BadTestClassAbstract() {}
     }
 
diff --git a/base/test/repeating_test_future_unittest.cc b/base/test/repeating_test_future_unittest.cc
index 3f00198..b78f2b8 100644
--- a/base/test/repeating_test_future_unittest.cc
+++ b/base/test/repeating_test_future_unittest.cc
@@ -18,6 +18,7 @@
 struct MoveOnlyValue {
  public:
   MoveOnlyValue() = default;
+  MoveOnlyValue(std::string data) : data(std::move(data)) {}
   MoveOnlyValue(const MoveOnlyValue&) = delete;
   auto& operator=(const MoveOnlyValue&) = delete;
   MoveOnlyValue(MoveOnlyValue&&) = default;
@@ -153,7 +154,7 @@
   RepeatingTestFuture<MoveOnlyValue> future;
 
   RunLater(BindLambdaForTesting(
-      [&future]() { future.AddValue({.data = "move only value"}); }));
+      [&future]() { future.AddValue(MoveOnlyValue("move only value")); }));
 
   MoveOnlyValue result = future.Take();
 
diff --git a/base/test/test_future_unittest.cc b/base/test/test_future_unittest.cc
index 4142fef..7be8245 100644
--- a/base/test/test_future_unittest.cc
+++ b/base/test/test_future_unittest.cc
@@ -30,6 +30,7 @@
 struct MoveOnlyValue {
  public:
   MoveOnlyValue() = default;
+  MoveOnlyValue(int data) : data(data) {}
   MoveOnlyValue(const MoveOnlyValue&) = delete;
   auto& operator=(const MoveOnlyValue&) = delete;
   MoveOnlyValue(MoveOnlyValue&&) = default;
@@ -129,7 +130,7 @@
   const int expected_data = 99;
   TestFuture<MoveOnlyValue> future;
 
-  RunLater(base::BindOnce(future.GetCallback(), MoveOnlyValue{expected_data}));
+  RunLater(base::BindOnce(future.GetCallback(), MoveOnlyValue(expected_data)));
 
   MoveOnlyValue actual_value = future.Take();
 
diff --git a/base/threading/counter_perftest.cc b/base/threading/counter_perftest.cc
index 16744b3..b3209fc 100644
--- a/base/threading/counter_perftest.cc
+++ b/base/threading/counter_perftest.cc
@@ -38,7 +38,7 @@
 class Uint64_NoLock {
  public:
   Uint64_NoLock() = default;
-  void Increment() { ++counter_; }
+  void Increment() { counter_ = counter_ + 1; }
   uint64_t value() const { return counter_; }
 
  private:
diff --git a/base/threading/thread_local_storage_perftest.cc b/base/threading/thread_local_storage_perftest.cc
index 7436029..ac56458 100644
--- a/base/threading/thread_local_storage_perftest.cc
+++ b/base/threading/thread_local_storage_perftest.cc
@@ -119,7 +119,7 @@
                   base::BindLambdaForTesting([&]() {
                     volatile intptr_t total = 0;
                     for (size_t i = 0; i < num_operation; ++i)
-                      total += read();
+                      total = total + read();
                   }),
                   num_operation, num_threads);
 
diff --git a/base/threading/thread_restrictions.h b/base/threading/thread_restrictions.h
index db37af5..114677b 100644
--- a/base/threading/thread_restrictions.h
+++ b/base/threading/thread_restrictions.h
@@ -278,6 +278,9 @@
 namespace rlz_lib {
 class FinancialPing;
 }
+namespace storage {
+class ObfuscatedFileUtil;
+}
 namespace syncer {
 class GetLocalChangesRequest;
 class HttpBridge;
@@ -592,6 +595,7 @@
   friend class rlz_lib::FinancialPing;
   friend class shell_integration_linux::
       LaunchXdgUtilityScopedAllowBaseSyncPrimitives;
+  friend class storage::ObfuscatedFileUtil;
   friend class syncer::HttpBridge;
   friend class syncer::GetLocalChangesRequest;
   friend class webrtc::DesktopConfigurationMonitor;
diff --git a/base/time/time.h b/base/time/time.h
index e31931b..4cfa619 100644
--- a/base/time/time.h
+++ b/base/time/time.h
@@ -442,13 +442,8 @@
   // the other subclasses can vary each time the application is restarted.
   constexpr TimeDelta since_origin() const;
 
-  constexpr TimeClass& operator=(TimeClass other) {
-    us_ = other.us_;
-    return *(static_cast<TimeClass*>(this));
-  }
-
   // Compute the difference between two times.
-  constexpr TimeDelta operator-(TimeClass other) const;
+  constexpr TimeDelta operator-(const TimeBase<TimeClass>& other) const;
 
   // Return a new time modified by some delta.
   constexpr TimeClass operator+(TimeDelta delta) const;
@@ -463,12 +458,24 @@
   }
 
   // Comparison operators
-  constexpr bool operator==(TimeClass other) const { return us_ == other.us_; }
-  constexpr bool operator!=(TimeClass other) const { return us_ != other.us_; }
-  constexpr bool operator<(TimeClass other) const { return us_ < other.us_; }
-  constexpr bool operator<=(TimeClass other) const { return us_ <= other.us_; }
-  constexpr bool operator>(TimeClass other) const { return us_ > other.us_; }
-  constexpr bool operator>=(TimeClass other) const { return us_ >= other.us_; }
+  constexpr bool operator==(const TimeBase<TimeClass>& other) const {
+    return us_ == other.us_;
+  }
+  constexpr bool operator!=(const TimeBase<TimeClass>& other) const {
+    return us_ != other.us_;
+  }
+  constexpr bool operator<(const TimeBase<TimeClass>& other) const {
+    return us_ < other.us_;
+  }
+  constexpr bool operator<=(const TimeBase<TimeClass>& other) const {
+    return us_ <= other.us_;
+  }
+  constexpr bool operator>(const TimeBase<TimeClass>& other) const {
+    return us_ > other.us_;
+  }
+  constexpr bool operator>=(const TimeBase<TimeClass>& other) const {
+    return us_ >= other.us_;
+  }
 
  protected:
   constexpr explicit TimeBase(int64_t us) : us_(us) {}
@@ -947,7 +954,8 @@
 }
 
 template <class TimeClass>
-constexpr TimeDelta TimeBase<TimeClass>::operator-(TimeClass other) const {
+constexpr TimeDelta TimeBase<TimeClass>::operator-(
+    const TimeBase<TimeClass>& other) const {
   return Microseconds(us_ - other.us_);
 }
 
diff --git a/base/trace_event/builtin_categories.h b/base/trace_event/builtin_categories.h
index 7528d88..a1a9341 100644
--- a/base/trace_event/builtin_categories.h
+++ b/base/trace_event/builtin_categories.h
@@ -87,6 +87,7 @@
   X("explore_sites")                                                     \
   X("FileSystem")                                                        \
   X("file_system_provider")                                              \
+  X("fledge")                                                            \
   X("fonts")                                                             \
   X("GAMEPAD")                                                           \
   X("gpu")                                                               \
diff --git a/base/trace_event/memory_infra_background_allowlist.cc b/base/trace_event/memory_infra_background_allowlist.cc
index ca4d4a2..43b482d 100644
--- a/base/trace_event/memory_infra_background_allowlist.cc
+++ b/base/trace_event/memory_infra_background_allowlist.cc
@@ -277,6 +277,7 @@
     "sync/0x?/model_type/DICTIONARY",
     "sync/0x?/model_type/EXTENSION",
     "sync/0x?/model_type/EXTENSION_SETTING",
+    "sync/0x?/model_type/HISTORY",
     "sync/0x?/model_type/HISTORY_DELETE_DIRECTIVE",
     "sync/0x?/model_type/MANAGED_USER",
     "sync/0x?/model_type/MANAGED_USER_SETTING",
diff --git a/base/trace_event/process_memory_dump.cc b/base/trace_event/process_memory_dump.cc
index 07f805a..84fda31 100644
--- a/base/trace_event/process_memory_dump.cc
+++ b/base/trace_event/process_memory_dump.cc
@@ -9,6 +9,7 @@
 #include <memory>
 #include <vector>
 
+#include "base/bits.h"
 #include "base/logging.h"
 #include "base/memory/page_size.h"
 #include "base/memory/ptr_util.h"
@@ -176,12 +177,25 @@
 absl::optional<size_t> ProcessMemoryDump::CountResidentBytesInSharedMemory(
     void* start_address,
     size_t mapped_size) {
+  // `MapAt()` performs some internal arithmetic to allow non-page-aligned
+  // offsets, but the memory accounting still expects to work with page-aligned
+  // allocations.
+  //
+  // TODO(dcheng): one peculiarity here is that the shmem implementation uses
+  // `base::SysInfo::VMAllocationGranularity()` while this file uses
+  // `GetSystemPageSize()`. It'd be nice not to have two names for the same
+  // thing...
+  uint8_t* aligned_start_address = base::bits::AlignDown(
+      static_cast<uint8_t*>(start_address), GetSystemPageSize());
+  size_t adjusted_size = mapped_size + (static_cast<uint8_t*>(start_address) -
+                                        aligned_start_address);
+
 #if BUILDFLAG(IS_MAC)
   // On macOS, use mach_vm_region instead of mincore for performance
   // (crbug.com/742042).
   mach_vm_size_t dummy_size = 0;
   mach_vm_address_t address =
-      reinterpret_cast<mach_vm_address_t>(start_address);
+      reinterpret_cast<mach_vm_address_t>(aligned_start_address);
   vm_region_top_info_data_t info;
   MachVMRegionResult result =
       GetTopInfo(mach_task_self(), &dummy_size, &address, &info);
@@ -223,9 +237,9 @@
   // Sanity check in case the mapped size is less than the total size of the
   // region.
   size_t pages_to_fault =
-      std::min(resident_pages, (mapped_size + PAGE_SIZE - 1) / PAGE_SIZE);
+      std::min(resident_pages, (adjusted_size + PAGE_SIZE - 1) / PAGE_SIZE);
 
-  volatile char* base_address = static_cast<char*>(start_address);
+  volatile uint8_t* base_address = const_cast<uint8_t*>(aligned_start_address);
   for (size_t i = 0; i < pages_to_fault; ++i) {
     // Reading from a volatile is a visible side-effect for the purposes of
     // optimization. This guarantees that the optimizer will not kill this line.
@@ -234,7 +248,7 @@
 
   return resident_pages * PAGE_SIZE;
 #else
-  return CountResidentBytes(start_address, mapped_size);
+  return CountResidentBytes(aligned_start_address, adjusted_size);
 #endif  // BUILDFLAG(IS_MAC)
 }
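
The accounting fix above is plain pointer arithmetic. A worked sketch with a
hypothetical helper (the real code uses base::bits::AlignDown), assuming a
power-of-two page size:

    #include <cstddef>
    #include <cstdint>

    void AlignForAccounting(uintptr_t addr, size_t mapped_size,
                            size_t page_size, uintptr_t* aligned_addr,
                            size_t* adjusted_size) {
      // Round down to the page containing |addr|, then grow the size by the
      // bytes that rounding skipped, so the counted range still covers the
      // whole mapping.
      *aligned_addr = addr & ~(page_size - 1);
      *adjusted_size = mapped_size + (addr - *aligned_addr);
    }

For a mapping that starts half a page past a page boundary and spans five
pages, the counted range becomes 5.5 pages, which page-granular counting
rounds up to six resident pages, exactly the kDirtyMemorySize + page_size the
new unit test below expects.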
 
diff --git a/base/trace_event/process_memory_dump_unittest.cc b/base/trace_event/process_memory_dump_unittest.cc
index b383f0e..d045e01 100644
--- a/base/trace_event/process_memory_dump_unittest.cc
+++ b/base/trace_event/process_memory_dump_unittest.cc
@@ -529,6 +529,21 @@
     ASSERT_EQ(res1.value(), kDirtyMemorySize);
   }
 
+  // Allocate a shared memory segment but map at a non-page-aligned offset.
+  {
+    const size_t kDirtyMemorySize = 5 * page_size;
+    auto region =
+        base::WritableSharedMemoryRegion::Create(kDirtyMemorySize + page_size);
+    base::WritableSharedMemoryMapping mapping =
+        region.MapAt(page_size / 2, kDirtyMemorySize);
+    memset(mapping.memory(), 0, kDirtyMemorySize);
+    absl::optional<size_t> res1 =
+        ProcessMemoryDump::CountResidentBytesInSharedMemory(
+            mapping.memory(), mapping.mapped_size());
+    ASSERT_TRUE(res1.has_value());
+    ASSERT_EQ(res1.value(), kDirtyMemorySize + page_size);
+  }
+
   // Allocate a large memory segment (> 8Mib).
   {
     const size_t kVeryLargeMemorySize = 15 * 1024 * 1024;
diff --git a/base/tracing/protos/chrome_track_event.proto b/base/tracing/protos/chrome_track_event.proto
index a79e122..ccc9d07 100644
--- a/base/tracing/protos/chrome_track_event.proto
+++ b/base/tracing/protos/chrome_track_event.proto
@@ -420,6 +420,9 @@
   // Layout update breakdown
   optional uint64 layout_update_us = 4;
 
+  // Accessibility update breakdown
+  optional uint64 accessibility_update_us = 12;
+
   // Prepaint breakdown
   optional uint64 prepaint_us = 5;
 
@@ -638,7 +641,10 @@
     TASK_TYPE_WORKER_THREAD_TASK_QUEUE_V8 = 47;
     TASK_TYPE_WORKER_THREAD_TASK_QUEUE_COMPOSITOR = 48;
     TASK_TYPE_COMPOSITOR_THREAD_TASK_QUEUE_INPUT = 49;
+
+    // TODO(crbug.com/860545): Obsolete. Remove.
     TASK_TYPE_NETWORKING_WITH_URL_LOADER_ANNOTATION = 50;
+
     TASK_TYPE_WORKER_ANIMATION = 51;
 
     TASK_TYPE_INTERNAL_TRANSLATION = 55;
diff --git a/components/policy/core/common/default_chrome_apps_migrator.cc b/components/policy/core/common/default_chrome_apps_migrator.cc
index ecee508..7dc65b9 100644
--- a/components/policy/core/common/default_chrome_apps_migrator.cc
+++ b/components/policy/core/common/default_chrome_apps_migrator.cc
@@ -35,24 +35,23 @@
   std::vector<std::string> chrome_app_ids =
       RemoveChromeAppsFromExtensionForcelist(policies);
 
-  // If no chrome apps need to be replaced, we have nothing to do.
+  // If no Chrome Apps need to be replaced, we have nothing to do.
   if (chrome_app_ids.empty())
     return;
 
-  EnsurePolicyValueIsList(policies, key::kExtensionInstallBlocklist);
-  base::Value* blocklist_value = policies->GetMutableValue(
-      key::kExtensionInstallBlocklist, base::Value::Type::LIST);
-  for (const std::string& chrome_app_id : chrome_app_ids) {
-    blocklist_value->Append(chrome_app_id);
-  }
-
   EnsurePolicyValueIsList(policies, key::kWebAppInstallForceList);
-  base::Value* web_app_policy_value = policies->GetMutableValue(
-      key::kWebAppInstallForceList, base::Value::Type::LIST);
+  base::Value::List& web_app_policy_value =
+      policies
+          ->GetMutableValue(key::kWebAppInstallForceList,
+                            base::Value::Type::LIST)
+          ->GetList();
   for (const std::string& chrome_app_id : chrome_app_ids) {
-    base::Value web_app(base::Value::Type::DICTIONARY);
-    web_app.SetStringKey("url", chrome_app_to_web_app_.at(chrome_app_id));
-    web_app_policy_value->Append(std::move(web_app));
+    base::Value::Dict web_app;
+    web_app.Set("url", chrome_app_to_web_app_.at(chrome_app_id));
+    base::Value::List uninstall_list;
+    uninstall_list.Append(chrome_app_id);
+    web_app.Set("uninstall_and_replace", std::move(uninstall_list));
+    web_app_policy_value.Append(std::move(web_app));
   }
 
   MigratePinningPolicy(policies);
@@ -73,7 +72,7 @@
 
   std::vector<std::string> chrome_app_ids;
   base::Value new_forcelist_value(base::Value::Type::LIST);
-  for (const auto& list_entry : forcelist_value->GetListDeprecated()) {
+  for (const auto& list_entry : forcelist_value->GetList()) {
     if (!list_entry.is_string()) {
       new_forcelist_value.Append(list_entry.Clone());
       continue;
@@ -120,7 +119,7 @@
       key::kPinnedLauncherApps, base::Value::Type::LIST);
   if (!pinned_apps_value)
     return;
-  for (auto& list_entry : pinned_apps_value->GetListDeprecated()) {
+  for (auto& list_entry : pinned_apps_value->GetList()) {
     if (!list_entry.is_string())
       continue;
     const std::string pinned_app = list_entry.GetString();
diff --git a/components/policy/core/common/default_chrome_apps_migrator.h b/components/policy/core/common/default_chrome_apps_migrator.h
index 6220db4..4a72d08 100644
--- a/components/policy/core/common/default_chrome_apps_migrator.h
+++ b/components/policy/core/common/default_chrome_apps_migrator.h
@@ -15,10 +15,9 @@
 
 // This class is used as a temporary solution to handle force install policies
 // for deprecated Chrome apps. It replaces ExtensionInstallForcelist policy
-// for Chrome app with ExtensionInstallBlocklist for Chrome app and
-// WebAppInstallForceList policy for the corresponding Web App. To preserve the
-// pinning state, PinnedLauncherApps policy for Chrome app is replaced with the
-// one for Web App.
+// for a Chrome App with the WebAppInstallForceList policy for the
+// corresponding Web App. To preserve the pinning state, the
+// PinnedLauncherApps policy for the Chrome App is replaced with the one for
+// the Web App.
 // This code will be removed when the following steps are done:
 // 1. Build discoverability for default apps in Admin panel (Dpanel).
 // 2. Build new control logic for blocking installation (but not blocking use
@@ -39,12 +38,12 @@
 
   ~DefaultChromeAppsMigrator();
 
-  // Replaces ExtensionInstallForcelist policy for Chrome apps listed in
+  // Replaces ExtensionInstallForcelist policy for Chrome Apps listed in
   // `chrome_app_to_web_app_`.
   void Migrate(PolicyMap* policies) const;
 
  private:
-  // Removes chrome apps listed in `chrome_app_to_web_app_` from
+  // Removes Chrome Apps listed in `chrome_app_to_web_app_` from
   // ExtensionInstallForcelist policy. Returns ids of removed apps.
   std::vector<std::string> RemoveChromeAppsFromExtensionForcelist(
       PolicyMap* policies) const;
@@ -54,12 +53,12 @@
   void EnsurePolicyValueIsList(PolicyMap* policies,
                                const std::string& policy_name) const;
 
-  // Replaces policy to pin Chrome app from `chrome_app_to_web_app_` with policy
+  // Replaces policy to pin Chrome App from `chrome_app_to_web_app_` with policy
   // to pin corresponding Web App. It only changes PinnedLauncherApps policy,
   // which specifies pinned apps on Chrome OS.
   void MigratePinningPolicy(PolicyMap* policies) const;
 
-  // Maps from ids of Chrome apps that need to be replaced to Web App urls.
+  // Maps from IDs of Chrome Apps that need to be replaced to Web App URLs.
   std::map<std::string, std::string> chrome_app_to_web_app_;
 };
 
diff --git a/components/policy/core/common/default_chrome_apps_migrator_unittest.cc b/components/policy/core/common/default_chrome_apps_migrator_unittest.cc
index d62ca8c..3efc647 100644
--- a/components/policy/core/common/default_chrome_apps_migrator_unittest.cc
+++ b/components/policy/core/common/default_chrome_apps_migrator_unittest.cc
@@ -19,6 +19,20 @@
 constexpr char kAppId2[] = "bbbb";
 constexpr char kWebAppUrl1[] = "https://gmail.com";
 constexpr char kWebAppUrl2[] = "https://google.com";
+constexpr char kUninstallAndReplaceKey[] = "uninstall_and_replace";
+
+// Creates a Dict object for the WebAppInstallForceList policy from Web App
+// parameters.
+base::Value::Dict CreateWebAppDict(std::string url,
+                                   std::string replaced_extension_id) {
+  base::Value::Dict web_app;
+  web_app.Set("url", url);
+  base::Value::List uninstall_list;
+  uninstall_list.Append(replaced_extension_id);
+  web_app.Set(kUninstallAndReplaceKey, std::move(uninstall_list));
+  return web_app;
+}
+
 }  // namespace
 
 class DefaultChromeAppsMigratorTest : public testing::Test {
@@ -32,25 +46,19 @@
                     POLICY_SCOPE_USER, POLICY_SOURCE_CLOUD,
                     base::Value(base::Value::Type::LIST), nullptr);
 
-    base::Value blocklist_value(base::Value::Type::LIST);
-    blocklist_value.Append("eeee");
-    policy_map_.Set(key::kExtensionInstallBlocklist, POLICY_LEVEL_MANDATORY,
-                    POLICY_SCOPE_USER, POLICY_SOURCE_CLOUD,
-                    std::move(blocklist_value), nullptr);
-
-    base::Value web_app_value(base::Value::Type::LIST);
-    base::Value maps_web_app(base::Value::Type::DICTIONARY);
-    maps_web_app.SetStringKey("url", "https://google.com/maps");
-    web_app_value.Append(std::move(maps_web_app));
+    base::Value::List web_app_list;
+    base::Value::Dict maps_web_app;
+    maps_web_app.Set("url", "https://google.com/maps");
+    web_app_list.Append(std::move(maps_web_app));
     policy_map_.Set(key::kWebAppInstallForceList, POLICY_LEVEL_MANDATORY,
                     POLICY_SCOPE_USER, POLICY_SOURCE_CLOUD,
-                    std::move(web_app_value), nullptr);
+                    base::Value(std::move(web_app_list)), nullptr);
 
-    base::Value pinned_apps_value(base::Value::Type::LIST);
-    pinned_apps_value.Append("ffff");
+    base::Value::List pinned_apps_list;
+    pinned_apps_list.Append("ffff");
     policy_map_.Set(key::kPinnedLauncherApps, POLICY_LEVEL_MANDATORY,
                     POLICY_SCOPE_USER, POLICY_SOURCE_CLOUD,
-                    std::move(pinned_apps_value), nullptr);
+                    base::Value(std::move(pinned_apps_list)), nullptr);
   }
 
  protected:
@@ -62,7 +70,7 @@
   PolicyMap expected_map(policy_map_.Clone());
   migrator_.Migrate(&policy_map_);
 
-  // No chrome apps in ExtensionInstallForcelist policy, policy map should not
+  // No Chrome Apps in ExtensionInstallForcelist policy, policy map should not
   // change.
   EXPECT_TRUE(policy_map_.Equals(expected_map));
 }
@@ -70,22 +78,20 @@
 TEST_F(DefaultChromeAppsMigratorTest, ChromeAppWithUpdateUrl) {
   PolicyMap expected_map(policy_map_.Clone());
 
-  // Add force installed chrome app that should be migrated.
+  // Add force installed Chrome App that should be migrated.
   base::Value* forcelist_value = policy_map_.GetMutableValue(
       key::kExtensionInstallForcelist, base::Value::Type::LIST);
   forcelist_value->Append(std::string(kAppId1) + ";https://example.com");
 
-  // Chrome app should be blocked after migration.
-  base::Value* blocklist_value = expected_map.GetMutableValue(
-      key::kExtensionInstallBlocklist, base::Value::Type::LIST);
-  blocklist_value->Append(std::string(kAppId1));
-
   // Corresponding web app should be force installed after migration.
-  base::Value first_app(base::Value::Type::DICTIONARY);
-  first_app.SetStringKey("url", kWebAppUrl1);
-  base::Value* web_app_value = expected_map.GetMutableValue(
-      key::kWebAppInstallForceList, base::Value::Type::LIST);
-  web_app_value->Append(std::move(first_app));
+  base::Value::Dict web_app = CreateWebAppDict(kWebAppUrl1, kAppId1);
+
+  base::Value::List& web_app_list =
+      expected_map
+          .GetMutableValue(key::kWebAppInstallForceList,
+                           base::Value::Type::LIST)
+          ->GetList();
+  web_app_list.Append(std::move(web_app));
 
   migrator_.Migrate(&policy_map_);
 
@@ -95,7 +101,7 @@
 TEST_F(DefaultChromeAppsMigratorTest, ChromeAppsAndExtensions) {
   PolicyMap expected_map(policy_map_.Clone());
 
-  // Add two force installed chrome apps and two extensions.
+  // Add two force installed Chrome Apps and two extensions.
   base::Value* forcelist_value = policy_map_.GetMutableValue(
       key::kExtensionInstallForcelist, base::Value::Type::LIST);
   forcelist_value->Append("extension1");
@@ -109,57 +115,16 @@
   expected_forcelist->Append("extension1");
   expected_forcelist->Append("extension2");
 
-  // Chrome apps should be blocked after migration.
-  base::Value* blocklist_value = expected_map.GetMutableValue(
-      key::kExtensionInstallBlocklist, base::Value::Type::LIST);
-  blocklist_value->Append(kAppId1);
-  blocklist_value->Append(kAppId2);
-
   // Corresponding web apps should be force installed after migration.
-  base::Value first_app(base::Value::Type::DICTIONARY);
-  first_app.SetStringKey("url", kWebAppUrl1);
-  base::Value second_app(base::Value::Type::DICTIONARY);
-  second_app.SetStringKey("url", kWebAppUrl2);
-  base::Value* web_app_value = expected_map.GetMutableValue(
-      key::kWebAppInstallForceList, base::Value::Type::LIST);
-  web_app_value->Append(std::move(first_app));
-  web_app_value->Append(std::move(second_app));
-
-  migrator_.Migrate(&policy_map_);
-
-  EXPECT_TRUE(policy_map_.Equals(expected_map));
-}
-
-// Tests the case when ExtensionInstallBlocklist is initially set to wrong type
-// and we have to append chrome app id to it. The value should be overridden and
-// error message should be added.
-TEST_F(DefaultChromeAppsMigratorTest, ExtensionBlocklistPolicyWrongType) {
-  PolicyMap expected_map(policy_map_.Clone());
-
-  // Add force installed chrome app.
-  base::Value* forcelist_value = policy_map_.GetMutableValue(
-      key::kExtensionInstallForcelist, base::Value::Type::LIST);
-  forcelist_value->Append(kAppId1);
-
-  // Set ExtensionInstallBlocklist to non-list type.
-  base::Value blocklist_value(base::Value::Type::DICTIONARY);
-  policy_map_.GetMutable(key::kExtensionInstallBlocklist)
-      ->set_value(std::move(blocklist_value));
-
-  base::Value blocklist_expected_value(base::Value::Type::LIST);
-  blocklist_expected_value.Append(kAppId1);
-  PolicyMap::Entry* blocklist_expected_entry =
-      expected_map.GetMutable(key::kExtensionInstallBlocklist);
-  blocklist_expected_entry->set_value(std::move(blocklist_expected_value));
-  blocklist_expected_entry->AddMessage(PolicyMap::MessageType::kError,
-                                       IDS_POLICY_TYPE_ERROR);
-
-  // Corresponding web app should be force installed after migration.
-  base::Value first_app(base::Value::Type::DICTIONARY);
-  first_app.SetStringKey("url", kWebAppUrl1);
-  base::Value* web_app_value = expected_map.GetMutableValue(
-      key::kWebAppInstallForceList, base::Value::Type::LIST);
-  web_app_value->Append(std::move(first_app));
+  base::Value::Dict first_app = CreateWebAppDict(kWebAppUrl1, kAppId1);
+  base::Value::Dict second_app = CreateWebAppDict(kWebAppUrl2, kAppId2);
+  base::Value::List& web_app_list =
+      expected_map
+          .GetMutableValue(key::kWebAppInstallForceList,
+                           base::Value::Type::LIST)
+          ->GetList();
+  web_app_list.Append(std::move(first_app));
+  web_app_list.Append(std::move(second_app));
 
   migrator_.Migrate(&policy_map_);
 
@@ -172,7 +137,7 @@
 TEST_F(DefaultChromeAppsMigratorTest, WebAppPolicyWrongType) {
   PolicyMap expected_map(policy_map_.Clone());
 
-  // Add force installed chrome app.
+  // Add force installed Chrome App.
   base::Value* forcelist_value = policy_map_.GetMutableValue(
       key::kExtensionInstallForcelist, base::Value::Type::LIST);
   forcelist_value->Append(kAppId1);
@@ -182,18 +147,13 @@
   policy_map_.GetMutable(key::kWebAppInstallForceList)
       ->set_value(std::move(web_app_value));
 
-  // Chrome app should be blocked after migration.
-  base::Value* blocklist_value = expected_map.GetMutableValue(
-      key::kExtensionInstallBlocklist, base::Value::Type::LIST);
-  blocklist_value->Append(kAppId1);
-
-  base::Value web_app_expected_value(base::Value::Type::LIST);
-  base::Value first_app(base::Value::Type::DICTIONARY);
-  first_app.SetStringKey("url", kWebAppUrl1);
-  web_app_expected_value.Append(std::move(first_app));
+  base::Value::List web_app_expected_list;
+  base::Value::Dict web_app = CreateWebAppDict(kWebAppUrl1, kAppId1);
+  web_app_expected_list.Append(std::move(web_app));
   PolicyMap::Entry* web_app_expected_entry =
       expected_map.GetMutable(key::kWebAppInstallForceList);
-  web_app_expected_entry->set_value(std::move(web_app_expected_value));
+  web_app_expected_entry->set_value(
+      base::Value(std::move(web_app_expected_list)));
   web_app_expected_entry->AddMessage(PolicyMap::MessageType::kError,
                                      IDS_POLICY_TYPE_ERROR);
 
@@ -205,27 +165,24 @@
 TEST_F(DefaultChromeAppsMigratorTest, PinnedApp) {
   PolicyMap expected_map(policy_map_.Clone());
 
-  // Add force installed chrome app that should be migrated.
+  // Add force installed Chrome App that should be migrated.
   base::Value* forcelist_value = policy_map_.GetMutableValue(
       key::kExtensionInstallForcelist, base::Value::Type::LIST);
   forcelist_value->Append(std::string(kAppId1));
 
-  // Make the chrome app pinned.
+  // Make the Chrome App pinned.
   base::Value* pinned_apps_value = policy_map_.GetMutableValue(
       key::kPinnedLauncherApps, base::Value::Type::LIST);
   pinned_apps_value->Append(std::string(kAppId1));
 
-  // Chrome app should be blocked after migration.
-  base::Value* blocklist_value = expected_map.GetMutableValue(
-      key::kExtensionInstallBlocklist, base::Value::Type::LIST);
-  blocklist_value->Append(std::string(kAppId1));
-
   // Corresponding web app should be force installed after migration.
-  base::Value first_app(base::Value::Type::DICTIONARY);
-  first_app.SetStringKey("url", kWebAppUrl1);
-  base::Value* web_app_value = expected_map.GetMutableValue(
-      key::kWebAppInstallForceList, base::Value::Type::LIST);
-  web_app_value->Append(std::move(first_app));
+  base::Value::Dict web_app = CreateWebAppDict(kWebAppUrl1, kAppId1);
+  base::Value::List& web_app_list =
+      expected_map
+          .GetMutableValue(key::kWebAppInstallForceList,
+                           base::Value::Type::LIST)
+          ->GetList();
+  web_app_list.Append(std::move(web_app));
 
   // The corresponding Web App should be pinned.
   base::Value* pinned_expected_value = expected_map.GetMutableValue(
diff --git a/components/policy/core/common/policy_loader_lacros.cc b/components/policy/core/common/policy_loader_lacros.cc
index 9bb144f..64dcab7 100644
--- a/components/policy/core/common/policy_loader_lacros.cc
+++ b/components/policy/core/common/policy_loader_lacros.cc
@@ -66,6 +66,10 @@
     LOG(ERROR) << "No init params";
     return;
   }
+  if (per_profile_ == PolicyPerProfileFilter::kTrue &&
+      init_params->device_account_component_policy) {
+    SetComponentPolicy(init_params->device_account_component_policy.value());
+  }
   if (!init_params->device_account_policy) {
     LOG(ERROR) << "No policy data";
     return;
@@ -100,6 +104,10 @@
   DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
   std::unique_ptr<PolicyBundle> bundle = std::make_unique<PolicyBundle>();
 
+  // If the per_profile loader is used, apply the policy for extensions.
+  if (per_profile_ == PolicyPerProfileFilter::kTrue && component_policy_)
+    bundle->MergeFrom(*component_policy_);
+
   if (!policy_fetch_response_ || policy_fetch_response_->empty()) {
     return bundle;
   }
@@ -161,6 +169,43 @@
   last_fetch_timestamp_ = base::Time::Now();
 }
 
+void PolicyLoaderLacros::OnComponentPolicyUpdated(
+    const policy::ComponentPolicyMap& component_policy) {
+  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+  // The component policy is a per_profile=true policy. If Lacros is using a
+  // secondary profile, that policy is loaded directly from DMServer. If it is
+  // using the device account, there are two PolicyLoaderLacros objects
+  // present, and we need to store the policy only in the object with
+  // per_profile=true.
+  if (per_profile_ == PolicyPerProfileFilter::kFalse) {
+    return;
+  }
+
+  SetComponentPolicy(component_policy);
+  Reload(true);
+}
+
+void PolicyLoaderLacros::SetComponentPolicy(
+    const policy::ComponentPolicyMap& component_policy) {
+  if (component_policy_) {
+    component_policy_->Clear();
+  } else {
+    component_policy_ = std::make_unique<PolicyBundle>();
+  }
+  for (auto& policy_pair : component_policy) {
+    PolicyMap component_policy_map;
+    std::string error;
+    // The component policy received from Ash is the JSON data corresponding to
+    // the policy for the namespace.
+    ParseComponentPolicy(policy_pair.second.Clone(), POLICY_SCOPE_USER,
+                         POLICY_SOURCE_CLOUD_FROM_ASH, &component_policy_map,
+                         &error);
+    DCHECK(error.empty());
+
+    // The data is good; expose the policies.
+    component_policy_->Get(policy_pair.first).Swap(&component_policy_map);
+  }
+}
+
 enterprise_management::PolicyData* PolicyLoaderLacros::GetPolicyData() {
   if (!policy_fetch_response_ || !policy_data_)
     return nullptr;
diff --git a/components/policy/core/common/policy_loader_lacros.h b/components/policy/core/common/policy_loader_lacros.h
index 376191b..2e9199f 100644
--- a/components/policy/core/common/policy_loader_lacros.h
+++ b/components/policy/core/common/policy_loader_lacros.h
@@ -16,6 +16,7 @@
 #include "chromeos/lacros/lacros_service.h"
 #include "components/policy/core/common/async_policy_loader.h"
 #include "components/policy/core/common/policy_proto_decoders.h"
+#include "components/policy/core/common/values_util.h"
 #include "components/policy/proto/device_management_backend.pb.h"
 #include "third_party/abseil-cpp/absl/types/optional.h"
 
@@ -61,6 +62,15 @@
   // Update the latest policy fetch attempt timestamp.
   void OnPolicyFetchAttempt() override;
 
+  // chromeos::LacrosService::Observer implementation.
+  void OnComponentPolicyUpdated(
+      const policy::ComponentPolicyMap& component_policy) override;
+
+  // Returns the current device account policies for components.
+  const PolicyBundle* component_policy() const {
+    return component_policy_.get();
+  }
+
   // Return if the main user is a device local account (i.e. Kiosk, MGS) user.
   static bool IsDeviceLocalAccountUser();
 
@@ -81,12 +91,17 @@
   base::Time last_fetch_timestamp() { return last_fetch_timestamp_; }
 
  private:
+  void SetComponentPolicy(const policy::ComponentPolicyMap& component_policy);
+
   // The filter for policy data to install.
   const PolicyPerProfileFilter per_profile_;
 
   // Serialized blob of PolicyFetchResponse object received from the server.
   absl::optional<std::vector<uint8_t>> policy_fetch_response_;
 
+  // The component policy of the device account.
+  std::unique_ptr<PolicyBundle> component_policy_;
+
   // The parsed policy objects received from Ash.
   std::unique_ptr<enterprise_management::PolicyData> policy_data_;
 
diff --git a/components/policy/core/common/policy_pref_names.cc b/components/policy/core/common/policy_pref_names.cc
index ca7e266..fcf1d53 100644
--- a/components/policy/core/common/policy_pref_names.cc
+++ b/components/policy/core/common/policy_pref_names.cc
@@ -113,5 +113,10 @@
 const char kLastPolicyCheckTime[] = "policy.last_policy_check_time";
 #endif
 
+#if BUILDFLAG(IS_IOS)
+const char kUserPolicyNotificationWasShown[] =
+    "policy.user_policy_notification_was_shown";
+#endif
+
 }  // namespace policy_prefs
 }  // namespace policy
diff --git a/components/policy/core/common/policy_pref_names.h b/components/policy/core/common/policy_pref_names.h
index 04fa503..21273d7 100644
--- a/components/policy/core/common/policy_pref_names.h
+++ b/components/policy/core/common/policy_pref_names.h
@@ -43,6 +43,9 @@
 #if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_IOS)
 POLICY_EXPORT extern const char kLastPolicyCheckTime[];
 #endif
+#if BUILDFLAG(IS_IOS)
+POLICY_EXPORT extern const char kUserPolicyNotificationWasShown[];
+#endif
 
 }  // namespace policy_prefs
 }  // namespace policy
diff --git a/components/policy/core/common/policy_proto_decoders.cc b/components/policy/core/common/policy_proto_decoders.cc
index 11521b3..963c27e 100644
--- a/components/policy/core/common/policy_proto_decoders.cc
+++ b/components/policy/core/common/policy_proto_decoders.cc
@@ -10,6 +10,7 @@
 
 #include "base/json/json_reader.h"
 #include "base/logging.h"
+#include "base/strings/strcat.h"
 #include "base/strings/string_number_conversions.h"
 #include "base/strings/utf_string_conversions.h"
 #include "base/values.h"
@@ -25,6 +26,10 @@
 
 namespace {
 
+const char kValue[] = "Value";
+const char kLevel[] = "Level";
+const char kRecommended[] = "Recommended";
+
 // Returns true and sets |level| to a PolicyLevel if the policy has been set
 // at that level. Returns false if the policy is not set, or has been set at
 // the level of PolicyOptions::UNSET.
@@ -210,4 +215,42 @@
   }
 }
 
+bool ParseComponentPolicy(base::Value json,
+                          PolicyScope scope,
+                          PolicySource source,
+                          PolicyMap* policy,
+                          std::string* error) {
+  // Each top-level key maps a policy name to its description.
+  //
+  // Each description is an object that contains the policy value under the
+  // "Value" key. The optional "Level" key is either "Mandatory" (default) or
+  // "Recommended".
+  for (auto it : json.DictItems()) {
+    const std::string& policy_name = it.first;
+    base::Value description = std::move(it.second);
+    if (!description.is_dict()) {
+      *error = "The JSON blob dictionary value is not a dictionary.";
+      return false;
+    }
+
+    absl::optional<base::Value> value = description.ExtractKey(kValue);
+    if (!value.has_value()) {
+      *error = base::StrCat(
+          {"The JSON blob dictionary value doesn't contain the required ",
+           kValue, " field."});
+      return false;
+    }
+
+    PolicyLevel level = POLICY_LEVEL_MANDATORY;
+    const std::string* level_string = description.FindStringKey(kLevel);
+    if (level_string && *level_string == kRecommended)
+      level = POLICY_LEVEL_RECOMMENDED;
+
+    policy->Set(policy_name, level, scope, source, std::move(value.value()),
+                nullptr);
+  }
+
+  return true;
+}
+
 }  // namespace policy
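
For reference, here is the shape of a JSON blob that ParseComponentPolicy()
would accept, matching the layout described in the comment above (the policy
names are illustrative only):

    // Decodes into one policy at the default mandatory level and one at the
    // recommended level.
    constexpr char kExampleComponentPolicy[] = R"({
      "ShowHomeButton": { "Value": true },
      "HomepageLocation": {
        "Value": "https://example.com",
        "Level": "Recommended"
      }
    })";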
diff --git a/components/policy/core/common/policy_proto_decoders.h b/components/policy/core/common/policy_proto_decoders.h
index 4a47256..b9a241f 100644
--- a/components/policy/core/common/policy_proto_decoders.h
+++ b/components/policy/core/common/policy_proto_decoders.h
@@ -6,6 +6,7 @@
 #define COMPONENTS_POLICY_CORE_COMMON_POLICY_PROTO_DECODERS_H_
 
 #include "base/memory/weak_ptr.h"
+#include "base/values.h"
 #include "components/policy/core/common/policy_types.h"
 #include "components/policy/policy_export.h"
 
@@ -40,6 +41,16 @@
     PolicyMap* map,
     PolicyPerProfileFilter per_profile);
 
+// Parses the JSON policy in |json| into |policy|, and returns true if the
+// parse was successful. |scope| and |source| are set as the scope and source
+// of the policy in the result. In case of failure, |error| is populated with
+// an error message and false is returned.
+POLICY_EXPORT bool ParseComponentPolicy(base::Value json,
+                                        PolicyScope scope,
+                                        PolicySource source,
+                                        PolicyMap* policy,
+                                        std::string* error);
+
 }  // namespace policy
 
 #endif  // COMPONENTS_POLICY_CORE_COMMON_POLICY_PROTO_DECODERS_H_
diff --git a/components/policy/core/common/schema.cc b/components/policy/core/common/schema.cc
index 32f578a..702ded2 100644
--- a/components/policy/core/common/schema.cc
+++ b/components/policy/core/common/schema.cc
@@ -231,6 +231,10 @@
   return strategy == SCHEMA_ALLOW_UNKNOWN_AND_INVALID_LIST_ENTRY;
 }
 
+bool StrategyAllowUnknownWithoutWarning(SchemaOnErrorStrategy strategy) {
+  return strategy == SCHEMA_ALLOW_UNKNOWN_WITHOUT_WARNING;
+}
+
 void SchemaErrorFound(std::string* out_error_path,
                       std::string* out_error,
                       const std::string& msg) {
@@ -1219,8 +1223,10 @@
       SchemaList schema_list = GetMatchingProperties(dict_item.first);
       if (schema_list.empty()) {
         // Unknown property was detected.
-        SchemaErrorFound(out_error_path, out_error,
-                         "Unknown property: " + dict_item.first);
+        if (!StrategyAllowUnknownWithoutWarning(strategy)) {
+          SchemaErrorFound(out_error_path, out_error,
+                           "Unknown property: " + dict_item.first);
+        }
         if (!StrategyAllowUnknown(strategy))
           return false;
       } else {
@@ -1311,11 +1317,15 @@
       SchemaList schema_list = GetMatchingProperties(dict_item.first);
       if (schema_list.empty()) {
         // Unknown property was detected.
-        SchemaErrorFound(out_error_path, out_error,
-                         "Unknown property: " + dict_item.first);
+        if (!StrategyAllowUnknownWithoutWarning(strategy)) {
+          SchemaErrorFound(out_error_path, out_error,
+                           "Unknown property: " + dict_item.first);
+        }
         if (!StrategyAllowUnknown(strategy))
           return false;
-        drop_list.push_back(dict_item.first);
+        if (!StrategyAllowUnknownWithoutWarning(strategy)) {
+          drop_list.push_back(dict_item.first);
+        }
       } else {
         for (const auto& subschema : schema_list) {
           std::string new_error;
diff --git a/components/policy/core/common/schema.h b/components/policy/core/common/schema.h
index d07629f..111e295 100644
--- a/components/policy/core/common/schema.h
+++ b/components/policy/core/common/schema.h
@@ -49,6 +49,11 @@
   // is safe. For example, can't be used if an empty list has a special meaning,
   // like allowing everything.
   SCHEMA_ALLOW_UNKNOWN_AND_INVALID_LIST_ENTRY,
+  // Same as |SCHEMA_ALLOW_UNKNOWN|, but unknown properties won't cause error
+  // messages to be added. Used to allow adding extra fields to the policy
+  // internally, without adding those fields to the schema. This option should
+  // be avoided, since it suppresses the errors.
+  SCHEMA_ALLOW_UNKNOWN_WITHOUT_WARNING,
 };
 
 // Schema validation options for Schema::ParseToDictAndValidate().
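
A hedged usage sketch of the new strategy (a minimal wrapper, not code from
this change):

    #include <string>

    #include "base/values.h"
    #include "components/policy/core/common/schema.h"

    // Returns true if |value| satisfies |schema|, silently tolerating unknown
    // properties: unlike SCHEMA_ALLOW_UNKNOWN, no "Unknown property: ..."
    // message is recorded in |error|.
    bool ValidateLenient(const policy::Schema& schema,
                         const base::Value& value) {
      std::string error_path, error;
      return schema.Validate(value,
                             policy::SCHEMA_ALLOW_UNKNOWN_WITHOUT_WARNING,
                             &error_path, &error);
    }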
diff --git a/components/policy/core/common/schema_fuzzer.cc b/components/policy/core/common/schema_fuzzer.cc
index a236f88..8eb3ef0 100644
--- a/components/policy/core/common/schema_fuzzer.cc
+++ b/components/policy/core/common/schema_fuzzer.cc
@@ -38,7 +38,8 @@
 void TestValidation(const Environment& env, const base::Value& parsed_json) {
   // Exercise with every possible strategy.
   for (auto strategy : {SCHEMA_STRICT, SCHEMA_ALLOW_UNKNOWN,
-                        SCHEMA_ALLOW_UNKNOWN_AND_INVALID_LIST_ENTRY}) {
+                        SCHEMA_ALLOW_UNKNOWN_AND_INVALID_LIST_ENTRY,
+                        SCHEMA_ALLOW_UNKNOWN_WITHOUT_WARNING}) {
     env.chrome_policy_schema.Validate(parsed_json, strategy,
                                       /*out_error_path=*/nullptr,
                                       /*out_error=*/nullptr);
diff --git a/components/policy/core/common/schema_unittest.cc b/components/policy/core/common/schema_unittest.cc
index 5d930f9..bc2b6a2 100644
--- a/components/policy/core/common/schema_unittest.cc
+++ b/components/policy/core/common/schema_unittest.cc
@@ -180,6 +180,9 @@
       schema.Normalize(&cloned_value, strategy, nullptr, &error, &touched);
   EXPECT_EQ(expected_return_value, returned) << source << ": " << error;
 
+  if (strategy == SCHEMA_ALLOW_UNKNOWN_WITHOUT_WARNING)
+    return;
+
   bool strictly_valid = schema.Validate(value, SCHEMA_STRICT, nullptr, &error);
   EXPECT_EQ(touched, !strictly_valid && returned) << source;
 
@@ -789,6 +792,8 @@
   TestSchemaValidation(schema, bundle, SCHEMA_ALLOW_UNKNOWN, true);
   TestSchemaValidation(schema, bundle,
                        SCHEMA_ALLOW_UNKNOWN_AND_INVALID_LIST_ENTRY, true);
+  TestSchemaValidation(schema, bundle, SCHEMA_ALLOW_UNKNOWN_WITHOUT_WARNING,
+                       true);
   TestSchemaValidationWithPath(schema, bundle, "");
   bundle.RemoveKey("boom");
 
@@ -798,6 +803,8 @@
   TestSchemaValidation(schema, bundle, SCHEMA_ALLOW_UNKNOWN, false);
   TestSchemaValidation(schema, bundle,
                        SCHEMA_ALLOW_UNKNOWN_AND_INVALID_LIST_ENTRY, false);
+  TestSchemaValidation(schema, bundle, SCHEMA_ALLOW_UNKNOWN_WITHOUT_WARNING,
+                       false);
   TestSchemaValidationWithPath(schema, bundle, "Boolean");
   bundle.SetBoolKey("Boolean", true);
 
@@ -813,6 +820,8 @@
     TestSchemaValidation(subschema, root, SCHEMA_ALLOW_UNKNOWN, true);
     TestSchemaValidation(subschema, root,
                          SCHEMA_ALLOW_UNKNOWN_AND_INVALID_LIST_ENTRY, true);
+    TestSchemaValidation(subschema, root, SCHEMA_ALLOW_UNKNOWN_WITHOUT_WARNING,
+                         true);
     TestSchemaValidationWithPath(subschema, root, "Object");
     root.RemovePath("Object.three");
 
@@ -822,6 +831,8 @@
     TestSchemaValidation(subschema, root, SCHEMA_ALLOW_UNKNOWN, false);
     TestSchemaValidation(subschema, root,
                          SCHEMA_ALLOW_UNKNOWN_AND_INVALID_LIST_ENTRY, false);
+    TestSchemaValidation(subschema, root, SCHEMA_ALLOW_UNKNOWN_WITHOUT_WARNING,
+                         false);
     TestSchemaValidationWithPath(subschema, root, "Object.one");
     root.RemovePath("Object.one");
   }
@@ -841,6 +852,8 @@
     TestSchemaValidation(subschema, root, SCHEMA_ALLOW_UNKNOWN, true);
     TestSchemaValidation(subschema, root,
                          SCHEMA_ALLOW_UNKNOWN_AND_INVALID_LIST_ENTRY, true);
+    TestSchemaValidation(subschema, root, SCHEMA_ALLOW_UNKNOWN_WITHOUT_WARNING,
+                         true);
     TestSchemaValidationWithPath(subschema, root, "items[0]");
     root.EraseListIter(root_view.begin() + (root_view.size() - 1));
 
@@ -852,6 +865,8 @@
     TestSchemaValidation(subschema, root, SCHEMA_ALLOW_UNKNOWN, false);
     TestSchemaValidation(subschema, root,
                          SCHEMA_ALLOW_UNKNOWN_AND_INVALID_LIST_ENTRY, true);
+    TestSchemaValidation(subschema, root, SCHEMA_ALLOW_UNKNOWN_WITHOUT_WARNING,
+                         false);
     TestSchemaValidationWithPath(subschema, root, "items[0].two");
     root.EraseListIter(root_view.begin() + (root_view.size() - 1));
   }
@@ -871,6 +886,8 @@
     TestSchemaValidation(subschema, root, SCHEMA_ALLOW_UNKNOWN, true);
     TestSchemaValidation(subschema, root,
                          SCHEMA_ALLOW_UNKNOWN_AND_INVALID_LIST_ENTRY, true);
+    TestSchemaValidation(subschema, root, SCHEMA_ALLOW_UNKNOWN_WITHOUT_WARNING,
+                         true);
 
     // Invalid list item.
     list_value->Append("blabla");
@@ -878,6 +895,8 @@
     TestSchemaValidation(subschema, root, SCHEMA_ALLOW_UNKNOWN, false);
     TestSchemaValidation(subschema, root,
                          SCHEMA_ALLOW_UNKNOWN_AND_INVALID_LIST_ENTRY, true);
+    TestSchemaValidation(subschema, root, SCHEMA_ALLOW_UNKNOWN_WITHOUT_WARNING,
+                         false);
     TestSchemaValidationWithPath(subschema, root, "List.items[1]");
   }
 
@@ -897,6 +916,8 @@
     TestSchemaValidation(subschema, root, SCHEMA_ALLOW_UNKNOWN, true);
     TestSchemaValidation(subschema, root,
                          SCHEMA_ALLOW_UNKNOWN_AND_INVALID_LIST_ENTRY, true);
+    TestSchemaValidation(subschema, root, SCHEMA_ALLOW_UNKNOWN_WITHOUT_WARNING,
+                         true);
 
     // Invalid list item.
     list_value->GetList().Append(12345);
@@ -904,6 +925,8 @@
     TestSchemaValidation(subschema, root, SCHEMA_ALLOW_UNKNOWN, false);
     TestSchemaValidation(subschema, root,
                          SCHEMA_ALLOW_UNKNOWN_AND_INVALID_LIST_ENTRY, true);
+    TestSchemaValidation(subschema, root, SCHEMA_ALLOW_UNKNOWN_WITHOUT_WARNING,
+                         false);
     TestSchemaValidationWithPath(subschema, root, "items[0].List.items[1]");
   }
 
@@ -970,6 +993,8 @@
     TestSchemaValidation(subschema, root, SCHEMA_ALLOW_UNKNOWN, true);
     TestSchemaValidation(subschema, root,
                          SCHEMA_ALLOW_UNKNOWN_AND_INVALID_LIST_ENTRY, true);
+    TestSchemaValidation(subschema, root, SCHEMA_ALLOW_UNKNOWN_WITHOUT_WARNING,
+                         true);
     root.RemoveKey("foobar");
   }
 
@@ -986,6 +1011,8 @@
     TestSchemaValidation(subschema, root, SCHEMA_ALLOW_UNKNOWN, false);
     TestSchemaValidation(subschema, root,
                          SCHEMA_ALLOW_UNKNOWN_AND_INVALID_LIST_ENTRY, false);
+    TestSchemaValidation(subschema, root, SCHEMA_ALLOW_UNKNOWN_WITHOUT_WARNING,
+                         false);
 
     // Invalid required property.
     root.SetIntKey("String", 123);
@@ -993,6 +1020,8 @@
     TestSchemaValidation(subschema, root, SCHEMA_ALLOW_UNKNOWN, false);
     TestSchemaValidation(subschema, root,
                          SCHEMA_ALLOW_UNKNOWN_AND_INVALID_LIST_ENTRY, false);
+    TestSchemaValidation(subschema, root, SCHEMA_ALLOW_UNKNOWN_WITHOUT_WARNING,
+                         false);
     root.SetStringKey("String", "a string");
 
     // Invalid subschema of required property with multiple subschemas.
@@ -1007,12 +1036,16 @@
     TestSchemaValidation(subschema, root, SCHEMA_ALLOW_UNKNOWN, false);
     TestSchemaValidation(subschema, root,
                          SCHEMA_ALLOW_UNKNOWN_AND_INVALID_LIST_ENTRY, false);
+    TestSchemaValidation(subschema, root, SCHEMA_ALLOW_UNKNOWN_WITHOUT_WARNING,
+                         false);
 
     root.SetIntKey("Integer", 3);
     TestSchemaValidation(subschema, root, SCHEMA_STRICT, false);
     TestSchemaValidation(subschema, root, SCHEMA_ALLOW_UNKNOWN, false);
     TestSchemaValidation(subschema, root,
                          SCHEMA_ALLOW_UNKNOWN_AND_INVALID_LIST_ENTRY, false);
+    TestSchemaValidation(subschema, root, SCHEMA_ALLOW_UNKNOWN_WITHOUT_WARNING,
+                         false);
   }
 
   // Test that integer to double promotion is allowed.
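The schema hunks above extend every strategy sweep with SCHEMA_ALLOW_UNKNOWN_WITHOUT_WARNING. A minimal sketch of how a caller might exercise it, using the Validate() signature visible in schema_fuzzer.cc; the assumption (based on the name and on the expectations in these tests) is that it accepts unknown properties like SCHEMA_ALLOW_UNKNOWN, but without recording a warning:

    // Sketch only; |schema| and |parsed_json| as in schema_fuzzer.cc above.
    std::string error_path;
    std::string error;
    bool valid = schema.Validate(parsed_json,
                                 SCHEMA_ALLOW_UNKNOWN_WITHOUT_WARNING,
                                 &error_path, &error);
    // Assumed behavior: unknown properties validate silently here, whereas
    // SCHEMA_ALLOW_UNKNOWN would still surface a warning via |error|.
    // Genuinely invalid values fail under both strategies.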
diff --git a/ipc/ipc_channel.h b/ipc/ipc_channel.h
index 1646384..b5cc3dd 100644
--- a/ipc/ipc_channel.h
+++ b/ipc/ipc_channel.h
@@ -50,11 +50,10 @@
 
  public:
   // Flags to test modes
-  enum ModeFlags {
-    MODE_NO_FLAG = 0x0,
-    MODE_SERVER_FLAG = 0x1,
-    MODE_CLIENT_FLAG = 0x2,
-  };
+  using ModeFlags = int;
+  static constexpr ModeFlags MODE_NO_FLAG = 0x0;
+  static constexpr ModeFlags MODE_SERVER_FLAG = 0x1;
+  static constexpr ModeFlags MODE_CLIENT_FLAG = 0x2;
 
   // Some Standard Modes
   // TODO(morrita): These are under deprecation work. You should use Create*()
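The ipc_channel.h hunk above swaps the unscoped ModeFlags enum for an int alias with constexpr constants. One plausible motivation (an inference, not stated in the diff): flags are meant to be OR-ed together, and a bitwise OR of enumerators yields int, so storing the result back in a ModeFlags variable needed a cast. With the alias the flags compose directly:

    // Old enum: the OR result is int, so this needed a static_cast:
    //   ModeFlags m = static_cast<ModeFlags>(MODE_SERVER_FLAG | MODE_CLIENT_FLAG);
    // New alias: no cast required.
    using ModeFlags = int;
    constexpr ModeFlags MODE_SERVER_FLAG = 0x1;
    constexpr ModeFlags MODE_CLIENT_FLAG = 0x2;
    ModeFlags mode = MODE_SERVER_FLAG | MODE_CLIENT_FLAG;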
diff --git a/ipc/ipc_message_unittest.cc b/ipc/ipc_message_unittest.cc
index b9dfd39..e7c4196 100644
--- a/ipc/ipc_message_unittest.cc
+++ b/ipc/ipc_message_unittest.cc
@@ -91,8 +91,8 @@
   }
   {
     base::Value list(base::Value::Type::LIST);
-    list.Append(42);
-    list.Append("hello");
+    list.GetList().Append(42);
+    list.GetList().Append("hello");
     expect_value_equals(list);
   }
 
@@ -106,9 +106,9 @@
 
 TEST(IPCMessageTest, ListValue) {
   base::ListValue input;
-  input.Append(42.42);
-  input.Append("forty");
-  input.Append(std::make_unique<base::Value>());
+  input.GetList().Append(42.42);
+  input.GetList().Append("forty");
+  input.GetList().Append(base::Value());
 
   IPC::Message msg(1, 2, IPC::Message::PRIORITY_NORMAL);
   IPC::WriteParam(&msg, input);
@@ -138,9 +138,9 @@
   subdict.SetBoolean("bool", false);
 
   base::ListValue sublist;
-  sublist.Append(42.42);
-  sublist.Append("forty");
-  sublist.Append("two");
+  sublist.GetList().Append(42.42);
+  sublist.GetList().Append("forty");
+  sublist.GetList().Append("two");
   subdict.SetKey("list", std::move(sublist));
 
   input.SetKey("dict", std::move(subdict));
diff --git a/libchrome_tools/patches/disable-SharedMemoryTracker.patch b/libchrome_tools/patches/disable-SharedMemoryTracker.patch
index dca1e37..4913ee9 100644
--- a/libchrome_tools/patches/disable-SharedMemoryTracker.patch
+++ b/libchrome_tools/patches/disable-SharedMemoryTracker.patch
@@ -1,46 +1,44 @@
-From 86a64848816164947efaaab7c0aec6ae4e7d09a6 Mon Sep 17 00:00:00 2001
+From dfb808bf1e8a496f00a4bd90999fb22659645801 Mon Sep 17 00:00:00 2001
 From: Grace Cham <hscham@chromium.org>
 Date: Wed, 15 Jun 2022 21:59:29 +0900
 Subject: [PATCH] disable SharedMemoryTracker
 
 ---
- base/memory/shared_memory_mapping.cc | 9 ++++++---
- 1 file changed, 6 insertions(+), 3 deletions(-)
+ base/memory/shared_memory_mapping.cc | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
 
 diff --git a/base/memory/shared_memory_mapping.cc b/base/memory/shared_memory_mapping.cc
-index d54af134db..805d12569f 100644
+index 82c03f3ec371..19092acc7f9a 100644
 --- a/base/memory/shared_memory_mapping.cc
 +++ b/base/memory/shared_memory_mapping.cc
-@@ -8,7 +8,8 @@
- 
+@@ -10,7 +10,8 @@
+ #include "base/bits.h"
  #include "base/logging.h"
  #include "base/memory/shared_memory_security_policy.h"
 -#include "base/memory/shared_memory_tracker.h"
 +// Unsupported in libchrome
 +// #include "base/memory/shared_memory_tracker.h"
+ #include "base/system/sys_info.h"
  #include "base/unguessable_token.h"
  #include "build/build_config.h"
- 
-@@ -59,7 +60,8 @@ SharedMemoryMapping::SharedMemoryMapping(void* memory,
-                                          size_t mapped_size,
-                                          const UnguessableToken& guid)
+@@ -45,7 +46,7 @@ SharedMemoryMapping::SharedMemoryMapping(span<uint8_t> mapped_span,
+                                          SharedMemoryMapper* mapper)
      : mapped_span_(mapped_span), size_(size), guid_(guid), mapper_(mapper) {
+   // Note: except on Windows, `mapped_span_.size() == size_`.
 -  SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
-+  // Unsupported in libchrome.
 +  // SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
  }
  
  void SharedMemoryMapping::Unmap() {
-@@ -67,7 +69,8 @@ void SharedMemoryMapping::Unmap() {
+@@ -53,7 +54,7 @@ void SharedMemoryMapping::Unmap() {
      return;
  
    SharedMemorySecurityPolicy::ReleaseReservationForMapping(size_);
 -  SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
-+  // Unsupported in libchrome.
 +  // SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
  
    SharedMemoryMapper* mapper = mapper_;
    if (!mapper)
 -- 
-2.36.1.476.g0c4daa206d-goog
+2.37.1.359.gd136c6c3e2-goog
 
diff --git a/mojo/public/cpp/base/fuchsia/DIR_METADATA b/mojo/public/cpp/base/fuchsia/DIR_METADATA
new file mode 100644
index 0000000..210aa6a
--- /dev/null
+++ b/mojo/public/cpp/base/fuchsia/DIR_METADATA
@@ -0,0 +1 @@
+mixins: "//build/fuchsia/COMMON_METADATA"
diff --git a/mojo/public/cpp/base/fuchsia/example.mojom b/mojo/public/cpp/base/fuchsia/example.mojom
new file mode 100644
index 0000000..b4ff8b0
--- /dev/null
+++ b/mojo/public/cpp/base/fuchsia/example.mojom
@@ -0,0 +1,11 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+module fuchsia.test.mojom;
+
+// Mojo struct of fidl::InterfaceRequest<TestInterface>. |request| is a
+// channel handle.
+struct TestInterfaceRequest {
+  handle<platform> request;
+};
diff --git a/mojo/public/cpp/base/fuchsia/fidl_interface_request_mojom_traits.h b/mojo/public/cpp/base/fuchsia/fidl_interface_request_mojom_traits.h
new file mode 100644
index 0000000..67455a4
--- /dev/null
+++ b/mojo/public/cpp/base/fuchsia/fidl_interface_request_mojom_traits.h
@@ -0,0 +1,36 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_PUBLIC_CPP_BASE_FUCHSIA_FIDL_INTERFACE_REQUEST_MOJOM_TRAITS_H_
+#define MOJO_PUBLIC_CPP_BASE_FUCHSIA_FIDL_INTERFACE_REQUEST_MOJOM_TRAITS_H_
+
+#include <lib/fidl/cpp/interface_request.h>
+
+#include "mojo/public/cpp/platform/platform_handle.h"
+
+namespace mojo {
+
+// Implementation of StructTraits<DataView, fidl::InterfaceRequest<Interface>>.
+// Each Interface still needs to define its own StructTraits by subclassing
+// this struct. See test_interface_request_mojom_traits.h for an example.
+template <typename DataView, typename Interface>
+struct FidlInterfaceRequestStructTraits {
+  static PlatformHandle request(fidl::InterfaceRequest<Interface>& request) {
+    DCHECK(request.is_valid());
+    return PlatformHandle(request.TakeChannel());
+  }
+
+  static bool Read(DataView input, fidl::InterfaceRequest<Interface>* output) {
+    PlatformHandle handle = input.TakeRequest();
+    if (!handle.is_valid_handle())
+      return false;
+
+    output->set_channel(zx::channel(handle.TakeHandle()));
+    return true;
+  }
+};
+
+}  // namespace mojo
+
+#endif  // MOJO_PUBLIC_CPP_BASE_FUCHSIA_FIDL_INTERFACE_REQUEST_MOJOM_TRAITS_H_
diff --git a/mojo/public/cpp/base/fuchsia/fidl_interface_request_mojom_traits_unittest.cc b/mojo/public/cpp/base/fuchsia/fidl_interface_request_mojom_traits_unittest.cc
new file mode 100644
index 0000000..3619016
--- /dev/null
+++ b/mojo/public/cpp/base/fuchsia/fidl_interface_request_mojom_traits_unittest.cc
@@ -0,0 +1,30 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/task_environment.h"
+#include "base/testfidl/cpp/fidl.h"
+#include "mojo/public/cpp/base/fuchsia/example.mojom.h"
+#include "mojo/public/cpp/base/fuchsia/test_interface_request_mojom_traits.h"
+#include "mojo/public/cpp/test_support/test_utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace fuchsia {
+
+using base::testfidl::TestInterface;
+using base::testfidl::TestInterfacePtr;
+
+TEST(InterfaceRequestStructTraitsTest, Serialization) {
+  base::test::SingleThreadTaskEnvironment task_environment(
+      base::test::SingleThreadTaskEnvironment::MainThreadType::IO);
+  TestInterfacePtr test_ptr;
+  fidl::InterfaceRequest<TestInterface> input_request = test_ptr.NewRequest();
+  fidl::InterfaceRequest<TestInterface> output_request;
+
+  EXPECT_TRUE(mojo::test::SerializeAndDeserialize<
+              fuchsia::test::mojom::TestInterfaceRequest>(input_request,
+                                                          output_request));
+  EXPECT_TRUE(output_request.is_valid());
+}
+
+}  // namespace fuchsia
diff --git a/mojo/public/cpp/base/fuchsia/test_interface_request_mojom_traits.h b/mojo/public/cpp/base/fuchsia/test_interface_request_mojom_traits.h
new file mode 100644
index 0000000..7b14d09
--- /dev/null
+++ b/mojo/public/cpp/base/fuchsia/test_interface_request_mojom_traits.h
@@ -0,0 +1,21 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MOJO_PUBLIC_CPP_BASE_FUCHSIA_TEST_INTERFACE_REQUEST_MOJOM_TRAITS_H_
+#define MOJO_PUBLIC_CPP_BASE_FUCHSIA_TEST_INTERFACE_REQUEST_MOJOM_TRAITS_H_
+
+#include "mojo/public/cpp/base/fuchsia/fidl_interface_request_mojom_traits.h"
+
+namespace mojo {
+
+template <>
+struct StructTraits<fuchsia::test::mojom::TestInterfaceRequestDataView,
+                    fidl::InterfaceRequest<base::testfidl::TestInterface>>
+    : public FidlInterfaceRequestStructTraits<
+          fuchsia::test::mojom::TestInterfaceRequestDataView,
+          base::testfidl::TestInterface> {};
+
+}  // namespace mojo
+
+#endif  // MOJO_PUBLIC_CPP_BASE_FUCHSIA_TEST_INTERFACE_REQUEST_MOJOM_TRAITS_H_
diff --git a/mojo/public/cpp/bindings/pending_associated_receiver.h b/mojo/public/cpp/bindings/pending_associated_receiver.h
index a05eab5..7f1fb1d 100644
--- a/mojo/public/cpp/bindings/pending_associated_receiver.h
+++ b/mojo/public/cpp/bindings/pending_associated_receiver.h
@@ -7,6 +7,7 @@
 
 #include <stdint.h>
 
+#include <type_traits>
 #include <utility>
 
 #include "base/compiler_specific.h"
@@ -40,12 +41,13 @@
 #if !BUILDFLAG(IS_NACL)
   // Move conversion operator for custom receiver types. Only participates in
   // overload resolution if a typesafe conversion is supported.
-  template <typename T,
-            std::enable_if_t<std::is_same<
-                PendingAssociatedReceiver<Interface>,
-                std::result_of_t<decltype (&PendingAssociatedReceiverConverter<
-                                           T>::template To<Interface>)(T&&)>>::
-                                 value>* = nullptr>
+  template <
+      typename T,
+      std::enable_if_t<std::is_same<
+          PendingAssociatedReceiver<Interface>,
+          std::invoke_result_t<decltype(&PendingAssociatedReceiverConverter<
+                                        T>::template To<Interface>),
+                               T&&>>::value>* = nullptr>
   PendingAssociatedReceiver(T&& other)
       : PendingAssociatedReceiver(
             PendingAssociatedReceiverConverter<T>::template To<Interface>(
diff --git a/mojo/public/cpp/bindings/pending_associated_remote.h b/mojo/public/cpp/bindings/pending_associated_remote.h
index e70fb2a..10f7169 100644
--- a/mojo/public/cpp/bindings/pending_associated_remote.h
+++ b/mojo/public/cpp/bindings/pending_associated_remote.h
@@ -7,6 +7,7 @@
 
 #include <stdint.h>
 
+#include <type_traits>
 #include <utility>
 
 #include "base/compiler_specific.h"
@@ -44,9 +45,9 @@
   template <typename T,
             std::enable_if_t<std::is_same<
                 PendingAssociatedRemote<Interface>,
-                std::result_of_t<decltype (&PendingAssociatedRemoteConverter<
-                                           T>::template To<Interface>)(T&&)>>::
-                                 value>* = nullptr>
+                std::invoke_result_t<decltype(&PendingAssociatedRemoteConverter<
+                                              T>::template To<Interface>),
+                                     T&&>>::value>* = nullptr>
   PendingAssociatedRemote(T&& other)
       : PendingAssociatedRemote(
             PendingAssociatedRemoteConverter<T>::template To<Interface>(
diff --git a/mojo/public/cpp/bindings/pending_receiver.h b/mojo/public/cpp/bindings/pending_receiver.h
index 3d0ec9a..920f6b7 100644
--- a/mojo/public/cpp/bindings/pending_receiver.h
+++ b/mojo/public/cpp/bindings/pending_receiver.h
@@ -66,12 +66,12 @@
 #if !BUILDFLAG(IS_NACL)
   // Move conversion operator for custom receiver types. Only participates in
   // overload resolution if a typesafe conversion is supported.
-  template <
-      typename T,
-      std::enable_if_t<std::is_same<
-          PendingReceiver<Interface>,
-          std::result_of_t<decltype (&PendingReceiverConverter<T>::template To<
-                                     Interface>)(T&&)>>::value>* = nullptr>
+  template <typename T,
+            std::enable_if_t<std::is_same<
+                PendingReceiver<Interface>,
+                std::invoke_result_t<decltype(&PendingReceiverConverter<
+                                              T>::template To<Interface>),
+                                     T&&>>::value>* = nullptr>
   PendingReceiver(T&& other)
       : PendingReceiver(PendingReceiverConverter<T>::template To<Interface>(
             std::forward<T>(other))) {}
diff --git a/mojo/public/cpp/bindings/pending_remote.h b/mojo/public/cpp/bindings/pending_remote.h
index eb63c2c..69ac0c6 100644
--- a/mojo/public/cpp/bindings/pending_remote.h
+++ b/mojo/public/cpp/bindings/pending_remote.h
@@ -68,12 +68,12 @@
 #if !BUILDFLAG(IS_NACL)
   // Move conversion operator for custom remote types. Only participates in
   // overload resolution if a typesafe conversion is supported.
-  template <
-      typename T,
-      std::enable_if_t<std::is_same<
-          PendingRemote<Interface>,
-          std::result_of_t<decltype (&PendingRemoteConverter<T>::template To<
-                                     Interface>)(T&&)>>::value>* = nullptr>
+  template <typename T,
+            std::enable_if_t<std::is_same<
+                PendingRemote<Interface>,
+                std::invoke_result_t<decltype(&PendingRemoteConverter<
+                                              T>::template To<Interface>),
+                                     T&&>>::value>* = nullptr>
   PendingRemote(T&& other)
       : PendingRemote(PendingRemoteConverter<T>::template To<Interface>(
             std::move(other))) {}
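The four pending_* headers above make the same mechanical substitution: std::result_of, deprecated in C++17 and removed in C++20, becomes std::invoke_result. The two traits denote the same type; only the spelling changes, from the function-type form F(Args...) to a plain type list. A standalone illustration:

    #include <type_traits>

    int f(double);

    // Pre-C++20 spelling (removed from the standard library):
    //   using R = std::result_of_t<decltype(&f)(double)>;
    // C++17 replacement, as used in the headers above:
    using R = std::invoke_result_t<decltype(&f), double>;
    static_assert(std::is_same_v<R, int>, "same computed result type");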
diff --git a/mojo/public/cpp/bindings/remote_set.h b/mojo/public/cpp/bindings/remote_set.h
index db758cb..12dbb8c 100644
--- a/mojo/public/cpp/bindings/remote_set.h
+++ b/mojo/public/cpp/bindings/remote_set.h
@@ -79,8 +79,8 @@
 
     reference operator*() const { return it_->second; }
     pointer operator->() const { return &it_->second; }
-    bool operator==(const self_type& rhs) { return it_ == rhs.it_; }
-    bool operator!=(const self_type& rhs) { return it_ != rhs.it_; }
+    bool operator==(const self_type& rhs) const { return it_ == rhs.it_; }
+    bool operator!=(const self_type& rhs) const { return it_ != rhs.it_; }
 
    private:
     typename Storage::const_iterator it_;
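Const-qualifying the iterator comparisons in remote_set.h matters in two ways: comparisons must compile when the iterator itself is const, and under C++20's rewritten-candidate rules a non-const operator== can make symmetric comparisons ambiguous. A reduced sketch:

    struct Iter {
      int pos = 0;
      // Without the const qualifier, the call below would be ill-formed,
      // since |it| and |end| are const references.
      bool operator==(const Iter& rhs) const { return pos == rhs.pos; }
      bool operator!=(const Iter& rhs) const { return !(*this == rhs); }
    };

    bool AtEnd(const Iter& it, const Iter& end) {
      return it == end;
    }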
diff --git a/mojo/public/cpp/platform/named_platform_channel.cc b/mojo/public/cpp/platform/named_platform_channel.cc
index 3ddc8ec..7ce2d36 100644
--- a/mojo/public/cpp/platform/named_platform_channel.cc
+++ b/mojo/public/cpp/platform/named_platform_channel.cc
@@ -46,7 +46,15 @@
 PlatformChannelEndpoint NamedPlatformChannel::ConnectToServer(
     const ServerName& server_name) {
   DCHECK(!server_name.empty());
-  return CreateClientEndpoint(server_name);
+  Options options = {.server_name = server_name};
+  return CreateClientEndpoint(options);
+}
+
+// static
+PlatformChannelEndpoint NamedPlatformChannel::ConnectToServer(
+    const Options& options) {
+  DCHECK(!options.server_name.empty());
+  return CreateClientEndpoint(options);
 }
 
 // static
diff --git a/mojo/public/cpp/platform/named_platform_channel.h b/mojo/public/cpp/platform/named_platform_channel.h
index 9e79ba9..00bbd16 100644
--- a/mojo/public/cpp/platform/named_platform_channel.h
+++ b/mojo/public/cpp/platform/named_platform_channel.h
@@ -60,6 +60,9 @@
     // with a random name. This controls the directory where that happens.
     // Ignored if |server_name| was set explicitly.
     base::FilePath socket_dir;
+
+    // Use an abstract socket address instead of a filesystem path.
+    bool use_abstract_namespace = false;
 #endif
   };
 
@@ -103,6 +106,11 @@
   [[nodiscard]] static PlatformChannelEndpoint ConnectToServer(
       const ServerName& server_name);
 
+  // Like above, but passing an Options struct instead. |options.server_name|
+  // must be a non-empty string.
+  [[nodiscard]] static PlatformChannelEndpoint ConnectToServer(
+      const Options& options);
+
   // Like above, but extracts the server name from |command_line| using the
   // common |kNamedHandleSwitch| flag.
   [[nodiscard]] static PlatformChannelEndpoint ConnectToServer(
@@ -112,8 +120,7 @@
   static PlatformChannelServerEndpoint CreateServerEndpoint(
       const Options& options,
       ServerName* server_name);
-  static PlatformChannelEndpoint CreateClientEndpoint(
-      const ServerName& server_name);
+  static PlatformChannelEndpoint CreateClientEndpoint(const Options& options);
 
   ServerName server_name_;
   PlatformChannelServerEndpoint server_endpoint_;
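Taken together with the .cc change above, the new overload lets callers pass connection options instead of a bare name. A usage sketch against the API added here (POSIX spelling of ServerName; the socket name is hypothetical):

    mojo::NamedPlatformChannel::Options options;
    options.server_name = "my_service_socket";  // hypothetical name
    options.use_abstract_namespace = true;      // Linux/Android only
    mojo::PlatformChannelEndpoint endpoint =
        mojo::NamedPlatformChannel::ConnectToServer(options);
    if (!endpoint.is_valid()) {
      // Fall back or report; the posix implementation PLOGs on failure.
    }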
diff --git a/mojo/public/cpp/platform/named_platform_channel_posix.cc b/mojo/public/cpp/platform/named_platform_channel_posix.cc
index cbedae6..f0235cd 100644
--- a/mojo/public/cpp/platform/named_platform_channel_posix.cc
+++ b/mojo/public/cpp/platform/named_platform_channel_posix.cc
@@ -16,6 +16,8 @@
 #include "base/posix/eintr_wrapper.h"
 #include "base/rand_util.h"
 #include "base/strings/string_number_conversions.h"
+#include "net/base/sockaddr_storage.h"
+#include "net/base/sockaddr_util_posix.h"
 
 namespace mojo {
 
@@ -28,33 +30,30 @@
       .value();
 }
 
-// This function fills in |unix_addr| with the appropriate data for the socket,
-// and sets |unix_addr_len| to the length of the data therein.
-// Returns true on success, or false on failure (typically because |server_name|
-// violated the naming rules).
+// This function fills in |addr_storage| with the appropriate data for the
+// socket as well as the data's length. Returns true on success, or false on
+// failure (typically because |server_name| violated the naming rules). On
+// Linux and Android, setting |use_abstract_namespace| to true will return a
+// socket address for an abstract non-filesystem socket.
 bool MakeUnixAddr(const NamedPlatformChannel::ServerName& server_name,
-                  struct sockaddr_un* unix_addr,
-                  size_t* unix_addr_len) {
-  DCHECK(unix_addr);
-  DCHECK(unix_addr_len);
+                  bool use_abstract_namespace,
+                  net::SockaddrStorage* addr_storage) {
+  DCHECK(addr_storage);
   DCHECK(!server_name.empty());
 
   constexpr size_t kMaxSocketNameLength = 104;
 
   // We reject server_name.length() == kMaxSocketNameLength to make room for the
-  // NUL terminator at the end of the string.
+  // NUL terminator at the end of the string. For the Linux abstract namespace,
+  // the path has a leading NUL character instead (with no NUL terminator
+  // required). In both cases N+1 bytes are needed to fill the server name.
   if (server_name.length() >= kMaxSocketNameLength) {
     LOG(ERROR) << "Socket name too long: " << server_name;
     return false;
   }
 
-  // Create unix_addr structure.
-  memset(unix_addr, 0, sizeof(struct sockaddr_un));
-  unix_addr->sun_family = AF_UNIX;
-  strncpy(unix_addr->sun_path, server_name.c_str(), kMaxSocketNameLength);
-  *unix_addr_len =
-      offsetof(struct sockaddr_un, sun_path) + server_name.length();
-  return true;
+  return net::FillUnixAddress(server_name, use_abstract_namespace,
+                              addr_storage);
 }
 
 // This function creates a unix domain socket, and set it as non-blocking.
@@ -99,9 +98,8 @@
     return PlatformChannelServerEndpoint();
   }
 
-  struct sockaddr_un unix_addr;
-  size_t unix_addr_len;
-  if (!MakeUnixAddr(name, &unix_addr, &unix_addr_len))
+  net::SockaddrStorage storage;
+  if (!MakeUnixAddr(name, options.use_abstract_namespace, &storage))
     return PlatformChannelServerEndpoint();
 
   PlatformHandle handle = CreateUnixDomainSocket();
@@ -109,8 +107,7 @@
     return PlatformChannelServerEndpoint();
 
   // Bind the socket.
-  if (bind(handle.GetFD().get(), reinterpret_cast<const sockaddr*>(&unix_addr),
-           unix_addr_len) < 0) {
+  if (bind(handle.GetFD().get(), storage.addr, storage.addr_len) < 0) {
     PLOG(ERROR) << "bind " << name;
     return PlatformChannelServerEndpoint();
   }
@@ -128,22 +125,21 @@
 
 // static
 PlatformChannelEndpoint NamedPlatformChannel::CreateClientEndpoint(
-    const ServerName& server_name) {
-  DCHECK(!server_name.empty());
+    const Options& options) {
+  DCHECK(!options.server_name.empty());
 
-  struct sockaddr_un unix_addr;
-  size_t unix_addr_len;
-  if (!MakeUnixAddr(server_name, &unix_addr, &unix_addr_len))
+  net::SockaddrStorage storage;
+  if (!MakeUnixAddr(options.server_name, options.use_abstract_namespace,
+                    &storage))
     return PlatformChannelEndpoint();
 
   PlatformHandle handle = CreateUnixDomainSocket();
   if (!handle.is_valid())
     return PlatformChannelEndpoint();
 
-  if (HANDLE_EINTR(connect(handle.GetFD().get(),
-                           reinterpret_cast<sockaddr*>(&unix_addr),
-                           unix_addr_len)) < 0) {
-    PLOG(ERROR) << "connect " << server_name;
+  if (HANDLE_EINTR(
+          connect(handle.GetFD().get(), storage.addr, storage.addr_len)) < 0) {
+    PLOG(ERROR) << "connect " << options.server_name;
     return PlatformChannelEndpoint();
   }
   return PlatformChannelEndpoint(std::move(handle));
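For reference, a sketch of the two sun_path layouts the comment above describes. This illustrates the convention, and is not a copy of net::FillUnixAddress: a filesystem path is NUL-terminated in place, while an abstract address leads with a NUL byte and takes no terminator; the length arithmetic follows the N+1 description in the comment:

    #include <cstddef>
    #include <cstring>
    #include <string>
    #include <sys/socket.h>
    #include <sys/un.h>

    // Returns the sockaddr length to pass to bind()/connect(), or 0 if the
    // name does not fit (mirroring the kMaxSocketNameLength rejection).
    size_t FillSketch(const std::string& name, bool abstract_ns,
                      struct sockaddr_un* addr) {
      if (name.size() + 1 > sizeof(addr->sun_path))
        return 0;
      memset(addr, 0, sizeof(*addr));
      addr->sun_family = AF_UNIX;
      if (abstract_ns) {
        // Abstract namespace: sun_path[0] stays NUL, the name follows it.
        memcpy(addr->sun_path + 1, name.data(), name.size());
        return offsetof(struct sockaddr_un, sun_path) + 1 + name.size();
      }
      // Filesystem path: NUL-terminated (terminator comes from the memset).
      memcpy(addr->sun_path, name.data(), name.size());
      return offsetof(struct sockaddr_un, sun_path) + name.size() + 1;
    }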
diff --git a/mojo/public/java/bindings/src/org/chromium/mojo/bindings/Struct.java b/mojo/public/java/bindings/src/org/chromium/mojo/bindings/Struct.java
index 14f4e1e..5fd579c 100644
--- a/mojo/public/java/bindings/src/org/chromium/mojo/bindings/Struct.java
+++ b/mojo/public/java/bindings/src/org/chromium/mojo/bindings/Struct.java
@@ -61,8 +61,9 @@
         // UnsupportedOperationException.
         Message message = serialize(null);
 
-        if (!message.getHandles().isEmpty())
+        if (!message.getHandles().isEmpty()) {
             throw new UnsupportedOperationException("Handles are discarded.");
+        }
 
         return message.getData();
     }
diff --git a/mojo/public/tools/bindings/checks/mojom_attributes_check.py b/mojo/public/tools/bindings/checks/mojom_attributes_check.py
index 35c450b..fd15dfd 100644
--- a/mojo/public/tools/bindings/checks/mojom_attributes_check.py
+++ b/mojo/public/tools/bindings/checks/mojom_attributes_check.py
@@ -80,9 +80,6 @@
 
 # TODO(https://crbug.com/1193875) empty this set and remove the allowlist.
 _STABLE_ONLY_ALLOWLISTED_ENUMS = {
-    'ash.ime.mojom.CommitTextCursorBehavior',
-    'ash.ime.mojom.KeyEventResult',
-    'ash.ime.mojom.KeyEventType',
     'crosapi.mojom.OptionalBool',
     'crosapi.mojom.RequestActivityIconsStatus',
     'crosapi.mojom.RequestTextSelectionActionsStatus',
diff --git a/ui/gfx/geometry/insets_outsets_base.h b/ui/gfx/geometry/insets_outsets_base.h
index ce8fc3f..6211c74 100644
--- a/ui/gfx/geometry/insets_outsets_base.h
+++ b/ui/gfx/geometry/insets_outsets_base.h
@@ -107,12 +107,14 @@
     right_ = std::max(right_, other.right_);
   }
 
-  bool operator==(const T& other) const {
+  bool operator==(const InsetsOutsetsBase<T>& other) const {
     return top_ == other.top_ && left_ == other.left_ &&
            bottom_ == other.bottom_ && right_ == other.right_;
   }
 
-  bool operator!=(const T& other) const { return !(*this == other); }
+  bool operator!=(const InsetsOutsetsBase<T>& other) const {
+    return !(*this == other);
+  }
 
   void operator+=(const T& other) {
     top_ = base::ClampAdd(top_, other.top_);
diff --git a/ui/gfx/geometry/insets_outsets_f_base.h b/ui/gfx/geometry/insets_outsets_f_base.h
index 175655b..98c4296 100644
--- a/ui/gfx/geometry/insets_outsets_f_base.h
+++ b/ui/gfx/geometry/insets_outsets_f_base.h
@@ -90,12 +90,14 @@
   }
   void Scale(float scale) { Scale(scale, scale); }
 
-  bool operator==(const T& other) const {
+  bool operator==(const InsetsOutsetsFBase<T>& other) const {
     return top_ == other.top_ && left_ == other.left_ &&
            bottom_ == other.bottom_ && right_ == other.right_;
   }
 
-  bool operator!=(const T& other) const { return !(*this == other); }
+  bool operator!=(const InsetsOutsetsFBase<T>& other) const {
+    return !(*this == other);
+  }
 
   void operator+=(const T& other) {
     top_ += other.top_;
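Both geometry headers widen the comparison parameter from the derived type T to the CRTP base. One plausible reading (the diff gives no rationale): with operator==(const T&) the operand must already be the fully derived type, so code that only holds base references cannot compare. A reduced sketch:

    template <typename T>
    class Base {
     public:
      explicit Base(int v) : v_(v) {}
      // Old: bool operator==(const T& other) const;  // needs derived operand
      bool operator==(const Base<T>& other) const { return v_ == other.v_; }
      bool operator!=(const Base<T>& other) const { return !(*this == other); }

     private:
      int v_;
    };

    class Insets : public Base<Insets> {
     public:
      using Base::Base;
    };

    bool Same(const Base<Insets>& a, const Base<Insets>& b) {
      return a == b;  // compiles with the base-typed parameter; with
                      // operator==(const T&) this would not.
    }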
diff --git a/ui/gfx/geometry/mask_filter_info.cc b/ui/gfx/geometry/mask_filter_info.cc
index c033155..7762108 100644
--- a/ui/gfx/geometry/mask_filter_info.cc
+++ b/ui/gfx/geometry/mask_filter_info.cc
@@ -17,7 +17,9 @@
   if (!transform.TransformRRectF(&rounded_corner_bounds_))
     return false;
 
-  gradient_mask_.Transform(transform);
+  if (!gradient_mask_.IsEmpty())
+    gradient_mask_.Transform(transform);
+
   return true;
 }
 
diff --git a/ui/gfx/geometry/mask_filter_info.h b/ui/gfx/geometry/mask_filter_info.h
index c96bd18..6c33e8c 100644
--- a/ui/gfx/geometry/mask_filter_info.h
+++ b/ui/gfx/geometry/mask_filter_info.h
@@ -63,7 +63,7 @@
   RRectF rounded_corner_bounds_;
 
   // Shader based linear gradient mask to be applied to a layer.
-  gfx::LinearGradient gradient_mask_;
+  gfx::LinearGradient gradient_mask_ = gfx::LinearGradient::GetEmpty();
 };
 
 inline bool operator==(const MaskFilterInfo& lhs, const MaskFilterInfo& rhs) {