Automated commit: libchrome r1115543 uprev

Merge with upstream commit 13f84979b46045c5226bb7ad16312247668729a2

Files:
  * Add the testing gmock-matcher.h header.

Long-term patch:
  * Use template_util in place of C++20's std::remove_cvref_t.
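
For context, a minimal sketch of what such a polyfill looks like, assuming
template_util mirrors the standard trait (illustration only; the actual
definition lives in base/template_util.h):

  // C++14-compatible stand-in for C++20's std::remove_cvref_t.
  template <typename T>
  struct remove_cvref {
    using type = std::remove_cv_t<std::remove_reference_t<T>>;
  };
  template <typename T>
  using remove_cvref_t = typename remove_cvref<T>::type;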

BUG=None
TEST=sudo emerge libchrome

Cq-Depend: chromium:4328868
Change-Id: I094148517563a1883ca814eb944cebe07d0d05af
diff --git a/BASE_VER b/BASE_VER
index 91cd32a..049817e 100644
--- a/BASE_VER
+++ b/BASE_VER
@@ -1 +1 @@
-1114942
+1115543
diff --git a/BUILD.gn b/BUILD.gn
index 1c08b18..435fdde 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -1234,6 +1234,7 @@
   executable("optional_unittests") {
     sources = [
       "base/test/gtest_util.cc",
+      "base/test/values_test_util.cc",
       "base/types/optional_unittest.cc",
     ]
     configs += [
diff --git a/base/allocator/partition_allocator/compressed_pointer.h b/base/allocator/partition_allocator/compressed_pointer.h
index 109b8a3..20de170 100644
--- a/base/allocator/partition_allocator/compressed_pointer.h
+++ b/base/allocator/partition_allocator/compressed_pointer.h
@@ -460,13 +460,13 @@
 
   template <typename U,
             std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
-  constexpr PA_ALWAYS_INLINE UncompressedPointer(
+  constexpr PA_ALWAYS_INLINE explicit UncompressedPointer(
       const UncompressedPointer<U>& other)
       : ptr_(other.ptr_) {}
 
   template <typename U,
             std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
-  constexpr PA_ALWAYS_INLINE UncompressedPointer(
+  constexpr PA_ALWAYS_INLINE explicit UncompressedPointer(
       UncompressedPointer<U>&& other) noexcept
       : ptr_(std::move(other.ptr_)) {}
 
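Making these converting constructors explicit disables implicit conversion
between differently-typed UncompressedPointers; call sites must construct
explicitly. A hedged illustration with hypothetical Base/Derived types:

  struct Base {};
  struct Derived : Base {};
  UncompressedPointer<Derived> d{...};
  UncompressedPointer<Base> copy1 = d;  // no longer compiles after this change
  UncompressedPointer<Base> copy2(d);   // OK: explicit construction
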
diff --git a/base/allocator/partition_allocator/page_allocator.cc b/base/allocator/partition_allocator/page_allocator.cc
index 04fb98d..aaa6906 100644
--- a/base/allocator/partition_allocator/page_allocator.cc
+++ b/base/allocator/partition_allocator/page_allocator.cc
@@ -113,10 +113,11 @@
 
   uintptr_t actual_offset = address & (alignment - 1);
   uintptr_t new_address;
-  if (actual_offset <= requested_offset)
+  if (actual_offset <= requested_offset) {
     new_address = address + requested_offset - actual_offset;
-  else
+  } else {
     new_address = address + alignment + requested_offset - actual_offset;
+  }
   PA_DCHECK(new_address >= address);
   PA_DCHECK(new_address - address < alignment);
   PA_DCHECK(new_address % alignment == requested_offset);
@@ -135,8 +136,9 @@
   PA_DCHECK(!(hint & internal::PageAllocationGranularityOffsetMask()));
   uintptr_t ret = internal::SystemAllocPagesInternal(
       hint, length, accessibility, page_tag, file_descriptor_for_shared_alloc);
-  if (ret)
+  if (ret) {
     g_total_mapped_address_space.fetch_add(length, std::memory_order_relaxed);
+  }
 
   return ret;
 }
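
A worked example of the offset arithmetic above (illustrative values, not
taken from the diff): let alignment = 0x1000 and requested_offset = 0x10.
For address = 0x5008, actual_offset = 0x8 <= 0x10, so
new_address = 0x5008 + 0x10 - 0x8 = 0x5010. For address = 0x5018,
actual_offset = 0x18 > 0x10, so
new_address = 0x5018 + 0x1000 + 0x10 - 0x18 = 0x6010. Both results satisfy
the PA_DCHECKs: new_address - address < alignment and
new_address % alignment == requested_offset.
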
@@ -210,14 +212,16 @@
                                     file_descriptor_for_shared_alloc);
     if (ret) {
       // If the alignment is to our liking, we're done.
-      if ((ret & align_offset_mask) == align_offset)
+      if ((ret & align_offset_mask) == align_offset) {
         return ret;
+      }
       // Free the memory and try again.
       FreePages(ret, length);
     } else {
       // |ret| is null; if this try was unhinted, we're OOM.
-      if (internal::kHintIsAdvisory || !address)
+      if (internal::kHintIsAdvisory || !address) {
         return 0;
+      }
     }
 
 #if defined(ARCH_CPU_32_BITS)
@@ -368,8 +372,9 @@
 bool ReleaseReservation() {
   // To avoid deadlock, call only FreePages.
   internal::ScopedGuard guard(GetReserveLock());
-  if (!s_reservation_address)
+  if (!s_reservation_address) {
     return false;
+  }
 
   FreePages(s_reservation_address, s_reservation_size);
   s_reservation_address = 0;
diff --git a/base/allocator/partition_allocator/page_allocator_constants.h b/base/allocator/partition_allocator/page_allocator_constants.h
index de43af7..1de88e2 100644
--- a/base/allocator/partition_allocator/page_allocator_constants.h
+++ b/base/allocator/partition_allocator/page_allocator_constants.h
@@ -7,8 +7,8 @@
 
 #include <stddef.h>
 
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
 #include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
 #include "build/build_config.h"
 
 #if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
diff --git a/base/allocator/partition_allocator/page_allocator_internals_posix.h b/base/allocator/partition_allocator/page_allocator_internals_posix.h
index c3b5dbf..c7ab6de 100644
--- a/base/allocator/partition_allocator/page_allocator_internals_posix.h
+++ b/base/allocator/partition_allocator/page_allocator_internals_posix.h
@@ -130,8 +130,9 @@
   base::ScopedCFTypeRef<CFTypeRef> jit_entitlement(
       SecTaskCopyValueForEntitlement(
           task.get(), CFSTR("com.apple.security.cs.allow-jit"), nullptr));
-  if (!jit_entitlement)
+  if (!jit_entitlement) {
     return false;
+  }
 
   return base::mac::CFCast<CFBooleanRef>(jit_entitlement.get()) ==
          kCFBooleanTrue;
@@ -248,8 +249,9 @@
   //
   // In this case, we are almost certainly bumping into the sandbox limit, so
   // mark the crash as OOM. See SandboxLinux::LimitAddressSpace() for details.
-  if (ret == -1 && errno == ENOMEM && (access_flags & PROT_WRITE))
+  if (ret == -1 && errno == ENOMEM && (access_flags & PROT_WRITE)) {
     OOM_CRASH(length);
+  }
 
   PA_PCHECK(0 == ret);
 }
@@ -365,8 +367,9 @@
   if (accessibility_disposition ==
       PageAccessibilityDisposition::kRequireUpdate) {
     bool ok = TrySetSystemPagesAccess(address, length, accessibility);
-    if (!ok)
+    if (!ok) {
       return false;
+    }
   }
 
 #if BUILDFLAG(IS_APPLE)
diff --git a/base/allocator/partition_allocator/page_allocator_unittest.cc b/base/allocator/partition_allocator/page_allocator_unittest.cc
index 95fe33d..8fef6e6 100644
--- a/base/allocator/partition_allocator/page_allocator_unittest.cc
+++ b/base/allocator/partition_allocator/page_allocator_unittest.cc
@@ -127,8 +127,9 @@
 
   size_t size = HugeMemoryAmount();
   // Skip the test for sanitizers and platforms with ASLR turned off.
-  if (size == 0)
+  if (size == 0) {
     return;
+  }
 
   uintptr_t result =
       AllocPages(size, PageAllocationGranularity(),
@@ -160,8 +161,9 @@
 
   size_t size = HugeMemoryAmount();
   // Skip the test for sanitizers and platforms with ASLR turned off.
-  if (size == 0)
+  if (size == 0) {
     return;
+  }
 
   bool success = ReserveAddressSpace(size);
   if (!success) {
@@ -517,8 +519,9 @@
 #endif  // BUILDFLAG(IS_ANDROID)
 
 TEST(PartitionAllocPageAllocatorTest, DecommitErasesMemory) {
-  if (!DecommittedMemoryIsAlwaysZeroed())
+  if (!DecommittedMemoryIsAlwaysZeroed()) {
     return;
+  }
 
   size_t size = PageAllocationGranularity();
   uintptr_t buffer = AllocPages(size, PageAllocationGranularity(),
diff --git a/base/allocator/partition_allocator/partition_alloc.gni b/base/allocator/partition_allocator/partition_alloc.gni
index f0fb597..eec5ce3 100644
--- a/base/allocator/partition_allocator/partition_alloc.gni
+++ b/base/allocator/partition_allocator/partition_alloc.gni
@@ -68,6 +68,10 @@
   # recommended to enable PA-E above, but isn't strictly necessary. Embedders
   # can create and use PA partitions explicitly.
   enable_pointer_compression_support = false
+
+  # Enables a bounds check when two pointers (at least one being raw_ptr) are
+  # subtracted (if supported by the underlying implementation).
+  enable_pointer_subtraction_check = false
 }
 
 declare_args() {
@@ -204,6 +208,7 @@
   enable_backup_ref_ptr_slow_checks = false
   enable_dangling_raw_ptr_checks = false
   enable_dangling_raw_ptr_perf_experiment = false
+  enable_pointer_subtraction_check = false
   backup_ref_ptr_poison_oob_ptr = false
   use_starscan = false
 }
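
To make the new flag concrete: with enable_pointer_subtraction_check on,
subtracting two raw_ptrs that point into different allocations trips a
PA_BASE_CHECK. A sketch reusing the protected_ptr names from the
raw_ptr_unittest.cc hunk further below (assuming, as in that test, that
ptr1 and ptr2 come from separate PartitionAlloc allocations):

  // Same allocation: ordinary pointer arithmetic, no check fires.
  ptrdiff_t ok = protected_ptr1_4 - protected_ptr1;  // == 4
  // Different allocations: the subtraction check aborts the process.
  ptrdiff_t bad = protected_ptr2 - protected_ptr1;   // PA_BASE_CHECK failure
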
diff --git a/base/allocator/partition_allocator/partition_alloc_check.h b/base/allocator/partition_allocator/partition_alloc_check.h
index 243fc49..4539abc 100644
--- a/base/allocator/partition_allocator/partition_alloc_check.h
+++ b/base/allocator/partition_allocator/partition_alloc_check.h
@@ -134,8 +134,9 @@
 
     for (int index = 0; index < 8; index++) {
       k[index] = key[index];
-      if (key[index] == '\0')
+      if (key[index] == '\0') {
         break;
+      }
     }
   }
 };
diff --git a/base/allocator/partition_allocator/partition_alloc_hooks.cc b/base/allocator/partition_allocator/partition_alloc_hooks.cc
index 61dfc37..79c4ccf 100644
--- a/base/allocator/partition_allocator/partition_alloc_hooks.cc
+++ b/base/allocator/partition_allocator/partition_alloc_hooks.cc
@@ -71,8 +71,9 @@
     void* address,
     size_t size,
     const char* type_name) {
-  if (auto* hook = allocation_observer_hook_.load(std::memory_order_relaxed))
+  if (auto* hook = allocation_observer_hook_.load(std::memory_order_relaxed)) {
     hook(address, size, type_name);
+  }
 }
 
 bool PartitionAllocHooks::AllocationOverrideHookIfEnabled(
@@ -80,19 +81,22 @@
     unsigned int flags,
     size_t size,
     const char* type_name) {
-  if (auto* hook = allocation_override_hook_.load(std::memory_order_relaxed))
+  if (auto* hook = allocation_override_hook_.load(std::memory_order_relaxed)) {
     return hook(out, flags, size, type_name);
+  }
   return false;
 }
 
 void PartitionAllocHooks::FreeObserverHookIfEnabled(void* address) {
-  if (auto* hook = free_observer_hook_.load(std::memory_order_relaxed))
+  if (auto* hook = free_observer_hook_.load(std::memory_order_relaxed)) {
     hook(address);
+  }
 }
 
 bool PartitionAllocHooks::FreeOverrideHookIfEnabled(void* address) {
-  if (auto* hook = free_override_hook_.load(std::memory_order_relaxed))
+  if (auto* hook = free_override_hook_.load(std::memory_order_relaxed)) {
     return hook(address);
+  }
   return false;
 }
 
diff --git a/base/allocator/partition_allocator/partition_alloc_perftest.cc b/base/allocator/partition_allocator/partition_alloc_perftest.cc
index cce6824..c002aed 100644
--- a/base/allocator/partition_allocator/partition_alloc_perftest.cc
+++ b/base/allocator/partition_allocator/partition_alloc_perftest.cc
@@ -420,8 +420,9 @@
     total_laps_per_second += laps_per_second;
   }
 
-  if (noisy_neighbor_thread)
+  if (noisy_neighbor_thread) {
     noisy_neighbor_thread->Run();
+  }
 
   char const* alloc_type_str;
   switch (alloc_type) {
diff --git a/base/allocator/partition_allocator/partition_bucket.h b/base/allocator/partition_allocator/partition_bucket.h
index 3f7e61f..ef9e367 100644
--- a/base/allocator/partition_allocator/partition_bucket.h
+++ b/base/allocator/partition_allocator/partition_bucket.h
@@ -87,8 +87,9 @@
     // subsequent PartitionPage to store the raw size. It isn't only metadata
     // space, though; slot spans that have more than one slot can't have raw
     // size stored, because we wouldn't know which slot it applies to.
-    if (PA_LIKELY(slot_size <= MaxRegularSlotSpanSize()))
+    if (PA_LIKELY(slot_size <= MaxRegularSlotSpanSize())) {
       return false;
+    }
 
     PA_DCHECK((slot_size % SystemPageSize()) == 0);
     PA_DCHECK(is_direct_mapped() || get_slots_per_span() == 1);
diff --git a/base/allocator/partition_allocator/partition_bucket_lookup.h b/base/allocator/partition_allocator/partition_bucket_lookup.h
index 8f0c2e6..edd3c9f 100644
--- a/base/allocator/partition_allocator/partition_bucket_lookup.h
+++ b/base/allocator/partition_allocator/partition_bucket_lookup.h
@@ -25,15 +25,17 @@
 // sub_order_index_mask is a mask for the remaining bits == 11 (masking to 01
 // for the sub_order_index).
 constexpr uint8_t OrderIndexShift(uint8_t order) {
-  if (order < kNumBucketsPerOrderBits + 1)
+  if (order < kNumBucketsPerOrderBits + 1) {
     return 0;
+  }
 
   return order - (kNumBucketsPerOrderBits + 1);
 }
 
 constexpr size_t OrderSubIndexMask(uint8_t order) {
-  if (order == kBitsPerSizeT)
+  if (order == kBitsPerSizeT) {
     return static_cast<size_t>(-1) >> (kNumBucketsPerOrderBits + 1);
+  }
 
   return ((static_cast<size_t>(1) << order) - 1) >>
          (kNumBucketsPerOrderBits + 1);
@@ -262,10 +264,11 @@
   //
  // We also do not want to go above the index for the max bucketed size.
   if (size > kAlignment * kNumBucketsPerOrder &&
-      index < GetIndexFor8Buckets(kMaxBucketed))
+      index < GetIndexFor8Buckets(kMaxBucketed)) {
     return RoundUpToOdd(index);
-  else
+  } else {
     return index;
+  }
 }
 
 // static
@@ -288,8 +291,9 @@
   //
   // So, an allocation of size 1.4*2^10 would go into the 1.5*2^10 bucket under
   // Distribution A, but to the 2^11 bucket under Distribution B.
-  if (1 << 8 < size && size < kHighThresholdForAlternateDistribution)
+  if (1 << 8 < size && size < kHighThresholdForAlternateDistribution) {
     return BucketIndexLookup::GetIndexForDenserBuckets(RoundUpSize(size));
+  }
   return BucketIndexLookup::GetIndexForDenserBuckets(size);
 }
 
diff --git a/base/allocator/partition_allocator/partition_cookie.h b/base/allocator/partition_allocator/partition_cookie.h
index 1877eb9..7c6b4a2 100644
--- a/base/allocator/partition_allocator/partition_cookie.h
+++ b/base/allocator/partition_allocator/partition_cookie.h
@@ -23,13 +23,15 @@
 constexpr size_t kPartitionCookieSizeAdjustment = kCookieSize;
 
 PA_ALWAYS_INLINE void PartitionCookieCheckValue(unsigned char* cookie_ptr) {
-  for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr)
+  for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr) {
     PA_DCHECK(*cookie_ptr == kCookieValue[i]);
+  }
 }
 
 PA_ALWAYS_INLINE void PartitionCookieWriteValue(unsigned char* cookie_ptr) {
-  for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr)
+  for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr) {
     *cookie_ptr = kCookieValue[i];
+  }
 }
 
 #else
diff --git a/base/allocator/partition_allocator/partition_freelist_entry.h b/base/allocator/partition_allocator/partition_freelist_entry.h
index b694471..e99460b 100644
--- a/base/allocator/partition_allocator/partition_freelist_entry.h
+++ b/base/allocator/partition_allocator/partition_freelist_entry.h
@@ -260,11 +260,12 @@
     bool not_in_metadata =
         (next_address & kSuperPageOffsetMask) >= PartitionPageSize();
 
-    if (for_thread_cache)
+    if (for_thread_cache) {
       return shadow_ptr_ok & not_in_metadata;
-    else
+    } else {
       return shadow_ptr_ok & same_superpage & marked_as_free_in_bitmap &
              not_in_metadata;
+    }
   }
 
   EncodedPartitionFreelistEntryPtr encoded_next_;
@@ -297,8 +298,9 @@
                                         bool for_thread_cache) const {
   // GetNext() can be called on discarded memory, in which case |encoded_next_|
   // is 0, and none of the checks apply. Don't prefetch nullptr either.
-  if (IsEncodedNextPtrZero())
+  if (IsEncodedNextPtrZero()) {
     return nullptr;
+  }
 
   auto* ret = encoded_next_.Decode();
   // We rely on constant propagation to remove the branches coming from
diff --git a/base/allocator/partition_allocator/partition_page.cc b/base/allocator/partition_allocator/partition_page.cc
index b5f797c..e6d9e2f 100644
--- a/base/allocator/partition_allocator/partition_page.cc
+++ b/base/allocator/partition_allocator/partition_page.cc
@@ -105,8 +105,9 @@
       root->global_empty_slot_span_ring[current_index];
   // The slot span might well have been re-activated, filled up, etc. before we
   // get around to looking at it here.
-  if (slot_span_to_decommit)
+  if (slot_span_to_decommit) {
     slot_span_to_decommit->DecommitIfPossible(root);
+  }
 
   // We put the empty slot span on our global list of "slot spans that were once
   // empty", thus providing it a bit of breathing room to get re-used before we
@@ -116,8 +117,9 @@
   empty_cache_index_ = current_index;
   in_empty_cache_ = 1;
   ++current_index;
-  if (current_index == root->global_empty_slot_span_ring_size)
+  if (current_index == root->global_empty_slot_span_ring_size) {
     current_index = 0;
+  }
   root->global_empty_slot_span_ring_index = current_index;
 
   // Avoid wasting too much memory on empty slot spans. Note that we only divide
@@ -185,8 +187,9 @@
     // chances of it being filled up again. The old current slot span will be
     // the next slot span.
     PA_DCHECK(!next_slot_span);
-    if (PA_LIKELY(bucket->active_slot_spans_head != get_sentinel_slot_span()))
+    if (PA_LIKELY(bucket->active_slot_spans_head != get_sentinel_slot_span())) {
       next_slot_span = bucket->active_slot_spans_head;
+    }
     bucket->active_slot_spans_head = this;
     PA_CHECK(bucket->num_full_slot_spans);  // Underflow.
     --bucket->num_full_slot_spans;
@@ -203,12 +206,14 @@
 #endif
     // If it's the current active slot span, change it. We bounce the slot span
     // to the empty list as a force towards defragmentation.
-    if (PA_LIKELY(this == bucket->active_slot_spans_head))
+    if (PA_LIKELY(this == bucket->active_slot_spans_head)) {
       bucket->SetNewActiveSlotSpan();
+    }
     PA_DCHECK(bucket->active_slot_spans_head != this);
 
-    if (CanStoreRawSize())
+    if (CanStoreRawSize()) {
       SetRawSize(0);
+    }
 
     RegisterEmpty();
   }
@@ -259,8 +264,9 @@
   PA_DCHECK(empty_cache_index_ < kMaxFreeableSpans);
   PA_DCHECK(this == root->global_empty_slot_span_ring[empty_cache_index_]);
   in_empty_cache_ = 0;
-  if (is_empty())
+  if (is_empty()) {
     Decommit(root);
+  }
 }
 
 template <bool thread_safe>
@@ -295,10 +301,11 @@
         uintptr_t slot_start = slot_span_start + (slot_size * slot_number);
         auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(slot_start);
 
-        if (!head)
+        if (!head) {
           head = entry;
-        else
+        } else {
           back->SetNext(entry);
+        }
 
         back = entry;
       }
diff --git a/base/allocator/partition_allocator/partition_page.h b/base/allocator/partition_allocator/partition_page.h
index ec4a60f..1163270 100644
--- a/base/allocator/partition_allocator/partition_page.h
+++ b/base/allocator/partition_allocator/partition_page.h
@@ -960,8 +960,9 @@
       break;
     }
     slot_span = &page->slot_span_metadata;
-    if (callback(slot_span))
+    if (callback(slot_span)) {
       return;
+    }
     page += slot_span->bucket->get_pages_per_slot_span();
   }
   // Each super page must have at least one valid slot span.
diff --git a/base/allocator/partition_allocator/partition_ref_count.h b/base/allocator/partition_allocator/partition_ref_count.h
index ec0f4df..459a4ad 100644
--- a/base/allocator/partition_allocator/partition_ref_count.h
+++ b/base/allocator/partition_allocator/partition_ref_count.h
@@ -190,8 +190,9 @@
     CountType old_count =
         count_.fetch_and(~kMemoryHeldByAllocatorBit, std::memory_order_release);
 
-    if (PA_UNLIKELY(!(old_count & kMemoryHeldByAllocatorBit)))
+    if (PA_UNLIKELY(!(old_count & kMemoryHeldByAllocatorBit))) {
       DoubleFreeOrCorruptionDetected(old_count);
+    }
 
     if (PA_LIKELY((old_count & ~kNeedsMac11MallocSizeHackBit) ==
                   kMemoryHeldByAllocatorBit)) {
@@ -226,8 +227,9 @@
   PA_ALWAYS_INLINE bool IsAlive() {
     bool alive =
         count_.load(std::memory_order_relaxed) & kMemoryHeldByAllocatorBit;
-    if (alive)
+    if (alive) {
       CheckCookieIfSupported();
+    }
     return alive;
   }
 
diff --git a/base/allocator/partition_allocator/partition_tag.h b/base/allocator/partition_allocator/partition_tag.h
index e00faec..67509fa 100644
--- a/base/allocator/partition_allocator/partition_tag.h
+++ b/base/allocator/partition_allocator/partition_tag.h
@@ -88,8 +88,9 @@
   if (sizeof(PartitionTag) == 1) {
     memset(tag_ptr, value, tag_count);
   } else {
-    while (tag_count-- > 0)
+    while (tag_count-- > 0) {
       *tag_ptr++ = value;
+    }
   }
 }
 
diff --git a/base/allocator/partition_allocator/partition_tls.h b/base/allocator/partition_allocator/partition_tls.h
index 2891e96..420559f 100644
--- a/base/allocator/partition_allocator/partition_tls.h
+++ b/base/allocator/partition_allocator/partition_tls.h
@@ -108,8 +108,9 @@
   DWORD saved_error = GetLastError();
   void* ret = TlsGetValue(key);
   // Only non-zero errors need to be restored.
-  if (PA_UNLIKELY(saved_error))
+  if (PA_UNLIKELY(saved_error)) {
     SetLastError(saved_error);
+  }
   return ret;
 }
 
diff --git a/base/allocator/partition_allocator/pointers/raw_ptr.h b/base/allocator/partition_allocator/pointers/raw_ptr.h
index 63ad1f2..da7054a 100644
--- a/base/allocator/partition_allocator/pointers/raw_ptr.h
+++ b/base/allocator/partition_allocator/pointers/raw_ptr.h
@@ -481,6 +481,8 @@
     if (partition_alloc::internal::base::is_constant_evaluated()) {
       return wrapped_ptr1 - wrapped_ptr2;
     }
+
+#if BUILDFLAG(ENABLE_POINTER_SUBTRACTION_CHECK)
     // Ensure that both pointers come from the same allocation.
     //
     // Disambiguation: UntagPtr removes the hardware MTE tag, whereas this
@@ -497,6 +499,7 @@
       PA_BASE_CHECK(tag1 == tag2);
       return wrapped_ptr1 - wrapped_ptr2;
     }
+#endif  // BUILDFLAG(ENABLE_POINTER_SUBTRACTION_CHECK)
 
     // If one or the other arg come untagged, we have to perform the
     // subtraction entirely without tags.
@@ -1093,11 +1096,13 @@
   }
 
   // Do not disable operator+() and operator-().
-  // They provide OOB checks. Keep them enabled, which may be blocked later when
-  // attempting to apply the += or -= operation, when disabled. In the absence
-  // of operators +/-, the compiler is free to implicitly convert to the
-  // underlying T* representation and perform ordinary pointer arithmetic, thus
-  // invalidating the purpose behind disabling them.
+  // They provide OOB checks, which prevent assigning an arbitrary value to a
+  // raw_ptr and thereby tricking BRP into modifying arbitrary memory that it
+  // mistakes for a ref-count. Keep them enabled; even then, the operation may
+  // be blocked later, when the += or -= operator is applied, if arithmetic is
+  // disabled. In the absence of operators +/-, the compiler is free to
+  // implicitly convert to the underlying T* representation and perform
+  // ordinary pointer arithmetic, thus invalidating the purpose behind
+  // disabling them.
   template <typename Z>
   friend constexpr PA_ALWAYS_INLINE raw_ptr operator+(const raw_ptr& p,
                                                       Z delta_elems) {
diff --git a/base/allocator/partition_allocator/pointers/raw_ptr_backup_ref_impl.h b/base/allocator/partition_allocator/pointers/raw_ptr_backup_ref_impl.h
index b941ec5..5930eaa 100644
--- a/base/allocator/partition_allocator/pointers/raw_ptr_backup_ref_impl.h
+++ b/base/allocator/partition_allocator/pointers/raw_ptr_backup_ref_impl.h
@@ -365,8 +365,10 @@
     if (partition_alloc::internal::base::is_constant_evaluated()) {
       return wrapped_ptr1 - wrapped_ptr2;
     }
+
     T* unpoisoned_ptr1 = UnpoisonPtr(wrapped_ptr1);
     T* unpoisoned_ptr2 = UnpoisonPtr(wrapped_ptr2);
+#if BUILDFLAG(ENABLE_POINTER_SUBTRACTION_CHECK)
     if (partition_alloc::internal::base::is_constant_evaluated()) {
       return unpoisoned_ptr1 - unpoisoned_ptr2;
     }
@@ -382,6 +384,7 @@
     } else {
       PA_BASE_CHECK(!IsSupportedAndNotNull(address2));
     }
+#endif  // BUILDFLAG(ENABLE_POINTER_SUBTRACTION_CHECK)
     return unpoisoned_ptr1 - unpoisoned_ptr2;
   }
 
diff --git a/base/allocator/partition_allocator/pointers/raw_ptr_unittest.cc b/base/allocator/partition_allocator/pointers/raw_ptr_unittest.cc
index ac930da..dec145f 100644
--- a/base/allocator/partition_allocator/pointers/raw_ptr_unittest.cc
+++ b/base/allocator/partition_allocator/pointers/raw_ptr_unittest.cc
@@ -30,6 +30,7 @@
 #include "base/task/thread_pool.h"
 #include "base/test/bind.h"
 #include "base/test/gtest_util.h"
+#include "base/test/memory/dangling_ptr_instrumentation.h"
 #include "base/test/scoped_feature_list.h"
 #include "base/test/task_environment.h"
 #include "build/build_config.h"
@@ -1657,7 +1658,7 @@
   **protected_arr_ptr = 4;
   protected_arr_ptr++;
   EXPECT_DEATH_IF_SUPPORTED(** protected_arr_ptr = 4, "");
-#endif
+#endif  // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
 
   allocator.root()->Free(ptr);
 }
@@ -1736,6 +1737,7 @@
             checked_cast<ptrdiff_t>(requested_size));
   EXPECT_EQ(protected_ptr1 - protected_ptr1_4,
             -checked_cast<ptrdiff_t>(requested_size));
+#if BUILDFLAG(ENABLE_POINTER_SUBTRACTION_CHECK)
   EXPECT_CHECK_DEATH(protected_ptr2 - protected_ptr1);
   EXPECT_CHECK_DEATH(protected_ptr1 - protected_ptr2);
   EXPECT_CHECK_DEATH(protected_ptr2 - protected_ptr1_4);
@@ -1744,6 +1746,7 @@
   EXPECT_CHECK_DEATH(protected_ptr1 - protected_ptr2_2);
   EXPECT_CHECK_DEATH(protected_ptr2_2 - protected_ptr1_4);
   EXPECT_CHECK_DEATH(protected_ptr1_4 - protected_ptr2_2);
+#endif  // BUILDFLAG(ENABLE_POINTER_SUBTRACTION_CHECK)
   EXPECT_EQ(protected_ptr2_2 - protected_ptr2, 1);
   EXPECT_EQ(protected_ptr2 - protected_ptr2_2, -1);
 
@@ -1973,7 +1976,8 @@
 #else
   allocator_.root()->Free(ptr.get());
   ptr = nullptr;
-#endif
+#endif  // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS) && \
+        // !BUILDFLAG(ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT)
 }
 
 TEST_F(BackupRefPtrTest, SpatialAlgoCompat) {
@@ -1990,8 +1994,6 @@
       reinterpret_cast<int*>(allocator_.root()->Alloc(requested_size, ""));
   int* ptr_end = ptr + requested_elements;
 
-  RawPtrCountingImpl::ClearCounters();
-
   CountingRawPtr<int> protected_ptr = ptr;
   CountingRawPtr<int> protected_ptr_end = protected_ptr + requested_elements;
 
@@ -1999,6 +2001,8 @@
   EXPECT_DEATH_IF_SUPPORTED(*protected_ptr_end = 1, "");
 #endif
 
+  RawPtrCountingImpl::ClearCounters();
+
   int gen_val = 1;
   std::generate(protected_ptr, protected_ptr_end, [&gen_val]() {
     gen_val ^= gen_val + 1;
@@ -2509,5 +2513,64 @@
 
 #endif  // BUILDFLAG(USE_HOOKABLE_RAW_PTR)
 
+TEST(DanglingPtrTest, DetectAndReset) {
+  auto instrumentation = test::DanglingPtrInstrumentation::Create();
+  if (!instrumentation.has_value()) {
+    GTEST_SKIP() << instrumentation.error();
+  }
+
+  auto owned_ptr = std::make_unique<int>(42);
+  raw_ptr<int> dangling_ptr = owned_ptr.get();
+  EXPECT_EQ(instrumentation->dangling_ptr_detected(), 0u);
+  EXPECT_EQ(instrumentation->dangling_ptr_released(), 0u);
+  owned_ptr.reset();
+  EXPECT_EQ(instrumentation->dangling_ptr_detected(), 1u);
+  EXPECT_EQ(instrumentation->dangling_ptr_released(), 0u);
+  dangling_ptr = nullptr;
+  EXPECT_EQ(instrumentation->dangling_ptr_detected(), 1u);
+  EXPECT_EQ(instrumentation->dangling_ptr_released(), 1u);
+}
+
+TEST(DanglingPtrTest, DetectAndDestructor) {
+  auto instrumentation = test::DanglingPtrInstrumentation::Create();
+  if (!instrumentation.has_value()) {
+    GTEST_SKIP() << instrumentation.error();
+  }
+
+  auto owned_ptr = std::make_unique<int>(42);
+  {
+    [[maybe_unused]] raw_ptr<int> dangling_ptr = owned_ptr.get();
+    EXPECT_EQ(instrumentation->dangling_ptr_detected(), 0u);
+    EXPECT_EQ(instrumentation->dangling_ptr_released(), 0u);
+    owned_ptr.reset();
+    EXPECT_EQ(instrumentation->dangling_ptr_detected(), 1u);
+    EXPECT_EQ(instrumentation->dangling_ptr_released(), 0u);
+  }
+  EXPECT_EQ(instrumentation->dangling_ptr_detected(), 1u);
+  EXPECT_EQ(instrumentation->dangling_ptr_released(), 1u);
+}
+
+TEST(DanglingPtrTest, DetectResetAndDestructor) {
+  auto instrumentation = test::DanglingPtrInstrumentation::Create();
+  if (!instrumentation.has_value()) {
+    GTEST_SKIP() << instrumentation.error();
+  }
+
+  auto owned_ptr = std::make_unique<int>(42);
+  {
+    raw_ptr<int> dangling_ptr = owned_ptr.get();
+    EXPECT_EQ(instrumentation->dangling_ptr_detected(), 0u);
+    EXPECT_EQ(instrumentation->dangling_ptr_released(), 0u);
+    owned_ptr.reset();
+    EXPECT_EQ(instrumentation->dangling_ptr_detected(), 1u);
+    EXPECT_EQ(instrumentation->dangling_ptr_released(), 0u);
+    dangling_ptr = nullptr;
+    EXPECT_EQ(instrumentation->dangling_ptr_detected(), 1u);
+    EXPECT_EQ(instrumentation->dangling_ptr_released(), 1u);
+  }
+  EXPECT_EQ(instrumentation->dangling_ptr_detected(), 1u);
+  EXPECT_EQ(instrumentation->dangling_ptr_released(), 1u);
+}
+
 }  // namespace internal
 }  // namespace base
diff --git a/base/allocator/partition_allocator/shim/allocator_shim_unittest.cc b/base/allocator/partition_allocator/shim/allocator_shim_unittest.cc
index b68f6fe..626d098 100644
--- a/base/allocator/partition_allocator/shim/allocator_shim_unittest.cc
+++ b/base/allocator/partition_allocator/shim/allocator_shim_unittest.cc
@@ -19,7 +19,6 @@
 #include "base/memory/page_size.h"
 #include "base/synchronization/waitable_event.h"
 #include "base/threading/platform_thread.h"
-#include "base/threading/thread_local.h"
 #include "build/build_config.h"
 #include "testing/gmock/include/gmock/gmock.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -113,9 +112,9 @@
       // Hitting it for the first time will cause a failure, causing the
       // invocation of the std::new_handler.
       if (size == 0xFEED) {
-        if (!instance_->did_fail_realloc_0xfeed_once->Get()) {
-          instance_->did_fail_realloc_0xfeed_once->Set(
-              instance_->did_fail_realloc_0xfeed_once.get());
+        thread_local bool did_fail_realloc_0xfeed_once = false;
+        if (!did_fail_realloc_0xfeed_once) {
+          did_fail_realloc_0xfeed_once = true;
           return nullptr;
         }
         return address;
@@ -260,8 +259,6 @@
     aligned_reallocs_intercepted_by_size.resize(MaxSizeTracked());
     aligned_reallocs_intercepted_by_addr.resize(MaxSizeTracked());
     aligned_frees_intercepted_by_addr.resize(MaxSizeTracked());
-    did_fail_realloc_0xfeed_once =
-        std::make_unique<base::ThreadLocalStorage::Slot>();
     num_new_handler_calls.store(0, std::memory_order_release);
     instance_ = this;
 
@@ -303,7 +300,6 @@
   std::vector<size_t> aligned_reallocs_intercepted_by_size;
   std::vector<size_t> aligned_reallocs_intercepted_by_addr;
   std::vector<size_t> aligned_frees_intercepted_by_addr;
-  std::unique_ptr<base::ThreadLocalStorage::Slot> did_fail_realloc_0xfeed_once;
   std::atomic<uint32_t> num_new_handler_calls;
 
  private:
diff --git a/base/allocator/partition_allocator/thread_cache.cc b/base/allocator/partition_allocator/thread_cache.cc
index a0d25a1..0655176 100644
--- a/base/allocator/partition_allocator/thread_cache.cc
+++ b/base/allocator/partition_allocator/thread_cache.cc
@@ -93,18 +93,22 @@
   ThreadCache* previous_head = list_head_;
   list_head_ = cache;
   cache->next_ = previous_head;
-  if (previous_head)
+  if (previous_head) {
     previous_head->prev_ = cache;
+  }
 }
 
 void ThreadCacheRegistry::UnregisterThreadCache(ThreadCache* cache) {
   internal::ScopedGuard scoped_locker(GetLock());
-  if (cache->prev_)
+  if (cache->prev_) {
     cache->prev_->next_ = cache->next_;
-  if (cache->next_)
+  }
+  if (cache->next_) {
     cache->next_->prev_ = cache->prev_;
-  if (cache == list_head_)
+  }
+  if (cache == list_head_) {
     list_head_ = cache->next_;
+  }
 }
 
 void ThreadCacheRegistry::DumpStats(bool my_thread_only,
@@ -115,8 +119,9 @@
   internal::ScopedGuard scoped_locker(GetLock());
   if (my_thread_only) {
     auto* tcache = ThreadCache::Get();
-    if (!ThreadCache::IsValid(tcache))
+    if (!ThreadCache::IsValid(tcache)) {
       return;
+    }
     tcache->AccumulateStats(stats);
   } else {
     ThreadCache* tcache = list_head_;
@@ -146,8 +151,9 @@
   // the main thread for the partition lock, since it is acquired/released once
   // per bucket. By purging the main thread first, we avoid these interferences
   // for this thread at least.
-  if (ThreadCache::IsValid(current_thread_tcache))
+  if (ThreadCache::IsValid(current_thread_tcache)) {
     current_thread_tcache->Purge();
+  }
 
   {
     internal::ScopedGuard scoped_locker(GetLock());
@@ -158,8 +164,9 @@
       // point".
       // Note that this will not work if the other thread is sleeping forever.
       // TODO(lizeb): Handle sleeping threads.
-      if (tcache != current_thread_tcache)
+      if (tcache != current_thread_tcache) {
         tcache->SetShouldPurge();
+      }
       tcache = tcache->next_;
     }
   }
@@ -217,8 +224,9 @@
     // This can be called before *any* thread cache has serviced *any*
     // allocation, which can happen in tests, and in theory in non-test code
     // as well.
-    if (!tcache)
+    if (!tcache) {
       return;
+    }
 
     // Setting the global limit while locked, because we need |tcache->root_|.
     ThreadCache::SetGlobalLimits(tcache->root_, multiplier);
@@ -256,8 +264,9 @@
     // Can run when there is no thread cache, in which case there is nothing to
     // do, and the task should not be rescheduled. This would typically indicate
     // a case where the thread cache was never enabled, or got disabled.
-    if (!tcache)
+    if (!tcache) {
       return;
+    }
 
     while (tcache) {
       cached_memory_approx += tcache->cached_memory_;
@@ -316,8 +325,9 @@
   // adding a special-purpose lock.
   internal::ScopedGuard scoped_locker(
       ThreadCacheRegistry::Instance().GetLock());
-  if (g_thread_cache_key_created)
+  if (g_thread_cache_key_created) {
     return;
+  }
 
   bool ok = internal::PartitionTlsCreate(&internal::g_thread_cache_key, Delete);
   PA_CHECK(ok);
@@ -333,8 +343,9 @@
 void ThreadCache::SwapForTesting(PartitionRoot<>* root) {
   auto* old_tcache = ThreadCache::Get();
   g_thread_cache_root.store(nullptr, std::memory_order_relaxed);
-  if (old_tcache)
+  if (old_tcache) {
     ThreadCache::DeleteForTesting(old_tcache);
+  }
   if (root) {
     Init(root);
     Create(root);
@@ -421,8 +432,9 @@
 
 // static
 void ThreadCache::SetLargestCachedSize(size_t size) {
-  if (size > ThreadCache::kLargeSizeThreshold)
+  if (size > ThreadCache::kLargeSizeThreshold) {
     size = ThreadCache::kLargeSizeThreshold;
+  }
   largest_active_bucket_index_ =
       PartitionRoot<internal::ThreadSafe>::SizeToBucketIndex(
           size,
@@ -512,8 +524,9 @@
 void ThreadCache::Delete(void* tcache_ptr) {
   auto* tcache = static_cast<ThreadCache*>(tcache_ptr);
 
-  if (!IsValid(tcache))
+  if (!IsValid(tcache)) {
     return;
+  }
 
 #if PA_CONFIG(THREAD_CACHE_FAST_TLS)
   internal::g_thread_cache = nullptr;
@@ -617,8 +630,9 @@
     // some objects, then the allocation will be handled normally. Otherwise,
     // this goes to the central allocator, which will service the allocation,
     // return nullptr or crash.
-    if (!slot_start)
+    if (!slot_start) {
       break;
+    }
 
     allocated_slots++;
     PutInBucket(bucket, slot_start);
@@ -634,8 +648,9 @@
 template <bool crash_on_corruption>
 void ThreadCache::ClearBucketHelper(Bucket& bucket, size_t limit) {
   // Avoids acquiring the lock needlessly.
-  if (!bucket.count || bucket.count <= limit)
+  if (!bucket.count || bucket.count <= limit) {
     return;
+  }
 
   // This serves two purposes: error checking and avoiding stalls when grabbing
   // the lock:
@@ -717,8 +732,9 @@
 
 size_t ThreadCache::CachedMemory() const {
   size_t total = 0;
-  for (const Bucket& bucket : buckets_)
+  for (const Bucket& bucket : buckets_) {
     total += bucket.count * static_cast<size_t>(bucket.slot_size);
+  }
 
   return total;
 }
@@ -738,8 +754,9 @@
   stats->batch_fill_count += stats_.batch_fill_count;
 
 #if PA_CONFIG(THREAD_CACHE_ALLOC_STATS)
-  for (size_t i = 0; i < internal::kNumBuckets + 1; i++)
+  for (size_t i = 0; i < internal::kNumBuckets + 1; i++) {
     stats->allocs_per_bucket_[i] += stats_.allocs_per_bucket_[i];
+  }
 #endif  // PA_CONFIG(THREAD_CACHE_ALLOC_STATS)
 
   // cached_memory_ is not necessarily equal to |CachedMemory()| here, since
@@ -767,8 +784,9 @@
 // static
 void ThreadCache::PurgeCurrentThread() {
   auto* tcache = Get();
-  if (IsValid(tcache))
+  if (IsValid(tcache)) {
     tcache->Purge();
+  }
 }
 
 void ThreadCache::PurgeInternal() {
@@ -789,8 +807,9 @@
   // |largest_active_bucket_index_| can be lowered at runtime, there may be
   // memory already cached in the inactive buckets. They should still be
   // purged.
-  for (auto& bucket : buckets_)
+  for (auto& bucket : buckets_) {
     ClearBucketHelper<crash_on_corruption>(bucket, 0);
+  }
 }
 
 }  // namespace partition_alloc
diff --git a/base/allocator/partition_allocator/thread_cache.h b/base/allocator/partition_allocator/thread_cache.h
index 31462fe..79c7a4f 100644
--- a/base/allocator/partition_allocator/thread_cache.h
+++ b/base/allocator/partition_allocator/thread_cache.h
@@ -194,8 +194,10 @@
 
 }  // namespace internal
 
-#define PA_REENTRANCY_GUARD(x) \
-  internal::ReentrancyGuard guard { x }
+#define PA_REENTRANCY_GUARD(x)      \
+  internal::ReentrancyGuard guard { \
+    x                               \
+  }
 
 #else  // BUILDFLAG(PA_DCHECK_IS_ON)
 
@@ -493,8 +495,9 @@
     ClearBucket(bucket, limit / 2);
   }
 
-  if (PA_UNLIKELY(should_purge_.load(std::memory_order_relaxed)))
+  if (PA_UNLIKELY(should_purge_.load(std::memory_order_relaxed))) {
     PurgeInternal();
+  }
 
   *slot_size = bucket.slot_size;
   return true;
@@ -527,8 +530,9 @@
 
     // Very unlikely, means that the central allocator is out of memory. Let it
     // deal with it (may return 0, may crash).
-    if (PA_UNLIKELY(!bucket.freelist_head))
+    if (PA_UNLIKELY(!bucket.freelist_head)) {
       return 0;
+    }
   }
 
   PA_DCHECK(bucket.count != 0);
diff --git a/base/allocator/partition_allocator/thread_cache_unittest.cc b/base/allocator/partition_allocator/thread_cache_unittest.cc
index 9469760..3508547 100644
--- a/base/allocator/partition_allocator/thread_cache_unittest.cc
+++ b/base/allocator/partition_allocator/thread_cache_unittest.cc
@@ -92,8 +92,9 @@
     ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
 
     // Cleanup the global state so next test can recreate ThreadCache.
-    if (ThreadCache::IsTombstone(ThreadCache::Get()))
+    if (ThreadCache::IsTombstone(ThreadCache::Get())) {
       ThreadCache::RemoveTombstoneForTesting();
+    }
   }
 
  protected:
@@ -176,8 +177,9 @@
            allocation_size++) {
         FillThreadCacheAndReturnIndex(allocation_size, batch);
 
-        if (ThreadCache::Get()->CachedMemory() >= target_cached_memory)
+        if (ThreadCache::Get()->CachedMemory() >= target_cached_memory) {
           return;
+        }
       }
     }
 
@@ -481,8 +483,9 @@
   EXPECT_EQ(UntagPtr(this_thread_ptr), UntagPtr(other_thread_ptr));
   root()->Free(other_thread_ptr);
 
-  for (void* ptr : tmp)
+  for (void* ptr : tmp) {
     root()->Free(ptr);
+  }
 }
 
 namespace {
@@ -796,8 +799,9 @@
       FillThreadCacheAndReturnIndex(root, allocation_size, bucket_distribution,
                                     batch);
 
-      if (ThreadCache::Get()->CachedMemory() >= target_cached_memory)
+      if (ThreadCache::Get()->CachedMemory() >= target_cached_memory) {
         return;
+      }
     }
   }
 
diff --git a/base/android/java/src/org/chromium/base/task/ChoreographerTaskRunner.java b/base/android/java/src/org/chromium/base/task/ChoreographerTaskRunner.java
deleted file mode 100644
index 124089c..0000000
--- a/base/android/java/src/org/chromium/base/task/ChoreographerTaskRunner.java
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2018 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package org.chromium.base.task;
-
-import android.view.Choreographer;
-
-/**
- * An adapter that allows PostTask to submit Choreographer frame callbacks which
- * run after the next vsync.
- */
-final class ChoreographerTaskRunner implements SingleThreadTaskRunner {
-    private final Choreographer mChoreographer;
-
-    ChoreographerTaskRunner(Choreographer choreographer) {
-        mChoreographer = choreographer;
-    }
-
-    @Override
-    public boolean belongsToCurrentThread() {
-        try {
-            return mChoreographer == Choreographer.getInstance();
-        } catch (IllegalStateException e) {
-            return false;
-        }
-    }
-
-    @Override
-    public void postTask(Runnable task) {
-        mChoreographer.postFrameCallback(new Choreographer.FrameCallback() {
-            @Override
-            public void doFrame(long frameTimeNanos) {
-                task.run();
-            }
-        });
-    }
-
-    @Override
-    public void postDelayedTask(Runnable task, long delayMillis) {
-        mChoreographer.postFrameCallbackDelayed(new Choreographer.FrameCallback() {
-            @Override
-            public void doFrame(long frameTimeNanos) {
-                task.run();
-            }
-        }, delayMillis);
-    }
-}
diff --git a/base/android/java/src/org/chromium/base/task/DefaultTaskExecutor.java b/base/android/java/src/org/chromium/base/task/DefaultTaskExecutor.java
index 4283793..261909d 100644
--- a/base/android/java/src/org/chromium/base/task/DefaultTaskExecutor.java
+++ b/base/android/java/src/org/chromium/base/task/DefaultTaskExecutor.java
@@ -4,10 +4,6 @@
 
 package org.chromium.base.task;
 
-import android.view.Choreographer;
-
-import org.chromium.base.ThreadUtils;
-
 import java.util.HashMap;
 import java.util.Map;
 
@@ -21,13 +17,11 @@
 
     @Override
     public TaskRunner createTaskRunner(TaskTraits taskTraits) {
-        if (taskTraits.mIsChoreographerFrame) return createChoreographerTaskRunner();
         return new TaskRunnerImpl(taskTraits);
     }
 
     @Override
     public SequencedTaskRunner createSequencedTaskRunner(TaskTraits taskTraits) {
-        if (taskTraits.mIsChoreographerFrame) return createChoreographerTaskRunner();
         return new SequencedTaskRunnerImpl(taskTraits);
     }
 
@@ -38,7 +32,6 @@
      */
     @Override
     public SingleThreadTaskRunner createSingleThreadTaskRunner(TaskTraits taskTraits) {
-        if (taskTraits.mIsChoreographerFrame) return createChoreographerTaskRunner();
         // Tasks posted via this API will not execute until after native has started.
         return new SingleThreadTaskRunnerImpl(null, taskTraits);
     }
@@ -62,10 +55,4 @@
     public boolean canRunTaskImmediately(TaskTraits traits) {
         return false;
     }
-
-    private synchronized ChoreographerTaskRunner createChoreographerTaskRunner() {
-        // TODO(alexclarke): Migrate to the new Android UI thread trait when available.
-        return ThreadUtils.runOnUiThreadBlockingNoException(
-                () -> { return new ChoreographerTaskRunner(Choreographer.getInstance()); });
-    }
 }
diff --git a/base/android/java/src/org/chromium/base/task/PostTask.java b/base/android/java/src/org/chromium/base/task/PostTask.java
index 8a5bf07..02928f6 100644
--- a/base/android/java/src/org/chromium/base/task/PostTask.java
+++ b/base/android/java/src/org/chromium/base/task/PostTask.java
@@ -94,7 +94,7 @@
      * @param delay The delay in milliseconds before the task can be run.
      */
     public static void postDelayedTask(TaskTraits taskTraits, Runnable task, long delay) {
-        if (!sNativeInitialized || taskTraits.mIsChoreographerFrame) {
+        if (!sNativeInitialized) {
             getTaskExecutorForTraits(taskTraits).postDelayedTask(taskTraits, task, delay);
         } else {
             TaskTraits postedTraits = taskTraits.withExplicitDestination();
diff --git a/base/android/java/src/org/chromium/base/task/TaskTraits.java b/base/android/java/src/org/chromium/base/task/TaskTraits.java
index 0989923..099bd50 100644
--- a/base/android/java/src/org/chromium/base/task/TaskTraits.java
+++ b/base/android/java/src/org/chromium/base/task/TaskTraits.java
@@ -58,13 +58,6 @@
     // USER_BLOCKING + may block.
     public static final TaskTraits USER_BLOCKING_MAY_BLOCK = USER_BLOCKING.mayBlock();
 
-    // A bit like requestAnimationFrame, this task will be posted onto the Choreographer
-    // and will be run on the android main thread after the next vsync.
-    public static final TaskTraits CHOREOGRAPHER_FRAME = new TaskTraits();
-    static {
-        CHOREOGRAPHER_FRAME.mIsChoreographerFrame = true;
-    }
-
     // For tasks that should run on the thread pool instead of the main thread.
     // Note that currently tasks which lack this trait will also execute on the
     // thread pool unless a trait for a named thread is given.
@@ -84,7 +77,6 @@
     boolean mUseThreadPool;
     byte mExtensionId;
     byte mExtensionData[];
-    boolean mIsChoreographerFrame;
 
     // Derive custom traits from existing trait constants.
     private TaskTraits() {
@@ -181,8 +173,7 @@
             TaskTraits other = (TaskTraits) object;
             return mPriority == other.mPriority && mMayBlock == other.mMayBlock
                     && mUseThreadPool == other.mUseThreadPool && mExtensionId == other.mExtensionId
-                    && Arrays.equals(mExtensionData, other.mExtensionData)
-                    && mIsChoreographerFrame == other.mIsChoreographerFrame;
+                    && Arrays.equals(mExtensionData, other.mExtensionData);
         } else {
             return false;
         }
@@ -196,7 +187,6 @@
         hash = 37 * hash + (mUseThreadPool ? 0 : 1);
         hash = 37 * hash + (int) mExtensionId;
         hash = 37 * hash + Arrays.hashCode(mExtensionData);
-        hash = 37 * hash + (mIsChoreographerFrame ? 0 : 1);
         return hash;
     }
 }
diff --git a/base/android/javatests/src/org/chromium/base/task/PostTaskTest.java b/base/android/javatests/src/org/chromium/base/task/PostTaskTest.java
index dbb729f..d836455 100644
--- a/base/android/javatests/src/org/chromium/base/task/PostTaskTest.java
+++ b/base/android/javatests/src/org/chromium/base/task/PostTaskTest.java
@@ -13,13 +13,11 @@
 import org.junit.Test;
 import org.junit.runner.RunWith;
 
-import org.chromium.base.ThreadUtils;
 import org.chromium.base.test.BaseJUnit4ClassRunner;
 import org.chromium.base.test.task.SchedulerTestHelpers;
 
 import java.util.ArrayList;
 import java.util.List;
-import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 /**
@@ -88,36 +86,4 @@
         // This should not timeout.
         SchedulerTestHelpers.postTaskAndBlockUntilRun(taskQueue);
     }
-
-    @Test
-    @SmallTest
-    public void testChoreographerFrameTrait() throws Exception {
-        List<Integer> orderList = new ArrayList<>();
-        CountDownLatch latch = new CountDownLatch(2);
-        PostTask.postTask(TaskTraits.CHOREOGRAPHER_FRAME, new Runnable() {
-            @Override
-            public void run() {
-                ThreadUtils.assertOnUiThread();
-                synchronized (orderList) {
-                    orderList.add(1);
-                    latch.countDown();
-                }
-            }
-        });
-
-        PostTask.postTask(TaskTraits.CHOREOGRAPHER_FRAME, new Runnable() {
-            @Override
-            public void run() {
-                ThreadUtils.assertOnUiThread();
-                synchronized (orderList) {
-                    orderList.add(2);
-                    latch.countDown();
-                }
-            }
-        });
-
-        latch.await();
-
-        assertThat(orderList, contains(1, 2));
-    }
 }
diff --git a/base/android/javatests/src/org/chromium/base/test/metrics/HistogramWatcherTestBase.java b/base/android/javatests/src/org/chromium/base/test/metrics/HistogramWatcherTestBase.java
index d1b1878..c98b0b6 100644
--- a/base/android/javatests/src/org/chromium/base/test/metrics/HistogramWatcherTestBase.java
+++ b/base/android/javatests/src/org/chromium/base/test/metrics/HistogramWatcherTestBase.java
@@ -164,6 +164,50 @@
         Assert.fail("Expected AssertionError");
     }
 
+    protected void doTestExtraRecordAllowedAny_success(@TestScenario int scenario) {
+        // Arrange
+        maybeLoadNativeFirst(scenario);
+        mWatcher = HistogramWatcher.newBuilder()
+                           .expectAnyRecords(BOOLEAN_HISTOGRAM, 3)
+                           .allowExtraRecordsForHistogramsAbove()
+                           .build();
+
+        // Act
+        RecordHistogram.recordBooleanHistogram(BOOLEAN_HISTOGRAM, false);
+        RecordHistogram.recordBooleanHistogram(BOOLEAN_HISTOGRAM, false);
+        RecordHistogram.recordBooleanHistogram(BOOLEAN_HISTOGRAM, true);
+        RecordHistogram.recordBooleanHistogram(BOOLEAN_HISTOGRAM, true);
+        maybeLoadNativeAfterRecord(scenario);
+
+        // Assert
+        mWatcher.assertExpected();
+    }
+
+    protected void doTestExtraRecordAllowedAny_failure(@TestScenario int scenario) {
+        // Arrange
+        maybeLoadNativeFirst(scenario);
+        mWatcher = HistogramWatcher.newBuilder()
+                           .expectAnyRecords(BOOLEAN_HISTOGRAM, 3)
+                           .allowExtraRecordsForHistogramsAbove()
+                           .build();
+
+        // Act
+        RecordHistogram.recordBooleanHistogram(BOOLEAN_HISTOGRAM, false);
+        RecordHistogram.recordBooleanHistogram(BOOLEAN_HISTOGRAM, false);
+        maybeLoadNativeAfterRecord(scenario);
+
+        // Assert
+        try {
+            mWatcher.assertExpected();
+        } catch (AssertionError e) {
+            assertContains(BOOLEAN_HISTOGRAM, e.getMessage());
+            assertContains("3 record(s) expected: [Any (3 times)]", e.getMessage());
+            assertContains("2 record(s) seen: [0 (2 times)]", e.getMessage());
+            return;
+        }
+        Assert.fail("Expected AssertionError");
+    }
+
     protected void doTestMissingLastRecord_failure(@TestScenario int scenario) {
         // Arrange
         maybeLoadNativeFirst(scenario);
diff --git a/base/android/javatests/src/org/chromium/base/test/metrics/HistogramWatcherWithNativeTest.java b/base/android/javatests/src/org/chromium/base/test/metrics/HistogramWatcherWithNativeTest.java
index fbb9198..6b321fb 100644
--- a/base/android/javatests/src/org/chromium/base/test/metrics/HistogramWatcherWithNativeTest.java
+++ b/base/android/javatests/src/org/chromium/base/test/metrics/HistogramWatcherWithNativeTest.java
@@ -54,6 +54,18 @@
 
     @Test
     @MediumTest
+    public void testExtraRecordAllowedAny_success() {
+        doTestExtraRecordAllowedAny_success(TestScenario.WITH_NATIVE);
+    }
+
+    @Test
+    @MediumTest
+    public void testExtraRecordAllowedAny_failure() {
+        doTestExtraRecordAllowedAny_failure(TestScenario.WITH_NATIVE);
+    }
+
+    @Test
+    @MediumTest
     public void testMissingRecord_failure() {
         doTestMissingLastRecord_failure(TestScenario.WITH_NATIVE);
     }
diff --git a/base/android/javatests/src/org/chromium/base/test/metrics/HistogramWatcherWithoutNativeTest.java b/base/android/javatests/src/org/chromium/base/test/metrics/HistogramWatcherWithoutNativeTest.java
index fd490d6..6658008 100644
--- a/base/android/javatests/src/org/chromium/base/test/metrics/HistogramWatcherWithoutNativeTest.java
+++ b/base/android/javatests/src/org/chromium/base/test/metrics/HistogramWatcherWithoutNativeTest.java
@@ -58,6 +58,18 @@
 
     @Test
     @MediumTest
+    public void testExtraRecordAllowedAny_success() {
+        doTestExtraRecordAllowedAny_success(TestScenario.WITHOUT_NATIVE);
+    }
+
+    @Test
+    @MediumTest
+    public void testExtraRecordAllowedAny_failure() {
+        doTestExtraRecordAllowedAny_failure(TestScenario.WITHOUT_NATIVE);
+    }
+
+    @Test
+    @MediumTest
     public void testMissingRecord_failure() {
         doTestMissingLastRecord_failure(TestScenario.WITHOUT_NATIVE);
     }
diff --git a/base/files/file_descriptor_watcher_posix.cc b/base/files/file_descriptor_watcher_posix.cc
index 06bf1dc..cb520db 100644
--- a/base/files/file_descriptor_watcher_posix.cc
+++ b/base/files/file_descriptor_watcher_posix.cc
@@ -11,24 +11,20 @@
 #include "base/memory/ptr_util.h"
 #include "base/memory/raw_ref.h"
 #include "base/message_loop/message_pump_for_io.h"
-#include "base/no_destructor.h"
 #include "base/synchronization/waitable_event.h"
 #include "base/task/current_thread.h"
 #include "base/task/sequenced_task_runner.h"
 #include "base/task/single_thread_task_runner.h"
 #include "base/threading/thread_checker.h"
-#include "base/threading/thread_local.h"
 #include "base/threading/thread_restrictions.h"
+#include "third_party/abseil-cpp/absl/base/attributes.h"
 
 namespace base {
 
 namespace {
 
 // Per-thread FileDescriptorWatcher registration.
-ThreadLocalPointer<FileDescriptorWatcher>& GetTlsFdWatcher() {
-  static NoDestructor<ThreadLocalPointer<FileDescriptorWatcher>> tls_fd_watcher;
-  return *tls_fd_watcher;
-}
+ABSL_CONST_INIT thread_local FileDescriptorWatcher* fd_watcher = nullptr;
 
 }  // namespace
 
@@ -173,7 +169,7 @@
                                               int fd,
                                               const RepeatingClosure& callback)
     : callback_(callback),
-      io_thread_task_runner_(GetTlsFdWatcher().Get()->io_thread_task_runner()) {
+      io_thread_task_runner_(fd_watcher->io_thread_task_runner()) {
   DCHECK(!callback_.is_null());
   DCHECK(io_thread_task_runner_);
   watcher_ =
@@ -254,14 +250,10 @@
 
 FileDescriptorWatcher::FileDescriptorWatcher(
     scoped_refptr<SingleThreadTaskRunner> io_thread_task_runner)
-    : io_thread_task_runner_(std::move(io_thread_task_runner)) {
-  DCHECK(!GetTlsFdWatcher().Get());
-  GetTlsFdWatcher().Set(this);
-}
+    : resetter_(&fd_watcher, this, nullptr),
+      io_thread_task_runner_(std::move(io_thread_task_runner)) {}
 
-FileDescriptorWatcher::~FileDescriptorWatcher() {
-  GetTlsFdWatcher().Set(nullptr);
-}
+FileDescriptorWatcher::~FileDescriptorWatcher() = default;
 
 std::unique_ptr<FileDescriptorWatcher::Controller>
 FileDescriptorWatcher::WatchReadable(int fd, const RepeatingClosure& callback) {
@@ -276,7 +268,7 @@
 
 #if DCHECK_IS_ON()
 void FileDescriptorWatcher::AssertAllowed() {
-  DCHECK(GetTlsFdWatcher().Get());
+  DCHECK(fd_watcher);
 }
 #endif
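
This file is one of several in this uprev that migrate from a lazily
created base::ThreadLocalPointer to a constant-initialized C++ thread_local.
Condensed from the hunks above, the shape of the change is:

  // Before: a NoDestructor-wrapped TLS slot behind a getter function.
  ThreadLocalPointer<FileDescriptorWatcher>& GetTlsFdWatcher() {
    static NoDestructor<ThreadLocalPointer<FileDescriptorWatcher>> tls;
    return *tls;
  }
  // After: a plain pointer in thread-local storage, no lazy initialization.
  ABSL_CONST_INIT thread_local FileDescriptorWatcher* fd_watcher = nullptr;
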
 
diff --git a/base/files/file_descriptor_watcher_posix.h b/base/files/file_descriptor_watcher_posix.h
index 41d4554..a252651 100644
--- a/base/files/file_descriptor_watcher_posix.h
+++ b/base/files/file_descriptor_watcher_posix.h
@@ -7,6 +7,7 @@
 
 #include <memory>
 
+#include "base/auto_reset.h"
 #include "base/base_export.h"
 #include "base/dcheck_is_on.h"
 #include "base/functional/callback.h"
@@ -121,12 +122,10 @@
       const RepeatingClosure& callback);
 
   // Asserts that usage of this API is allowed on this thread.
-  static void AssertAllowed()
 #if DCHECK_IS_ON()
-      ;
+  static void AssertAllowed();
 #else
-  {
-  }
+  static void AssertAllowed() {}
 #endif
 
  private:
@@ -134,6 +133,7 @@
     return io_thread_task_runner_;
   }
 
+  const AutoReset<FileDescriptorWatcher*> resetter_;
   const scoped_refptr<SingleThreadTaskRunner> io_thread_task_runner_;
 };
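
The explicit Set(this)/Set(nullptr) calls in the old constructor and
destructor are subsumed by the resetter_ member above: base::AutoReset
stores the new value on construction and restores the previous one on
destruction (the third argument asserts what the previous value must be,
here nullptr, preserving the old DCHECK). A minimal sketch of the idiom,
assuming the usual save/set/restore semantics:

  template <typename T>
  class AutoResetSketch {
   public:
    AutoResetSketch(T* var, T new_value) : var_(var), saved_(*var_) {
      *var_ = new_value;
    }
    ~AutoResetSketch() { *var_ = saved_; }  // restore on scope exit
   private:
    T* const var_;
    const T saved_;
  };
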
 
diff --git a/base/memory/raw_ptr_asan_bound_arg_tracker.cc b/base/memory/raw_ptr_asan_bound_arg_tracker.cc
index 0af72e2..670ac8c 100644
--- a/base/memory/raw_ptr_asan_bound_arg_tracker.cc
+++ b/base/memory/raw_ptr_asan_bound_arg_tracker.cc
@@ -12,12 +12,21 @@
 #include <sanitizer/asan_interface.h>
 
 #include "base/memory/raw_ptr_asan_service.h"
-#include "base/no_destructor.h"
+#include "third_party/abseil-cpp/absl/base/attributes.h"
 
 namespace base {
+
+namespace {
+
+// We use thread-local storage instead of sequence-local storage for consistency
+// with PendingReport in RawPtrAsanService.
+ABSL_CONST_INIT thread_local RawPtrAsanBoundArgTracker::ProtectedArgsVector*
+    protected_args = nullptr;
+
+}  // namespace
+
 // static
 uintptr_t RawPtrAsanBoundArgTracker::GetProtectedArgPtr(uintptr_t ptr) {
-  ProtectedArgsVector* protected_args = CurrentProtectedArgs().Get();
   if (!protected_args) {
     return 0;
   }
@@ -39,14 +48,14 @@
 RawPtrAsanBoundArgTracker::RawPtrAsanBoundArgTracker()
     : enabled_(RawPtrAsanService::GetInstance().IsEnabled()) {
   if (enabled_) {
-    prev_protected_args_ = CurrentProtectedArgs().Get();
-    CurrentProtectedArgs().Set(&protected_args_);
+    prev_protected_args_ = protected_args;
+    protected_args = &protected_args_;
   }
 }
 
 RawPtrAsanBoundArgTracker::~RawPtrAsanBoundArgTracker() {
   if (enabled_) {
-    CurrentProtectedArgs().Set(prev_protected_args_);
+    protected_args = prev_protected_args_;
   }
 }
 
@@ -56,16 +65,6 @@
   }
 }
 
-// static
-ThreadLocalPointer<RawPtrAsanBoundArgTracker::ProtectedArgsVector>&
-RawPtrAsanBoundArgTracker::CurrentProtectedArgs() {
-  // We use thread-local storage instead of sequence-local storage for
-  // consistency with PendingReport in RawPtrAsanService.
-  static NoDestructor<
-      ThreadLocalPointer<RawPtrAsanBoundArgTracker::ProtectedArgsVector>>
-      protected_args;
-  return *protected_args;
-}
 }  // namespace base
 
 #endif  // BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
diff --git a/base/memory/raw_ptr_asan_bound_arg_tracker.h b/base/memory/raw_ptr_asan_bound_arg_tracker.h
index 1f3dcb0..9e473d1 100644
--- a/base/memory/raw_ptr_asan_bound_arg_tracker.h
+++ b/base/memory/raw_ptr_asan_bound_arg_tracker.h
@@ -16,7 +16,6 @@
 #include "base/base_export.h"
 #include "base/containers/stack_container.h"
 #include "base/memory/raw_ptr.h"
-#include "base/threading/thread_local.h"
 
 namespace base {
 namespace internal {
@@ -47,6 +46,9 @@
 // the Bind implementation. This should not be used directly.
 class BASE_EXPORT RawPtrAsanBoundArgTracker {
  public:
+  static constexpr size_t kInlineArgsCount = 3;
+  using ProtectedArgsVector = base::StackVector<uintptr_t, kInlineArgsCount>;
+
   // Check whether ptr is an address inside an allocation pointed to by one of
   // the currently protected callback arguments. If it is, then this function
   // returns the base address of that allocation, otherwise it returns 0.
@@ -103,10 +105,6 @@
     }
   }
 
-  static constexpr size_t kInlineArgsCount = 3;
-  using ProtectedArgsVector = base::StackVector<uintptr_t, kInlineArgsCount>;
-  static ThreadLocalPointer<ProtectedArgsVector>& CurrentProtectedArgs();
-
   // Cache whether or not BRP-ASan is running when we enter the argument
   // tracking scope so that we ensure that our actions on leaving the scope are
   // consistent even if the runtime flags are changed.
diff --git a/base/memory/raw_ptr_asan_service.cc b/base/memory/raw_ptr_asan_service.cc
index 89dee2a..194c9c6 100644
--- a/base/memory/raw_ptr_asan_service.cc
+++ b/base/memory/raw_ptr_asan_service.cc
@@ -19,17 +19,17 @@
 #include "base/memory/raw_ptr.h"
 #include "base/memory/raw_ptr_asan_bound_arg_tracker.h"
 #include "base/memory/raw_ptr_asan_hooks.h"
-#include "base/no_destructor.h"
 #include "base/process/process.h"
 #include "base/strings/stringprintf.h"
 #include "base/task/thread_pool/thread_group.h"
-#include "base/threading/thread_local.h"
+#include "third_party/abseil-cpp/absl/base/attributes.h"
 
 namespace base {
 
 RawPtrAsanService RawPtrAsanService::instance_;
 
 namespace {
+
 // https://github.com/llvm/llvm-project/blob/b84673b3f424882c4c1961fb2c49b6302b68f344/compiler-rt/lib/asan/asan_mapping.h#L154
 constexpr size_t kShadowScale = 3;
 // https://github.com/llvm/llvm-project/blob/b84673b3f424882c4c1961fb2c49b6302b68f344/compiler-rt/lib/asan/asan_allocator.cpp#L143
@@ -38,6 +38,13 @@
 constexpr uint8_t kAsanHeapLeftRedzoneMagic = 0xfa;
 // https://github.com/llvm/llvm-project/blob/b84673b3f424882c4c1961fb2c49b6302b68f344/compiler-rt/lib/asan/asan_internal.h#L145
 constexpr uint8_t kAsanUserPoisonedMemoryMagic = 0xf7;
+
+// Intentionally use thread-local storage here. Making this sequence-local
+// doesn't prevent sharing of PendingReport contents between unrelated tasks,
+// so we keep this at a lower level and avoid introducing additional
+// assumptions about Chrome's sequence model.
+ABSL_CONST_INIT thread_local RawPtrAsanService::PendingReport pending_report;
+
 }  // namespace
 
 // Mark the first eight bytes of every allocation's header as "user poisoned".
@@ -112,8 +119,8 @@
   __asan_locate_address(const_cast<void*>(ptr), nullptr, 0, &region_base,
                         &region_size);
 
-  GetPendingReport() = {type, reinterpret_cast<uintptr_t>(region_base),
-                        region_size};
+  pending_report = {type, reinterpret_cast<uintptr_t>(region_base),
+                    region_size};
 }
 
 namespace {
@@ -157,7 +164,6 @@
     const char* protection_details;
   } crash_info;
 
-  auto& pending_report = GetPendingReport();
   uintptr_t ptr = reinterpret_cast<uintptr_t>(__asan_get_report_address());
   uintptr_t bound_arg_ptr = RawPtrAsanBoundArgTracker::GetProtectedArgPtr(ptr);
   if (pending_report.allocation_base <= ptr &&
@@ -355,21 +361,6 @@
   base::ImmediateCrash();
 }
 
-// static
-RawPtrAsanService::PendingReport& RawPtrAsanService::GetPendingReport() {
-  // Intentionally use thread-local-storage here. Making this sequence-local
-  // doesn't prevent sharing of PendingReport contents between unrelated
-  // tasks, so we keep this at a lower-level and avoid introducing additional
-  // assumptions about Chrome's sequence model.
-  static NoDestructor<ThreadLocalOwnedPointer<PendingReport>> pending_report;
-  PendingReport* raw_pending_report = pending_report->Get();
-  if (UNLIKELY(!raw_pending_report)) {
-    auto new_pending_report = std::make_unique<PendingReport>();
-    raw_pending_report = new_pending_report.get();
-    pending_report->Set(std::move(new_pending_report));
-  }
-  return *raw_pending_report;
-}
-
 }  // namespace base
+
 #endif  // BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
diff --git a/base/memory/raw_ptr_asan_service.h b/base/memory/raw_ptr_asan_service.h
index 14d725c..db8cd4f 100644
--- a/base/memory/raw_ptr_asan_service.h
+++ b/base/memory/raw_ptr_asan_service.h
@@ -32,6 +32,12 @@
     kInstantiation,
   };
 
+  struct PendingReport {
+    ReportType type = ReportType::kDereference;
+    uintptr_t allocation_base = 0;
+    size_t allocation_size = 0;
+  };
+
   void Configure(EnableDereferenceCheck,
                  EnableExtractionCheck,
                  EnableInstantiationCheck);
@@ -71,14 +77,6 @@
     kEnabled,
   };
 
-  struct PendingReport {
-    ReportType type;
-    uintptr_t allocation_base;
-    size_t allocation_size;
-  };
-
-  static PendingReport& GetPendingReport();
-
   uint8_t* GetShadow(void* ptr) const;
 
   static void MallocHook(const volatile void*, size_t);
diff --git a/base/strings/string_util.h b/base/strings/string_util.h
index 75d1844..120ffb2 100644
--- a/base/strings/string_util.h
+++ b/base/strings/string_util.h
@@ -13,6 +13,7 @@
 #include <stdint.h>
 
 #include <initializer_list>
+#include <memory>
 #include <sstream>
 #include <string>
 #include <type_traits>
@@ -25,6 +26,7 @@
 #include "base/cxx20_to_address.h"
 #include "base/strings/string_piece.h"  // For implicit conversions.
 #include "base/strings/string_util_internal.h"
+#include "base/template_util.h"
 #include "build/build_config.h"
 
 namespace base {
@@ -114,11 +116,90 @@
   return MakeBasicStringPiece<wchar_t>(begin, end);
 }
 
-// Convert a type with defined `operator<<` into a string.
-template <typename... Streamable>
-std::string StreamableToString(const Streamable&... values) {
+// Converts a type with a defined `operator<<` or `.ToString()` method into a
+// string.
+
+// I/O manipulators are function pointers, but should be sent directly to the
+// `ostream` instead of being cast to `const void*` like other function
+// pointers.
+template <typename T, typename = void>
+constexpr bool IsIomanip = false;
+template <typename T>
+constexpr bool
+    IsIomanip<T&(T&), std::enable_if_t<std::is_base_of_v<std::ios_base, T>>> =
+        true;
+
+// Function pointers implicitly convert to `bool`, so use this to avoid printing
+// function pointers as 1 or 0.
+template <typename T, typename = void>
+constexpr bool WillBeIncorrectlyStreamedAsBool = false;
+template <typename T>
+constexpr bool WillBeIncorrectlyStreamedAsBool<
+    T,
+    std::enable_if_t<std::is_function_v<typename std::remove_pointer_t<T>> &&
+                     !IsIomanip<typename std::remove_pointer_t<T>>>> = true;
+
+// Fallback case when there is no better representation.
+template <typename T, typename = void>
+struct ToStringHelper {
+  static void Stringify(const T& v, std::ostringstream& ss) {
+    ss << "[" << sizeof(v) << "-byte object at 0x" << std::addressof(v) << "]";
+  }
+};
+
+// Most streamables.
+template <typename T>
+struct ToStringHelper<
+    T,
+    std::enable_if_t<base::internal::SupportsOstreamOperator<const T&>::value &&
+                     !WillBeIncorrectlyStreamedAsBool<T>>> {
+  static void Stringify(const T& v, std::ostringstream& ss) { ss << v; }
+};
+
+// Functions and function pointers.
+template <typename T>
+struct ToStringHelper<
+    T,
+    std::enable_if_t<base::internal::SupportsOstreamOperator<const T&>::value &&
+                     WillBeIncorrectlyStreamedAsBool<T>>> {
+  static void Stringify(const T& v, std::ostringstream& ss) {
+    ToStringHelper<const void*>::Stringify(reinterpret_cast<const void*>(v),
+                                           ss);
+  }
+};
+
+// Non-streamables that have a `ToString` member.
+template <typename T>
+struct ToStringHelper<
+    T,
+    std::enable_if_t<
+        !base::internal::SupportsOstreamOperator<const T&>::value &&
+        base::internal::SupportsToString<const T&>::value>> {
+  static void Stringify(const T& v, std::ostringstream& ss) {
+    // .ToString() may not return a std::string, e.g. blink::WTF::String.
+    ToStringHelper<decltype(v.ToString())>::Stringify(v.ToString(), ss);
+  }
+};
+
+// Non-streamable enums (i.e. scoped enums where no `operator<<` overload was
+// declared).
+template <typename T>
+struct ToStringHelper<T,
+                      std::enable_if_t<!base::internal::SupportsOstreamOperator<
+                                           const T&>::value &&
+                                       std::is_enum_v<T>>> {
+  static void Stringify(const T& v, std::ostringstream& ss) {
+    using UT = typename std::underlying_type_t<T>;
+    ToStringHelper<UT>::Stringify(static_cast<UT>(v), ss);
+  }
+};
+
+template <typename... Ts>
+std::string ToString(const Ts&... values) {
   std::ostringstream ss;
-  (ss << ... << values);
+  (ToStringHelper<typename std::remove_cvref_t<decltype(values)>>::Stringify(
+       values, ss),
+   ...);
   return ss.str();
 }
 
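The `ToStringHelper` specializations above dispatch on detection traits such as `base::internal::SupportsToString`, whose definitions are outside this hunk. A minimal sketch of the `std::void_t` detection idiom they presumably rely on (not the exact //base definitions):

```cpp
#include <ostream>
#include <string>
#include <type_traits>
#include <utility>

template <typename T, typename = void>
struct SupportsToString : std::false_type {};
template <typename T>
struct SupportsToString<T, std::void_t<decltype(std::declval<T>().ToString())>>
    : std::true_type {};

template <typename T, typename = void>
struct SupportsOstreamOperator : std::false_type {};
template <typename T>
struct SupportsOstreamOperator<
    T,
    std::void_t<decltype(std::declval<std::ostream&>() << std::declval<T>())>>
    : std::true_type {};

struct HasToString {
  std::string ToString() const { return "yay!"; }
};

// The unused second parameter defaults to void; when the expression inside
// std::void_t is well-formed, the true_type specialization is selected.
static_assert(SupportsToString<HasToString>::value);
static_assert(!SupportsOstreamOperator<HasToString>::value);
static_assert(SupportsOstreamOperator<int>::value);

int main() {}
```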
diff --git a/base/strings/string_util_unittest.cc b/base/strings/string_util_unittest.cc
index b035312..aba0e31 100644
--- a/base/strings/string_util_unittest.cc
+++ b/base/strings/string_util_unittest.cc
@@ -24,43 +24,43 @@
 
 namespace base {
 
-static const struct trim_case {
+namespace {
+
+const struct trim_case {
   const wchar_t* input;
   const TrimPositions positions;
   const wchar_t* output;
   const TrimPositions return_value;
 } trim_cases[] = {
-  {L" Google Video ", TRIM_LEADING, L"Google Video ", TRIM_LEADING},
-  {L" Google Video ", TRIM_TRAILING, L" Google Video", TRIM_TRAILING},
-  {L" Google Video ", TRIM_ALL, L"Google Video", TRIM_ALL},
-  {L"Google Video", TRIM_ALL, L"Google Video", TRIM_NONE},
-  {L"", TRIM_ALL, L"", TRIM_NONE},
-  {L"  ", TRIM_LEADING, L"", TRIM_LEADING},
-  {L"  ", TRIM_TRAILING, L"", TRIM_TRAILING},
-  {L"  ", TRIM_ALL, L"", TRIM_ALL},
-  {L"\t\rTest String\n", TRIM_ALL, L"Test String", TRIM_ALL},
-  {L"\x2002Test String\x00A0\x3000", TRIM_ALL, L"Test String", TRIM_ALL},
+    {L" Google Video ", TRIM_LEADING, L"Google Video ", TRIM_LEADING},
+    {L" Google Video ", TRIM_TRAILING, L" Google Video", TRIM_TRAILING},
+    {L" Google Video ", TRIM_ALL, L"Google Video", TRIM_ALL},
+    {L"Google Video", TRIM_ALL, L"Google Video", TRIM_NONE},
+    {L"", TRIM_ALL, L"", TRIM_NONE},
+    {L"  ", TRIM_LEADING, L"", TRIM_LEADING},
+    {L"  ", TRIM_TRAILING, L"", TRIM_TRAILING},
+    {L"  ", TRIM_ALL, L"", TRIM_ALL},
+    {L"\t\rTest String\n", TRIM_ALL, L"Test String", TRIM_ALL},
+    {L"\x2002Test String\x00A0\x3000", TRIM_ALL, L"Test String", TRIM_ALL},
 };
 
-static const struct trim_case_ascii {
+const struct trim_case_ascii {
   const char* input;
   const TrimPositions positions;
   const char* output;
   const TrimPositions return_value;
 } trim_cases_ascii[] = {
-  {" Google Video ", TRIM_LEADING, "Google Video ", TRIM_LEADING},
-  {" Google Video ", TRIM_TRAILING, " Google Video", TRIM_TRAILING},
-  {" Google Video ", TRIM_ALL, "Google Video", TRIM_ALL},
-  {"Google Video", TRIM_ALL, "Google Video", TRIM_NONE},
-  {"", TRIM_ALL, "", TRIM_NONE},
-  {"  ", TRIM_LEADING, "", TRIM_LEADING},
-  {"  ", TRIM_TRAILING, "", TRIM_TRAILING},
-  {"  ", TRIM_ALL, "", TRIM_ALL},
-  {"\t\rTest String\n", TRIM_ALL, "Test String", TRIM_ALL},
+    {" Google Video ", TRIM_LEADING, "Google Video ", TRIM_LEADING},
+    {" Google Video ", TRIM_TRAILING, " Google Video", TRIM_TRAILING},
+    {" Google Video ", TRIM_ALL, "Google Video", TRIM_ALL},
+    {"Google Video", TRIM_ALL, "Google Video", TRIM_NONE},
+    {"", TRIM_ALL, "", TRIM_NONE},
+    {"  ", TRIM_LEADING, "", TRIM_LEADING},
+    {"  ", TRIM_TRAILING, "", TRIM_TRAILING},
+    {"  ", TRIM_ALL, "", TRIM_ALL},
+    {"\t\rTest String\n", TRIM_ALL, "Test String", TRIM_ALL},
 };
 
-namespace {
-
 // Helper used to test TruncateUTF8ToByteSize.
 bool Truncated(const std::string& input,
                const size_t byte_size,
@@ -72,7 +72,7 @@
 
 using TestFunction = bool (*)(StringPiece str);
 
-// Helper used to test IsStringUTF8{,AllowingNoncharacters}.
+// Helper used to test IsStringUTF8[AllowingNoncharacters].
 void TestStructurallyValidUtf8(TestFunction fn) {
   EXPECT_TRUE(fn("abc"));
   EXPECT_TRUE(fn("\xC2\x81"));
@@ -92,7 +92,7 @@
   EXPECT_TRUE(fn(kEmbeddedNull));
 }
 
-// Helper used to test IsStringUTF8{,AllowingNoncharacters}.
+// Helper used to test IsStringUTF8[AllowingNoncharacters].
 void TestStructurallyInvalidUtf8(TestFunction fn) {
   // Invalid encoding of U+1FFFE (0x8F instead of 0x9F)
   EXPECT_FALSE(fn("\xF0\x8F\xBF\xBE"));
@@ -151,7 +151,7 @@
   EXPECT_FALSE(fn(kUtf32LeBom));
 }
 
-// Helper used to test IsStringUTF8{,AllowingNoncharacters}.
+// Helper used to test IsStringUTF8[AllowingNoncharacters].
 void TestNoncharacters(TestFunction fn, bool expected_result) {
   EXPECT_EQ(fn("\xEF\xB7\x90"), expected_result);      // U+FDD0
   EXPECT_EQ(fn("\xEF\xB7\x9F"), expected_result);      // U+FDDF
@@ -192,8 +192,6 @@
   EXPECT_EQ(fn("\xF4\x8F\xBF\xBF"), expected_result);  // U+10FFFF
 }
 
-}  // namespace
-
 TEST(StringUtilTest, TruncateUTF8ToByteSize) {
   std::string output;
 
@@ -1354,6 +1352,7 @@
 }
 
 enum class StreamableTestEnum { kGreeting, kLocation };
+enum class NonStreamableTestEnum { kGreeting = 0, kLocation };
 
 std::ostream& operator<<(std::ostream& os, const StreamableTestEnum& value) {
   switch (value) {
@@ -1364,15 +1363,66 @@
   }
 }
 
-TEST(StringUtilTest, StreamableToString) {
-  EXPECT_EQ(StreamableToString("foo"), "foo");
-  EXPECT_EQ(StreamableToString(123), "123");
-  EXPECT_EQ(StreamableToString(StreamableTestEnum::kGreeting), "hello");
-  EXPECT_EQ(StreamableToString(StreamableTestEnum::kGreeting, " ",
-                               StreamableTestEnum::kLocation),
+class HasToString {
+ public:
+  std::string ToString() const { return "yay!"; }
+};
+
+class UnusualToString {
+ public:
+  HasToString ToString() const { return HasToString(); }
+};
+
+void Func() {}
+
+class NotStringifiable {};
+
+class OverloadsAddressOp {
+ public:
+  OverloadsAddressOp* operator&() { return nullptr; }
+  const OverloadsAddressOp* operator&() const { return nullptr; }
+};
+
+TEST(StringUtilTest, ToString) {
+  // Types with built-in <<.
+  EXPECT_EQ(ToString("foo"), "foo");
+  EXPECT_EQ(ToString(123), "123");
+
+  // Type with user-defined <<.
+  EXPECT_EQ(ToString(StreamableTestEnum::kGreeting), "hello");
+  EXPECT_EQ(ToString(StreamableTestEnum::kGreeting, " ",
+                     StreamableTestEnum::kLocation),
             "hello world");
-  EXPECT_EQ(StreamableToString("42 in hex is ", std::hex, 42),
-            "42 in hex is 2a");
+
+  // Type with user-defined ToString().
+  EXPECT_EQ(ToString(HasToString()), "yay!");
+
+  // Types with a ToString() that does not directly return a std::string should
+  // still work.
+  EXPECT_EQ(ToString(UnusualToString()), "yay!");
+
+  // Scoped enums without a defined << should print as their underlying type.
+  EXPECT_EQ(ToString(NonStreamableTestEnum::kLocation), "1");
+
+  // I/O manipulators should have their expected effect, not be printed as
+  // function pointers.
+  EXPECT_EQ(ToString("42 in hex is ", std::hex, 42), "42 in hex is 2a");
+
+  // We don't care about the actual address, but a function pointer should not
+  // be implicitly converted to bool.
+  EXPECT_NE(ToString(&Func), ToString(true));
+
+  // Functions should be treated like function pointers.
+  EXPECT_EQ(ToString(Func), ToString(&Func));
+
+  // Non-stringifiable types should be printed using a fallback.
+  EXPECT_NE(ToString(NotStringifiable()).find("-byte object at 0x"),
+            std::string::npos);
+
+  // Non-stringifiable types which overload operator& should print their real
+  // address.
+  EXPECT_NE(ToString(OverloadsAddressOp()),
+            ToString(static_cast<OverloadsAddressOp*>(nullptr)));
 }
 
 TEST(StringUtilTest, RemoveChars) {
@@ -1633,4 +1683,6 @@
   EXPECT_EQ(4u, live.size());
 }
 
+}  // namespace
+
 }  // namespace base
diff --git a/base/test/gtest_util.cc b/base/test/gtest_util.cc
index 7b4a39a..ea7d1ea 100644
--- a/base/test/gtest_util.cc
+++ b/base/test/gtest_util.cc
@@ -11,6 +11,7 @@
 #include "base/files/file_path.h"
 #include "base/json/json_file_value_serializer.h"
 #include "base/strings/string_util.h"
+#include "base/test/values_test_util.h"
 #include "base/values.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -66,8 +67,7 @@
     storage.Append(std::move(test_info));
   }
 
-  JSONFileValueSerializer serializer(path);
-  return serializer.Serialize(storage);
+  return base::test::WriteJsonFile(path, storage).has_value();
 }
 
 bool ReadTestNamesFromFile(const FilePath& path,
diff --git a/base/test/memory/dangling_ptr_instrumentation.cc b/base/test/memory/dangling_ptr_instrumentation.cc
new file mode 100644
index 0000000..0a4a789
--- /dev/null
+++ b/base/test/memory/dangling_ptr_instrumentation.cc
@@ -0,0 +1,97 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "base/test/memory/dangling_ptr_instrumentation.h"
+
+#include <cstdint>
+
+#include "base/allocator/partition_alloc_features.h"
+#include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"
+#include "base/check_op.h"
+#include "base/feature_list.h"
+#include "base/memory/raw_ptr.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base::test {
+
+// static
+base::expected<DanglingPtrInstrumentation, base::StringPiece>
+DanglingPtrInstrumentation::Create() {
+  if (!FeatureList::IsEnabled(features::kPartitionAllocBackupRefPtr)) {
+    return base::unexpected(
+        "DanglingPtrInstrumentation requires the feature flag "
+        "'PartitionAllocBackupRefPtr' to be on.");
+  }
+  // Note: We don't need to enable the `PartitionAllocDanglingPtr` feature,
+  // because this class provides an alternative "implementation" by
+  // incrementing the two counters.
+
+#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+  return base::unexpected(
+      "DanglingPtrInstrumentation requires the binary flag "
+      "'use_partition_alloc_as_malloc' to be on.");
+#elif !BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
+  return base::unexpected(
+      "DanglingPtrInstrumentation requires the binary flag "
+      "'enable_dangling_raw_ptr_checks' to be on.");
+#elif BUILDFLAG(ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT)
+  return base::unexpected(
+      "DanglingPtrInstrumentation requires the binary flag "
+      "'enable_dangling_raw_ptr_perf_experiment' to be off.");
+#else
+  return DanglingPtrInstrumentation();
+#endif
+}
+
+DanglingPtrInstrumentation::DanglingPtrInstrumentation() {
+  Register();
+}
+
+DanglingPtrInstrumentation::~DanglingPtrInstrumentation() {
+  Unregister();
+}
+
+DanglingPtrInstrumentation::DanglingPtrInstrumentation(
+    DanglingPtrInstrumentation&& old) {
+  operator=(std::move(old));
+}
+
+DanglingPtrInstrumentation& DanglingPtrInstrumentation::operator=(
+    DanglingPtrInstrumentation&& old) {
+  old.Unregister();
+  Register();
+  return *this;
+}
+
+void DanglingPtrInstrumentation::Register() {
+  CHECK_EQ(g_observer, nullptr);
+  g_observer = this;
+  old_detected_fn_ = partition_alloc::GetDanglingRawPtrDetectedFn();
+  old_dereferenced_fn_ = partition_alloc::GetDanglingRawPtrReleasedFn();
+  partition_alloc::SetDanglingRawPtrDetectedFn(IncreaseCountDetected);
+  partition_alloc::SetDanglingRawPtrReleasedFn(IncreaseCountReleased);
+}
+
+void DanglingPtrInstrumentation::Unregister() {
+  if (g_observer != this) {
+    return;
+  }
+  g_observer = nullptr;
+  partition_alloc::SetDanglingRawPtrDetectedFn(old_detected_fn_);
+  partition_alloc::SetDanglingRawPtrReleasedFn(old_dereferenced_fn_);
+}
+
+raw_ptr<DanglingPtrInstrumentation> DanglingPtrInstrumentation::g_observer =
+    nullptr;
+
+// static
+void DanglingPtrInstrumentation::IncreaseCountDetected(std::uintptr_t) {
+  g_observer->dangling_ptr_detected_++;
+}
+
+// static
+void DanglingPtrInstrumentation::IncreaseCountReleased(std::uintptr_t) {
+  g_observer->dangling_ptr_released_++;
+}
+
+}  // namespace base::test
diff --git a/base/test/memory/dangling_ptr_instrumentation.h b/base/test/memory/dangling_ptr_instrumentation.h
new file mode 100644
index 0000000..37fc563
--- /dev/null
+++ b/base/test/memory/dangling_ptr_instrumentation.h
@@ -0,0 +1,73 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_MEMORY_DANGLING_PTR_INSTRUMENTATION_H_
+#define BASE_TEST_MEMORY_DANGLING_PTR_INSTRUMENTATION_H_
+
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"
+#include "base/memory/raw_ptr.h"
+#include "base/strings/string_piece.h"
+#include "base/types/expected.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base::test {
+
+// It is difficult to configure malloc as partition_alloc in death tests and
+// to enable BackupRefPtr there. This class can be used as an alternative:
+// instead of crashing, it increments a set of counters.
+//
+// Usage:
+//
+// ```cpp
+// TEST(DanglingTest, Basic) {
+//   auto instrumentation = test::DanglingPtrInstrumentation::Create();
+//   if (!instrumentation.has_value()) {
+//     GTEST_SKIP() << instrumentation.error();
+//   }
+//
+//   [...]
+//   EXPECT_EQ(instrumentation->dangling_ptr_detected(), 0u);
+//   EXPECT_EQ(instrumentation->dangling_ptr_released(), 0u);
+// }
+// ```
+class DanglingPtrInstrumentation {
+ public:
+  // Returns the DanglingPtrInstrumentation or a reason why it can't be used,
+  // in which case the test should be skipped.
+  //
+  // This function should typically be called from the `testing::Test::SetUp()`
+  // override so that it can skip the test with `GTEST_SKIP()` on failure.
+  static base::expected<DanglingPtrInstrumentation, base::StringPiece> Create();
+
+  ~DanglingPtrInstrumentation();
+  DanglingPtrInstrumentation(const DanglingPtrInstrumentation&) = delete;
+  DanglingPtrInstrumentation(DanglingPtrInstrumentation&&);
+  DanglingPtrInstrumentation& operator=(const DanglingPtrInstrumentation&) =
+      delete;
+  DanglingPtrInstrumentation& operator=(DanglingPtrInstrumentation&&);
+
+  size_t dangling_ptr_detected() { return dangling_ptr_detected_; }
+  size_t dangling_ptr_released() { return dangling_ptr_released_; }
+
+ private:
+  static void IncreaseCountDetected(std::uintptr_t);
+  static void IncreaseCountReleased(std::uintptr_t);
+  static raw_ptr<DanglingPtrInstrumentation> g_observer;
+
+  DanglingPtrInstrumentation();
+
+  void Register();
+  void Unregister();
+
+  size_t dangling_ptr_detected_ = 0;
+  size_t dangling_ptr_released_ = 0;
+  partition_alloc::DanglingRawPtrDetectedFn* old_detected_fn_ = nullptr;
+  partition_alloc::DanglingRawPtrReleasedFn* old_dereferenced_fn_ = nullptr;
+};
+
+}  // namespace base::test
+
+#endif  // BASE_TEST_MEMORY_DANGLING_PTR_INSTRUMENTATION_H_
diff --git a/base/test/test_suite.cc b/base/test/test_suite.cc
index 94c7234..ff9b2d9 100644
--- a/base/test/test_suite.cc
+++ b/base/test/test_suite.cc
@@ -177,12 +177,10 @@
     *CommandLine::ForCurrentProcess() = new_command_line;
 
     // TODO(https://crbug.com/1400059): Enable dangling pointer detector.
-    // TODO(https://crbug.com/1400058): Enable BackupRefPtr in unittests on
-    // Windows and Android too.
     // TODO(https://crbug.com/1413674): Enable PartitionAlloc in unittests with
     // ASAN.
-#if BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_WIN) && \
-    !BUILDFLAG(IS_ANDROID) && !defined(ADDRESS_SANITIZER)
+#if BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_ANDROID) && \
+    !defined(ADDRESS_SANITIZER)
     allocator::PartitionAllocSupport::Get()->ReconfigureAfterFeatureListInit(
         "", /*configure_dangling_pointer_detector=*/false);
 #endif
@@ -597,11 +595,11 @@
 #endif
 
   // TODO(https://crbug.com/1400058): Enable BackupRefPtr in unittests on
-  // Windows and Android too. Same for ASAN.
+  // Android too. Same for ASAN.
   // TODO(https://crbug.com/1413674): Enable PartitionAlloc in unittests with
   // ASAN.
-#if BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_WIN) && \
-    !BUILDFLAG(IS_ANDROID) && !defined(ADDRESS_SANITIZER)
+#if BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_ANDROID) && \
+    !defined(ADDRESS_SANITIZER)
   allocator::PartitionAllocSupport::Get()->ReconfigureForTests();
 #endif  // BUILDFLAG(IS_WIN)
 
diff --git a/base/test/values_test_util.cc b/base/test/values_test_util.cc
index 5dc14e4..d3a61da 100644
--- a/base/test/values_test_util.cc
+++ b/base/test/values_test_util.cc
@@ -7,6 +7,7 @@
 #include <ostream>
 #include <utility>
 
+#include "base/files/file_util.h"
 #include "base/json/json_reader.h"
 #include "base/json/json_writer.h"
 #include "base/memory/ptr_util.h"
@@ -276,5 +277,17 @@
   return result.has_value() ? std::move(*result).TakeList() : Value::List();
 }
 
+expected<void, WriteJsonError> WriteJsonFile(const FilePath& json_file_path,
+                                             ValueView root) {
+  std::string json;
+  if (!JSONWriter::Write(root, &json)) {
+    return unexpected(WriteJsonError::kGenerateJsonFailure);
+  }
+  if (!WriteFile(json_file_path, json)) {
+    return unexpected(WriteJsonError::kWriteFileFailure);
+  }
+  return {};
+}
+
 }  // namespace test
 }  // namespace base
diff --git a/base/test/values_test_util.h b/base/test/values_test_util.h
index 4538724..c7cf725 100644
--- a/base/test/values_test_util.h
+++ b/base/test/values_test_util.h
@@ -9,7 +9,9 @@
 #include <memory>
 #include <string>
 
+#include "base/files/file_path.h"
 #include "base/strings/string_piece.h"
+#include "base/types/expected.h"
 #include "base/values.h"
 #include "testing/gmock/include/gmock/gmock-matchers.h"
 
@@ -106,6 +108,21 @@
 Value::Dict ParseJsonDict(StringPiece json);
 Value::List ParseJsonList(StringPiece json);
 
+// An enumeration of the possible error types when calling
+// `WriteJsonFile`.
+enum class WriteJsonError {
+  // Failed to generate a json string with the value provided.
+  kGenerateJsonFailure,
+
+  // Failed to write the json string into a file.
+  kWriteFileFailure,
+};
+
+// Serializes `root` as a JSON string to a file. Returns an empty expected
+// when successful; otherwise returns an error.
+expected<void, WriteJsonError> WriteJsonFile(const FilePath& json_file_path,
+                                             ValueView root);
+
 }  // namespace test
 }  // namespace base
 
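A short, hypothetical usage sketch for the new helper; the `expected<void, ...>` return carries no value on success, which is why gtest_util.cc above checks only `has_value()`:

```cpp
#include "base/files/file_path.h"
#include "base/test/values_test_util.h"
#include "base/values.h"

// Hypothetical caller: writes a small dict and reports failure. Only
// WriteJsonFile and WriteJsonError come from the patch above.
bool DumpStatus(const base::FilePath& path) {
  base::Value::Dict dict;
  dict.Set("status", "ok");
  auto result = base::test::WriteJsonFile(path, dict);
  if (!result.has_value()) {
    // result.error() is kGenerateJsonFailure or kWriteFileFailure.
    return false;
  }
  return true;
}
```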
diff --git a/base/threading/platform_thread_unittest.cc b/base/threading/platform_thread_unittest.cc
index a4e34ee..5e08929 100644
--- a/base/threading/platform_thread_unittest.cc
+++ b/base/threading/platform_thread_unittest.cc
@@ -12,6 +12,7 @@
 #include "base/test/scoped_feature_list.h"
 #include "base/threading/thread.h"
 #include "base/threading/threading_features.h"
+#include "build/blink_buildflags.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -539,7 +540,9 @@
 
 TEST(PlatformThreadTest, GetDefaultThreadStackSize) {
   size_t stack_size = PlatformThread::GetDefaultThreadStackSize();
-#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_IOS) || BUILDFLAG(IS_FUCHSIA) ||        \
+#if BUILDFLAG(IS_IOS) && BUILDFLAG(USE_BLINK)
+  EXPECT_EQ(1024u * 1024u, stack_size);
+#elif BUILDFLAG(IS_WIN) || BUILDFLAG(IS_IOS) || BUILDFLAG(IS_FUCHSIA) ||      \
     ((BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)) && defined(__GLIBC__) && \
      !defined(THREAD_SANITIZER)) ||                                           \
     (BUILDFLAG(IS_ANDROID) && !defined(ADDRESS_SANITIZER))
diff --git a/base/trace_event/cfi_backtrace_android.cc b/base/trace_event/cfi_backtrace_android.cc
index 7c4254f..7118194 100644
--- a/base/trace_event/cfi_backtrace_android.cc
+++ b/base/trace_event/cfi_backtrace_android.cc
@@ -10,6 +10,7 @@
 #include "base/android/apk_assets.h"
 #include "base/android/library_loader/anchor_functions.h"
 #include "build/build_config.h"
+#include "third_party/abseil-cpp/absl/base/attributes.h"
 
 #if !defined(ARCH_CPU_ARMEL)
 #error This file should not be built for this architecture.
@@ -119,6 +120,8 @@
     sizeof(CFIUnwindDataRow) == 4,
     "The CFIUnwindDataRow struct must be exactly 4 bytes for searching.");
 
+ABSL_CONST_INIT thread_local CFIBacktraceAndroid::CFICache cfi_cache;
+
 }  // namespace
 
 // static
@@ -142,32 +145,28 @@
   return base::android::kEndOfText;
 }
 
-CFIBacktraceAndroid::CFIBacktraceAndroid()
-    : thread_local_cfi_cache_(
-          [](void* ptr) { delete static_cast<CFICache*>(ptr); }) {
-  Initialize();
-}
-
-CFIBacktraceAndroid::~CFIBacktraceAndroid() {}
-
-void CFIBacktraceAndroid::Initialize() {
+CFIBacktraceAndroid::CFIBacktraceAndroid() {
   // This file name is defined by extract_unwind_tables.gni.
   static constexpr char kCfiFileName[] = "assets/unwind_cfi_32";
   static constexpr char kSplitName[] = "stack_unwinder";
 
   MemoryMappedFile::Region cfi_region;
   int fd = base::android::OpenApkAsset(kCfiFileName, kSplitName, &cfi_region);
-  if (fd < 0)
+  if (fd < 0) {
     return;
+  }
   cfi_mmap_ = std::make_unique<MemoryMappedFile>();
   // The CFI region starts at |cfi_region.offset|.
-  if (!cfi_mmap_->Initialize(base::File(fd), cfi_region))
+  if (!cfi_mmap_->Initialize(base::File(fd), cfi_region)) {
     return;
+  }
 
   ParseCFITables();
   can_unwind_stack_frames_ = true;
 }
 
+CFIBacktraceAndroid::~CFIBacktraceAndroid() = default;
+
 void CFIBacktraceAndroid::ParseCFITables() {
   // The first 4 bytes in the file is the number of entries in UNW_INDEX table.
   size_t unw_index_size = 0;
@@ -243,19 +242,15 @@
   return depth;
 }
 
-void CFIBacktraceAndroid::AllocateCacheForCurrentThread() {
-  GetThreadLocalCFICache();
-}
-
 bool CFIBacktraceAndroid::FindCFIRowForPC(uintptr_t func_addr,
                                           CFIBacktraceAndroid::CFIRow* cfi) {
   if (!can_unwind_stack_frames())
     return false;
 
-  auto* cache = GetThreadLocalCFICache();
   *cfi = {0};
-  if (cache->Find(func_addr, cfi))
+  if (cfi_cache.Find(func_addr, cfi)) {
     return true;
+  }
 
   // Consider each column of UNW_INDEX table as arrays of uintptr_t (function
   // addresses) and uint16_t (indices). Define start and end iterator on the
@@ -326,19 +321,10 @@
   DCHECK(cfi->ra_offset);
 
   // safe to update since the cache is thread local.
-  cache->Add(func_addr, *cfi);
+  cfi_cache.Add(func_addr, *cfi);
   return true;
 }
 
-CFIBacktraceAndroid::CFICache* CFIBacktraceAndroid::GetThreadLocalCFICache() {
-  auto* cache = static_cast<CFICache*>(thread_local_cfi_cache_.Get());
-  if (!cache) {
-    cache = new CFICache();
-    thread_local_cfi_cache_.Set(cache);
-  }
-  return cache;
-}
-
 void CFIBacktraceAndroid::CFICache::Add(uintptr_t address, CFIRow cfi) {
   cache_[address % kLimit] = {address, cfi};
 }
diff --git a/base/trace_event/cfi_backtrace_android.h b/base/trace_event/cfi_backtrace_android.h
index b07fcdb5..06e5605 100644
--- a/base/trace_event/cfi_backtrace_android.h
+++ b/base/trace_event/cfi_backtrace_android.h
@@ -15,7 +15,6 @@
 #include "base/files/memory_mapped_file.h"
 #include "base/gtest_prod_util.h"
 #include "base/memory/raw_ptr.h"
-#include "base/threading/thread_local_storage.h"
 
 namespace base {
 namespace trace_event {
@@ -32,49 +31,6 @@
 // data.
 class BASE_EXPORT CFIBacktraceAndroid {
  public:
-  // Creates and initializes by memory mapping the unwind tables from apk assets
-  // on first call.
-  static CFIBacktraceAndroid* GetInitializedInstance();
-
-  // Returns true if the given program counter |pc| is mapped in chrome library.
-  static bool is_chrome_address(uintptr_t pc);
-
-  // Returns the start and end address of the current library.
-  static uintptr_t executable_start_addr();
-  static uintptr_t executable_end_addr();
-
-  // Returns true if stack unwinding is possible using CFI unwind tables in apk.
-  // There is no need to check this before each unwind call. Will always return
-  // the same value based on CFI tables being present in the binary.
-  bool can_unwind_stack_frames() const { return can_unwind_stack_frames_; }
-
-  // Returns the program counters by unwinding stack in the current thread in
-  // order of latest call frame first. Unwinding works only if
-  // can_unwind_stack_frames() returns true. This function allocates memory from
-  // heap for cache on the first call of the calling thread, unless
-  // AllocateCacheForCurrentThread() is called from the thread. For each stack
-  // frame, this method searches through the unwind table mapped in memory to
-  // find the unwind information for function and walks the stack to find all
-  // the return address. This only works until the last function call from the
-  // chrome.so. We do not have unwind information to unwind beyond any frame
-  // outside of chrome.so. Calls to Unwind() are thread safe and lock free, once
-  // Initialize() returns success.
-  size_t Unwind(const void** out_trace, size_t max_depth);
-
-  // Same as above function, but starts from a given program counter |pc|,
-  // stack pointer |sp| and link register |lr|. This can be from current thread
-  // or any other thread. But the caller must make sure that the thread's stack
-  // segment is not racy to read.
-  size_t Unwind(uintptr_t pc,
-                uintptr_t sp,
-                uintptr_t lr,
-                const void** out_trace,
-                size_t max_depth);
-
-  // Allocates memory for CFI cache for the current thread so that Unwind()
-  // calls are safe for signal handlers.
-  void AllocateCacheForCurrentThread();
-
   // The CFI information that correspond to an instruction.
   struct CFIRow {
     bool operator==(const CFIBacktraceAndroid::CFIRow& o) const {
@@ -90,15 +46,6 @@
     uint16_t ra_offset = 0;
   };
 
-  // Finds the CFI row for the given |func_addr| in terms of offset from
-  // the start of the current binary. Concurrent calls are thread safe.
-  bool FindCFIRowForPC(uintptr_t func_addr, CFIRow* out);
-
- private:
-  FRIEND_TEST_ALL_PREFIXES(CFIBacktraceAndroidTest, TestCFICache);
-  FRIEND_TEST_ALL_PREFIXES(CFIBacktraceAndroidTest, TestFindCFIRow);
-  FRIEND_TEST_ALL_PREFIXES(CFIBacktraceAndroidTest, TestUnwinding);
-
   // A simple cache that stores entries in a table using prime modulo hashing.
   // This cache with 500 entries already gives us 95% hit rate, and fits in a
   // single system page (usually 4KiB). Using a thread local cache for each
@@ -130,8 +77,50 @@
   static_assert(sizeof(CFIBacktraceAndroid::CFICache) < 4096,
                 "The cache does not fit in a single page.");
 
-  CFIBacktraceAndroid();
-  ~CFIBacktraceAndroid();
+  // Creates the instance and initializes it by memory-mapping the unwind
+  // tables from apk assets on the first call.
+  static CFIBacktraceAndroid* GetInitializedInstance();
+
+  // Returns true if the given program counter |pc| is mapped in chrome library.
+  static bool is_chrome_address(uintptr_t pc);
+
+  // Returns the start and end address of the current library.
+  static uintptr_t executable_start_addr();
+  static uintptr_t executable_end_addr();
+
+  // Returns true if stack unwinding is possible using CFI unwind tables in apk.
+  // There is no need to check this before each unwind call. Will always return
+  // the same value based on CFI tables being present in the binary.
+  bool can_unwind_stack_frames() const { return can_unwind_stack_frames_; }
+
+  // Returns the program counters by unwinding the stack of the current
+  // thread, with the latest call frame first. Unwinding works only if
+  // can_unwind_stack_frames() returns true. For each stack frame, this method
+  // searches through the unwind table mapped in memory to find the unwind
+  // information for the function and walks the stack to find all the return
+  // addresses. This only works until the last function call from chrome.so;
+  // we do not have unwind information to unwind beyond any frame outside of
+  // chrome.so. Calls to Unwind() are thread safe and lock free.
+  size_t Unwind(const void** out_trace, size_t max_depth);
+
+  // Same as above function, but starts from a given program counter |pc|,
+  // stack pointer |sp| and link register |lr|. This can be from current thread
+  // or any other thread. But the caller must make sure that the thread's stack
+  // segment is not racy to read.
+  size_t Unwind(uintptr_t pc,
+                uintptr_t sp,
+                uintptr_t lr,
+                const void** out_trace,
+                size_t max_depth);
+
+  // Finds the CFI row for the given |func_addr| in terms of offset from
+  // the start of the current binary. Concurrent calls are thread safe.
+  bool FindCFIRowForPC(uintptr_t func_addr, CFIRow* out);
+
+ private:
+  FRIEND_TEST_ALL_PREFIXES(CFIBacktraceAndroidTest, TestCFICache);
+  FRIEND_TEST_ALL_PREFIXES(CFIBacktraceAndroidTest, TestFindCFIRow);
+  FRIEND_TEST_ALL_PREFIXES(CFIBacktraceAndroidTest, TestUnwinding);
 
   // Initializes unwind tables using the CFI asset file in the apk if present.
   // Also stores the limits of mapped region of the lib[mono]chrome.so binary,
@@ -141,13 +130,13 @@
   // heap profiling is turned off. But since we keep the memory map clean,
   // the system can choose to evict the unused pages when needed. This would
   // still reduce the total amount of address space available in process.
-  void Initialize();
+  CFIBacktraceAndroid();
+
+  ~CFIBacktraceAndroid();
 
   // Finds the UNW_INDEX and UNW_DATA tables from the CFI file memory map.
   void ParseCFITables();
 
-  CFICache* GetThreadLocalCFICache();
-
   // The start address of the memory mapped unwind table asset file. Unique ptr
   // because it is replaced in tests.
   std::unique_ptr<MemoryMappedFile> cfi_mmap_;
@@ -167,8 +156,6 @@
   raw_ptr<const uint16_t, AllowPtrArithmetic> unw_data_start_addr_ = nullptr;
 
   bool can_unwind_stack_frames_ = false;
-
-  ThreadLocalStorage::Slot thread_local_cfi_cache_;
 };
 
 }  // namespace trace_event
diff --git a/base/trace_event/trace_log.cc b/base/trace_event/trace_log.cc
index 00b7d64..a6887b1 100644
--- a/base/trace_event/trace_log.cc
+++ b/base/trace_event/trace_log.cc
@@ -10,6 +10,7 @@
 #include <unordered_set>
 #include <utility>
 
+#include "base/auto_reset.h"
 #include "base/base_switches.h"
 #include "base/command_line.h"
 #include "base/containers/contains.h"
@@ -98,16 +99,20 @@
 const size_t kEchoToConsoleTraceEventBufferChunks = 256;
 
 const size_t kTraceEventBufferSizeInBytes = 100 * 1024;
-#if !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
-const int kThreadFlushTimeoutMs = 3000;
-#endif
 
 #if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
-static bool g_perfetto_initialized_by_tracelog;
+bool g_perfetto_initialized_by_tracelog = false;
+#else
+constexpr TimeDelta kThreadFlushTimeout = Seconds(3);
 #endif  // BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
 
 TraceLog* g_trace_log_for_testing = nullptr;
 
+ABSL_CONST_INIT thread_local TraceLog::ThreadLocalEventBuffer*
+    thread_local_event_buffer = nullptr;
+ABSL_CONST_INIT thread_local bool thread_blocks_message_loop = false;
+ABSL_CONST_INIT thread_local bool thread_is_in_trace_event = false;
+
 ThreadTicks ThreadNow() {
   return ThreadTicks::IsSupported()
              ? base::subtle::ThreadTicksNowIgnoringOverride()
@@ -136,21 +141,6 @@
       &args, TRACE_EVENT_FLAG_NONE);
 }
 
-class AutoThreadLocalBoolean {
- public:
-  explicit AutoThreadLocalBoolean(ThreadLocalBoolean* thread_local_boolean)
-      : thread_local_boolean_(thread_local_boolean) {
-    DCHECK(!thread_local_boolean_->Get());
-    thread_local_boolean_->Set(true);
-  }
-  AutoThreadLocalBoolean(const AutoThreadLocalBoolean&) = delete;
-  AutoThreadLocalBoolean& operator=(const AutoThreadLocalBoolean&) = delete;
-  ~AutoThreadLocalBoolean() { thread_local_boolean_->Set(false); }
-
- private:
-  raw_ptr<ThreadLocalBoolean> thread_local_boolean_;
-};
-
 // Use this function instead of TraceEventHandle constructor to keep the
 // overhead of ScopedTracer (trace_event.h) constructor minimum.
 void MakeHandle(uint32_t chunk_seq,
@@ -497,9 +487,11 @@
   void FlushWhileLocked();
 
   void CheckThisIsCurrentBuffer() const {
-    DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
+    DCHECK_EQ(thread_local_event_buffer, this);
   }
 
+  const AutoReset<ThreadLocalEventBuffer*> resetter_{&thread_local_event_buffer,
+                                                     this, nullptr};
   // Since TraceLog is a leaky singleton, trace_log_ will always be valid
   // as long as the thread exists.
   raw_ptr<TraceLog> trace_log_;
@@ -532,14 +524,9 @@
   CurrentThread::Get()->RemoveDestructionObserver(this);
   MemoryDumpManager::GetInstance()->UnregisterDumpProvider(this);
 
-  {
-    AutoLock lock(trace_log_->lock_);
-    FlushWhileLocked();
-
-    auto thread_id = PlatformThread::CurrentId();
-    trace_log_->thread_task_runners_.erase(thread_id);
-  }
-  trace_log_->thread_local_event_buffer_.Set(nullptr);
+  AutoLock lock(trace_log_->lock_);
+  FlushWhileLocked();
+  trace_log_->thread_task_runners_.erase(PlatformThread::CurrentId());
 }
 
 TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
@@ -696,20 +683,17 @@
   // - to handle the final flush.
   // For a thread without a message loop or if the message loop may be blocked,
   // the trace events will be added into the main buffer directly.
-  if (thread_blocks_message_loop_.Get() || !CurrentThread::IsSet() ||
+  if (thread_blocks_message_loop || !CurrentThread::IsSet() ||
       !SingleThreadTaskRunner::HasCurrentDefault()) {
     return;
   }
   HEAP_PROFILER_SCOPED_IGNORE;
-  auto* thread_local_event_buffer = thread_local_event_buffer_.Get();
   if (thread_local_event_buffer &&
       !CheckGeneration(thread_local_event_buffer->generation())) {
     delete thread_local_event_buffer;
-    thread_local_event_buffer = nullptr;
   }
   if (!thread_local_event_buffer) {
     thread_local_event_buffer = new ThreadLocalEventBuffer(this);
-    thread_local_event_buffer_.Set(thread_local_event_buffer);
   }
 }
 
@@ -1374,7 +1358,7 @@
         FROM_HERE,
         BindOnce(&TraceLog::OnFlushTimeout, Unretained(this), gen,
                  discard_events),
-        Milliseconds(kThreadFlushTimeoutMs));
+        kThreadFlushTimeout);
     return;
   }
 
@@ -1511,7 +1495,7 @@
   }
 
   // This will flush the thread local buffer.
-  delete thread_local_event_buffer_.Get();
+  delete thread_local_event_buffer;
 
   auto on_flush_override = on_flush_override_.load(std::memory_order_relaxed);
   if (on_flush_override) {
@@ -1576,8 +1560,9 @@
   // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
   // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
   // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
-  if (thread_is_in_trace_event_.Get())
+  if (thread_is_in_trace_event) {
     return false;
+  }
 
   // Check and update the current thread name only if the event is for the
   // current thread to avoid locks in most cases.
@@ -1588,9 +1573,9 @@
     // call (if any), but don't bother if the new name is empty. Note this will
     // not detect a thread name change within the same char* buffer address: we
     // favor common case performance over corner case correctness.
-    static auto* current_thread_name = new ThreadLocalPointer<const char>();
-    if (new_name != current_thread_name->Get() && new_name && *new_name) {
-      current_thread_name->Set(new_name);
+    thread_local const char* current_thread_name = nullptr;
+    if (new_name != current_thread_name && new_name && *new_name) {
+      current_thread_name = new_name;
 
       AutoLock thread_info_lock(thread_info_lock_);
 
@@ -1605,8 +1590,9 @@
             existing_name->second, ",", base::KEEP_WHITESPACE,
             base::SPLIT_WANT_NONEMPTY);
         if (!Contains(existing_names, new_name)) {
-          if (!existing_names.empty())
+          if (!existing_names.empty()) {
             existing_name->second.push_back(',');
+          }
           existing_name->second.append(new_name);
         }
       }
@@ -1737,7 +1723,7 @@
   }
   DCHECK(!timestamp.is_null());
 
-  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);
+  const AutoReset<bool> resetter(&thread_is_in_trace_event, true, false);
 
   // Flow bind_ids don't have scopes, so we need to mangle in-process ones to
   // avoid collisions.
@@ -1748,12 +1734,12 @@
 
   TimeTicks offset_event_timestamp = OffsetTimestamp(timestamp);
 
-  ThreadLocalEventBuffer* thread_local_event_buffer = nullptr;
+  ThreadLocalEventBuffer* event_buffer = nullptr;
   if (*category_group_enabled & RECORDING_MODE) {
-    // |thread_local_event_buffer_| can be null if the current thread doesn't
+    // |thread_local_event_buffer| can be null if the current thread doesn't
     // have a message loop or the message loop is blocked.
     InitializeThreadLocalEventBufferIfSupported();
-    thread_local_event_buffer = thread_local_event_buffer_.Get();
+    event_buffer = thread_local_event_buffer;
   }
 
   if (*category_group_enabled & RECORDING_MODE) {
@@ -1764,9 +1750,9 @@
           thread_id, offset_event_timestamp, thread_timestamp, phase,
           category_group_enabled, name, scope, id, bind_id, args, flags);
 
-      trace_event_override(
-          &new_trace_event,
-          /*thread_will_flush=*/thread_local_event_buffer != nullptr, &handle);
+      trace_event_override(&new_trace_event,
+                           /*thread_will_flush=*/event_buffer != nullptr,
+                           &handle);
       return handle;
     }
   }
@@ -1779,8 +1765,8 @@
     OptionalAutoLock lock(&lock_);
 
     TraceEvent* trace_event = nullptr;
-    if (thread_local_event_buffer) {
-      trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
+    if (event_buffer) {
+      trace_event = event_buffer->AddTraceEvent(&handle);
     } else {
       lock.EnsureAcquired();
       trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
@@ -1907,9 +1893,10 @@
   // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
   // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
   // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
-  if (thread_is_in_trace_event_.Get())
+  if (thread_is_in_trace_event) {
     return;
-  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);
+  }
+  const AutoReset<bool> resetter(&thread_is_in_trace_event, true);
 
 #if BUILDFLAG(IS_WIN)
   // Generate an ETW event that marks the end of a complete event.
@@ -2056,9 +2043,9 @@
   DCHECK(handle.chunk_index <= TraceBufferChunk::kMaxChunkIndex);
   DCHECK(handle.event_index <= TraceBufferChunk::kTraceBufferChunkSize - 1);
 
-  if (thread_local_event_buffer_.Get()) {
+  if (thread_local_event_buffer) {
     TraceEvent* trace_event =
-        thread_local_event_buffer_.Get()->GetEventByHandle(handle);
+        thread_local_event_buffer->GetEventByHandle(handle);
     if (trace_event)
       return trace_event;
   }
@@ -2146,9 +2133,9 @@
 }
 
 void TraceLog::SetCurrentThreadBlocksMessageLoop() {
-  thread_blocks_message_loop_.Set(true);
+  thread_blocks_message_loop = true;
   // This will flush the thread local buffer.
-  delete thread_local_event_buffer_.Get();
+  delete thread_local_event_buffer;
 }
 
 TraceBuffer* TraceLog::CreateTraceBuffer() {
diff --git a/base/trace_event/trace_log.h b/base/trace_event/trace_log.h
index a8dfe9b..637125d 100644
--- a/base/trace_event/trace_log.h
+++ b/base/trace_event/trace_log.h
@@ -22,7 +22,6 @@
 #include "base/no_destructor.h"
 #include "base/task/single_thread_task_runner.h"
 #include "base/threading/platform_thread.h"
-#include "base/threading/thread_local.h"
 #include "base/time/time_override.h"
 #include "base/trace_event/category_registry.h"
 #include "base/trace_event/memory_dump_provider.h"
@@ -71,6 +70,8 @@
 #endif  // BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
     public MemoryDumpProvider {
  public:
+  class ThreadLocalEventBuffer;
+
   // Argument passed to TraceLog::SetEnabled.
   enum Mode : uint8_t {
     // Enables normal tracing (recording trace events in the trace buffer).
@@ -475,7 +476,6 @@
   InternalTraceOptions GetInternalOptionsFromTraceConfig(
       const TraceConfig& config);
 
-  class ThreadLocalEventBuffer;
   class OptionalAutoLock;
   struct RegisteredAsyncObserver;
 
@@ -602,10 +602,6 @@
 
   TraceConfig trace_config_;
 
-  ThreadLocalPointer<ThreadLocalEventBuffer> thread_local_event_buffer_;
-  ThreadLocalBoolean thread_blocks_message_loop_;
-  ThreadLocalBoolean thread_is_in_trace_event_;
-
   // Contains task runners for the threads that have had at least one event
   // added into the local event buffer.
   std::unordered_map<PlatformThreadId, scoped_refptr<SingleThreadTaskRunner>>
diff --git a/base/tracing/perfetto_task_runner.cc b/base/tracing/perfetto_task_runner.cc
index 8022be3..9f18e3a 100644
--- a/base/tracing/perfetto_task_runner.cc
+++ b/base/tracing/perfetto_task_runner.cc
@@ -7,6 +7,7 @@
 #include <memory>
 #include <utility>
 
+#include "base/auto_reset.h"
 #include "base/containers/contains.h"
 #include "base/functional/bind.h"
 #include "base/notreached.h"
@@ -15,8 +16,6 @@
 #include "base/task/sequenced_task_runner.h"
 #include "base/task/thread_pool.h"
 #include "base/task/thread_pool/thread_pool_instance.h"
-#include "base/threading/thread_local.h"
-#include "base/threading/thread_local_storage.h"
 #include "base/tracing/tracing_tls.h"
 #include "build/build_config.h"
 
@@ -56,8 +55,8 @@
             // to.
             // TODO(oysteine): Try to see if we can be more selective
             // about this.
-            AutoThreadLocalBoolean thread_is_in_trace_event(
-                GetThreadIsInTraceEventTLS());
+            const AutoReset<bool> resetter(GetThreadIsInTraceEvent(), true,
+                                           false);
             task();
           },
           task),
diff --git a/base/tracing/protos/chrome_track_event.proto b/base/tracing/protos/chrome_track_event.proto
index e9287ab..5a73842 100644
--- a/base/tracing/protos/chrome_track_event.proto
+++ b/base/tracing/protos/chrome_track_event.proto
@@ -288,6 +288,9 @@
   // Number of "worker delay" references active in the RenderProcessHost,
   // recorded when Cleanup() was called.
   optional uint32 worker_ref_count = 4;
+  // Number of "pending reuse" references active in the RenderProcessHost,
+  // recorded when Cleanup() was called.
+  optional uint32 pending_reuse_ref_count = 5;
 }
 
 message ChildProcessLauncherPriority {
diff --git a/base/tracing/tracing_tls.cc b/base/tracing/tracing_tls.cc
index cfbe200..1fd4064 100644
--- a/base/tracing/tracing_tls.cc
+++ b/base/tracing/tracing_tls.cc
@@ -4,15 +4,13 @@
 
 #include "base/tracing/tracing_tls.h"
 
-#include "base/no_destructor.h"
-
 namespace base {
 namespace tracing {
 
 // static
-ThreadLocalBoolean* GetThreadIsInTraceEventTLS() {
-  static base::NoDestructor<base::ThreadLocalBoolean> thread_is_in_trace_event;
-  return thread_is_in_trace_event.get();
+bool* GetThreadIsInTraceEvent() {
+  thread_local bool thread_is_in_trace_event = false;
+  return &thread_is_in_trace_event;
 }
 
 }  // namespace tracing
diff --git a/base/tracing/tracing_tls.h b/base/tracing/tracing_tls.h
index aa87031..c331226 100644
--- a/base/tracing/tracing_tls.h
+++ b/base/tracing/tracing_tls.h
@@ -6,9 +6,6 @@
 #define BASE_TRACING_TRACING_TLS_H_
 
 #include "base/base_export.h"
-#include "base/check.h"
-#include "base/memory/raw_ptr.h"
-#include "base/threading/thread_local.h"
 
 namespace base {
 namespace tracing {
@@ -16,24 +13,7 @@
 // Returns a thread-local flag that records whether the calling thread is
 // running trace event related code. This is used to avoid writing trace events
 // re-entrantly.
-BASE_EXPORT ThreadLocalBoolean* GetThreadIsInTraceEventTLS();
-
-// A scoped class for automatically setting and clearing a thread-local boolean
-// flag.
-class BASE_EXPORT AutoThreadLocalBoolean {
- public:
-  explicit AutoThreadLocalBoolean(ThreadLocalBoolean* thread_local_boolean)
-      : thread_local_boolean_(thread_local_boolean) {
-    DCHECK(!thread_local_boolean_->Get());
-    thread_local_boolean_->Set(true);
-  }
-  ~AutoThreadLocalBoolean() { thread_local_boolean_->Set(false); }
-  AutoThreadLocalBoolean(const AutoThreadLocalBoolean&) = delete;
-  AutoThreadLocalBoolean& operator=(const AutoThreadLocalBoolean&) = delete;
-
- private:
-  raw_ptr<base::ThreadLocalBoolean> thread_local_boolean_;
-};
+BASE_EXPORT bool* GetThreadIsInTraceEvent();
 
 }  // namespace tracing
 }  // namespace base
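Sketch of the re-entrancy guard that `GetThreadIsInTraceEvent()` supports together with `base::AutoReset`, mirroring its use in trace_log.cc and perfetto_task_runner.cc above. Stand-alone stand-ins, not the Chromium types:

```cpp
#include <utility>

thread_local bool g_in_trace_event = false;

bool* GetThreadIsInTraceEvent() {
  return &g_in_trace_event;
}

// Minimal AutoReset stand-in; the three-argument Chromium form additionally
// checks the expected old value (the trailing `false` at the call sites above).
template <typename T>
class AutoReset {
 public:
  AutoReset(T* var, T new_value)
      : var_(var), old_(std::exchange(*var, new_value)) {}
  ~AutoReset() { *var_ = old_; }

 private:
  T* var_;
  T old_;
};

int emitted = 0;

void AddTraceEvent() {
  if (*GetThreadIsInTraceEvent()) {
    return;  // Already tracing on this thread: bail out instead of recursing.
  }
  const AutoReset<bool> resetter(GetThreadIsInTraceEvent(), true);
  ++emitted;
  AddTraceEvent();  // A re-entrant call now returns immediately.
}

int main() {
  AddTraceEvent();
  return emitted == 1 ? 0 : 1;
}
```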
diff --git a/base/types/variant_util.h b/base/types/variant_util.h
new file mode 100644
index 0000000..3a40f46
--- /dev/null
+++ b/base/types/variant_util.h
@@ -0,0 +1,52 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TYPES_VARIANT_UTIL_H_
+#define BASE_TYPES_VARIANT_UTIL_H_
+
+#include <stddef.h>
+
+#include <type_traits>
+
+#include "base/types/always_false.h"
+#include "third_party/abseil-cpp/absl/types/variant.h"
+
+namespace base {
+namespace internal {
+
+template <typename Variant, typename T>
+struct VariantIndexOfTypeHelper {
+  static_assert(AlwaysFalse<Variant>, "Variant must be an absl::variant<...>");
+};
+
+template <typename... Ts, typename T>
+struct VariantIndexOfTypeHelper<absl::variant<Ts...>, T> {
+  static constexpr size_t Index() {
+    static_assert(std::is_constructible_v<absl::variant<LiteralType<Ts>...>,
+                                          LiteralType<T>>,
+                  "Variant is not constructible from T");
+    return absl::variant<LiteralType<Ts>...>(LiteralType<T>()).index();
+  }
+
+  // Helper struct; even if `Tag` is not usable as a literal type, a
+  // `LiteralType<Tag>` will be.
+  template <typename Tag>
+  struct LiteralType {};
+};
+
+}  // namespace internal
+
+// Returns the 0-based index of `T` in `Variant`'s list of alternative types,
+// e.g. given `Variant` == `absl::variant<A, B, C>` and `T` == `B`, returns 1.
+//
+// Note that this helper cannot be used if the list of alternative types
+// contains duplicates.
+template <typename Variant, typename T>
+constexpr size_t VariantIndexOfType() {
+  return internal::VariantIndexOfTypeHelper<Variant, T>::Index();
+}
+
+}  // namespace base
+
+#endif  // BASE_TYPES_VARIANT_UTIL_H_
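The `LiteralType` indirection above exists because evaluating `.index()` in a constant expression requires constructing the variant at compile time, which the real alternatives may not allow. A standalone sketch of the same trick using `std::variant`:

```cpp
#include <cstddef>
#include <variant>

template <typename Tag>
struct LiteralType {};  // Empty, hence always a literal type.

template <typename Variant, typename T>
struct IndexHelper;

template <typename... Ts, typename T>
struct IndexHelper<std::variant<Ts...>, T> {
  static constexpr size_t Index() {
    // Overload resolution over variant<LiteralType<Ts>...> selects the same
    // alternative position that T occupies in variant<Ts...>.
    return std::variant<LiteralType<Ts>...>(LiteralType<T>()).index();
  }
};

struct NotLiteral {
  ~NotLiteral() {}  // Non-trivial destructor: not constructible in constexpr.
};

static_assert(
    IndexHelper<std::variant<int, NotLiteral, double>, NotLiteral>::Index() ==
    1);

int main() {}
```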
diff --git a/base/types/variant_util_unittest.cc b/base/types/variant_util_unittest.cc
new file mode 100644
index 0000000..b421203
--- /dev/null
+++ b/base/types/variant_util_unittest.cc
@@ -0,0 +1,22 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/types/variant_util.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/abseil-cpp/absl/types/variant.h"
+
+namespace base {
+namespace {
+
+TEST(VariantUtilTest, IndexOfType) {
+  using TestType = absl::variant<bool, int, double>;
+
+  static_assert(VariantIndexOfType<TestType, bool>() == 0);
+  static_assert(VariantIndexOfType<TestType, int>() == 1);
+  static_assert(VariantIndexOfType<TestType, double>() == 2);
+}
+
+}  // namespace
+}  // namespace base
diff --git a/base/types/variant_util_unittest.nc b/base/types/variant_util_unittest.nc
new file mode 100644
index 0000000..aec037b
--- /dev/null
+++ b/base/types/variant_util_unittest.nc
@@ -0,0 +1,23 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a "No Compile Test" suite.
+// http://dev.chromium.org/developers/testing/no-compile-tests
+
+#include "base/types/variant_util.h"
+#include "third_party/abseil-cpp/absl/types/variant.h"
+
+namespace base {
+
+#if defined(NCTEST_DUPLICATE_ALTERNATIVE_TYPES)  // [r"Variant is not constructible from T"]
+
+inline constexpr size_t kValue = VariantIndexOfType<absl::variant<int, int>, int>();
+
+#elif defined(NCTEST_NOT_AN_ALTERNATIVE_TYPE)  // [r"Variant is not constructible from T"]
+
+inline constexpr size_t kValue = VariantIndexOfType<absl::variant<int, int>, bool>();
+
+#endif
+
+}  // namespace base
diff --git a/libchrome_tools/patches/long-term-2100-Use-template_util-remove_cvref_t-pre-C-20.patch b/libchrome_tools/patches/long-term-2100-Use-template_util-remove_cvref_t-pre-C-20.patch
new file mode 100644
index 0000000..f18a686
--- /dev/null
+++ b/libchrome_tools/patches/long-term-2100-Use-template_util-remove_cvref_t-pre-C-20.patch
@@ -0,0 +1,28 @@
+From 14acd9b32c31ea3d3f65f5d5094b673804984a6b Mon Sep 17 00:00:00 2001
+From: Jae Hoon Kim <kimjae@chromium.org>
+Date: Fri, 10 Mar 2023 06:40:26 +0000
+Subject: [PATCH] Use template_util remove_cvref_t pre-C++20
+
+Until libchrome in CrOS is built using C++20.
+
+Change-Id: I0b9035974ccaa6386113089a8b97c0afb3e2fd4f
+---
+ base/strings/string_util.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/base/strings/string_util.h b/base/strings/string_util.h
+index 120ffb20be..bda1cdf18f 100644
+--- a/base/strings/string_util.h
++++ b/base/strings/string_util.h
+@@ -197,7 +197,7 @@ struct ToStringHelper<T,
+ template <typename... Ts>
+ std::string ToString(const Ts&... values) {
+   std::ostringstream ss;
+-  (ToStringHelper<typename std::remove_cvref_t<decltype(values)>>::Stringify(
++  (ToStringHelper<remove_cvref_t<decltype(values)>>::Stringify(
+        values, ss),
+    ...);
+   return ss.str();
+-- 
+2.40.0.rc1.284.g88254d51c5-goog
+
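
The substituted alias comes from base/template_util.h; a polyfill along these
lines is the usual pre-C++20 spelling (a sketch, not the verbatim libchrome
definition):

  #include <type_traits>

  // Pre-C++20 equivalent of std::remove_cvref_t: strip the reference
  // first, then any const/volatile qualifiers on the referred-to type.
  template <typename T>
  struct remove_cvref {
    using type = std::remove_cv_t<std::remove_reference_t<T>>;
  };

  template <typename T>
  using remove_cvref_t = typename remove_cvref<T>::type;

  static_assert(std::is_same_v<remove_cvref_t<const int&>, int>);
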
diff --git a/mojo/public/rust/tests/integration/mojom_validation.rs b/mojo/public/rust/tests/integration/mojom_validation.rs
index e43b64b..664daac 100644
--- a/mojo/public/rust/tests/integration/mojom_validation.rs
+++ b/mojo/public/rust/tests/integration/mojom_validation.rs
@@ -866,8 +866,9 @@
 
 impl CastHandle for InterfaceAClient {
     unsafe fn from_untyped(handle: system::UntypedHandle) -> InterfaceAClient {
+        let pipe = unsafe { message_pipe::MessageEndpoint::from_untyped(handle) };
         InterfaceAClient {
-            pipe: message_pipe::MessageEndpoint::from_untyped(handle),
+            pipe,
             version: 0, // Since we have no other information, assume it's the base version
         }
     }
@@ -916,8 +917,9 @@
 
 impl CastHandle for InterfaceAServer {
     unsafe fn from_untyped(handle: system::UntypedHandle) -> InterfaceAServer {
+        let pipe = unsafe { message_pipe::MessageEndpoint::from_untyped(handle) };
         InterfaceAServer {
-            pipe: message_pipe::MessageEndpoint::from_untyped(handle),
+            pipe,
             version: 0, // Since we have no other information, assume it's the base version
         }
     }
@@ -1014,8 +1016,9 @@
 
 impl CastHandle for BoundsCheckTestInterfaceClient {
     unsafe fn from_untyped(handle: system::UntypedHandle) -> BoundsCheckTestInterfaceClient {
+        let pipe = unsafe { message_pipe::MessageEndpoint::from_untyped(handle) };
         BoundsCheckTestInterfaceClient {
-            pipe: message_pipe::MessageEndpoint::from_untyped(handle),
+            pipe,
             version: 0, // Since we have no other information, assume it's the base version
         }
     }
@@ -1067,8 +1070,9 @@
 
 impl CastHandle for BoundsCheckTestInterfaceServer {
     unsafe fn from_untyped(handle: system::UntypedHandle) -> BoundsCheckTestInterfaceServer {
+        let pipe = unsafe { message_pipe::MessageEndpoint::from_untyped(handle) };
         BoundsCheckTestInterfaceServer {
-            pipe: message_pipe::MessageEndpoint::from_untyped(handle),
+            pipe,
             version: 0, // Since we have no other information, assume it's the base version
         }
     }
@@ -1399,8 +1403,9 @@
 
 impl CastHandle for ConformanceTestInterfaceClient {
     unsafe fn from_untyped(handle: system::UntypedHandle) -> ConformanceTestInterfaceClient {
+        let pipe = unsafe { message_pipe::MessageEndpoint::from_untyped(handle) };
         ConformanceTestInterfaceClient {
-            pipe: message_pipe::MessageEndpoint::from_untyped(handle),
+            pipe,
             version: 0, // Since we have no other information, assume it's the base version
         }
     }
@@ -1452,8 +1457,9 @@
 
 impl CastHandle for ConformanceTestInterfaceServer {
     unsafe fn from_untyped(handle: system::UntypedHandle) -> ConformanceTestInterfaceServer {
+        let pipe = unsafe { message_pipe::MessageEndpoint::from_untyped(handle) };
         ConformanceTestInterfaceServer {
-            pipe: message_pipe::MessageEndpoint::from_untyped(handle),
+            pipe,
             version: 0, // Since we have no other information, assume it's the base version
         }
     }
@@ -2953,8 +2959,9 @@
 
 impl CastHandle for IntegrationTestInterfaceClient {
     unsafe fn from_untyped(handle: system::UntypedHandle) -> IntegrationTestInterfaceClient {
+        let pipe = unsafe { message_pipe::MessageEndpoint::from_untyped(handle) };
         IntegrationTestInterfaceClient {
-            pipe: message_pipe::MessageEndpoint::from_untyped(handle),
+            pipe,
             version: 0, // Since we have no other information, assume it's the base version
         }
     }
@@ -3006,8 +3013,9 @@
 
 impl CastHandle for IntegrationTestInterfaceServer {
     unsafe fn from_untyped(handle: system::UntypedHandle) -> IntegrationTestInterfaceServer {
+        let pipe = unsafe { message_pipe::MessageEndpoint::from_untyped(handle) };
         IntegrationTestInterfaceServer {
-            pipe: message_pipe::MessageEndpoint::from_untyped(handle),
+            pipe,
             version: 0, // Since we have no other information, assume it's the base version
         }
     }
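
The mechanical change repeated across these hunks hoists the unsafe call out
of the struct literal into an explicit `unsafe { ... }` block. Inside an
`unsafe fn`, Rust's `unsafe_op_in_unsafe_fn` lint (which this toolchain
evidently enables) requires unsafe operations to be wrapped explicitly rather
than relying on the function's own `unsafe` qualifier, keeping the audited
region as small as possible.
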
diff --git a/testing/gmock/include/gmock/gmock-matchers.h b/testing/gmock/include/gmock/gmock-matchers.h
new file mode 100644
index 0000000..6472746
--- /dev/null
+++ b/testing/gmock/include/gmock/gmock-matchers.h
@@ -0,0 +1 @@
+#include <gmock/gmock-matchers.h>
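
The one-line file above is a forwarding shim: including the matcher header
through the Chromium-style path testing/gmock/include/gmock/gmock-matchers.h
now resolves to the system-installed <gmock/gmock-matchers.h>, apparently
following the same pattern as the other gtest/gmock shim headers in this
tree.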