diff --git a/absl/base/macros.h b/absl/base/macros.h
index f9acdc8..ff89944 100644
--- a/absl/base/macros.h
+++ b/absl/base/macros.h
@@ -197,9 +197,9 @@
 // While open-source users do not have access to this service, the macro is
 // provided for compatibility, and so that users receive deprecation warnings.
 #if ABSL_HAVE_CPP_ATTRIBUTE(deprecated) && \
-    ABSL_HAVE_CPP_ATTRIBUTE(clang::annotate) && !defined(SWIG)
+    ABSL_HAVE_CPP_ATTRIBUTE(clang::annotate)
 #define ABSL_DEPRECATE_AND_INLINE() [[deprecated, clang::annotate("inline-me")]]
-#elif ABSL_HAVE_CPP_ATTRIBUTE(deprecated) && !defined(SWIG)
+#elif ABSL_HAVE_CPP_ATTRIBUTE(deprecated)
 #define ABSL_DEPRECATE_AND_INLINE() [[deprecated]]
 #else
 #define ABSL_DEPRECATE_AND_INLINE()
diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel
index 61e816f..34ac288 100644
--- a/absl/container/BUILD.bazel
+++ b/absl/container/BUILD.bazel
@@ -530,6 +530,7 @@
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
         ":common_policy_traits",
+        ":container_memory",
         "//absl/meta:type_traits",
     ],
 )
diff --git a/absl/container/CMakeLists.txt b/absl/container/CMakeLists.txt
index d8cd7d0..edc4a82 100644
--- a/absl/container/CMakeLists.txt
+++ b/absl/container/CMakeLists.txt
@@ -583,6 +583,7 @@
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
     absl::common_policy_traits
+    absl::container_memory
     absl::meta
   PUBLIC
diff --git a/absl/container/btree_map.h b/absl/container/btree_map.h
index 32a82ef..131f622 100644
--- a/absl/container/btree_map.h
+++ b/absl/container/btree_map.h
@@ -117,8 +117,8 @@
   //
   // * Copy assignment operator
   //
-  //  absl::btree_map<int, std::string> map4;
-  //  map4 = map3;
+  //   absl::btree_map<int, std::string> map4;
+  //   map4 = map3;
   //
   // * Move constructor
   //
@@ -555,8 +555,8 @@
   //
   // * Copy assignment operator
   //
-  //  absl::btree_multimap<int, std::string> map4;
-  //  map4 = map3;
+  //   absl::btree_multimap<int, std::string> map4;
+  //   map4 = map3;
   //
   // * Move constructor
   //
diff --git a/absl/container/btree_set.h b/absl/container/btree_set.h
index 16181de..44a39cf 100644
--- a/absl/container/btree_set.h
+++ b/absl/container/btree_set.h
@@ -119,8 +119,8 @@
   //
   // * Copy assignment operator
   //
-  //  absl::btree_set<std::string> set4;
-  //  set4 = set3;
+  //   absl::btree_set<std::string> set4;
+  //   set4 = set3;
   //
   // * Move constructor
   //
@@ -475,8 +475,8 @@
   //
   // * Copy assignment operator
   //
-  //  absl::btree_multiset<std::string> set4;
-  //  set4 = set3;
+  //   absl::btree_multiset<std::string> set4;
+  //   set4 = set3;
   //
   // * Move constructor
   //
diff --git a/absl/container/flat_hash_map.h b/absl/container/flat_hash_map.h
index bc86ced..5fa5023 100644
--- a/absl/container/flat_hash_map.h
+++ b/absl/container/flat_hash_map.h
@@ -115,18 +115,18 @@
 //   absl::flat_hash_map<std::string, std::string> ducks =
 //     {{"a", "huey"}, {"b", "dewey"}, {"c", "louie"}};
 //
-//  // Insert a new element into the flat hash map
-//  ducks.insert({"d", "donald"});
+//   // Insert a new element into the flat hash map
+//   ducks.insert({"d", "donald"});
 //
-//  // Force a rehash of the flat hash map
-//  ducks.rehash(0);
+//   // Force a rehash of the flat hash map
+//   ducks.rehash(0);
 //
-//  // Find the element with the key "b"
-//  std::string search_key = "b";
-//  auto result = ducks.find(search_key);
-//  if (result != ducks.end()) {
-//    std::cout << "Result: " << result->second << std::endl;
-//  }
+//   // Find the element with the key "b"
+//   std::string search_key = "b";
+//   auto result = ducks.find(search_key);
+//   if (result != ducks.end()) {
+//     std::cout << "Result: " << result->second << std::endl;
+//   }
 template <class K, class V, class Hash = DefaultHashContainerHash<K>,
           class Eq = DefaultHashContainerEq<K>,
           class Allocator = std::allocator<std::pair<const K, V>>>
@@ -158,9 +158,9 @@
   //
   // * Copy assignment operator
   //
-  //  // Hash functor and Comparator are copied as well
-  //  absl::flat_hash_map<int, std::string> map4;
-  //  map4 = map3;
+  //   // Hash functor and Comparator are copied as well
+  //   absl::flat_hash_map<int, std::string> map4;
+  //   map4 = map3;
   //
   // * Move constructor
   //
diff --git a/absl/container/flat_hash_set.h b/absl/container/flat_hash_set.h
index bf63eb5..bc1ceb1 100644
--- a/absl/container/flat_hash_set.h
+++ b/absl/container/flat_hash_set.h
@@ -114,16 +114,16 @@
 //   absl::flat_hash_set<std::string> ducks =
 //     {"huey", "dewey", "louie"};
 //
-//  // Insert a new element into the flat hash set
-//  ducks.insert("donald");
+//   // Insert a new element into the flat hash set
+//   ducks.insert("donald");
 //
-//  // Force a rehash of the flat hash set
-//  ducks.rehash(0);
+//   // Force a rehash of the flat hash set
+//   ducks.rehash(0);
 //
-//  // See if "dewey" is present
-//  if (ducks.contains("dewey")) {
-//    std::cout << "We found dewey!" << std::endl;
-//  }
+//   // See if "dewey" is present
+//   if (ducks.contains("dewey")) {
+//     std::cout << "We found dewey!" << std::endl;
+//   }
 template <class T, class Hash = DefaultHashContainerHash<T>,
           class Eq = DefaultHashContainerEq<T>,
           class Allocator = std::allocator<T>>
@@ -154,9 +154,9 @@
   //
   // * Copy assignment operator
   //
-  //  // Hash functor and Comparator are copied as well
-  //  absl::flat_hash_set<std::string> set4;
-  //  set4 = set3;
+  //   // Hash functor and Comparator are copied as well
+  //   absl::flat_hash_set<std::string> set4;
+  //   set4 = set3;
   //
   // * Move constructor
   //
diff --git a/absl/container/internal/container_memory.h b/absl/container/internal/container_memory.h
index e7ac1db..ed7b90b 100644
--- a/absl/container/internal/container_memory.h
+++ b/absl/container/internal/container_memory.h
@@ -464,6 +464,56 @@
   }
 };
 
+// Suppress erroneous uninitialized memory errors on GCC. For example, GCC
+// thinks that the call to slot_array() in find_or_prepare_insert() is reading
+// uninitialized memory, but slot_array is only called there when the table is
+// non-empty and this memory is initialized when the table is non-empty.
+#if !defined(__clang__) && defined(__GNUC__)
+#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(x)                    \
+  _Pragma("GCC diagnostic push")                                   \
+      _Pragma("GCC diagnostic ignored \"-Wmaybe-uninitialized\"")  \
+          _Pragma("GCC diagnostic ignored \"-Wuninitialized\"") x; \
+  _Pragma("GCC diagnostic pop")
+#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(x) \
+  ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(return x)
+#else
+#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(x) x
+#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(x) return x
+#endif
+
+// Variadic-argument hash functor that hashes its first argument and ignores
+// the rest. Useful with policy traits.
+template <class Hash>
+struct HashElement {
+  template <class K, class... Args>
+  size_t operator()(const K& key, Args&&...) const {
+    return h(key);
+  }
+  const Hash& h;
+};
+
+// Zero-argument hash functor that hashes a captured key when invoked.
+template <class Hash, class Key>
+struct HashKey {
+  size_t operator()() const { return HashElement<Hash>{hash}(key); }
+  const Hash& hash;
+  const Key& key;
+};
+
+// Variadic-argument equality functor that compares its first argument against
+// a captured key and ignores the rest. Useful with policy traits.
+template <class K1, class KeyEqual>
+struct EqualElement {
+  template <class K2, class... Args>
+  bool operator()(const K2& lhs, Args&&...) const {
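+    // GCC may erroneously report this comparison as reading uninitialized
+    // memory; see the suppression macros above.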
+    ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(eq(lhs, rhs));
+  }
+  const K1& rhs;
+  const KeyEqual& eq;
+};
+
 // Type erased function for computing hash of the slot.
 using HashSlotFn = size_t (*)(const void* hash_fn, void* slot);
 
@@ -472,7 +520,7 @@
 template <class Fn, class T>
 size_t TypeErasedApplyToSlotFn(const void* fn, void* slot) {
   const auto* f = static_cast<const Fn*>(fn);
-  return (*f)(*static_cast<const T*>(slot));
+  return HashElement<Fn>{*f}(*static_cast<const T*>(slot));
 }
 
 // Type erased function to apply `Fn` to data inside of the `*slot_ptr`.
@@ -481,7 +529,7 @@
 size_t TypeErasedDerefAndApplyToSlotFn(const void* fn, void* slot_ptr) {
   const auto* f = static_cast<const Fn*>(fn);
   const T* slot = *static_cast<const T**>(slot_ptr);
-  return (*f)(*slot);
+  return HashElement<Fn>{*f}(*slot);
 }
 
 }  // namespace container_internal
diff --git a/absl/container/internal/hash_policy_traits.h b/absl/container/internal/hash_policy_traits.h
index cd6b42f..1d7c910 100644
--- a/absl/container/internal/hash_policy_traits.h
+++ b/absl/container/internal/hash_policy_traits.h
@@ -22,6 +22,7 @@
 #include <utility>
 
 #include "absl/container/internal/common_policy_traits.h"
+#include "absl/container/internal/container_memory.h"
 #include "absl/meta/type_traits.h"
 
 namespace absl {
@@ -145,8 +146,6 @@
     return P::value(elem);
   }
 
-  using HashSlotFn = size_t (*)(const void* hash_fn, void* slot);
-
   template <class Hash>
   static constexpr HashSlotFn get_hash_slot_fn() {
 // get_hash_slot_fn may return nullptr to signal that non type erased function
@@ -169,15 +168,6 @@
 
  private:
   template <class Hash>
-  struct HashElement {
-    template <class K, class... Args>
-    size_t operator()(const K& key, Args&&...) const {
-      return h(key);
-    }
-    const Hash& h;
-  };
-
-  template <class Hash>
   static size_t hash_slot_fn_non_type_erased(const void* hash_fn, void* slot) {
     return Policy::apply(HashElement<Hash>{*static_cast<const Hash*>(hash_fn)},
                          Policy::element(static_cast<slot_type*>(slot)));
diff --git a/absl/container/internal/raw_hash_set.cc b/absl/container/internal/raw_hash_set.cc
index cea225e..5238c81 100644
--- a/absl/container/internal/raw_hash_set.cc
+++ b/absl/container/internal/raw_hash_set.cc
@@ -19,6 +19,7 @@
 #include <cstddef>
 #include <cstdint>
 #include <cstring>
+#include <utility>
 
 #include "absl/base/attributes.h"
 #include "absl/base/config.h"
@@ -40,20 +41,9 @@
 // Represents a control byte corresponding to a full slot with arbitrary hash.
 constexpr ctrl_t ZeroCtrlT() { return static_cast<ctrl_t>(0); }
 
-// We have space for `growth_info` before a single block of control bytes. A
-// single block of empty control bytes for tables without any slots allocated.
-// This enables removing a branch in the hot path of find(). In order to ensure
-// that the control bytes are aligned to 16, we have 16 bytes before the control
-// bytes even though growth_info only needs 8.
-alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[32] = {
-    ZeroCtrlT(),       ZeroCtrlT(),    ZeroCtrlT(),    ZeroCtrlT(),
-    ZeroCtrlT(),       ZeroCtrlT(),    ZeroCtrlT(),    ZeroCtrlT(),
-    ZeroCtrlT(),       ZeroCtrlT(),    ZeroCtrlT(),    ZeroCtrlT(),
-    ZeroCtrlT(),       ZeroCtrlT(),    ZeroCtrlT(),    ZeroCtrlT(),
-    ctrl_t::kSentinel, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
-    ctrl_t::kEmpty,    ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
-    ctrl_t::kEmpty,    ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
-    ctrl_t::kEmpty,    ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty};
+// A single control byte for default-constructed iterators. We leave it
+// uninitialized because reading this memory is a bug.
+ABSL_DLL ctrl_t kDefaultIterControl;
 
 // We need one full byte followed by a sentinel byte for iterator::operator++ to
 // work. We have a full group after kSentinel to be safe (in case operator++ is
@@ -64,8 +54,6 @@
     ctrl_t::kEmpty, ctrl_t::kEmpty,    ctrl_t::kEmpty, ctrl_t::kEmpty,
     ctrl_t::kEmpty, ctrl_t::kEmpty,    ctrl_t::kEmpty, ctrl_t::kEmpty,
     ctrl_t::kEmpty};
-static_assert(NumControlBytes(SooCapacity()) <= 17,
-              "kSooControl capacity too small");
 
 namespace {
 
@@ -443,6 +431,9 @@
   ctrl_t* ctrl = common.control();
   static constexpr size_t kTwoGroupCapacity = 2 * Group::kWidth - 1;
   if (ABSL_PREDICT_TRUE(capacity <= kTwoGroupCapacity)) {
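+    // Small-capacity tables have no control bytes to reset (see
+    // NumControlBytes).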
+    if (IsSmallCapacity(capacity)) return;
     std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty), Group::kWidth);
     std::memset(ctrl + capacity, static_cast<int8_t>(ctrl_t::kEmpty),
                 Group::kWidth);
@@ -526,9 +515,8 @@
 
 }  // namespace
 
-void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size) {
-  ABSL_SWISSTABLE_ASSERT(IsFull(c.control()[index]) &&
-                         "erasing a dangling iterator");
+void EraseMetaOnly(CommonFields& c, const ctrl_t* ctrl, size_t slot_size) {
+  ABSL_SWISSTABLE_ASSERT(IsFull(*ctrl) && "erasing a dangling iterator");
   c.decrement_size();
   c.infoz().RecordErase();
 
@@ -538,6 +526,8 @@
     return;
   }
 
+  size_t index = static_cast<size_t>(ctrl - c.control());
+
   if (WasNeverFull(c, index)) {
     SetCtrl(c, index, ctrl_t::kEmpty, slot_size);
     c.growth_info().OverwriteFullAsEmpty();
@@ -590,13 +580,15 @@
 
   const auto insert_slot = [&](void* slot) {
     size_t hash = policy.hash_slot(hash_fn, slot);
-    auto target = find_first_non_full(common, hash);
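+    // Small tables have a single slot at offset 0, so no probing is needed.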
+    FindInfo target =
+        common.is_small() ? FindInfo{0, 0} : find_first_non_full(common, hash);
     SetCtrl(common, target.offset, H2(hash), slot_size);
     policy.transfer_n(&common, SlotAddress(new_slots, target.offset, slot_size),
                       slot, 1);
     return target.probe_length;
   };
-  if (old_capacity == 1) {
+  if (IsSmallCapacity(old_capacity)) {
     if (common.size() == 1) insert_slot(old_slots);
     return 0;
   }
@@ -610,6 +601,45 @@
   return total_probe_length;
 }
 
+void ReportGrowthToInfozImpl(CommonFields& common, HashtablezInfoHandle infoz,
+                             size_t hash, size_t total_probe_length,
+                             size_t distance_from_desired) {
+  ABSL_SWISSTABLE_ASSERT(infoz.IsSampled());
+  infoz.RecordStorageChanged(common.size() - 1, common.capacity());
+  infoz.RecordRehash(total_probe_length);
+  infoz.RecordInsert(hash, distance_from_desired);
+  common.set_has_infoz();
+  // TODO(b/413062340): we could potentially store infoz in place of the
+  // control pointer for the capacity 1 case.
+  common.set_infoz(infoz);
+}
+
+// Specialization to avoid passing two zeros from the hot function.
+ABSL_ATTRIBUTE_NOINLINE void ReportSingleGroupTableGrowthToInfoz(
+    CommonFields& common, HashtablezInfoHandle infoz, size_t hash) {
+  ReportGrowthToInfozImpl(common, infoz, hash, /*total_probe_length=*/0,
+                          /*distance_from_desired=*/0);
+}
+
+ABSL_ATTRIBUTE_NOINLINE void ReportGrowthToInfoz(CommonFields& common,
+                                                 HashtablezInfoHandle infoz,
+                                                 size_t hash,
+                                                 size_t total_probe_length,
+                                                 size_t distance_from_desired) {
+  ReportGrowthToInfozImpl(common, infoz, hash, total_probe_length,
+                          distance_from_desired);
+}
+
+ABSL_ATTRIBUTE_NOINLINE void ReportResizeToInfoz(CommonFields& common,
+                                                 HashtablezInfoHandle infoz,
+                                                 size_t total_probe_length) {
+  ABSL_SWISSTABLE_ASSERT(infoz.IsSampled());
+  infoz.RecordStorageChanged(common.size(), common.capacity());
+  infoz.RecordRehash(total_probe_length);
+  common.set_has_infoz();
+  common.set_infoz(infoz);
+}
+
 struct BackingArrayPtrs {
   ctrl_t* ctrl;
   void* slots;
@@ -671,11 +701,8 @@
         CapacityToGrowth(new_capacity));
   }
 
-  if (has_infoz) {
-    common.set_has_infoz();
-    infoz.RecordStorageChanged(common.size(), new_capacity);
-    infoz.RecordRehash(total_probe_length);
-    common.set_infoz(infoz);
+  if (ABSL_PREDICT_FALSE(has_infoz)) {
+    ReportResizeToInfoz(common, infoz, total_probe_length);
   }
 }
 
@@ -1223,32 +1250,88 @@
   }
 }
 
+void IncrementSmallSizeNonSoo(CommonFields& common,
+                              const PolicyFunctions& __restrict policy) {
+  ABSL_SWISSTABLE_ASSERT(common.is_small());
+  common.increment_size();
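+  // Unpoison the single slot so that the caller can write the new element.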
+  SanitizerUnpoisonMemoryRegion(common.slot_array(), policy.slot_size);
+}
+
 void IncrementSmallSize(CommonFields& common,
                         const PolicyFunctions& __restrict policy) {
   ABSL_SWISSTABLE_ASSERT(common.is_small());
   if (policy.soo_enabled) {
     common.set_full_soo();
   } else {
-    common.increment_size();
-    common.growth_info().OverwriteEmptyAsFull();
-    SanitizerUnpoisonMemoryRegion(common.slot_array(), policy.slot_size);
+    IncrementSmallSizeNonSoo(common, policy);
   }
 }
 
-}  // namespace
+std::pair<ctrl_t*, void*> Grow1To3AndPrepareInsert(
+    CommonFields& common, const PolicyFunctions& __restrict policy,
+    absl::FunctionRef<size_t()> get_hash) {
+  // TODO(b/413062340): Refactor to reuse more code with
+  // GrowSooTableToNextCapacityAndPrepareInsert.
+  ABSL_SWISSTABLE_ASSERT(common.capacity() == 1);
+  ABSL_SWISSTABLE_ASSERT(!common.empty());
+  ABSL_SWISSTABLE_ASSERT(!policy.soo_enabled);
+  constexpr size_t kOldCapacity = 1;
+  constexpr size_t kNewCapacity = NextCapacity(kOldCapacity);
+  ctrl_t* old_ctrl = common.control();
+  void* old_slots = common.slot_array();
 
+  common.set_capacity(kNewCapacity);
+  const size_t slot_size = policy.slot_size;
+  const size_t slot_align = policy.slot_align;
+  void* alloc = policy.get_char_alloc(common);
+  HashtablezInfoHandle infoz = common.infoz();
+  const bool has_infoz = infoz.IsSampled();
+
+  const auto [new_ctrl, new_slots] =
+      AllocBackingArray(common, policy, kNewCapacity, has_infoz, alloc);
+  common.set_control</*kGenerateSeed=*/true>(new_ctrl);
+  common.set_slots(new_slots);
+  SanitizerPoisonMemoryRegion(new_slots, kNewCapacity * slot_size);
+
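+  // Hash both the incoming key and the already-stored element to compute the
+  // control bytes for the new capacity-3 table.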
+  const size_t new_hash = get_hash();
+  h2_t new_h2 = H2(new_hash);
+  size_t orig_hash = policy.hash_slot(policy.hash_fn(common), old_slots);
+  size_t offset = Resize1To3NewOffset(new_hash, common.seed());
+  InitializeThreeElementsControlBytes(H2(orig_hash), new_h2, offset, new_ctrl);
+
+  void* old_element_target = NextSlot(new_slots, slot_size);
+  SanitizerUnpoisonMemoryRegion(old_element_target, slot_size);
+  policy.transfer_n(&common, old_element_target, old_slots, 1);
+
+  void* new_element_target_slot = SlotAddress(new_slots, offset, slot_size);
+  SanitizerUnpoisonMemoryRegion(new_element_target_slot, slot_size);
+
+  policy.dealloc(alloc, kOldCapacity, old_ctrl, slot_size, slot_align,
+                 has_infoz);
+  PrepareInsertCommon(common);
+  ABSL_SWISSTABLE_ASSERT(common.size() == 2);
+  GetGrowthInfoFromControl(new_ctrl).InitGrowthLeftNoDeleted(kNewCapacity - 2);
+
+  if (ABSL_PREDICT_FALSE(has_infoz)) {
+    ReportSingleGroupTableGrowthToInfoz(common, infoz, new_hash);
+  }
+  return {new_ctrl + offset, new_element_target_slot};
+}
+
+// Grows to next capacity and prepares insert for the given new_hash.
+// Returns the offset of the new element.
 size_t GrowToNextCapacityAndPrepareInsert(
     CommonFields& common, const PolicyFunctions& __restrict policy,
     size_t new_hash) {
   ABSL_SWISSTABLE_ASSERT(common.growth_left() == 0);
   const size_t old_capacity = common.capacity();
   ABSL_SWISSTABLE_ASSERT(old_capacity > policy.soo_capacity());
+  ABSL_SWISSTABLE_ASSERT(!IsSmallCapacity(old_capacity));
 
   const size_t new_capacity = NextCapacity(old_capacity);
-  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(new_capacity));
-  ABSL_SWISSTABLE_ASSERT(new_capacity > policy.soo_capacity());
-  ABSL_SWISSTABLE_ASSERT(!IsSmallCapacity(new_capacity));
-
   ctrl_t* old_ctrl = common.control();
   void* old_slots = common.slot_array();
 
@@ -1270,25 +1350,15 @@
   FindInfo find_info;
   if (ABSL_PREDICT_TRUE(is_single_group(new_capacity))) {
     size_t offset;
-    if (old_capacity == 1) {
-      size_t orig_hash = policy.hash_slot(policy.hash_fn(common), old_slots);
-      offset = Resize1To3NewOffset(new_hash, common.seed());
-      InitializeThreeElementsControlBytes(H2(orig_hash), new_h2, offset,
-                                          new_ctrl);
-      void* target_slot = SlotAddress(new_slots, offset, slot_size);
-      SanitizerUnpoisonMemoryRegion(target_slot, slot_size);
-    } else {
-      GrowIntoSingleGroupShuffleControlBytes(old_ctrl, old_capacity, new_ctrl,
-                                             new_capacity);
-      // We put the new element either at the beginning or at the end of the
-      // table with approximately equal probability.
-      offset = SingleGroupTableH1(new_hash, common.seed()) & 1
-                   ? 0
-                   : new_capacity - 1;
+    GrowIntoSingleGroupShuffleControlBytes(old_ctrl, old_capacity, new_ctrl,
+                                           new_capacity);
+    // We put the new element either at the beginning or at the end of the
+    // table with approximately equal probability.
+    offset =
+        SingleGroupTableH1(new_hash, common.seed()) & 1 ? 0 : new_capacity - 1;
 
-      ABSL_SWISSTABLE_ASSERT(IsEmpty(new_ctrl[offset]));
-      SetCtrlInSingleGroupTable(common, offset, new_h2, policy.slot_size);
-    }
+    ABSL_SWISSTABLE_ASSERT(IsEmpty(new_ctrl[offset]));
+    SetCtrlInSingleGroupTable(common, offset, new_h2, policy.slot_size);
     find_info = FindInfo{offset, 0};
     // Single group tables have all slots full on resize. So we can transfer
     // all slots without checking the control bytes.
@@ -1310,25 +1380,30 @@
                   common.size());
 
   if (ABSL_PREDICT_FALSE(has_infoz)) {
-    common.set_has_infoz();
-    infoz.RecordStorageChanged(common.size() - 1, new_capacity);
-    infoz.RecordRehash(total_probe_length);
-    infoz.RecordInsert(new_hash, find_info.probe_length);
-    common.set_infoz(infoz);
+    ReportGrowthToInfoz(common, infoz, new_hash, total_probe_length,
+                        find_info.probe_length);
   }
   return find_info.offset;
 }
 
-void SmallEmptyNonSooPrepareInsert(CommonFields& common,
-                                   const PolicyFunctions& __restrict policy,
-                                   absl::FunctionRef<size_t()> get_hash) {
+}  // namespace
+
+std::pair<ctrl_t*, void*> SmallNonSooPrepareInsert(
+    CommonFields& common, const PolicyFunctions& __restrict policy,
+    absl::FunctionRef<size_t()> get_hash) {
   ABSL_SWISSTABLE_ASSERT(common.is_small());
   ABSL_SWISSTABLE_ASSERT(!policy.soo_enabled);
   if (common.capacity() == 1) {
-    IncrementSmallSize(common, policy);
-    return;
+    if (common.empty()) {
+      IncrementSmallSizeNonSoo(common, policy);
+      return {SooControl(), common.slot_array()};
+    } else {
+      return Grow1To3AndPrepareInsert(common, policy, get_hash);
+    }
   }
 
+  // Growing from 0 to 1 capacity.
+  ABSL_SWISSTABLE_ASSERT(common.capacity() == 0);
   constexpr size_t kNewCapacity = 1;
 
   common.set_capacity(kNewCapacity);
@@ -1342,11 +1417,10 @@
   const bool has_infoz = infoz.IsSampled();
   void* alloc = policy.get_char_alloc(common);
 
-  // TODO(b/413062340): don't allocate control bytes for capacity 1 tables. We
-  // don't use the control bytes in this case.
   const auto [new_ctrl, new_slots] =
       AllocBackingArray(common, policy, kNewCapacity, has_infoz, alloc);
-  common.set_control</*kGenerateSeed=*/true>(new_ctrl);
+  // In small tables the seed is not needed.
+  common.set_control</*kGenerateSeed=*/false>(new_ctrl);
   common.set_slots(new_slots);
 
   static_assert(NextCapacity(0) == 1);
@@ -1356,14 +1430,10 @@
   // worth it.
   GetGrowthInfoFromControl(new_ctrl).InitGrowthLeftNoDeleted(0);
 
-  if (ABSL_PREDICT_TRUE(!has_infoz)) return;
-  // TODO(b/413062340): we could potentially store infoz in place of the control
-  // pointer for the capacity 1 case.
-  common.set_has_infoz();
-  infoz.RecordStorageChanged(/*size=*/0, kNewCapacity);
-  infoz.RecordRehash(/*total_probe_length=*/0);
-  infoz.RecordInsert(get_hash(), /*distance_from_desired=*/0);
-  common.set_infoz(infoz);
+  if (ABSL_PREDICT_FALSE(has_infoz)) {
+    ReportSingleGroupTableGrowthToInfoz(common, infoz, get_hash());
+  }
+  return {SooControl(), new_slots};
 }
 
 namespace {
@@ -1516,6 +1586,17 @@
   common.infoz().RecordReservation(new_size);
 }
 
+// As `ResizeFullSooTableToNextCapacity`, except that we also force the SOO
+// table to be sampled. SOO tables need to switch from SOO to heap in order to
+// store the infoz. No-op if sampling is disabled or not possible.
+void GrowFullSooTableToNextCapacityForceSampling(
+    CommonFields& common, const PolicyFunctions& __restrict policy) {
+  AssertFullSoo(common, policy);
+  ResizeFullSooTable(
+      common, policy, NextCapacity(SooCapacity()),
+      ResizeFullSooTableSamplingMode::kForceSampleNoResizeIfUnsampled);
+}
+
 }  // namespace
 
 void* GetRefForEmptyClass(CommonFields& common) {
@@ -1600,20 +1681,14 @@
   common.set_control</*kGenerateSeed=*/false>(new_ctrl);
   common.set_slots(new_slots);
 
-  common.infoz().RecordInsert(new_hash, /*distance_from_desired=*/0);
+  // A full SOO table can't be sampled: had the SOO table been sampled, it
+  // would already have been resized to the next capacity.
+  ABSL_SWISSTABLE_ASSERT(!common.infoz().IsSampled());
   SanitizerUnpoisonMemoryRegion(SlotAddress(new_slots, offset, slot_size),
                                 slot_size);
   return offset;
 }
 
-void GrowFullSooTableToNextCapacityForceSampling(
-    CommonFields& common, const PolicyFunctions& __restrict policy) {
-  AssertFullSoo(common, policy);
-  ResizeFullSooTable(
-      common, policy, NextCapacity(SooCapacity()),
-      ResizeFullSooTableSamplingMode::kForceSampleNoResizeIfUnsampled);
-}
-
 void Rehash(CommonFields& common, const PolicyFunctions& __restrict policy,
             size_t n) {
   const size_t cap = common.capacity();
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index f5fdf66..f9c9b0b 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -380,10 +380,7 @@
   return false;
 }
 
-// See definition comment for why this is size 32.
-// TODO(b/413062340): we can probably reduce this to 16 now that it's only used
-// for default-constructed iterators.
-ABSL_DLL extern const ctrl_t kEmptyGroup[32];
+ABSL_DLL extern ctrl_t kDefaultIterControl;
 
 // We use these sentinel capacity values in debug mode to indicate different
 // classes of bugs.
@@ -397,13 +394,9 @@
   kSelfMovedFrom,
 };
 
-// Returns a pointer to a control byte group that can be used by
-// default-constructed iterators.
-inline ctrl_t* EmptyGroup() {
-  // Const must be cast away here; no uses of this function will actually write
-  // to it because it is only used for default-constructed iterators.
-  return const_cast<ctrl_t*>(kEmptyGroup + 16);
-}
+// Returns a pointer to a control byte that can be used by default-constructed
+// iterators. We don't expect this pointer to be dereferenced.
+inline ctrl_t* DefaultIterControl() { return &kDefaultIterControl; }
 
 // For use in SOO iterators.
 // TODO(b/289225379): we could potentially get rid of this by adding an is_soo
@@ -796,7 +789,8 @@
 
 // Returns the number of control bytes including cloned.
 constexpr size_t NumControlBytes(size_t capacity) {
-  return capacity + 1 + NumClonedBytes();
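+  // Small-capacity tables store no control bytes.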
+  return IsSmallCapacity(capacity) ? 0 : capacity + 1 + NumClonedBytes();
 }
 
 // Computes the offset from the start of the backing allocation of control.
@@ -852,23 +845,6 @@
 
 struct HashtableFreeFunctionsAccess;
 
-// Suppress erroneous uninitialized memory errors on GCC. For example, GCC
-// thinks that the call to slot_array() in find_or_prepare_insert() is reading
-// uninitialized memory, but slot_array is only called there when the table is
-// non-empty and this memory is initialized when the table is non-empty.
-#if !defined(__clang__) && defined(__GNUC__)
-#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(x)                    \
-  _Pragma("GCC diagnostic push")                                   \
-      _Pragma("GCC diagnostic ignored \"-Wmaybe-uninitialized\"")  \
-          _Pragma("GCC diagnostic ignored \"-Wuninitialized\"") x; \
-  _Pragma("GCC diagnostic pop")
-#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(x) \
-  ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(return x)
-#else
-#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(x) x
-#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(x) return x
-#endif
-
 // This allows us to work around an uninitialized memory warning when
 // constructing begin() iterators in empty hashtables.
 template <typename T>
@@ -1269,7 +1245,7 @@
   if (ABSL_PREDICT_FALSE(ctrl == nullptr)) {
     ABSL_RAW_LOG(FATAL, "%s called on end() iterator.", operation);
   }
-  if (ABSL_PREDICT_FALSE(ctrl == EmptyGroup())) {
+  if (ABSL_PREDICT_FALSE(ctrl == DefaultIterControl())) {
     ABSL_RAW_LOG(FATAL, "%s called on default-constructed iterator.",
                  operation);
   }
@@ -1304,7 +1280,7 @@
                                        const GenerationType* generation_ptr) {
   if (!SwisstableDebugEnabled()) return;
   const bool ctrl_is_valid_for_comparison =
-      ctrl == nullptr || ctrl == EmptyGroup() || IsFull(*ctrl);
+      ctrl == nullptr || ctrl == DefaultIterControl() || IsFull(*ctrl);
   if (SwisstableGenerationsEnabled()) {
     if (ABSL_PREDICT_FALSE(generation != *generation_ptr)) {
       ABSL_RAW_LOG(FATAL,
@@ -1370,8 +1346,8 @@
     }
   };
 
-  const bool a_is_default = ctrl_a == EmptyGroup();
-  const bool b_is_default = ctrl_b == EmptyGroup();
+  const bool a_is_default = ctrl_a == DefaultIterControl();
+  const bool b_is_default = ctrl_b == DefaultIterControl();
   if (a_is_default && b_is_default) return;
   fail_if(a_is_default != b_is_default,
           "Comparing default-constructed hashtable iterator with a "
@@ -1816,22 +1792,13 @@
                                                   size_t new_hash,
                                                   ctrl_t soo_slot_ctrl);
 
-// As `ResizeFullSooTableToNextCapacity`, except that we also force the SOO
-// table to be sampled. SOO tables need to switch from SOO to heap in order to
-// store the infoz. No-op if sampling is disabled or not possible.
-void GrowFullSooTableToNextCapacityForceSampling(CommonFields& common,
-                                                 const PolicyFunctions& policy);
-
-// Grows to next capacity and prepares insert for the given new_hash.
-// Returns the offset of the new element.
-size_t GrowToNextCapacityAndPrepareInsert(CommonFields& common,
-                                          const PolicyFunctions& policy,
-                                          size_t new_hash);
-// When growing from capacity 0 to 1, we only need the hash if the table ends up
-// being sampled so don't compute it unless needed.
-void SmallEmptyNonSooPrepareInsert(CommonFields& common,
-                                   const PolicyFunctions& policy,
-                                   absl::FunctionRef<size_t()> get_hash);
+// PrepareInsert for small tables (is_small()==true).
+// Returns the new control byte and the new slot.
+// The hash is only computed if the table is sampled or grows to a large size
+// (is_small()==false).
+std::pair<ctrl_t*, void*> SmallNonSooPrepareInsert(
+    CommonFields& common, const PolicyFunctions& policy,
+    absl::FunctionRef<size_t()> get_hash);
 
 // Resizes table with allocated slots and change the table seed.
 // Tables with SOO enabled must have capacity > policy.soo_capacity.
@@ -1847,7 +1814,7 @@
                        void* alloc, bool reuse, bool soo_enabled);
 
 // Type-erased version of raw_hash_set::erase_meta_only.
-void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size);
+void EraseMetaOnly(CommonFields& c, const ctrl_t* ctrl, size_t slot_size);
 
 // For trivially relocatable types we use memcpy directly. This allows us to
 // share the same function body for raw_hash_set instantiations that have the
@@ -2110,9 +2077,9 @@
     ctrl_t* control() const { return ctrl_; }
     slot_type* slot() const { return slot_; }
 
-    // We use EmptyGroup() for default-constructed iterators so that they can
-    // be distinguished from end iterators, which have nullptr ctrl_.
-    ctrl_t* ctrl_ = EmptyGroup();
+    // We use DefaultIterControl() for default-constructed iterators so that
+    // they can be distinguished from end iterators, which have nullptr ctrl_.
+    ctrl_t* ctrl_ = DefaultIterControl();
     // To avoid uninitialized member warnings, put slot_ in an anonymous union.
     // The member is not initialized on singleton and end iterators.
     union {
@@ -2971,24 +2938,6 @@
     const raw_hash_set& s;
   };
 
-  struct HashElement {
-    template <class K, class... Args>
-    size_t operator()(const K& key, Args&&...) const {
-      return h(key);
-    }
-    const hasher& h;
-  };
-
-  template <class K1>
-  struct EqualElement {
-    template <class K2, class... Args>
-    bool operator()(const K2& lhs, Args&&...) const {
-      ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(eq(lhs, rhs));
-    }
-    const K1& rhs;
-    const key_equal& eq;
-  };
-
   struct EmplaceDecomposable {
     template <class K, class... Args>
     std::pair<iterator, bool> operator()(const K& key, Args&&... args) const {
@@ -3043,10 +2992,7 @@
   template <class K = key_type>
   iterator find_small(const key_arg<K>& key) {
     ABSL_SWISSTABLE_ASSERT(is_small());
-    return empty() || !PolicyTraits::apply(EqualElement<K>{key, eq_ref()},
-                                           PolicyTraits::element(single_slot()))
-               ? end()
-               : single_iterator();
+    return empty() || !equal_to(key, single_slot()) ? end() : single_iterator();
   }
 
   template <class K = key_type>
@@ -3061,9 +3007,7 @@
 #endif
       Group g{ctrl + seq.offset()};
       for (uint32_t i : g.Match(h2)) {
-        if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
-                EqualElement<K>{key, eq_ref()},
-                PolicyTraits::element(slot_array() + seq.offset(i)))))
+        if (ABSL_PREDICT_TRUE(equal_to(key, slot_array() + seq.offset(i))))
           return iterator_at(seq.offset(i));
       }
       if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end();
@@ -3140,22 +3084,29 @@
       common().set_empty_soo();
       return;
     }
-    EraseMetaOnly(common(), static_cast<size_t>(it.control() - control()),
-                  sizeof(slot_type));
+    EraseMetaOnly(common(), it.control(), sizeof(slot_type));
   }
 
   template <class K>
-  size_t hash_of(const K& key) const {
-    return hash_ref()(key);
+  ABSL_ATTRIBUTE_ALWAYS_INLINE bool equal_to(const K& key,
+                                             slot_type* slot) const {
+    return PolicyTraits::apply(EqualElement<K, key_equal>{key, eq_ref()},
+                               PolicyTraits::element(slot));
   }
-  size_t hash_of(slot_type* slot) const {
-    return PolicyTraits::apply(HashElement{hash_ref()},
+  template <class K>
+  ABSL_ATTRIBUTE_ALWAYS_INLINE size_t hash_of(const K& key) const {
+    return HashElement<hasher>{hash_ref()}(key);
+  }
+  ABSL_ATTRIBUTE_ALWAYS_INLINE size_t hash_of(slot_type* slot) const {
+    return PolicyTraits::apply(HashElement<hasher>{hash_ref()},
                                PolicyTraits::element(slot));
   }
 
   // Casting directly from e.g. char* to slot_type* can cause compilation errors
   // on objective-C. This function converts to void* first, avoiding the issue.
-  static slot_type* to_slot(void* buf) { return static_cast<slot_type*>(buf); }
+  static ABSL_ATTRIBUTE_ALWAYS_INLINE slot_type* to_slot(void* buf) {
+    return static_cast<slot_type*>(buf);
+  }
 
   // Requires that lhs does not have a full SOO slot.
   static void move_common(bool rhs_is_full_soo, CharAlloc& rhs_alloc,
@@ -3262,45 +3213,50 @@
   }
 
   template <class K>
-  std::pair<iterator, bool> find_or_prepare_insert_small(const K& key) {
-    ABSL_SWISSTABLE_ASSERT(is_small());
-    [[maybe_unused]] ctrl_t soo_slot_ctrl;
+  std::pair<iterator, bool> find_or_prepare_insert_soo(const K& key) {
+    ABSL_SWISSTABLE_ASSERT(is_soo());
+    ctrl_t soo_slot_ctrl;
     if (empty()) {
-      if (!SooEnabled()) {
-        SmallEmptyNonSooPrepareInsert(common(), GetPolicyFunctions(),
-                                      [&] { return hash_of(key); });
-        return {single_iterator(), true};
-      }
       if (!should_sample_soo()) {
         common().set_full_soo();
         return {single_iterator(), true};
       }
       soo_slot_ctrl = ctrl_t::kEmpty;
-    } else if (PolicyTraits::apply(EqualElement<K>{key, eq_ref()},
-                                   PolicyTraits::element(single_slot()))) {
+    } else if (equal_to(key, single_slot())) {
       return {single_iterator(), false};
-    } else if constexpr (SooEnabled()) {
+    } else {
       soo_slot_ctrl = static_cast<ctrl_t>(H2(hash_of(single_slot())));
     }
     ABSL_SWISSTABLE_ASSERT(capacity() == 1);
     const size_t hash = hash_of(key);
-    size_t index;
-    if constexpr (SooEnabled()) {
-      constexpr bool kUseMemcpy =
-          PolicyTraits::transfer_uses_memcpy() && SooEnabled();
-      index = GrowSooTableToNextCapacityAndPrepareInsert<
-          kUseMemcpy ? OptimalMemcpySizeForSooSlotTransfer(sizeof(slot_type))
-                     : 0,
-          kUseMemcpy>(common(), GetPolicyFunctions(), hash, soo_slot_ctrl);
-    } else {
-      // TODO(b/413062340): add specialized function for growing from 1 to 3.
-      index = GrowToNextCapacityAndPrepareInsert(common(), GetPolicyFunctions(),
-                                                 hash);
-    }
+    constexpr bool kUseMemcpy =
+        PolicyTraits::transfer_uses_memcpy() && SooEnabled();
+    size_t index = GrowSooTableToNextCapacityAndPrepareInsert<
+        kUseMemcpy ? OptimalMemcpySizeForSooSlotTransfer(sizeof(slot_type)) : 0,
+        kUseMemcpy>(common(), GetPolicyFunctions(), hash, soo_slot_ctrl);
     return {iterator_at(index), true};
   }
 
   template <class K>
+  std::pair<iterator, bool> find_or_prepare_insert_small(const K& key) {
+    ABSL_SWISSTABLE_ASSERT(is_small());
+    if constexpr (SooEnabled()) {
+      return find_or_prepare_insert_soo(key);
+    }
+    if (!empty()) {
+      if (equal_to(key, single_slot())) {
+        return {single_iterator(), false};
+      }
+    }
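+    // HashKey defers the hash computation; SmallNonSooPrepareInsert invokes
+    // it only if the hash turns out to be needed.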
+    return {iterator_at_ptr(
+                SmallNonSooPrepareInsert(common(), GetPolicyFunctions(),
+                                         HashKey<hasher, K>{hash_ref(), key})),
+            true};
+  }
+
+  template <class K>
   std::pair<iterator, bool> find_or_prepare_insert_large(const K& key) {
     ABSL_SWISSTABLE_ASSERT(!is_soo());
     prefetch_heap_block();
@@ -3314,9 +3268,7 @@
 #endif
       Group g{ctrl + seq.offset()};
       for (uint32_t i : g.Match(h2)) {
-        if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
-                EqualElement<K>{key, eq_ref()},
-                PolicyTraits::element(slot_array() + seq.offset(i)))))
+        if (ABSL_PREDICT_TRUE(equal_to(key, slot_array() + seq.offset(i))))
           return {iterator_at(seq.offset(i)), false};
       }
       auto mask_empty = g.MaskEmpty();
@@ -3391,16 +3343,11 @@
 
     const size_t hash_of_arg = hash_of(key);
     const auto assert_consistent = [&](const ctrl_t*, void* slot) {
-      const value_type& element =
-          PolicyTraits::element(static_cast<slot_type*>(slot));
-      const bool is_key_equal =
-          PolicyTraits::apply(EqualElement<K>{key, eq_ref()}, element);
+      const bool is_key_equal = equal_to(key, to_slot(slot));
       if (!is_key_equal) return;
 
-      const size_t hash_of_slot =
-          PolicyTraits::apply(HashElement{hash_ref()}, element);
       ABSL_ATTRIBUTE_UNUSED const bool is_hash_equal =
-          hash_of_arg == hash_of_slot;
+          hash_of_arg == hash_of(to_slot(slot));
       assert((!is_key_equal || is_hash_equal) &&
              "eq(k1, k2) must imply that hash(k1) == hash(k2). "
              "hash/eq functors are inconsistent.");
@@ -3450,6 +3397,10 @@
   const_iterator iterator_at(size_t i) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
     return const_cast<raw_hash_set*>(this)->iterator_at(i);
   }
+  iterator iterator_at_ptr(std::pair<ctrl_t*, void*> ptrs)
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return {ptrs.first, to_slot(ptrs.second), common().generation_ptr()};
+  }
 
   reference unchecked_deref(iterator it) { return it.unchecked_deref(); }
 
@@ -3691,8 +3642,7 @@
           auto* slot = static_cast<SlotType*>(slot_void);
           if (pred(Set::PolicyTraits::element(slot))) {
             c->destroy(slot);
-            EraseMetaOnly(c->common(), static_cast<size_t>(ctrl - c->control()),
-                          sizeof(*slot));
+            EraseMetaOnly(c->common(), ctrl, sizeof(*slot));
             ++num_deleted;
           }
         });
@@ -3758,10 +3708,7 @@
     while (true) {
       container_internal::Group g{ctrl + seq.offset()};
       for (uint32_t i : g.Match(h2)) {
-        if (Traits::apply(
-                typename Set::template EqualElement<typename Set::key_type>{
-                    key, set.eq_ref()},
-                Traits::element(set.slot_array() + seq.offset(i))))
+        if (set.equal_to(key, set.slot_array() + seq.offset(i)))
           return num_probes;
         ++num_probes;
       }
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index a5cbd44..6da3360 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -423,10 +423,6 @@
   EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).TrailingZeros()), 7);
 }
 
-TEST(Group, EmptyGroup) {
-  for (h2_t h = 0; h != 128; ++h) EXPECT_FALSE(Group{EmptyGroup()}.Match(h));
-}
-
 TEST(Group, Match) {
   if (Group::kWidth == 16) {
     ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted,  CtrlT(3),
@@ -1184,7 +1180,7 @@
     t.insert(i);
     ASSERT_EQ(t.size(), i + 1);
     for (int j = 0; j < i + 1; ++j) {
-      EXPECT_TRUE(t.find(j) != t.end());
+      ASSERT_TRUE(t.find(j) != t.end());
       EXPECT_EQ(*t.find(j), j);
     }
   }
@@ -1207,7 +1203,7 @@
           t.reserve(target_size);
         }
         for (size_t i = 0; i < source_size; ++i) {
-          EXPECT_TRUE(t.find(static_cast<int>(i)) != t.end());
+          ASSERT_TRUE(t.find(static_cast<int>(i)) != t.end());
           EXPECT_EQ(*t.find(static_cast<int>(i)), static_cast<int>(i));
         }
       }
@@ -1232,7 +1228,7 @@
             << "rehash(0) must resize to the minimum capacity";
       }
       for (size_t i = 0; i < inserted_count; ++i) {
-        EXPECT_TRUE(t.find(static_cast<int>(i)) != t.end());
+        ASSERT_TRUE(t.find(static_cast<int>(i)) != t.end());
         EXPECT_EQ(*t.find(static_cast<int>(i)), static_cast<int>(i));
       }
     }
@@ -4267,8 +4263,8 @@
 // 5. Finally we will catch up and go to overflow codepath.
 TEST(Table, GrowExtremelyLargeTable) {
   constexpr size_t kTargetCapacity =
-#if defined(__wasm__) || defined(__asmjs__)
-      NextCapacity(ProbedItem4Bytes::kMaxNewCapacity);  // OOMs on WASM.
+#if defined(__wasm__) || defined(__asmjs__) || defined(__i386__)
+      NextCapacity(ProbedItem4Bytes::kMaxNewCapacity);  // OOMs on WASM, 32-bit.
 #else
       NextCapacity(ProbedItem8Bytes::kMaxNewCapacity);
 #endif
diff --git a/absl/container/node_hash_map.h b/absl/container/node_hash_map.h
index 8aed18b..5f6be95 100644
--- a/absl/container/node_hash_map.h
+++ b/absl/container/node_hash_map.h
@@ -110,18 +110,18 @@
 //   absl::node_hash_map<std::string, std::string> ducks =
 //     {{"a", "huey"}, {"b", "dewey"}, {"c", "louie"}};
 //
-//  // Insert a new element into the node hash map
-//  ducks.insert({"d", "donald"}};
+//   // Insert a new element into the node hash map
+//   ducks.insert({"d", "donald"}};
 //
-//  // Force a rehash of the node hash map
-//  ducks.rehash(0);
+//   // Force a rehash of the node hash map
+//   ducks.rehash(0);
 //
-//  // Find the element with the key "b"
-//  std::string search_key = "b";
-//  auto result = ducks.find(search_key);
-//  if (result != ducks.end()) {
-//    std::cout << "Result: " << result->second << std::endl;
-//  }
+//   // Find the element with the key "b"
+//   std::string search_key = "b";
+//   auto result = ducks.find(search_key);
+//   if (result != ducks.end()) {
+//     std::cout << "Result: " << result->second << std::endl;
+//   }
 template <class Key, class Value, class Hash = DefaultHashContainerHash<Key>,
           class Eq = DefaultHashContainerEq<Key>,
           class Alloc = std::allocator<std::pair<const Key, Value>>>
@@ -153,9 +153,9 @@
   //
   // * Copy assignment operator
   //
-  //  // Hash functor and Comparator are copied as well
-  //  absl::node_hash_map<int, std::string> map4;
-  //  map4 = map3;
+  //   // Hash functor and Comparator are copied as well
+  //   absl::node_hash_map<int, std::string> map4;
+  //   map4 = map3;
   //
   // * Move constructor
   //
diff --git a/absl/container/node_hash_set.h b/absl/container/node_hash_set.h
index 6240e2d..127c640 100644
--- a/absl/container/node_hash_set.h
+++ b/absl/container/node_hash_set.h
@@ -108,16 +108,16 @@
 //   absl::node_hash_set<std::string> ducks =
 //     {"huey", "dewey", "louie"};
 //
-//  // Insert a new element into the node hash set
-//  ducks.insert("donald");
+//   // Insert a new element into the node hash set
+//   ducks.insert("donald");
 //
-//  // Force a rehash of the node hash set
-//  ducks.rehash(0);
+//   // Force a rehash of the node hash set
+//   ducks.rehash(0);
 //
-//  // See if "dewey" is present
-//  if (ducks.contains("dewey")) {
-//    std::cout << "We found dewey!" << std::endl;
-//  }
+//   // See if "dewey" is present
+//   if (ducks.contains("dewey")) {
+//     std::cout << "We found dewey!" << std::endl;
+//   }
 template <class T, class Hash = DefaultHashContainerHash<T>,
           class Eq = DefaultHashContainerEq<T>, class Alloc = std::allocator<T>>
 class ABSL_ATTRIBUTE_OWNER node_hash_set
@@ -147,9 +147,9 @@
   //
   // * Copy assignment operator
   //
-  //  // Hash functor and Comparator are copied as well
-  //  absl::node_hash_set<std::string> set4;
-  //  set4 = set3;
+  //   // Hash functor and Comparator are copied as well
+  //   absl::node_hash_set<std::string> set4;
+  //   set4 = set3;
   //
   // * Move constructor
   //
diff --git a/absl/hash/BUILD.bazel b/absl/hash/BUILD.bazel
index b2ffcd0..8176cd9 100644
--- a/absl/hash/BUILD.bazel
+++ b/absl/hash/BUILD.bazel
@@ -82,6 +82,8 @@
     ],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
+    # TODO(b/417700722): Fix HashValueTest.PointerAlignment reporting more collisions under ubsan.
+    tags = ["noubsan"],
     deps = [
         ":hash",
         ":hash_testing",
diff --git a/absl/hash/internal/hash.cc b/absl/hash/internal/hash.cc
index 9abace5..b185a0a 100644
--- a/absl/hash/internal/hash.cc
+++ b/absl/hash/internal/hash.cc
@@ -20,7 +20,7 @@
 
 #include "absl/base/attributes.h"
 #include "absl/base/config.h"
-#include "absl/hash/internal/low_level_hash.h"
+#include "absl/hash/internal/city.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -44,7 +44,9 @@
 uint64_t MixingHashState::CombineLargeContiguousImpl64(
     uint64_t state, const unsigned char* first, size_t len) {
   while (len >= PiecewiseChunkSize()) {
-    state = Mix(state ^ Hash64(first, PiecewiseChunkSize()), kMul);
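+    // Thread the running state through Hash64 as its seed instead of mixing
+    // the chunk hash into the state afterwards.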
+    state = Hash64(first, PiecewiseChunkSize(), state);
     len -= PiecewiseChunkSize();
     first += PiecewiseChunkSize();
   }
@@ -55,11 +55,6 @@
 
 ABSL_CONST_INIT const void* const MixingHashState::kSeed = &kSeed;
 
-uint64_t MixingHashState::LowLevelHashImpl(const unsigned char* data,
-                                           size_t len) {
-  return LowLevelHashLenGt32(data, len, Seed(), &kStaticRandomData[0]);
-}
-
 }  // namespace hash_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/absl/hash/internal/hash.h b/absl/hash/internal/hash.h
index 7c90ab4..f400c7b 100644
--- a/absl/hash/internal/hash.h
+++ b/absl/hash/internal/hash.h
@@ -80,6 +80,7 @@
 #include "absl/base/port.h"
 #include "absl/container/fixed_array.h"
 #include "absl/hash/internal/city.h"
+#include "absl/hash/internal/low_level_hash.h"
 #include "absl/hash/internal/weakly_mixed_integer.h"
 #include "absl/meta/type_traits.h"
 #include "absl/numeric/bits.h"
@@ -1074,12 +1075,5 @@
   using uint128 = absl::uint128;
 #endif  // ABSL_HAVE_INTRINSIC_INT128
 
-  // Random data taken from the hexadecimal digits of Pi's fractional component.
-  // https://en.wikipedia.org/wiki/Nothing-up-my-sleeve_number
-  ABSL_CACHELINE_ALIGNED static constexpr uint64_t kStaticRandomData[] = {
-      0x243f'6a88'85a3'08d3, 0x1319'8a2e'0370'7344, 0xa409'3822'299f'31d0,
-      0x082e'fa98'ec4e'6c89, 0x4528'21e6'38d0'1377,
-  };
-
   static constexpr uint64_t kMul = uint64_t{0xdcb22ca68cb134ed};
 
@@ -1329,16 +1323,14 @@
     return absl::gbswap_64(n * kMul);
   }
 
-  // An extern to avoid bloat on a direct call to LowLevelHash() with fixed
-  // values for both the seed and salt parameters.
-  static uint64_t LowLevelHashImpl(const unsigned char* data, size_t len);
-
   ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Hash64(const unsigned char* data,
-                                                      size_t len) {
+                                                      size_t len,
+                                                      uint64_t state) {
 #ifdef ABSL_HAVE_INTRINSIC_INT128
-    return LowLevelHashImpl(data, len);
+    return LowLevelHashLenGt32(data, len, state);
 #else
-    return hash_internal::CityHash64(reinterpret_cast<const char*>(data), len);
+    return hash_internal::CityHash64WithSeed(
+        reinterpret_cast<const char*>(data), len, state);
 #endif
   }
 
@@ -1378,12 +1370,13 @@
 inline uint64_t MixingHashState::CombineContiguousImpl(
     uint64_t state, const unsigned char* first, size_t len,
     std::integral_constant<int, 4> /* sizeof_size_t */) {
-  // For large values we use CityHash, for small ones we just use a
-  // multiplicative hash.
+  // For large values we use CityHash, for small ones we use a custom
+  // low-latency hash.
   if (len <= 8) {
     return CombineSmallContiguousImpl(state, first, len);
   }
   if (ABSL_PREDICT_TRUE(len <= PiecewiseChunkSize())) {
+    // TODO(b/417141985): expose and use CityHash32WithSeed.
     return Mix(state ^ hash_internal::CityHash32(
                            reinterpret_cast<const char*>(first), len),
                kMul);
@@ -1396,7 +1389,7 @@
     uint64_t state, const unsigned char* first, size_t len,
     std::integral_constant<int, 8> /* sizeof_size_t */) {
   // For large values we use LowLevelHash or CityHash depending on the platform,
-  // for small ones we just use a multiplicative hash.
+  // for small ones we use a custom low-latency hash.
   if (len <= 8) {
     return CombineSmallContiguousImpl(state, first, len);
   }
@@ -1407,7 +1400,7 @@
     return CombineContiguousImpl17to32(state, first, len);
   }
   if (ABSL_PREDICT_TRUE(len <= PiecewiseChunkSize())) {
-    return Mix(state ^ Hash64(first, len), kMul);
+    return Hash64(first, len, state);
   }
   return CombineLargeContiguousImpl64(state, first, len);
 }
diff --git a/absl/hash/internal/low_level_hash.cc b/absl/hash/internal/low_level_hash.cc
index 1a107ec..575cf74 100644
--- a/absl/hash/internal/low_level_hash.cc
+++ b/absl/hash/internal/low_level_hash.cc
@@ -28,29 +28,31 @@
 ABSL_NAMESPACE_BEGIN
 namespace hash_internal {
 namespace {
+
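+// Multiplies v0 by v1 and folds the 128-bit product by XORing its two halves.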
 uint64_t Mix(uint64_t v0, uint64_t v1) {
   absl::uint128 p = v0;
   p *= v1;
   return absl::Uint128Low64(p) ^ absl::Uint128High64(p);
 }
-uint64_t Mix32Bytes(const uint8_t* ptr, uint64_t current_state,
-                    const uint64_t salt[5]) {
+
+uint64_t Mix32Bytes(const uint8_t* ptr, uint64_t current_state) {
   uint64_t a = absl::base_internal::UnalignedLoad64(ptr);
   uint64_t b = absl::base_internal::UnalignedLoad64(ptr + 8);
   uint64_t c = absl::base_internal::UnalignedLoad64(ptr + 16);
   uint64_t d = absl::base_internal::UnalignedLoad64(ptr + 24);
 
-  uint64_t cs0 = Mix(a ^ salt[1], b ^ current_state);
-  uint64_t cs1 = Mix(c ^ salt[2], d ^ current_state);
+  uint64_t cs0 = Mix(a ^ kStaticRandomData[1], b ^ current_state);
+  uint64_t cs1 = Mix(c ^ kStaticRandomData[2], d ^ current_state);
   return cs0 ^ cs1;
 }
+
 }  // namespace
 
-uint64_t LowLevelHashLenGt32(const void* data, size_t len, uint64_t seed,
-                             const uint64_t salt[5]) {
+uint64_t LowLevelHashLenGt32(const void* data, size_t len, uint64_t seed) {
   assert(len > 32);
   const uint8_t* ptr = static_cast<const uint8_t*>(data);
-  uint64_t current_state = seed ^ salt[0] ^ len;
+  uint64_t current_state = seed ^ kStaticRandomData[0] ^ len;
   const uint8_t* last_32_ptr = ptr + len - 32;
 
   if (len > 64) {
@@ -74,11 +75,11 @@
       uint64_t g = absl::base_internal::UnalignedLoad64(ptr + 48);
       uint64_t h = absl::base_internal::UnalignedLoad64(ptr + 56);
 
-      current_state = Mix(a ^ salt[1], b ^ current_state);
-      duplicated_state0 = Mix(c ^ salt[2], d ^ duplicated_state0);
+      current_state = Mix(a ^ kStaticRandomData[1], b ^ current_state);
+      duplicated_state0 = Mix(c ^ kStaticRandomData[2], d ^ duplicated_state0);
 
-      duplicated_state1 = Mix(e ^ salt[3], f ^ duplicated_state1);
-      duplicated_state2 = Mix(g ^ salt[4], h ^ duplicated_state2);
+      duplicated_state1 = Mix(e ^ kStaticRandomData[3], f ^ duplicated_state1);
+      duplicated_state2 = Mix(g ^ kStaticRandomData[4], h ^ duplicated_state2);
 
       ptr += 64;
       len -= 64;
@@ -91,13 +92,13 @@
   // We now have a data `ptr` with at most 64 bytes and the current state
   // of the hashing state machine stored in current_state.
   if (len > 32) {
-    current_state = Mix32Bytes(ptr, current_state, salt);
+    current_state = Mix32Bytes(ptr, current_state);
   }
 
   // We now have a data `ptr` with at most 32 bytes and the current state
   // of the hashing state machine stored in current_state. But we can
   // safely read from `ptr + len - 32`.
-  return Mix32Bytes(last_32_ptr, current_state, salt);
+  return Mix32Bytes(last_32_ptr, current_state);
 }
 
 }  // namespace hash_internal
diff --git a/absl/hash/internal/low_level_hash.h b/absl/hash/internal/low_level_hash.h
index 49e9ec4..bb2821c 100644
--- a/absl/hash/internal/low_level_hash.h
+++ b/absl/hash/internal/low_level_hash.h
@@ -29,19 +29,26 @@
 #include <stdlib.h>
 
 #include "absl/base/config.h"
+#include "absl/base/optimization.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace hash_internal {
 
+// Random data taken from the hexadecimal digits of Pi's fractional component.
+// https://en.wikipedia.org/wiki/Nothing-up-my-sleeve_number
+ABSL_CACHELINE_ALIGNED static constexpr uint64_t kStaticRandomData[] = {
+    0x243f'6a88'85a3'08d3, 0x1319'8a2e'0370'7344, 0xa409'3822'299f'31d0,
+    0x082e'fa98'ec4e'6c89, 0x4528'21e6'38d0'1377,
+};
+
 // Hash function for a byte array. A 64-bit seed and a set of five 64-bit
 // integers are hashed into the result. The length must be greater than 32.
 //
 // To allow all hashable types (including string_view and Span) to depend on
 // this algorithm, we keep the API low-level, with as few dependencies as
 // possible.
-uint64_t LowLevelHashLenGt32(const void* data, size_t len, uint64_t seed,
-                             const uint64_t salt[5]);
+uint64_t LowLevelHashLenGt32(const void* data, size_t len, uint64_t seed);
 
 }  // namespace hash_internal
 ABSL_NAMESPACE_END
diff --git a/absl/hash/internal/low_level_hash_test.cc b/absl/hash/internal/low_level_hash_test.cc
index d370dc7..fcfa6eb 100644
--- a/absl/hash/internal/low_level_hash_test.cc
+++ b/absl/hash/internal/low_level_hash_test.cc
@@ -14,7 +14,7 @@
 
 #include "absl/hash/internal/low_level_hash.h"
 
-#include <cinttypes>
+#include <cstddef>
 #include <cstdint>
 
 #include "gmock/gmock.h"
@@ -25,10 +25,6 @@
 
 namespace {
 
-static const uint64_t kSalt[5] = {0xa0761d6478bd642f, 0xe7037ed1a0b428dbl,
-                                  0x8ebc6af09c88c6e3, 0x589965cc75374cc3l,
-                                  0x1d8e4e27c47d124f};
-
 TEST(LowLevelHashTest, VerifyGolden) {
   constexpr size_t kNumGoldenOutputs = 94;
   static struct {
@@ -366,38 +362,38 @@
   GTEST_SKIP() << "We only maintain golden data for little endian systems.";
 #else
   constexpr uint64_t kGolden[kNumGoldenOutputs] = {
-      0x59b1542b0ff6b7b8, 0x3fb979d297096db9, 0xb391802c536343a9,
-      0x94e0f7e4331081c4, 0x234d95e49e3ce30e, 0xca6351a3e568ed17,
-      0xa62fcf7fa334293d, 0xb03111035f546067, 0x97b8c861e013d558,
-      0xb6683803d9387949, 0xce5d907e0b3cb6a1, 0xab7466fae53ed201,
-      0x8f13ca3f1cac3edd, 0xa2684a99cd909a2a, 0x03194f86b9440843,
-      0xab3a745d96f75a66, 0xef2448606760ec3d, 0xd999e03247d5d5c5,
-      0x4a25ab345d53f926, 0xa511b829ce9fc919, 0x4b76517f8e806cbf,
-      0x006efd7ee09ff8d4, 0x790a4978bd0170a1, 0xc14f6e4b2dff057e,
-      0xe0d2f4ae7c836d09, 0x4e2038a491ed939d, 0x23fd6f408e9598e0,
-      0xa91cf8f1d92bcb08, 0x555cdec06df49d58, 0xe7d3e14bd6a8f3bd,
-      0x4fdd25c1e75c009a, 0x3dffb8acf1ffbd17, 0x56946f33ed73a705,
-      0x154c633d7690f3b0, 0x3e96f8e9a58a04e0, 0xb0279b244d3ccf9c,
-      0x8571e87c882b2142, 0x9d9ada45132e7b41, 0xd5667655533f1dec,
-      0x70607ace4ec36463, 0x691418d2eb63116c, 0xa70179d8e7142980,
-      0xf8388d756bea25a7, 0xe5127c736d9826de, 0x7f1c95f9b6b656b6,
-      0x66ab835b7bf4c7b3, 0xc03423b9a6db9728, 0xe88415a2b416b76d,
-      0x8afd8c14d0b56c36, 0xe9a252b3ba217dad, 0x710150f5cd87a9ff,
-      0xd66b147837fad9ae, 0x1af5f8ffbaa717a7, 0xe01f88d7a9a8ac17,
-      0xd67870a7251fde72, 0xf32b837f845a676b, 0x0827717b1ffe59f7,
-      0x80307212ca7645fb, 0xf0d22af71ea57c80, 0x459373765f2c114b,
-      0x54d26109fab9cbaf, 0xc603da4e257b93db, 0x57fa334b5689d7d5,
-      0x41cd1b2a8a91f620, 0xe1d6e7cd0fb015af, 0x8608e9035eb9d795,
-      0x45c7b9fae739fee1, 0x9f5ae4f7a6b597ee, 0xfb771b6e0017757d,
-      0x8dac6d29cfd8d027, 0x3c9ba4fb62ce6508, 0xa971fad8243844a7,
-      0xd2126f49b2ea3b64, 0x5dd78fe7ac436861, 0xfe4004a6bb3494a8,
-      0xe7c01cc63d770d7c, 0xa117075b8c801d37, 0xdf1dfe75f0e73069,
-      0x7285b39700cefb98, 0x5e97ea1aa9a670eb, 0xe21872db2b9137a3,
-      0x12630b02c6ca405e, 0xfe1f2d802151f97a, 0xb53b0ed3dea4fb02,
-      0xc6d5ed56d1dbf9fd, 0xe5b92b558a5c70cb, 0xccd6eedf97277d08,
-      0x08582fff2e1494ed, 0xa41f2b3d17f1c4c7, 0x29ec07e5ef950f3d,
-      0x96aba32565a97084, 0xf26870eca10cebcd, 0xbe1432feb4d33361,
-      0x21993a779845e6eb,
+      0x669da02f8d009e0f, 0xceb19bf2255445cd, 0x0e746992d6d43a7c,
+      0x41ed623b9dcc5fde, 0x187a5a30d7c72edc, 0x949ae2a9c1eb925a,
+      0x7e9c76a7b7c35e68, 0x4f96bf15b8309ff6, 0x26c0c1fde233732e,
+      0xb0453f72aa151615, 0xf24b621a9ce9fece, 0x99ed798408687b5f,
+      0x3b13ec1221423b66, 0xc67cf148a28afe59, 0x22f7e0173f92e3fa,
+      0x14186c5fda6683a0, 0x97d608caa2603b2c, 0xfde3b0bbba24ffa9,
+      0xb7068eb48c472c77, 0x9e34d72866b9fda0, 0xbbb99c884cdef88e,
+      0x81d3e01f472a8a1a, 0xf84f506b3b60366d, 0xfe3f42f01300db37,
+      0xe385712a51c1f836, 0x41dfd5e394245c79, 0x60855dbedadb900a,
+      0xbdb4c0aa38567476, 0x9748802e8eec02cc, 0x5ced256d257f88de,
+      0x55acccdf9a80f155, 0xa64b55b071afbbea, 0xa205bfe6c724ce4d,
+      0x69dd26ca8ac21744, 0xef80e2ff2f6a9bc0, 0xde266c0baa202c20,
+      0xfa3463080ac74c50, 0x379d968a40125c2b, 0x4cbbd0a7b3c7d648,
+      0xc92afd93f4c665d2, 0x6e28f5adb7ae38dc, 0x7c689c9c237be35e,
+      0xaea41b29bd9d0f73, 0x832cef631d77e59f, 0x70cac8e87bc37dd3,
+      0x8e8c98bbde68e764, 0xd6117aeb3ddedded, 0xd796ab808e766240,
+      0x8953d0ea1a7d9814, 0xa212eba4281b391c, 0x21a555a8939ce597,
+      0x809d31660f6d81a8, 0x2356524b20ab400f, 0x5bc611e1e49d0478,
+      0xba9c065e2f385ce2, 0xb0a0fd12f4e83899, 0x14d076a35b1ff2ca,
+      0x8acd0bb8cf9a93c0, 0xe62e8ec094039ee4, 0x38a536a7072bdc61,
+      0xca256297602524f8, 0xfc62ebfb3530caeb, 0x8d8b0c05520569f6,
+      0xbbaca65cf154c59d, 0x3739b5ada7e338d3, 0xdb9ea31f47365340,
+      0x410b5c9c1da56755, 0x7e0abc03dbd10283, 0x136f87be70ed442e,
+      0x6b727d4feddbe1e9, 0x074ebb21183b01df, 0x3fe92185b1985484,
+      0xc5d8efd3c68305ca, 0xd9bada21b17e272e, 0x64d73133e1360f83,
+      0xeb8563aa993e21f9, 0xe5e8da50cceab28f, 0x7a6f92eb3223d2f3,
+      0xbdaf98370ea9b31b, 0x1682a84457f077bc, 0x4abd2d33b6e3be37,
+      0xb35bc81a7c9d4c04, 0x3e5bde3fb7cfe63d, 0xff3abe6e2ffec974,
+      0xb8116dd26cf6feec, 0x7a77a6e4ed0cf081, 0xb71eec2d5a184316,
+      0x6fa932f77b4da817, 0x795f79b33909b2c4, 0x1b8755ef6b5eb34e,
+      0x2255b72d7d6b2d79, 0xf2bdafafa90bd50a, 0x442a578f02cb1fc8,
+      0xc25aefe55ecf83db,
   };
 #endif
 
@@ -408,7 +404,7 @@
     ASSERT_TRUE(absl::Base64Unescape(cases[i].base64_data, &str));
     ASSERT_GT(str.size(), 32);
     uint64_t h = absl::hash_internal::LowLevelHashLenGt32(
-        str.data(), str.size(), cases[i].seed, kSalt);
+        str.data(), str.size(), cases[i].seed);
     printf("0x%016" PRIx64 ", ", h);
     if (i % 3 == 2) {
       printf("\n");
@@ -424,7 +420,7 @@
     ASSERT_TRUE(absl::Base64Unescape(cases[i].base64_data, &str));
     ASSERT_GT(str.size(), 32);
     EXPECT_EQ(absl::hash_internal::LowLevelHashLenGt32(str.data(), str.size(),
-                                                       cases[i].seed, kSalt),
+                                                       cases[i].seed),
               kGolden[i]);
   }
 #endif
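
The golden table is replaced wholesale because the change swaps out every constant feeding the mixer, which perturbs every output. Each entry is checked as sketched below (names are illustrative; the real test iterates the cases/kGolden arrays shown above):

    #include <cstdint>
    #include <string>

    #include "absl/hash/internal/low_level_hash.h"
    #include "absl/strings/escaping.h"

    // Schematic golden check: decode the base64 test vector, hash it with
    // the recorded seed, and compare against the recorded output.
    bool GoldenEntryMatches(const std::string& base64_data, uint64_t seed,
                            uint64_t expected) {
      std::string bytes;
      if (!absl::Base64Unescape(base64_data, &bytes)) return false;
      if (bytes.size() <= 32) return false;  // LenGt32 precondition.
      return absl::hash_internal::LowLevelHashLenGt32(
                 bytes.data(), bytes.size(), seed) == expected;
    }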
diff --git a/absl/numeric/bits_test.cc b/absl/numeric/bits_test.cc
index 3b71ccc..2977976 100644
--- a/absl/numeric/bits_test.cc
+++ b/absl/numeric/bits_test.cc
@@ -27,15 +27,36 @@
 namespace {
 
 template <typename IntT>
+class UnsignedIntegerTypesTest : public ::testing::Test {};
+template <typename IntT>
 class IntegerTypesTest : public ::testing::Test {};
 
+using UnsignedIntegerTypes =
+    ::testing::Types<uint8_t, uint16_t, uint32_t, uint64_t>;
 using OneByteIntegerTypes = ::testing::Types<
     unsigned char,
     uint8_t
     >;
 
+TYPED_TEST_SUITE(UnsignedIntegerTypesTest, UnsignedIntegerTypes);
 TYPED_TEST_SUITE(IntegerTypesTest, OneByteIntegerTypes);
 
+TYPED_TEST(UnsignedIntegerTypesTest, ReturnTypes) {
+  using UIntType = TypeParam;
+
+  static_assert(std::is_same_v<decltype(byteswap(UIntType{0})), UIntType>);
+  static_assert(std::is_same_v<decltype(rotl(UIntType{0}, 0)), UIntType>);
+  static_assert(std::is_same_v<decltype(rotr(UIntType{0}, 0)), UIntType>);
+  static_assert(std::is_same_v<decltype(countl_zero(UIntType{0})), int>);
+  static_assert(std::is_same_v<decltype(countl_one(UIntType{0})), int>);
+  static_assert(std::is_same_v<decltype(countr_zero(UIntType{0})), int>);
+  static_assert(std::is_same_v<decltype(countr_one(UIntType{0})), int>);
+  static_assert(std::is_same_v<decltype(popcount(UIntType{0})), int>);
+  static_assert(std::is_same_v<decltype(bit_ceil(UIntType{0})), UIntType>);
+  static_assert(std::is_same_v<decltype(bit_floor(UIntType{0})), UIntType>);
+  static_assert(std::is_same_v<decltype(bit_width(UIntType{0})), int>);
+}
+
 TYPED_TEST(IntegerTypesTest, HandlesTypes) {
   using UIntType = TypeParam;
 
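
The new ReturnTypes test pins down the C++20 <bit> contract: the counting functions return int, while the value-shaping functions return the operand's own type with no integral promotion. A worked example on a single byte (values in the comments computed by hand):

    #include <cstdint>
    #include <type_traits>

    #include "absl/numeric/bits.h"

    void BitOpsDemo() {
      uint8_t x = 0b0001'0110;  // 22
      static_assert(std::is_same_v<decltype(absl::rotl(x, 3)), uint8_t>);
      int lead = absl::countl_zero(x);   // 3: leading zero bits out of 8.
      int set = absl::popcount(x);       // 3: bits set in 10110.
      uint8_t up = absl::bit_ceil(x);    // 32: next power of two >= 22.
      int width = absl::bit_width(x);    // 5: since 16 <= 22 < 32.
      (void)lead; (void)set; (void)up; (void)width;
    }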
diff --git a/absl/synchronization/BUILD.bazel b/absl/synchronization/BUILD.bazel
index 920928e..5c49012 100644
--- a/absl/synchronization/BUILD.bazel
+++ b/absl/synchronization/BUILD.bazel
@@ -360,6 +360,7 @@
     linkopts = ABSL_DEFAULT_LINKOPTS,
     tags = [
         "no_test_wasm",
+        "noubsan",  # TODO(b/417700722):  timeouts under UBSAN.
     ],
     deps = [
         ":per_thread_sem_test_common",
diff --git a/absl/time/internal/cctz/include/cctz/civil_time_detail.h b/absl/time/internal/cctz/include/cctz/civil_time_detail.h
index 2b0aed5..fe3b8bd 100644
--- a/absl/time/internal/cctz/include/cctz/civil_time_detail.h
+++ b/absl/time/internal/cctz/include/cctz/civil_time_detail.h
@@ -96,6 +96,18 @@
 CONSTEXPR_F int days_per_year(year_t y, month_t m) noexcept {
   return is_leap_year(y + (m > 2)) ? 366 : 365;
 }
+// m is guaranteed to be in [1:12] by the caller, but with
+// -fsanitize=array-bounds enabled the compiler cannot prove this and
+// therefore cannot optimize away the bounds check, even when this
+// function is inlined into BreakTime. To reduce that overhead, we
+// apply no_sanitize to skip the unnecessary -fsanitize=array-bounds
+// check here. Remove the attribute once the missed optimization is
+// fixed.
+#if defined(__clang__) && defined(__has_cpp_attribute)
+#if __has_cpp_attribute(clang::no_sanitize)
+[[clang::no_sanitize("array-bounds")]]
+#endif
+#endif
 CONSTEXPR_F int days_per_month(year_t y, month_t m) noexcept {
   CONSTEXPR_D int k_days_per_month[1 + 12] = {
       -1, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31  // non leap year