diff --git a/absl/container/internal/raw_hash_set.cc b/absl/container/internal/raw_hash_set.cc
index f332320..daa9ff6 100644
--- a/absl/container/internal/raw_hash_set.cc
+++ b/absl/container/internal/raw_hash_set.cc
@@ -522,7 +522,6 @@
 
   if (c.is_small()) {
     SanitizerPoisonMemoryRegion(c.slot_array(), slot_size);
-    c.growth_info().OverwriteFullAsEmpty();
     return;
   }
 
@@ -1433,10 +1432,6 @@
 
   static_assert(NextCapacity(0) == 1);
   PrepareInsertCommon(common);
-  // TODO(b/413062340): maybe don't allocate growth info for capacity 1 tables.
-  // Doing so may require additional branches/complexity so it might not be
-  // worth it.
-  GetGrowthInfoFromControl(new_ctrl).InitGrowthLeftNoDeleted(0);
 
   if (ABSL_PREDICT_FALSE(has_infoz)) {
     ReportSingleGroupTableGrowthToInfoz(common, infoz, get_hash());
@@ -1857,8 +1852,7 @@
   ABSL_SWISSTABLE_ASSERT(!common.empty() || cap > policy.soo_capacity());
   ABSL_SWISSTABLE_ASSERT(cap > 0);
   const size_t max_size_before_growth =
-      cap <= policy.soo_capacity() ? policy.soo_capacity()
-                                   : common.size() + common.growth_left();
+      IsSmallCapacity(cap) ? cap : common.size() + common.growth_left();
   if (new_size <= max_size_before_growth) {
     return;
   }
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index ac0ff4a..7106bc8 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -521,15 +521,16 @@
   uint64_t data_;
 };
 
-// Extracts the H1 portion of a hash: 57 bits mixed with a per-table seed.
+// Mixes the hash with a per-table seed. Note that we only use the low bits of
+// H1 because we bitwise-and with capacity later.
 inline size_t H1(size_t hash, PerTableSeed seed) {
-  return (hash >> 7) ^ seed.seed();
+  return hash ^ seed.seed();
 }
 
-// Extracts the H2 portion of a hash: the 7 bits not used for H1.
+// Extracts the H2 portion of a hash: the 7 most significant bits.
 //
 // These are used as an occupied control byte.
-inline h2_t H2(size_t hash) { return hash & 0x7F; }
+inline h2_t H2(size_t hash) { return hash >> (sizeof(size_t) * 8 - 7); }
 
 // When there is an insertion with no reserved growth, we rehash with
 // probability `min(1, RehashProbabilityConstant() / capacity())`. Using a
@@ -805,6 +806,9 @@
 // Helper class for computing offsets and allocation size of hash set fields.
 class RawHashSetLayout {
  public:
+  // TODO(b/413062340): maybe don't allocate growth info for capacity 1 tables.
+  // Doing so may require additional branches/complexity so it might not be
+  // worth it.
   explicit RawHashSetLayout(size_t capacity, size_t slot_size,
                             size_t slot_align, bool has_infoz)
       : control_offset_(ControlOffset(has_infoz)),
@@ -964,12 +968,6 @@
       generate_new_seed();
     }
   }
-  void* backing_array_start() const {
-    // growth_info (and maybe infoz) is stored before control bytes.
-    ABSL_SWISSTABLE_ASSERT(
-        reinterpret_cast<uintptr_t>(control()) % alignof(size_t) == 0);
-    return control() - ControlOffset(has_infoz());
-  }
 
   // Note: we can't use slots() because Qt defines "slots" as a macro.
   void* slot_array() const { return heap_or_soo_.slot_array().get(); }
@@ -1030,6 +1028,7 @@
   size_t growth_left() const { return growth_info().GetGrowthLeft(); }
 
   GrowthInfo& growth_info() {
+    ABSL_SWISSTABLE_ASSERT(!is_small());
     return GetGrowthInfoFromControl(control());
   }
   GrowthInfo growth_info() const {
@@ -1039,14 +1038,21 @@
   bool has_infoz() const { return size_.has_infoz(); }
   void set_has_infoz() { size_.set_has_infoz(); }
 
+  HashtablezInfoHandle* infoz_ptr() const {
+    // growth_info is stored before control bytes.
+    ABSL_SWISSTABLE_ASSERT(
+        reinterpret_cast<uintptr_t>(control()) % alignof(size_t) == 0);
+    ABSL_SWISSTABLE_ASSERT(has_infoz());
+    return reinterpret_cast<HashtablezInfoHandle*>(
+        control() - ControlOffset(/*has_infoz=*/true));
+  }
+
   HashtablezInfoHandle infoz() {
-    return has_infoz()
-               ? *reinterpret_cast<HashtablezInfoHandle*>(backing_array_start())
-               : HashtablezInfoHandle();
+    return has_infoz() ? *infoz_ptr() : HashtablezInfoHandle();
   }
   void set_infoz(HashtablezInfoHandle infoz) {
     ABSL_SWISSTABLE_ASSERT(has_infoz());
-    *reinterpret_cast<HashtablezInfoHandle*>(backing_array_start()) = infoz;
+    *infoz_ptr() = infoz;
   }
 
   bool should_rehash_for_bug_detection_on_insert() const {
@@ -1571,27 +1577,6 @@
   return std::is_same_v<CharAlloc, std::allocator<char>>;
 }
 
-template <bool kSooEnabled>
-bool ShouldSampleHashtablezInfoOnResize(bool force_sampling,
-                                        bool is_hashtablez_eligible,
-                                        size_t old_capacity, CommonFields& c) {
-  if (!is_hashtablez_eligible) return false;
-  // Force sampling is only allowed for SOO tables.
-  ABSL_SWISSTABLE_ASSERT(kSooEnabled || !force_sampling);
-  if (kSooEnabled && force_sampling) {
-    return true;
-  }
-  // In SOO, we sample on the first insertion so if this is an empty SOO case
-  // (e.g. when reserve is called), then we still need to sample.
-  if (kSooEnabled && old_capacity == SooCapacity() && c.empty()) {
-    return ShouldSampleNextTable();
-  }
-  if (!kSooEnabled && old_capacity == 0) {
-    return ShouldSampleNextTable();
-  }
-  return false;
-}
-
 // Allocates `n` bytes for a backing array.
 template <size_t AlignOfBackingArray, typename Alloc>
 ABSL_ATTRIBUTE_NOINLINE void* AllocateBackingArray(void* alloc, size_t n) {
@@ -3429,16 +3414,13 @@
   //
   // See `CapacityToGrowth()`.
   size_t growth_left() const {
-    ABSL_SWISSTABLE_ASSERT(!is_soo());
     return common().growth_left();
   }
 
   GrowthInfo& growth_info() {
-    ABSL_SWISSTABLE_ASSERT(!is_soo());
     return common().growth_info();
   }
   GrowthInfo growth_info() const {
-    ABSL_SWISSTABLE_ASSERT(!is_soo());
     return common().growth_info();
   }
 
@@ -3484,7 +3466,6 @@
     SooEnabled() ? common().set_empty_soo() : common().decrement_size();
     if (!SooEnabled()) {
       SanitizerPoisonObject(single_slot());
-      growth_info().OverwriteFullAsEmpty();
     }
   }
   iterator single_iterator() {
diff --git a/absl/debugging/internal/vdso_support.cc b/absl/debugging/internal/vdso_support.cc
index 8a588ea..f7e2a44 100644
--- a/absl/debugging/internal/vdso_support.cc
+++ b/absl/debugging/internal/vdso_support.cc
@@ -17,6 +17,7 @@
 // VDSOSupport -- a class representing kernel VDSO (if present).
 
 #include "absl/debugging/internal/vdso_support.h"
+#include "absl/base/attributes.h"
 
 #ifdef ABSL_HAVE_VDSO_SUPPORT     // defined in vdso_support.h
 
@@ -190,6 +191,9 @@
 // This function must be very fast, and may be called from very
 // low level (e.g. tcmalloc). Hence I avoid things like
 // GoogleOnceInit() and ::operator new.
+// The destination in VDSO is unknown to CFI and VDSO does not set MSAN
+// shadow for the return value.
+ABSL_ATTRIBUTE_NO_SANITIZE_CFI
 ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
 int GetCPU() {
   unsigned cpu;
diff --git a/absl/hash/internal/hash.h b/absl/hash/internal/hash.h
index fa88227..5802064 100644
--- a/absl/hash/internal/hash.h
+++ b/absl/hash/internal/hash.h
@@ -39,7 +39,7 @@
 
 // For feature testing and determining which headers can be included.
 #if ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L || \
-    ABSL_INTERNAL_VERSION_HEADER_AVAILABLE
+    defined(ABSL_INTERNAL_VERSION_HEADER_AVAILABLE)
 #include <version>
 #else
 #include <ciso646>
@@ -74,7 +74,6 @@
 #include <vector>
 
 #include "absl/base/attributes.h"
-#include "absl/base/internal/endian.h"
 #include "absl/base/internal/unaligned_access.h"
 #include "absl/base/optimization.h"
 #include "absl/base/port.h"
@@ -105,8 +104,6 @@
 // returns the size of these chunks.
 constexpr size_t PiecewiseChunkSize() { return 1024; }
 
-// PiecewiseCombiner
-//
 // PiecewiseCombiner is an internal-only helper class for hashing a piecewise
 // buffer of `char` or `unsigned char` as though it were contiguous.  This class
 // provides two methods:
@@ -131,8 +128,6 @@
   PiecewiseCombiner(const PiecewiseCombiner&) = delete;
   PiecewiseCombiner& operator=(const PiecewiseCombiner&) = delete;
 
-  // PiecewiseCombiner::add_buffer()
-  //
   // Appends the given range of bytes to the sequence to be hashed, which may
   // modify the provided hash state.
   template <typename H>
@@ -143,8 +138,6 @@
                       reinterpret_cast<const unsigned char*>(data), size);
   }
 
-  // PiecewiseCombiner::finalize()
-  //
   // Finishes combining the hash sequence, which may may modify the provided
   // hash state.
   //
@@ -161,18 +154,15 @@
   bool added_something_ = false;
 };
 
-// is_hashable()
-//
 // Trait class which returns true if T is hashable by the absl::Hash framework.
 // Used for the AbslHashValue implementations for composite types below.
 template <typename T>
 struct is_hashable;
 
-// HashStateBase
-//
-// An internal implementation detail that contains common implementation details
-// for all of the "hash state objects" objects generated by Abseil.  This is not
-// a public API; users should not create classes that inherit from this.
+// HashStateBase is an internal implementation detail that contains common
+// implementation details for all of the "hash state" objects generated by
+// Abseil.  This is not a public API; users should not create classes that
+// inherit from this.
 //
 // A hash state object is the template argument `H` passed to `AbslHashValue`.
 // It represents an intermediate state in the computation of an unspecified hash
@@ -237,8 +227,6 @@
 template <typename H>
 class HashStateBase {
  public:
-  // HashStateBase::combine()
-  //
   // Combines an arbitrary number of values into a hash state, returning the
   // updated state.
   //
@@ -258,8 +246,6 @@
   static H combine(H state, const T& value, const Ts&... values);
   static H combine(H state) { return state; }
 
-  // HashStateBase::combine_contiguous()
-  //
   // Combines a contiguous array of `size` elements into a hash state, returning
   // the updated state.
   //
@@ -299,8 +285,6 @@
   };
 };
 
-// is_uniquely_represented
-//
 // `is_uniquely_represented<T>` is a trait class that indicates whether `T`
 // is uniquely represented.
 //
@@ -335,8 +319,6 @@
 template <typename T, typename Enable = void>
 struct is_uniquely_represented : std::false_type {};
 
-// is_uniquely_represented<unsigned char>
-//
 // unsigned char is a synonym for "byte", so it is guaranteed to be
 // uniquely represented.
 template <>
@@ -351,9 +333,6 @@
     Integral, typename std::enable_if<std::is_integral<Integral>::value>::type>
     : std::true_type {};
 
-// is_uniquely_represented<bool>
-//
-//
 template <>
 struct is_uniquely_represented<bool> : std::false_type {};
 
@@ -375,8 +354,6 @@
   }
 };
 
-// hash_bytes()
-//
 // Convenience function that combines `hash_state` with the byte representation
 // of `value`.
 template <typename H, typename T,
@@ -504,8 +481,7 @@
                                                              T ptr) {
   auto v = reinterpret_cast<uintptr_t>(ptr);
   // Due to alignment, pointers tend to have low bits as zero, and the next few
-  // bits follow a pattern since they are also multiples of some base value. The
-  // byte swap in WeakMix helps ensure we still have good entropy in low bits.
+  // bits follow a pattern since they are also multiples of some base value.
   // Mix pointers twice to ensure we have good entropy in low bits.
   return H::combine(std::move(hash_state), v, v);
 }
@@ -561,8 +537,6 @@
   return H::combine(std::move(hash_state), p.first, p.second);
 }
 
-// hash_tuple()
-//
 // Helper function for hashing a tuple. The third argument should
 // be an index_sequence running from 0 to tuple_size<Tuple> - 1.
 template <typename H, typename Tuple, size_t... Is>
@@ -883,7 +857,6 @@
   return H::combine(std::move(hash_state), opt.has_value());
 }
 
-// VariantVisitor
 template <typename H>
 struct VariantVisitor {
   H&& hash_state;
@@ -932,8 +905,6 @@
 
 // -----------------------------------------------------------------------------
 
-// hash_range_or_bytes()
-//
 // Mixes all values in the range [data, data+size) into the hash state.
 // This overload accepts only uniquely-represented types, and hashes them by
 // hashing the entire range of bytes.
@@ -944,7 +915,6 @@
   return H::combine_contiguous(std::move(hash_state), bytes, sizeof(T) * size);
 }
 
-// hash_range_or_bytes()
 template <typename H, typename T>
 typename std::enable_if<!is_uniquely_represented<T>::value, H>::type
 hash_range_or_bytes(H hash_state, const T* data, size_t size) {
@@ -977,8 +947,6 @@
 #define ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_ 0
 #endif
 
-// HashSelect
-//
 // Type trait to select the appropriate hash implementation to use.
 // HashSelect::type<T> will give the proper hash implementation, to be invoked
 // as:
@@ -1075,7 +1043,6 @@
 struct is_hashable
     : std::integral_constant<bool, HashSelect::template Apply<T>::value> {};
 
-// MixingHashState
 class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
   // absl::uint128 is not an alias or a thin wrapper around the intrinsic.
   // We use the intrinsic when available to improve performance.
@@ -1085,21 +1052,19 @@
   using uint128 = absl::uint128;
 #endif  // ABSL_HAVE_INTRINSIC_INT128
 
-  static constexpr uint64_t kMul =
-   uint64_t{0xdcb22ca68cb134ed};
-
   template <typename T>
   using IntegralFastPath =
       conjunction<std::is_integral<T>, is_uniquely_represented<T>,
                   FitsIn64Bits<T>>;
 
+  static constexpr uint64_t kMul =
+      uint64_t{0xdcb22ca68cb134ed};
+
  public:
   // Move only
   MixingHashState(MixingHashState&&) = default;
   MixingHashState& operator=(MixingHashState&&) = default;
 
-  // MixingHashState::combine_contiguous()
-  //
   // Fundamental base case for hash recursion: mixes the given range of bytes
   // into the hash state.
   static MixingHashState combine_contiguous(MixingHashState hash_state,
@@ -1111,8 +1076,6 @@
   }
   using MixingHashState::HashStateBase::combine_contiguous;
 
-  // MixingHashState::hash()
-  //
   // For performance reasons in non-opt mode, we specialize this for
   // integral types.
   // Otherwise we would be instantiating and calling dozens of functions for
@@ -1121,24 +1084,46 @@
   template <typename T, absl::enable_if_t<IntegralFastPath<T>::value, int> = 0>
   static size_t hash(T value) {
     return static_cast<size_t>(
-        WeakMix(Seed(), static_cast<std::make_unsigned_t<T>>(value)));
+        Mix(Seed() ^ static_cast<std::make_unsigned_t<T>>(value), kMul));
   }
 
-  // Overload of MixingHashState::hash()
   template <typename T, absl::enable_if_t<!IntegralFastPath<T>::value, int> = 0>
   static size_t hash(const T& value) {
     return static_cast<size_t>(combine(MixingHashState{}, value).state_);
   }
 
  private:
-  // Invoked only once for a given argument; that plus the fact that this is
-  // move-only ensures that there is only one non-moved-from object.
-  MixingHashState() : state_(Seed()) {}
-
   friend class MixingHashState::HashStateBase;
   template <typename H>
   friend H absl::hash_internal::hash_weakly_mixed_integer(H,
                                                           WeaklyMixedInteger);
+  // Allow the HashState type-erasure implementation to invoke
+  // RunCombinedUnordered() directly.
+  friend class absl::HashState;
+  friend struct CombineRaw;
+
+  // For use in Seed().
+  static const void* const kSeed;
+
+  // Invoked only once for a given argument; that plus the fact that this is
+  // move-only ensures that there is only one non-moved-from object.
+  MixingHashState() : state_(Seed()) {}
+
+  // Workaround for MSVC bug.
+  // We make the type copyable to fix the calling convention, even though we
+  // never actually copy it. Keep it private to not affect the public API of the
+  // type.
+  MixingHashState(const MixingHashState&) = default;
+
+  explicit MixingHashState(uint64_t state) : state_(state) {}
+
+  // Combines a raw value from e.g. integrals/floats/pointers/etc. This allows
+  // us to be consistent with IntegralFastPath when combining raw types, but
+  // optimize Read1To3 and Read4To8 differently for the string case.
+  static MixingHashState combine_raw(MixingHashState hash_state,
+                                     uint64_t value) {
+    return MixingHashState(Mix(hash_state.state_ ^ value, kMul));
+  }
 
   static MixingHashState combine_weakly_mixed_integer(
       MixingHashState hash_state, WeaklyMixedInteger value) {
@@ -1168,27 +1153,6 @@
     return MixingHashState::combine(std::move(state), unordered_state);
   }
 
-  // Allow the HashState type-erasure implementation to invoke
-  // RunCombinedUnordered() directly.
-  friend class absl::HashState;
-  friend struct CombineRaw;
-
-  // Workaround for MSVC bug.
-  // We make the type copyable to fix the calling convention, even though we
-  // never actually copy it. Keep it private to not affect the public API of the
-  // type.
-  MixingHashState(const MixingHashState&) = default;
-
-  explicit MixingHashState(uint64_t state) : state_(state) {}
-
-  // Combines a raw value from e.g. integrals/floats/pointers/etc. This allows
-  // us to be consistent with IntegralFastPath when combining raw types, but
-  // optimize Read1To3 and Read4To8 differently for the string case.
-  static MixingHashState combine_raw(MixingHashState hash_state,
-                                     uint64_t value) {
-    return MixingHashState(WeakMix(hash_state.state_, value));
-  }
-
   // Implementation of the base case for combine_contiguous where we actually
   // mix the bytes into the state.
   // Dispatch to different implementations of the combine_contiguous depending
@@ -1214,7 +1178,7 @@
       // Empty string must modify the state.
       v = 0x57;
     }
-    return WeakMix(state, v);
+    return Mix(state ^ v, kMul);
   }
 
   ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t CombineContiguousImpl9to16(
@@ -1323,16 +1287,6 @@
     return Uint128High64(m) ^ Uint128Low64(m);
   }
 
-  // Slightly lower latency than Mix, but with lower quality. The byte swap
-  // helps ensure that low bits still have high quality.
-  ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t WeakMix(uint64_t lhs,
-                                                       uint64_t rhs) {
-    const uint64_t n = lhs ^ rhs;
-    // WeakMix doesn't work well on 32-bit platforms so just use Mix.
-    if constexpr (sizeof(size_t) < 8) return Mix(n, kMul);
-    return absl::gbswap_64(n * kMul);
-  }
-
   ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Hash64(const unsigned char* data,
                                                       size_t len,
                                                       uint64_t state) {
@@ -1344,8 +1298,6 @@
 #endif
   }
 
-  // Seed()
-  //
   // A non-deterministic seed.
   //
   // The current purpose of this seed is to generate non-deterministic results
@@ -1371,12 +1323,10 @@
     return static_cast<uint64_t>(reinterpret_cast<uintptr_t>(kSeed));
 #endif
   }
-  static const void* const kSeed;
 
   uint64_t state_;
 };
 
-// MixingHashState::CombineContiguousImpl()
 inline uint64_t MixingHashState::CombineContiguousImpl(
     uint64_t state, const unsigned char* first, size_t len,
     std::integral_constant<int, 4> /* sizeof_size_t */) {
@@ -1396,7 +1346,6 @@
   return CombineLargeContiguousImpl32(first, len, state);
 }
 
-// Overload of MixingHashState::CombineContiguousImpl()
 inline uint64_t MixingHashState::CombineContiguousImpl(
     uint64_t state, const unsigned char* first, size_t len,
     std::integral_constant<int, 8> /* sizeof_size_t */) {
@@ -1426,8 +1375,6 @@
 
 struct AggregateBarrier {};
 
-// HashImpl
-
 // Add a private base class to make sure this type is not an aggregate.
 // Aggregates can be aggregate initialized even if the default constructor is
 // deleted.
@@ -1456,14 +1403,12 @@
                     values...);
 }
 
-// HashStateBase::combine_contiguous()
 template <typename H>
 template <typename T>
 H HashStateBase<H>::combine_contiguous(H state, const T* data, size_t size) {
   return hash_internal::hash_range_or_bytes(std::move(state), data, size);
 }
 
-// HashStateBase::combine_unordered()
 template <typename H>
 template <typename I>
 H HashStateBase<H>::combine_unordered(H state, I begin, I end) {
@@ -1471,7 +1416,6 @@
                                 CombineUnorderedCallback<I>{begin, end});
 }
 
-// HashStateBase::PiecewiseCombiner::add_buffer()
 template <typename H>
 H PiecewiseCombiner::add_buffer(H state, const unsigned char* data,
                                 size_t size) {
@@ -1504,7 +1448,6 @@
   return state;
 }
 
-// HashStateBase::PiecewiseCombiner::finalize()
 template <typename H>
 H PiecewiseCombiner::finalize(H state) {
   // Do not call combine_contiguous with empty remainder since it is modifying