diff --git a/absl/base/config.h b/absl/base/config.h
index 6015676..7514b86 100644
--- a/absl/base/config.h
+++ b/absl/base/config.h
@@ -466,6 +466,9 @@
 //
 // Checks the endianness of the platform.
 //
+// Prefer using `std::endian` in C++20, or `absl::endian` from
+// absl/numeric/bits.h prior to C++20.
+//
 // Notes: uses the built in endian macros provided by GCC (since 4.6) and
 // Clang (since 3.2); see
 // https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html.
diff --git a/absl/base/internal/endian.h b/absl/base/internal/endian.h
index 943f3d9..e1a67f5 100644
--- a/absl/base/internal/endian.h
+++ b/absl/base/internal/endian.h
@@ -12,6 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 //
+// This file is for Abseil internal use only.
+// See //absl/numeric/bits.h for supported functions related to endianness.
 
 #ifndef ABSL_BASE_INTERNAL_ENDIAN_H_
 #define ABSL_BASE_INTERNAL_ENDIAN_H_
@@ -28,44 +30,38 @@
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 
-inline uint64_t gbswap_64(uint64_t host_int) {
+constexpr uint64_t gbswap_64(uint64_t x) {
 #if ABSL_HAVE_BUILTIN(__builtin_bswap64) || defined(__GNUC__)
-  return __builtin_bswap64(host_int);
-#elif defined(_MSC_VER)
-  return _byteswap_uint64(host_int);
+  return __builtin_bswap64(x);
 #else
-  return (((host_int & uint64_t{0xFF}) << 56) |
-          ((host_int & uint64_t{0xFF00}) << 40) |
-          ((host_int & uint64_t{0xFF0000}) << 24) |
-          ((host_int & uint64_t{0xFF000000}) << 8) |
-          ((host_int & uint64_t{0xFF00000000}) >> 8) |
-          ((host_int & uint64_t{0xFF0000000000}) >> 24) |
-          ((host_int & uint64_t{0xFF000000000000}) >> 40) |
-          ((host_int & uint64_t{0xFF00000000000000}) >> 56));
+  return (((x & uint64_t{0xFF}) << 56) |
+          ((x & uint64_t{0xFF00}) << 40) |
+          ((x & uint64_t{0xFF0000}) << 24) |
+          ((x & uint64_t{0xFF000000}) << 8) |
+          ((x & uint64_t{0xFF00000000}) >> 8) |
+          ((x & uint64_t{0xFF0000000000}) >> 24) |
+          ((x & uint64_t{0xFF000000000000}) >> 40) |
+          ((x & uint64_t{0xFF00000000000000}) >> 56));
 #endif
 }
 
-inline uint32_t gbswap_32(uint32_t host_int) {
+constexpr uint32_t gbswap_32(uint32_t x) {
 #if ABSL_HAVE_BUILTIN(__builtin_bswap32) || defined(__GNUC__)
-  return __builtin_bswap32(host_int);
-#elif defined(_MSC_VER)
-  return _byteswap_ulong(host_int);
+  return __builtin_bswap32(x);
 #else
-  return (((host_int & uint32_t{0xFF}) << 24) |
-          ((host_int & uint32_t{0xFF00}) << 8) |
-          ((host_int & uint32_t{0xFF0000}) >> 8) |
-          ((host_int & uint32_t{0xFF000000}) >> 24));
+  return (((x & uint32_t{0xFF}) << 24) |
+          ((x & uint32_t{0xFF00}) << 8) |
+          ((x & uint32_t{0xFF0000}) >> 8) |
+          ((x & uint32_t{0xFF000000}) >> 24));
 #endif
 }
 
-inline uint16_t gbswap_16(uint16_t host_int) {
+constexpr uint16_t gbswap_16(uint16_t x) {
 #if ABSL_HAVE_BUILTIN(__builtin_bswap16) || defined(__GNUC__)
-  return __builtin_bswap16(host_int);
-#elif defined(_MSC_VER)
-  return _byteswap_ushort(host_int);
+  return __builtin_bswap16(x);
 #else
-  return (((host_int & uint16_t{0xFF}) << 8) |
-          ((host_int & uint16_t{0xFF00}) >> 8));
+  return (((x & uint16_t{0xFF}) << 8) |
+          ((x & uint16_t{0xFF00}) >> 8));
 #endif
 }
 
diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel
index d92f724..f1026a5 100644
--- a/absl/container/BUILD.bazel
+++ b/absl/container/BUILD.bazel
@@ -756,6 +756,7 @@
         "//absl/log:check",
         "//absl/memory",
         "//absl/meta:type_traits",
+        "//absl/numeric:int128",
         "//absl/strings",
         "//absl/types:optional",
         "@googletest//:gtest",
diff --git a/absl/container/CMakeLists.txt b/absl/container/CMakeLists.txt
index 9305ace..1419df7 100644
--- a/absl/container/CMakeLists.txt
+++ b/absl/container/CMakeLists.txt
@@ -812,6 +812,7 @@
     absl::hash_policy_testing
     absl::hashtable_debug
     absl::hashtablez_sampler
+    absl::int128
     absl::log
     absl::memory
     absl::node_hash_set
diff --git a/absl/container/btree_benchmark.cc b/absl/container/btree_benchmark.cc
index 0d26fd4..d0dac37 100644
--- a/absl/container/btree_benchmark.cc
+++ b/absl/container/btree_benchmark.cc
@@ -735,7 +735,7 @@
 
 BIG_TYPE_PTR_BENCHMARKS(32);
 
-void BM_BtreeSet_IteratorSubtraction(benchmark::State& state) {
+void BM_BtreeSet_IteratorDifference(benchmark::State& state) {
   absl::InsecureBitGen bitgen;
   std::vector<int> vec;
   // Randomize the set's insertion order so the nodes aren't all full.
@@ -756,6 +756,52 @@
   }
 }
 
+BENCHMARK(BM_BtreeSet_IteratorDifference)->Range(1 << 10, 1 << 20);
+
+void BM_BtreeSet_IteratorAddition(benchmark::State& state) {
+  absl::InsecureBitGen bitgen;
+  std::vector<int> vec;
+  // Randomize the set's insertion order so the nodes aren't all full.
+  vec.reserve(static_cast<size_t>(state.range(0)));
+  for (int i = 0; i < state.range(0); ++i) vec.push_back(i);
+  absl::c_shuffle(vec, bitgen);
+
+  absl::btree_set<int> set;
+  for (int i : vec) set.insert(i);
+
+  size_t distance = absl::Uniform(bitgen, 0u, set.size());
+  while (state.KeepRunningBatch(distance)) {
+    // Let the increment go all the way to the `end` iterator.
+    const size_t begin =
+        absl::Uniform(absl::IntervalClosed, bitgen, 0u, set.size() - distance);
+    auto it = set.find(static_cast<int>(begin));
+    benchmark::DoNotOptimize(it += static_cast<int>(distance));
+    distance = absl::Uniform(bitgen, 0u, set.size());
+  }
+}
+
+BENCHMARK(BM_BtreeSet_IteratorAddition)->Range(1 << 10, 1 << 20);
+
+void BM_BtreeSet_IteratorSubtraction(benchmark::State& state) {
+  absl::InsecureBitGen bitgen;
+  std::vector<int> vec;
+  // Randomize the set's insertion order so the nodes aren't all full.
+  vec.reserve(static_cast<size_t>(state.range(0)));
+  for (int i = 0; i < state.range(0); ++i) vec.push_back(i);
+  absl::c_shuffle(vec, bitgen);
+
+  absl::btree_set<int> set;
+  for (int i : vec) set.insert(i);
+
+  size_t distance = absl::Uniform(bitgen, 0u, set.size());
+  while (state.KeepRunningBatch(distance)) {
+    size_t end = absl::Uniform(bitgen, distance, set.size());
+    auto it = set.find(static_cast<int>(end));
+    benchmark::DoNotOptimize(it -= static_cast<int>(distance));
+    distance = absl::Uniform(bitgen, 0u, set.size());
+  }
+}
+
 BENCHMARK(BM_BtreeSet_IteratorSubtraction)->Range(1 << 10, 1 << 20);
 
 }  // namespace
diff --git a/absl/container/btree_map.h b/absl/container/btree_map.h
index 470de2a..32a82ef 100644
--- a/absl/container/btree_map.h
+++ b/absl/container/btree_map.h
@@ -47,8 +47,10 @@
 // iterator at the current position. Another important difference is that
 // key-types must be copy-constructible.
 //
-// Another API difference is that btree iterators can be subtracted, and this
-// is faster than using std::distance.
+// There are other API differences: first, btree iterators can be subtracted,
+// and this is faster than using `std::distance`. Additionally, btree
+// iterators can be advanced via `operator+=` and `operator-=`, which is faster
+// than using `std::advance`.
 //
 // B-tree maps are not exception-safe.
 
diff --git a/absl/container/btree_set.h b/absl/container/btree_set.h
index e57d6d9..16181de 100644
--- a/absl/container/btree_set.h
+++ b/absl/container/btree_set.h
@@ -46,8 +46,10 @@
 // reason, `insert()`, `erase()`, and `extract_and_get_next()` return a valid
 // iterator at the current position.
 //
-// Another API difference is that btree iterators can be subtracted, and this
-// is faster than using std::distance.
+// There are other API differences: first, btree iterators can be subtracted,
+// and this is faster than using `std::distance`. Additionally, btree
+// iterators can be advanced via `operator+=` and `operator-=`, which is faster
+// than using `std::advance`.
 //
 // B-tree sets are not exception-safe.
 
diff --git a/absl/container/btree_test.cc b/absl/container/btree_test.cc
index c398922..1d2c2a6 100644
--- a/absl/container/btree_test.cc
+++ b/absl/container/btree_test.cc
@@ -3351,7 +3351,7 @@
   set.insert(0);
 }
 
-TEST(Btree, IteratorSubtraction) {
+TEST(Btree, IteratorDifference) {
   absl::BitGen bitgen;
   std::vector<int> vec;
   // Randomize the set's insertion order so the nodes aren't all full.
@@ -3369,6 +3369,94 @@
   }
 }
 
+TEST(Btree, IteratorAddition) {
+  absl::BitGen bitgen;
+  std::vector<int> vec;
+
+  // Randomize the set's insertion order so the nodes aren't all full.
+  constexpr int kSetSize = 1000000;
+  for (int i = 0; i < kSetSize; ++i) vec.push_back(i);
+  absl::c_shuffle(vec, bitgen);
+
+  absl::btree_set<int> set;
+  for (int i : vec) set.insert(i);
+
+  for (int i = 0; i < 1000; ++i) {
+    int begin = absl::Uniform(bitgen, 0, kSetSize);
+    int end = absl::Uniform(bitgen, begin, kSetSize);
+    ASSERT_LE(begin, end);
+
+    auto it = set.find(begin);
+    it += end - begin;
+    ASSERT_EQ(it, set.find(end)) << end;
+
+    it += begin - end;
+    ASSERT_EQ(it, set.find(begin)) << begin;
+  }
+}
+
+TEST(Btree, IteratorAdditionOutOfBounds) {
+  const absl::btree_set<int> set({5});
+
+  auto it = set.find(5);
+
+  auto forward = it;
+  forward += 1;
+  EXPECT_EQ(forward, set.end());
+
+  auto backward = it;
+  EXPECT_EQ(backward, set.begin());
+
+  if (IsAssertEnabled()) {
+    EXPECT_DEATH(forward += 1, "n == 0");
+    EXPECT_DEATH(backward += -1, "position >= node->start");
+  }
+}
+
+TEST(Btree, IteratorSubtraction) {
+  absl::BitGen bitgen;
+  std::vector<int> vec;
+
+  // Randomize the set's insertion order so the nodes aren't all full.
+  constexpr int kSetSize = 1000000;
+  for (int i = 0; i < kSetSize; ++i) vec.push_back(i);
+  absl::c_shuffle(vec, bitgen);
+
+  absl::btree_set<int> set;
+  for (int i : vec) set.insert(i);
+
+  for (int i = 0; i < 1000; ++i) {
+    int begin = absl::Uniform(bitgen, 0, kSetSize);
+    int end = absl::Uniform(bitgen, begin, kSetSize);
+    ASSERT_LE(begin, end);
+
+    auto it = set.find(end);
+    it -= end - begin;
+    ASSERT_EQ(it, set.find(begin)) << begin;
+
+    it -= begin - end;
+    ASSERT_EQ(it, set.find(end)) << end;
+  }
+}
+
+TEST(Btree, IteratorSubtractionOutOfBounds) {
+  const absl::btree_set<int> set({5});
+
+  auto it = set.find(5);
+
+  auto backward = it;
+  EXPECT_EQ(backward, set.begin());
+
+  auto forward = it;
+  forward -= -1;
+  EXPECT_EQ(forward, set.end());
+
+  if (IsAssertEnabled()) {
+    EXPECT_DEATH(backward -= 1, "position >= node->start");
+    EXPECT_DEATH(forward -= -1, "n == 0");
+  }
+}
+
 TEST(Btree, DereferencingEndIterator) {
   if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
 
diff --git a/absl/container/flat_hash_set.h b/absl/container/flat_hash_set.h
index b5d0f7f..e1fd2a3 100644
--- a/absl/container/flat_hash_set.h
+++ b/absl/container/flat_hash_set.h
@@ -62,7 +62,7 @@
 // Its interface is similar to that of `std::unordered_set<T>` with the
 // following notable differences:
 //
-// * Requires keys that are CopyConstructible
+// * Requires keys that are MoveConstructible
 // * Supports heterogeneous lookup, through `find()` and `insert()`, provided
 //   that the set is provided a compatible heterogeneous hashing function and
 //   equality operator. See below for details.
diff --git a/absl/container/flat_hash_set_test.cc b/absl/container/flat_hash_set_test.cc
index 68ea7a0..bb90efa 100644
--- a/absl/container/flat_hash_set_test.cc
+++ b/absl/container/flat_hash_set_test.cc
@@ -351,6 +351,38 @@
   }
 }
 
+class MoveOnlyInt {
+ public:
+  explicit MoveOnlyInt(int value) : value_(value) {}
+
+  MoveOnlyInt(const MoveOnlyInt& other) = delete;
+  MoveOnlyInt& operator=(const MoveOnlyInt& other) = delete;
+
+  MoveOnlyInt(MoveOnlyInt&& other) = default;
+  MoveOnlyInt& operator=(MoveOnlyInt&& other) = default;
+
+  bool operator==(const MoveOnlyInt& other) const {
+    return value_ == other.value_;
+  }
+  bool operator==(int other) const { return value_ == other; }
+
+ private:
+  template <typename H>
+  friend H AbslHashValue(H h, const MoveOnlyInt& m) {
+    return H::combine(std::move(h), m.value_);
+  }
+
+  int value_;
+};
+
+TEST(FlatHashSet, MoveOnlyKey) {
+  flat_hash_set<MoveOnlyInt> s;
+  s.insert(MoveOnlyInt(1));
+  s.insert(MoveOnlyInt(2));
+  s.insert(MoveOnlyInt(3));
+  EXPECT_THAT(s, UnorderedElementsAre(1, 2, 3));
+}
+
 }  // namespace
 }  // namespace container_internal
 ABSL_NAMESPACE_END
diff --git a/absl/container/internal/btree.h b/absl/container/internal/btree.h
index 689e71a..a742829 100644
--- a/absl/container/internal/btree.h
+++ b/absl/container/internal/btree.h
@@ -60,6 +60,7 @@
 #include "absl/base/config.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/base/macros.h"
+#include "absl/base/optimization.h"
 #include "absl/container/internal/common.h"
 #include "absl/container/internal/common_policy_traits.h"
 #include "absl/container/internal/compressed_tuple.h"
@@ -708,6 +709,8 @@
   }
 
   // Getter for the parent of this node.
+  // TODO(ezb): assert that the child of the returned node at position
+  // `position()` maps to the current node.
   btree_node *parent() const { return *GetField<0>(); }
   // Getter for whether the node is the root of the tree. The parent of the
   // root of the tree is the leftmost node in the tree which is guaranteed to
@@ -1175,6 +1178,26 @@
     return distance_slow(other);
   }
 
+  // Advances the iterator by `n`. Values of `n` must not result in going past
+  // the `end` iterator (for a positive `n`) or before the `begin` iterator (for
+  // a negative `n`).
+  btree_iterator &operator+=(difference_type n) {
+    assert_valid_generation(node_);
+    if (n == 0) return *this;
+    if (n < 0) return decrement_n_slow(-n);
+    return increment_n_slow(n);
+  }
+
+  // Moves the iterator by `n` positions backwards. Values of `n` must not
+  // result in going before the `begin` iterator (for a positive `n`) or past
+  // the `end` iterator (for a negative `n`).
+  btree_iterator &operator-=(difference_type n) {
+    assert_valid_generation(node_);
+    if (n == 0) return *this;
+    if (n < 0) return increment_n_slow(-n);
+    return decrement_n_slow(n);
+  }
+
   // Accessors for the key/value the iterator is pointing at.
   reference operator*() const {
     ABSL_HARDENING_ASSERT(node_ != nullptr);
@@ -1277,6 +1300,7 @@
     increment_slow();
   }
   void increment_slow();
+  btree_iterator &increment_n_slow(difference_type n);
 
   void decrement() {
     assert_valid_generation(node_);
@@ -1286,6 +1310,7 @@
     decrement_slow();
   }
   void decrement_slow();
+  btree_iterator &decrement_n_slow(difference_type n);
 
   const key_type &key() const {
     return node_->key(static_cast<size_type>(position_));
@@ -2172,6 +2197,80 @@
   }
 }
 
+template <typename N, typename R, typename P>
+btree_iterator<N, R, P> &btree_iterator<N, R, P>::increment_n_slow(
+    difference_type n) {
+  N *node = node_;
+  int position = position_;
+  ABSL_ASSUME(n > 0);
+  while (n > 0) {
+    if (node->is_leaf()) {
+      if (position + n < node->finish()) {
+        position += n;
+        break;
+      } else {
+        n -= node->finish() - position;
+        position = node->finish();
+        btree_iterator save = {node, position};
+        while (position == node->finish() && !node->is_root()) {
+          position = node->position();
+          node = node->parent();
+        }
+        if (position == node->finish()) {
+          ABSL_HARDENING_ASSERT(n == 0);
+          return *this = save;
+        }
+      }
+    } else {
+      --n;
+      assert(position < node->finish());
+      node = node->child(static_cast<field_type>(position + 1));
+      while (node->is_internal()) {
+        node = node->start_child();
+      }
+      position = node->start();
+    }
+  }
+  node_ = node;
+  position_ = position;
+  return *this;
+}
+
+template <typename N, typename R, typename P>
+btree_iterator<N, R, P> &btree_iterator<N, R, P>::decrement_n_slow(
+    difference_type n) {
+  N *node = node_;
+  int position = position_;
+  ABSL_ASSUME(n > 0);
+  while (n > 0) {
+    if (node->is_leaf()) {
+      if (position - n >= node->start()) {
+        position -= n;
+        break;
+      } else {
+        n -= 1 + position - node->start();
+        position = node->start() - 1;
+        while (position < node->start() && !node->is_root()) {
+          position = node->position() - 1;
+          node = node->parent();
+        }
+        ABSL_HARDENING_ASSERT(position >= node->start());
+      }
+    } else {
+      --n;
+      assert(position >= node->start());
+      node = node->child(static_cast<field_type>(position));
+      while (node->is_internal()) {
+        node = node->child(node->finish());
+      }
+      position = node->finish() - 1;
+    }
+  }
+  node_ = node;
+  position_ = position;
+  return *this;
+}
+
 ////
 // btree methods
 template <typename P>
diff --git a/absl/container/internal/compressed_tuple_test.cc b/absl/container/internal/compressed_tuple_test.cc
index c3edf54..01b334e 100644
--- a/absl/container/internal/compressed_tuple_test.cc
+++ b/absl/container/internal/compressed_tuple_test.cc
@@ -386,20 +386,6 @@
 
   using Tuple = CompressedTuple<int, double, CompressedTuple<int>, Empty<0>>;
 
-  constexpr int r0 =
-      AsLValue(Tuple(1, 0.75, CompressedTuple<int>(9), {})).get<0>();
-  constexpr double r1 =
-      AsLValue(Tuple(1, 0.75, CompressedTuple<int>(9), {})).get<1>();
-  constexpr int r2 =
-      AsLValue(Tuple(1, 0.75, CompressedTuple<int>(9), {})).get<2>().get<0>();
-  constexpr CallType r3 =
-      AsLValue(Tuple(1, 0.75, CompressedTuple<int>(9), {})).get<3>().value();
-
-  EXPECT_EQ(r0, 1);
-  EXPECT_EQ(r1, 0.75);
-  EXPECT_EQ(r2, 9);
-  EXPECT_EQ(r3, CallType::kMutableRef);
-
   constexpr Tuple x(7, 1.25, CompressedTuple<int>(5), {});
   constexpr int x0 = x.get<0>();
   constexpr double x1 = x.get<1>();
diff --git a/absl/container/internal/layout.h b/absl/container/internal/layout.h
index 2f05f27..58c8d4f 100644
--- a/absl/container/internal/layout.h
+++ b/absl/container/internal/layout.h
@@ -192,7 +192,6 @@
 #include <typeinfo>
 #include <utility>
 
-#include "absl/base/attributes.h"
 #include "absl/base/config.h"
 #include "absl/debugging/internal/demangle.h"
 #include "absl/meta/type_traits.h"
@@ -591,10 +590,10 @@
   //
   // Requires: `p` is aligned to `Alignment()`.
   //
-  // Note: We mark the parameter as unused because GCC detects it is not used
-  // when `SizeSeq` is empty [-Werror=unused-but-set-parameter].
+  // Note: We mark the parameter as maybe_unused because GCC detects it is not
+  // used when `SizeSeq` is empty [-Werror=unused-but-set-parameter].
   template <class Char>
-  auto Slices(ABSL_ATTRIBUTE_UNUSED Char* p) const {
+  auto Slices([[maybe_unused]] Char* p) const {
     return std::tuple<SliceType<CopyConst<Char, ElementType<SizeSeq>>>...>(
         Slice<SizeSeq>(p)...);
   }
diff --git a/absl/container/internal/raw_hash_set.cc b/absl/container/internal/raw_hash_set.cc
index ffc6d80..3b894a5 100644
--- a/absl/container/internal/raw_hash_set.cc
+++ b/absl/container/internal/raw_hash_set.cc
@@ -69,15 +69,20 @@
 
 namespace {
 
+[[noreturn]] ABSL_ATTRIBUTE_NOINLINE void HashTableSizeOverflow() {
+  ABSL_RAW_LOG(FATAL, "Hash table size overflow");
+}
+
+void ValidateMaxSize(size_t size, size_t slot_size) {
+  if (IsAboveValidSize(size, slot_size)) {
+    HashTableSizeOverflow();
+  }
+}
+
 // Returns "random" seed.
 inline size_t RandomSeed() {
 #ifdef ABSL_HAVE_THREAD_LOCAL
   static thread_local size_t counter = 0;
-  // On Linux kernels >= 5.4 the MSAN runtime has a false-positive when
-  // accessing thread local storage data from loaded libraries
-  // (https://github.com/google/sanitizers/issues/1265), for this reason counter
-  // needs to be annotated as initialized.
-  ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(&counter, sizeof(size_t));
   size_t value = ++counter;
 #else   // ABSL_HAVE_THREAD_LOCAL
   static std::atomic<size_t> counter(0);
@@ -86,24 +91,22 @@
   return value ^ static_cast<size_t>(reinterpret_cast<uintptr_t>(&counter));
 }
 
-bool ShouldRehashForBugDetection(const ctrl_t* ctrl, size_t capacity) {
+bool ShouldRehashForBugDetection(PerTableSeed seed, size_t capacity) {
   // Note: we can't use the abseil-random library because abseil-random
   // depends on swisstable. We want to return true with probability
   // `min(1, RehashProbabilityConstant() / capacity())`. In order to do this,
   // we probe based on a random hash and see if the offset is less than
   // RehashProbabilityConstant().
-  return probe(ctrl, capacity, absl::HashOf(RandomSeed())).offset() <
+  return probe(seed, capacity, absl::HashOf(RandomSeed())).offset() <
          RehashProbabilityConstant();
 }
 
 // Find a non-deterministic hash for single group table.
 // Last two bits are used to find a position for a newly inserted element after
 // resize.
-// This function is mixing all bits of hash and control pointer to maximize
-// entropy.
-size_t SingleGroupTableH1(size_t hash, ctrl_t* control) {
-  return static_cast<size_t>(absl::popcount(
-      hash ^ static_cast<size_t>(reinterpret_cast<uintptr_t>(control))));
+// This function mixes all bits of the hash and seed to maximize entropy.
+size_t SingleGroupTableH1(size_t hash, PerTableSeed seed) {
+  return static_cast<size_t>(absl::popcount(hash ^ seed.seed()));
 }
 
 // Returns the address of the slot `i` iterations after `slot` assuming each
@@ -132,26 +135,23 @@
 }
 
 bool CommonFieldsGenerationInfoEnabled::
-    should_rehash_for_bug_detection_on_insert(const ctrl_t* ctrl,
+    should_rehash_for_bug_detection_on_insert(PerTableSeed seed,
                                               size_t capacity) const {
-  // As an optimization, we avoid calling ShouldRehashForBugDetection during the
-  // first two insertions. In these cases, we'll end up rehashing anyways.
-  if (capacity == 0 || (capacity == 1 && IsFull(ctrl[0]))) return false;
   if (reserved_growth_ == kReservedGrowthJustRanOut) return true;
   if (reserved_growth_ > 0) return false;
-  return ShouldRehashForBugDetection(ctrl, capacity);
+  return ShouldRehashForBugDetection(seed, capacity);
 }
 
 bool CommonFieldsGenerationInfoEnabled::should_rehash_for_bug_detection_on_move(
-    const ctrl_t* ctrl, size_t capacity) const {
-  return ShouldRehashForBugDetection(ctrl, capacity);
+    PerTableSeed seed, size_t capacity) const {
+  return ShouldRehashForBugDetection(seed, capacity);
 }
 
 bool ShouldInsertBackwardsForDebug(size_t capacity, size_t hash,
-                                   const ctrl_t* ctrl) {
+                                   PerTableSeed seed) {
   // To avoid problems with weak hashes and single bit tests, we use % 13.
   // TODO(kfm,sbenza): revisit after we do unconditional mixing
-  return !is_small(capacity) && (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6;
+  return !is_small(capacity) && (H1(hash, seed) ^ RandomSeed()) % 13 > 6;
 }
 
 void IterateOverFullSlots(const CommonFields& c, size_t slot_size,
@@ -207,7 +207,7 @@
   // index 1 occupied, so we need to insert either at index 0 or index 2.
   static_assert(SooSlotIndex() == 1, "");
   PrepareInsertCommon(common);
-  const size_t offset = SingleGroupTableH1(hash, common.control()) & 2;
+  const size_t offset = SingleGroupTableH1(hash, common.seed()) & 2;
   common.growth_info().OverwriteEmptyAsFull();
   SetCtrlInSingleGroupTable(common, offset, H2(hash), slot_size);
   common.infoz().RecordInsert(hash, /*distance_from_desired=*/0);
@@ -428,8 +428,8 @@
 
 void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
                        void* alloc, bool reuse, bool soo_enabled) {
-  c.set_size(0);
   if (reuse) {
+    c.set_size_to_zero();
     assert(!soo_enabled || c.capacity() > SooCapacity());
     ResetCtrl(c, policy.slot_size);
     ResetGrowthLeft(c);
@@ -459,11 +459,15 @@
   }
 }
 
-template <bool kGuaranteedEmpty, bool kGuaranteedAllocated>
+enum class ResizeNonSooMode {
+  kGuaranteedEmpty,
+  kGuaranteedAllocated,
+};
+
+template <ResizeNonSooMode kMode>
 void ResizeNonSooImpl(CommonFields& common, size_t new_capacity,
                       HashtablezInfoHandle infoz,
                       const PolicyFunctions& policy) {
-  static_assert(!kGuaranteedEmpty || !kGuaranteedAllocated);
   assert(IsValidCapacity(new_capacity));
   assert(new_capacity > policy.soo_capacity);
 
@@ -484,25 +488,25 @@
       reinterpret_cast<GenerationType*>(mem + layout.generation_offset()));
   common.set_generation(NextGeneration(old_generation));
 
-  common.set_control(reinterpret_cast<ctrl_t*>(mem + layout.control_offset()));
+  common.set_control</*kGenerateSeed=*/true>(
+      reinterpret_cast<ctrl_t*>(mem + layout.control_offset()));
   common.set_slots(mem + layout.slot_offset());
 
   size_t total_probe_length = 0;
   ResetCtrl(common, slot_size);
-  assert(!kGuaranteedEmpty || old_capacity == policy.soo_capacity);
-  assert(!kGuaranteedAllocated || old_capacity > 0);
-  if constexpr (!kGuaranteedEmpty) {
-    if (kGuaranteedAllocated || old_capacity > 0) {
-      total_probe_length = policy.find_new_positions_and_transfer_slots(
-          common, old_ctrl, old_slots, old_capacity);
-      (*policy.dealloc)(alloc, old_capacity, old_ctrl, slot_size, slot_align,
-                        has_infoz);
-    }
+  assert(kMode != ResizeNonSooMode::kGuaranteedEmpty ||
+         old_capacity == policy.soo_capacity);
+  assert(kMode != ResizeNonSooMode::kGuaranteedAllocated || old_capacity > 0);
+  if constexpr (kMode == ResizeNonSooMode::kGuaranteedAllocated) {
+    total_probe_length = policy.find_new_positions_and_transfer_slots(
+        common, old_ctrl, old_slots, old_capacity);
+    (*policy.dealloc)(alloc, old_capacity, old_ctrl, slot_size, slot_align,
+                      has_infoz);
   }
 
   ResetGrowthLeft(common);
-  common.set_has_infoz(has_infoz);
   if (has_infoz) {
+    common.set_has_infoz();
     infoz.RecordStorageChanged(common.size(), new_capacity);
     infoz.RecordRehash(total_probe_length);
     common.set_infoz(infoz);
@@ -518,9 +522,6 @@
   assert(common.capacity() <= policy.soo_capacity);
   assert(common.empty());
   const size_t slot_size = policy.slot_size;
-  if (ABSL_PREDICT_FALSE(new_capacity > MaxValidCapacity(slot_size))) {
-    HashTableSizeOverflow();
-  }
   HashtablezInfoHandle infoz;
   const bool should_sample =
       policy.is_hashtablez_eligible && (force_infoz || ShouldSampleNextTable());
@@ -528,8 +529,8 @@
     infoz = ForcedTrySample(slot_size, policy.key_size, policy.value_size,
                             policy.soo_capacity);
   }
-  ResizeNonSooImpl</*kGuaranteedEmpty=*/true, /*kGuaranteedAllocated=*/false>(
-      common, new_capacity, infoz, policy);
+  ResizeNonSooImpl<ResizeNonSooMode::kGuaranteedEmpty>(common, new_capacity,
+                                                       infoz, policy);
 }
 
 // If the table was SOO, initializes new control bytes and transfers slot.
@@ -544,13 +545,14 @@
   assert(policy.soo_capacity == SooCapacity());
   size_t new_capacity = c.capacity();
 
-  size_t offset = probe(new_ctrl, new_capacity, hash).offset();
+  c.generate_new_seed();
+  size_t offset = probe(c.seed(), new_capacity, hash).offset();
   offset = offset == new_capacity ? 0 : offset;
   SanitizerPoisonMemoryRegion(new_slots, policy.slot_size * new_capacity);
   void* target_slot = SlotAddress(new_slots, offset, policy.slot_size);
   SanitizerUnpoisonMemoryRegion(target_slot, policy.slot_size);
   policy.transfer(&c, target_slot, c.soo_data(), 1);
-  c.set_control(new_ctrl);
+  c.set_control</*kGenerateSeed=*/false>(new_ctrl);
   c.set_slots(new_slots);
   ResetCtrl(c, policy.slot_size);
   SetCtrl(c, offset, H2(hash), policy.slot_size);
@@ -608,7 +610,7 @@
                                             new_slots, policy);
   ResetGrowthLeft(common);
   if (has_infoz) {
-    common.set_has_infoz(has_infoz);
+    common.set_has_infoz();
     common.set_infoz(infoz);
   }
 }
@@ -776,7 +778,7 @@
 
   ctrl_t* new_ctrl = reinterpret_cast<ctrl_t*>(mem + layout.control_offset());
   void* new_slots = mem + layout.slot_offset();
-  common.set_control(new_ctrl);
+  common.set_control</*kGenerateSeed=*/false>(new_ctrl);
   common.set_slots(new_slots);
 
   h2_t new_h2 = H2(new_hash);
@@ -785,6 +787,7 @@
   if (old_capacity == 0) {
     static_assert(NextCapacity(0) == 1);
     InitializeSingleElementControlBytes(new_h2, new_ctrl);
+    common.generate_new_seed();
     find_info = FindInfo{0, 0};
   } else {
     if (is_single_group(new_capacity)) {
@@ -798,8 +801,9 @@
       PoisonEmptySlots(common, slot_size);
       // We put the new element either at the beginning or at the end of the
       // table with approximately equal probability.
-      size_t offset =
-          SingleGroupTableH1(new_hash, new_ctrl) & 1 ? 0 : new_capacity - 1;
+      size_t offset = SingleGroupTableH1(new_hash, common.seed()) & 1
+                          ? 0
+                          : new_capacity - 1;
 
       assert(IsEmpty(new_ctrl[offset]));
       SetCtrlInSingleGroupTable(common, offset, new_h2, policy.slot_size);
@@ -818,8 +822,8 @@
   PrepareInsertCommon(common);
   ResetGrowthLeft(common);
 
-  common.set_has_infoz(has_infoz);
   if (has_infoz) {
+    common.set_has_infoz();
     infoz.RecordStorageChanged(common.size() - 1, new_capacity);
     infoz.RecordRehash(total_probe_length);
     infoz.RecordInsert(new_hash, find_info.probe_length);
@@ -920,16 +924,34 @@
   return &common;
 }
 
-void ResizeAllocatedTable(CommonFields& common, size_t new_capacity,
-                          const PolicyFunctions& policy) {
-  ResizeNonSooImpl</*kGuaranteedEmpty=*/false, /*kGuaranteedAllocated=*/true>(
+void ResizeAllocatedTableWithSeedChange(CommonFields& common,
+                                        size_t new_capacity,
+                                        const PolicyFunctions& policy) {
+  ResizeNonSooImpl<ResizeNonSooMode::kGuaranteedAllocated>(
       common, new_capacity, common.infoz(), policy);
 }
 
-void ResizeEmptyNonAllocatedTable(CommonFields& common, size_t new_capacity,
-                                  const PolicyFunctions& policy) {
-  ResizeEmptyNonAllocatedTableImpl(common, new_capacity, /*force_infoz=*/false,
-                                   policy);
+
+void ReserveEmptyNonAllocatedTableToFitNewSize(CommonFields& common,
+                                               size_t new_size,
+                                               const PolicyFunctions& policy) {
+  ValidateMaxSize(new_size, policy.slot_size);
+  ResizeEmptyNonAllocatedTableImpl(
+      common, NormalizeCapacity(GrowthToLowerboundCapacity(new_size)),
+      /*force_infoz=*/false, policy);
+  // This is after resize, to ensure that we have completed the allocation
+  // and have potentially sampled the hashtable.
+  common.infoz().RecordReservation(new_size);
+  common.reset_reserved_growth(new_size);
+  common.set_reservation_size(new_size);
+}
+
+void ReserveEmptyNonAllocatedTableToFitBucketCount(
+    CommonFields& common, size_t bucket_count, const PolicyFunctions& policy) {
+  size_t new_capacity = NormalizeCapacity(bucket_count);
+  ValidateMaxSize(CapacityToGrowth(new_capacity), policy.slot_size);
+  ResizeEmptyNonAllocatedTableImpl(common, new_capacity,
+                                   /*force_infoz=*/false, policy);
 }
 
 void GrowEmptySooTableToNextCapacityForceSampling(
@@ -970,7 +992,8 @@
         static constexpr size_t kInitialSampledCapacity =
             NextCapacity(SooCapacity());
         if (cap > kInitialSampledCapacity) {
-          ResizeAllocatedTable(common, kInitialSampledCapacity, policy);
+          ResizeAllocatedTableWithSeedChange(common, kInitialSampledCapacity,
+                                             policy);
         }
         // This asserts that we didn't lose sampling coverage in `resize`.
         assert(common.infoz().IsSampled());
@@ -978,7 +1001,7 @@
       }
       assert(slot_size <= sizeof(HeapOrSoo));
       assert(policy.slot_align <= alignof(HeapOrSoo));
-      HeapOrSoo tmp_slot;
+      HeapOrSoo tmp_slot(uninitialized_tag_t{});
       size_t begin_offset = FindFirstFullSlot(0, cap, common.control());
       policy.transfer(&common, &tmp_slot,
                       SlotAddress(common.slot_array(), begin_offset, slot_size),
@@ -992,22 +1015,21 @@
 
   // bitor is a faster way of doing `max` here. We will round up to the next
   // power-of-2-minus-1, so bitor is good enough.
-  const size_t new_capacity =
-      NormalizeCapacity(n | GrowthToLowerboundCapacity(common.size()));
+  size_t new_size = n | GrowthToLowerboundCapacity(common.size());
+  ValidateMaxSize(n, policy.slot_size);
+  const size_t new_capacity = NormalizeCapacity(new_size);
   // n == 0 unconditionally rehashes as per the standard.
   if (n == 0 || new_capacity > cap) {
-    if (ABSL_PREDICT_FALSE(new_capacity > MaxValidCapacity(slot_size))) {
-      HashTableSizeOverflow();
-    }
     if (cap == policy.soo_capacity) {
       if (common.empty()) {
-        ResizeEmptyNonAllocatedTable(common, new_capacity, policy);
+        ResizeEmptyNonAllocatedTableImpl(common, new_capacity,
+                                         /*force_infoz=*/false, policy);
       } else {
         ResizeFullSooTable(common, new_capacity,
                            ResizeFullSooTableSamplingMode::kNoSampling, policy);
       }
     } else {
-      ResizeAllocatedTable(common, new_capacity, policy);
+      ResizeAllocatedTableWithSeedChange(common, new_capacity, policy);
     }
     // This is after resize, to ensure that we have completed the allocation
     // and have potentially sampled the hashtable.
@@ -1017,6 +1039,9 @@
 
 void ReserveAllocatedTable(CommonFields& common, size_t n,
                            const PolicyFunctions& policy) {
+  common.reset_reserved_growth(n);
+  common.set_reservation_size(n);
+
   const size_t cap = common.capacity();
   assert(!common.empty() || cap > policy.soo_capacity);
   assert(cap > 0);
@@ -1026,17 +1051,15 @@
   if (n <= max_size_before_growth) {
     return;
   }
+  ValidateMaxSize(n, policy.slot_size);
   const size_t new_capacity = NormalizeCapacity(GrowthToLowerboundCapacity(n));
-  if (ABSL_PREDICT_FALSE(new_capacity > MaxValidCapacity(policy.slot_size))) {
-    HashTableSizeOverflow();
-  }
   if (cap == policy.soo_capacity) {
     assert(!common.empty());
     ResizeFullSooTable(common, new_capacity,
                        ResizeFullSooTableSamplingMode::kNoSampling, policy);
   } else {
     assert(cap > policy.soo_capacity);
-    ResizeAllocatedTable(common, new_capacity, policy);
+    ResizeAllocatedTableWithSeedChange(common, new_capacity, policy);
   }
   common.infoz().RecordReservation(n);
 }
@@ -1050,7 +1073,7 @@
   if (rehash_for_bug_detection) {
     // Move to a different heap allocation in order to detect bugs.
     const size_t cap = common.capacity();
-    ResizeAllocatedTable(
+    ResizeAllocatedTableWithSeedChange(
         common, common.growth_left() > 0 ? cap : NextCapacity(cap), policy);
     target = find_first_non_full(common, hash);
   }
@@ -1069,9 +1092,18 @@
   return target.offset;
 }
 
-void HashTableSizeOverflow() {
-  ABSL_RAW_LOG(FATAL, "Hash table size overflow");
-}
+template void GrowFullSooTableToNextCapacity<0, false>(CommonFields&, size_t,
+                                                       const PolicyFunctions&);
+template void GrowFullSooTableToNextCapacity<1, true>(CommonFields&, size_t,
+                                                      const PolicyFunctions&);
+template void GrowFullSooTableToNextCapacity<4, true>(CommonFields&, size_t,
+                                                      const PolicyFunctions&);
+template void GrowFullSooTableToNextCapacity<8, true>(CommonFields&, size_t,
+                                                      const PolicyFunctions&);
+#if UINTPTR_MAX == UINT64_MAX
+template void GrowFullSooTableToNextCapacity<16, true>(CommonFields&, size_t,
+                                                       const PolicyFunctions&);
+#endif
 
 }  // namespace container_internal
 ABSL_NAMESPACE_END
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index d8429d6..599718b 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -424,18 +424,131 @@
   return *generation == SentinelEmptyGeneration();
 }
 
+// We only allow a maximum of 1 SOO element, which makes the implementation
+// much simpler. Complications with multiple SOO elements include:
+// - Satisfying the guarantee that erasing one element doesn't invalidate
+//   iterators to other elements means we would probably need actual SOO
+//   control bytes.
+// - In order to prevent user code from depending on iteration order for small
+//   tables, we would need to randomize the iteration order somehow.
+constexpr size_t SooCapacity() { return 1; }
+// Sentinel type to indicate SOO CommonFields construction.
+struct soo_tag_t {};
+// Sentinel type to indicate SOO CommonFields construction with full size.
+struct full_soo_tag_t {};
+// Sentinel type to indicate non-SOO CommonFields construction.
+struct non_soo_tag_t {};
+// Sentinel value to indicate an uninitialized value explicitly.
+struct uninitialized_tag_t {};
+// Sentinel value to indicate creation of an empty table without a seed.
+struct no_seed_empty_tag_t {};
+
+// Per-table hash seed. This gets mixed into H1 to randomize iteration order
+// per-table.
+// The seed is needed to ensure non-determinism of iteration order.
+class PerTableSeed {
+ public:
+  // The number of bits in the seed.
+  // It is big enough to ensure non-determinism of iteration order.
+  // We store the seed inside a uint64_t together with size and other metadata.
+  static constexpr size_t kBitCount = 19;
+
+  // Returns the seed for the table. Only the lowest kBitCount bits are set.
+  size_t seed() const { return seed_; }
+
+ private:
+  friend class HashtableSize;
+  explicit PerTableSeed(size_t seed) : seed_(seed) {}
+
+  const size_t seed_;
+};
+
+// Returns next seed base number that is used for generating per-table seeds.
+// Only the lowest PerTableSeed::kBitCount bits are used for actual hash table
+// seed.
+inline size_t NextSeedBaseNumber() {
+  thread_local size_t seed = reinterpret_cast<uintptr_t>(&seed);
+  if constexpr (sizeof(size_t) == 4) {
+    seed += uint32_t{0xcc9e2d51};
+  } else {
+    seed += uint64_t{0xdcb22ca68cb134ed};
+  }
+  return seed;
+}
+
+// Stores the size of the hash table, and additionally:
+// 1) one bit that stores whether we have infoz.
+// 2) PerTableSeed::kBitCount bits for the seed.
+class HashtableSize {
+ public:
+  static constexpr size_t kSizeBitCount = 64 - PerTableSeed::kBitCount - 1;
+
+  explicit HashtableSize(uninitialized_tag_t) {}
+  explicit HashtableSize(no_seed_empty_tag_t) : data_(0) {}
+  explicit HashtableSize(full_soo_tag_t) : data_(kSizeOneNoMetadata) {}
+
+  // Returns actual size of the table.
+  size_t size() const { return static_cast<size_t>(data_ >> kSizeShift); }
+  void increment_size() { data_ += kSizeOneNoMetadata; }
+  void increment_size(size_t size) {
+    data_ += static_cast<uint64_t>(size) * kSizeOneNoMetadata;
+  }
+  void decrement_size() { data_ -= kSizeOneNoMetadata; }
+  // Returns true if the table is empty.
+  bool empty() const { return data_ < kSizeOneNoMetadata; }
+  // Sets the size to zero, but keeps all the metadata bits.
+  void set_size_to_zero_keep_metadata() { data_ = data_ & kMetadataMask; }
+
+  PerTableSeed seed() const {
+    return PerTableSeed(static_cast<size_t>(data_) & kSeedMask);
+  }
+
+  void generate_new_seed() {
+    size_t seed = NextSeedBaseNumber();
+    data_ = (data_ & ~kSeedMask) | (seed & kSeedMask);
+  }
+
+  // Returns true if the table has infoz.
+  bool has_infoz() const {
+    return ABSL_PREDICT_FALSE((data_ & kHasInfozMask) != 0);
+  }
+
+  // Sets the has_infoz bit.
+  void set_has_infoz() { data_ |= kHasInfozMask; }
+
+ private:
+  static constexpr size_t kSizeShift = 64 - kSizeBitCount;
+  static constexpr uint64_t kSizeOneNoMetadata = uint64_t{1} << kSizeShift;
+  static constexpr uint64_t kMetadataMask = kSizeOneNoMetadata - 1;
+  static constexpr size_t kSeedMask =
+      (size_t{1} << PerTableSeed::kBitCount) - 1;
+  // The next bit after the seed.
+  static constexpr uint64_t kHasInfozMask = kSeedMask + 1;
+  uint64_t data_;
+};
+
+// Extracts the H1 portion of a hash: 57 bits mixed with a per-table seed.
+inline size_t H1(size_t hash, PerTableSeed seed) {
+  return (hash >> 7) ^ seed.seed();
+}
+
+// Extracts the H2 portion of a hash: the 7 bits not used for H1.
+//
+// These are used as an occupied control byte.
+inline h2_t H2(size_t hash) { return hash & 0x7F; }
+
 // Mixes a randomly generated per-process seed with `hash` and `ctrl` to
 // randomize insertion order within groups.
 bool ShouldInsertBackwardsForDebug(size_t capacity, size_t hash,
-                                   const ctrl_t* ctrl);
+                                   PerTableSeed seed);
 
 ABSL_ATTRIBUTE_ALWAYS_INLINE inline bool ShouldInsertBackwards(
     ABSL_ATTRIBUTE_UNUSED size_t capacity, ABSL_ATTRIBUTE_UNUSED size_t hash,
-    ABSL_ATTRIBUTE_UNUSED const ctrl_t* ctrl) {
+    ABSL_ATTRIBUTE_UNUSED PerTableSeed seed) {
 #if defined(NDEBUG)
   return false;
 #else
-  return ShouldInsertBackwardsForDebug(capacity, hash, ctrl);
+  return ShouldInsertBackwardsForDebug(capacity, hash, seed);
 #endif
 }
 
@@ -448,37 +561,16 @@
 ABSL_ATTRIBUTE_ALWAYS_INLINE inline auto GetInsertionOffset(
     Mask mask, ABSL_ATTRIBUTE_UNUSED size_t capacity,
     ABSL_ATTRIBUTE_UNUSED size_t hash,
-    ABSL_ATTRIBUTE_UNUSED const ctrl_t* ctrl) {
+    ABSL_ATTRIBUTE_UNUSED PerTableSeed seed) {
 #if defined(NDEBUG)
   return mask.LowestBitSet();
 #else
-  return ShouldInsertBackwardsForDebug(capacity, hash, ctrl)
+  return ShouldInsertBackwardsForDebug(capacity, hash, seed)
              ? mask.HighestBitSet()
              : mask.LowestBitSet();
 #endif
 }
 
-// Returns a per-table, hash salt, which changes on resize. This gets mixed into
-// H1 to randomize iteration order per-table.
-//
-// The seed consists of the ctrl_ pointer, which adds enough entropy to ensure
-// non-determinism of iteration order in most cases.
-inline size_t PerTableSalt(const ctrl_t* ctrl) {
-  // The low bits of the pointer have little or no entropy because of
-  // alignment. We shift the pointer to try to use higher entropy bits. A
-  // good number seems to be 12 bits, because that aligns with page size.
-  return reinterpret_cast<uintptr_t>(ctrl) >> 12;
-}
-// Extracts the H1 portion of a hash: 57 bits mixed with a per-table salt.
-inline size_t H1(size_t hash, const ctrl_t* ctrl) {
-  return (hash >> 7) ^ PerTableSalt(ctrl);
-}
-
-// Extracts the H2 portion of a hash: the 7 bits not used for H1.
-//
-// These are used as an occupied control byte.
-inline h2_t H2(size_t hash) { return hash & 0x7F; }
-
 // When there is an insertion with no reserved growth, we rehash with
 // probability `min(1, RehashProbabilityConstant() / capacity())`. Using a
 // constant divided by capacity ensures that inserting N elements is still O(N)
@@ -513,10 +605,10 @@
   // references. We rehash on the first insertion after reserved_growth_ reaches
   // 0 after a call to reserve. We also do a rehash with low probability
   // whenever reserved_growth_ is zero.
-  bool should_rehash_for_bug_detection_on_insert(const ctrl_t* ctrl,
+  bool should_rehash_for_bug_detection_on_insert(PerTableSeed seed,
                                                  size_t capacity) const;
   // Similar to above, except that we don't depend on reserved_growth_.
-  bool should_rehash_for_bug_detection_on_move(const ctrl_t* ctrl,
+  bool should_rehash_for_bug_detection_on_move(PerTableSeed seed,
                                                size_t capacity) const;
   void maybe_increment_generation_on_insert() {
     if (reserved_growth_ == kReservedGrowthJustRanOut) reserved_growth_ = 0;
@@ -570,10 +662,10 @@
   CommonFieldsGenerationInfoDisabled& operator=(
       CommonFieldsGenerationInfoDisabled&&) = default;
 
-  bool should_rehash_for_bug_detection_on_insert(const ctrl_t*, size_t) const {
+  bool should_rehash_for_bug_detection_on_insert(PerTableSeed, size_t) const {
     return false;
   }
-  bool should_rehash_for_bug_detection_on_move(const ctrl_t*, size_t) const {
+  bool should_rehash_for_bug_detection_on_move(PerTableSeed, size_t) const {
     return false;
   }
   void maybe_increment_generation_on_insert() {}
@@ -783,23 +875,6 @@
 
 struct HashtableFreeFunctionsAccess;
 
-// We only allow a maximum of 1 SOO element, which makes the implementation
-// much simpler. Complications with multiple SOO elements include:
-// - Satisfying the guarantee that erasing one element doesn't invalidate
-//   iterators to other elements means we would probably need actual SOO
-//   control bytes.
-// - In order to prevent user code from depending on iteration order for small
-//   tables, we would need to randomize the iteration order somehow.
-constexpr size_t SooCapacity() { return 1; }
-// Sentinel type to indicate SOO CommonFields construction.
-struct soo_tag_t {};
-// Sentinel type to indicate SOO CommonFields construction with full size.
-struct full_soo_tag_t {};
-// Sentinel type to indicate non-SOO CommonFields construction.
-struct non_soo_tag_t {};
-// Sentinel value to indicate an uninitialized CommonFields for use in swapping.
-struct uninitialized_tag_t {};
-
 // Suppress erroneous uninitialized memory errors on GCC. For example, GCC
 // thinks that the call to slot_array() in find_or_prepare_insert() is reading
 // uninitialized memory, but slot_array is only called there when the table is
@@ -827,7 +902,7 @@
 };
 
 struct HeapPtrs {
-  HeapPtrs() = default;
+  explicit HeapPtrs(uninitialized_tag_t) {}
   explicit HeapPtrs(ctrl_t* c) : control(c) {}
 
   // The control bytes (and, also, a pointer near to the base of the backing
@@ -852,7 +927,7 @@
 // Manages the backing array pointers or the SOO slot. When raw_hash_set::is_soo
 // is true, the SOO slot is stored in `soo_data`. Otherwise, we use `heap`.
 union HeapOrSoo {
-  HeapOrSoo() = default;
+  explicit HeapOrSoo(uninitialized_tag_t) : heap(uninitialized_tag_t{}) {}
   explicit HeapOrSoo(ctrl_t* c) : heap(c) {}
 
   ctrl_t*& control() {
@@ -883,13 +958,21 @@
 // of this state to helper functions as a single argument.
 class CommonFields : public CommonFieldsGenerationInfo {
  public:
-  explicit CommonFields(soo_tag_t) : capacity_(SooCapacity()), size_(0) {}
+  explicit CommonFields(soo_tag_t)
+      : capacity_(SooCapacity()),
+        size_(no_seed_empty_tag_t{}),
+        heap_or_soo_(uninitialized_tag_t{}) {}
   explicit CommonFields(full_soo_tag_t)
-      : capacity_(SooCapacity()), size_(size_t{1} << HasInfozShift()) {}
+      : capacity_(SooCapacity()),
+        size_(full_soo_tag_t{}),
+        heap_or_soo_(uninitialized_tag_t{}) {}
   explicit CommonFields(non_soo_tag_t)
-      : capacity_(0), size_(0), heap_or_soo_(EmptyGroup()) {}
+      : capacity_(0),
+        size_(no_seed_empty_tag_t{}),
+        heap_or_soo_(EmptyGroup()) {}
   // For use in swapping.
-  explicit CommonFields(uninitialized_tag_t) {}
+  explicit CommonFields(uninitialized_tag_t)
+      : size_(uninitialized_tag_t{}), heap_or_soo_(uninitialized_tag_t{}) {}
 
   // Not copyable
   CommonFields(const CommonFields&) = delete;
@@ -917,7 +1000,20 @@
   void* soo_data() { return heap_or_soo_.get_soo_data(); }
 
   ctrl_t* control() const { return heap_or_soo_.control(); }
-  void set_control(ctrl_t* c) { heap_or_soo_.control() = c; }
+
+  // When we set the control bytes, we also often want to generate a new seed.
+  // So we bundle these two operations together to make sure we don't forget to
+  // generate a new seed.
+  // The table will be invalidated if
+  // `kGenerateSeed && !empty() && !is_single_group(capacity())` because H1 is
+  // being changed. In such cases, we will need to rehash the table.
+  template <bool kGenerateSeed>
+  void set_control(ctrl_t* c) {
+    heap_or_soo_.control() = c;
+    if constexpr (kGenerateSeed) {
+      generate_new_seed();
+    }
+  }
   void* backing_array_start() const {
     // growth_info (and maybe infoz) is stored before control bytes.
     ABSL_SWISSTABLE_ASSERT(
@@ -931,27 +1027,38 @@
   void set_slots(void* s) { heap_or_soo_.slot_array().set(s); }
 
   // The number of filled slots.
-  size_t size() const { return size_ >> HasInfozShift(); }
-  void set_size(size_t s) {
-    size_ = (s << HasInfozShift()) | (size_ & HasInfozMask());
-  }
+  size_t size() const { return size_.size(); }
+  // Sets the size to zero, but keeps the has_infoz bit and the seed.
+  void set_size_to_zero() { size_.set_size_to_zero_keep_metadata(); }
   void set_empty_soo() {
     AssertInSooMode();
-    size_ = 0;
+    size_ = HashtableSize(no_seed_empty_tag_t{});
   }
   void set_full_soo() {
     AssertInSooMode();
-    size_ = size_t{1} << HasInfozShift();
+    size_ = HashtableSize(full_soo_tag_t{});
   }
   void increment_size() {
     ABSL_SWISSTABLE_ASSERT(size() < capacity());
-    size_ += size_t{1} << HasInfozShift();
+    size_.increment_size();
+  }
+  void increment_size(size_t n) {
+    ABSL_SWISSTABLE_ASSERT(size() + n <= capacity());
+    size_.increment_size(n);
   }
   void decrement_size() {
     ABSL_SWISSTABLE_ASSERT(!empty());
-    size_ -= size_t{1} << HasInfozShift();
+    size_.decrement_size();
   }
-  bool empty() const { return size() == 0; }
+  bool empty() const { return size_.empty(); }
+
+  // The seed used for the H1 part of the hash function.
+  PerTableSeed seed() const { return size_.seed(); }
+  // Generates a new seed for the H1 part of the hash function.
+  // The table will be invalidated if
+  // `kGenerateSeed && !empty() && !is_single_group(capacity())` because H1 is
+  // being changed. In such cases, we will need to rehash the table.
+  void generate_new_seed() { size_.generate_new_seed(); }
 
   // The total number of available slots.
   size_t capacity() const { return capacity_; }
@@ -978,12 +1085,8 @@
     return const_cast<CommonFields*>(this)->growth_info();
   }
 
-  bool has_infoz() const {
-    return ABSL_PREDICT_FALSE((size_ & HasInfozMask()) != 0);
-  }
-  void set_has_infoz(bool has_infoz) {
-    size_ = (size() << HasInfozShift()) | static_cast<size_t>(has_infoz);
-  }
+  bool has_infoz() const { return size_.has_infoz(); }
+  void set_has_infoz() { size_.set_has_infoz(); }
 
   HashtablezInfoHandle infoz() {
     return has_infoz()
@@ -996,12 +1099,18 @@
   }
 
   bool should_rehash_for_bug_detection_on_insert() const {
+    if constexpr (!SwisstableGenerationsEnabled()) {
+      return false;
+    }
+    // As an optimization, we avoid calling ShouldRehashForBugDetection if we
+    // will end up rehashing anyways.
+    if (growth_left() == 0) return false;
     return CommonFieldsGenerationInfo::
-        should_rehash_for_bug_detection_on_insert(control(), capacity());
+        should_rehash_for_bug_detection_on_insert(seed(), capacity());
   }
   bool should_rehash_for_bug_detection_on_move() const {
     return CommonFieldsGenerationInfo::should_rehash_for_bug_detection_on_move(
-        control(), capacity());
+        seed(), capacity());
   }
   void reset_reserved_growth(size_t reservation) {
     CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size());
@@ -1063,11 +1172,10 @@
   // regressions, presumably because we need capacity to do find operations.
   size_t capacity_;
 
-  // The size and also has one bit that stores whether we have infoz.
   // TODO(b/289225379): we could put size_ into HeapOrSoo and make capacity_
   // encode the size in SOO case. We would be making size()/capacity() more
   // expensive in order to have more SOO space.
-  size_t size_;
+  HashtableSize size_;
 
   // Either the control/slots pointers or the SOO slot.
   HeapOrSoo heap_or_soo_;
@@ -1097,14 +1205,6 @@
   return n ? ~size_t{} >> countl_zero(n) : 1;
 }
 
-constexpr size_t MaxValidCapacity(size_t slot_size) {
-  return NormalizeCapacity((std::numeric_limits<size_t>::max)() / 4 /
-                           slot_size);
-}
-
-// Use a non-inlined function to avoid code bloat.
-[[noreturn]] void HashTableSizeOverflow();
-
 // General notes on capacity/growth methods below:
 // - We use 7/8th as maximum load factor. For 16-wide groups, that gives an
 //   average of two empty slots per group.
@@ -1115,7 +1215,7 @@
 
 // Given `capacity`, applies the load factor; i.e., it returns the maximum
 // number of values we should put into the table before a resizing rehash.
-inline size_t CapacityToGrowth(size_t capacity) {
+constexpr size_t CapacityToGrowth(size_t capacity) {
   ABSL_SWISSTABLE_ASSERT(IsValidCapacity(capacity));
   // `capacity*7/8`
   if (Group::kWidth == 8 && capacity == 7) {
@@ -1340,12 +1440,12 @@
 }
 
 // Begins a probing operation on `common.control`, using `hash`.
-inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, const size_t capacity,
+inline probe_seq<Group::kWidth> probe(PerTableSeed seed, const size_t capacity,
                                       size_t hash) {
-  return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
+  return probe_seq<Group::kWidth>(H1(hash, seed), capacity);
 }
 inline probe_seq<Group::kWidth> probe(const CommonFields& common, size_t hash) {
-  return probe(common.control(), common.capacity(), hash);
+  return probe(common.seed(), common.capacity(), hash);
 }
 
 // Probes an array of control bits using a probe sequence derived from `hash`,
@@ -1359,8 +1459,9 @@
 inline FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
   auto seq = probe(common, hash);
   const ctrl_t* ctrl = common.control();
+  const PerTableSeed seed = common.seed();
   if (IsEmptyOrDeleted(ctrl[seq.offset()]) &&
-      !ShouldInsertBackwards(common.capacity(), hash, ctrl)) {
+      !ShouldInsertBackwards(common.capacity(), hash, seed)) {
     return {seq.offset(), /*probe_length=*/0};
   }
   while (true) {
@@ -1368,7 +1469,7 @@
     auto mask = g.MaskEmptyOrDeleted();
     if (mask) {
       return {
-          seq.offset(GetInsertionOffset(mask, common.capacity(), hash, ctrl)),
+          seq.offset(GetInsertionOffset(mask, common.capacity(), hash, seed)),
           seq.index()};
     }
     seq.next();
@@ -1572,6 +1673,53 @@
                                                   size_t old_capacity);
 };
 
+// Returns the maximum valid size for a table with 1-byte slots.
+// This function is a utility shared by MaxValidSize and IsAboveValidSize.
+// Template parameter is only used to enable testing.
+template <size_t kSizeOfSizeT = sizeof(size_t)>
+constexpr size_t MaxValidSizeFor1ByteSlot() {
+  if constexpr (kSizeOfSizeT == 8) {
+    return CapacityToGrowth(
+        static_cast<size_t>(uint64_t{1} << HashtableSize::kSizeBitCount) - 1);
+  } else {
+    static_assert(kSizeOfSizeT == 4);
+    return CapacityToGrowth((size_t{1} << (kSizeOfSizeT * 8 - 2)) - 1);
+  }
+}
+
+// Returns the maximum valid size for a table with provided slot size.
+// Template parameter is only used to enable testing.
+template <size_t kSizeOfSizeT = sizeof(size_t)>
+constexpr size_t MaxValidSize(size_t slot_size) {
+  if constexpr (kSizeOfSizeT == 8) {
+    // For small slot sizes we are limited by HashtableSize::kSizeBitCount.
+    if (slot_size < size_t{1} << (64 - HashtableSize::kSizeBitCount)) {
+      return MaxValidSizeFor1ByteSlot<kSizeOfSizeT>();
+    }
+    return (size_t{1} << (kSizeOfSizeT * 8 - 2)) / slot_size;
+  } else {
+    return MaxValidSizeFor1ByteSlot<kSizeOfSizeT>() / slot_size;
+  }
+}
+
+// Returns true if size is larger than the maximum valid size.
+// It is an optimization to avoid the division operation in the common case.
+// Template parameter is only used to enable testing.
+template <size_t kSizeOfSizeT = sizeof(size_t)>
+constexpr bool IsAboveValidSize(size_t size, size_t slot_size) {
+  if constexpr (kSizeOfSizeT == 8) {
+    // For small slot sizes we are limited by HashtableSize::kSizeBitCount.
+    if (ABSL_PREDICT_TRUE(slot_size <
+                          (size_t{1} << (64 - HashtableSize::kSizeBitCount)))) {
+      return size > MaxValidSizeFor1ByteSlot<kSizeOfSizeT>();
+    }
+    return size > MaxValidSize<kSizeOfSizeT>(slot_size);
+  } else {
+    return uint64_t{size} * slot_size >
+           MaxValidSizeFor1ByteSlot<kSizeOfSizeT>();
+  }
+}
+
 // Returns the index of the SOO slot when growing from SOO to non-SOO in a
 // single group. See also InitializeSmallControlBytesAfterSoo(). It's important
 // to use index 1 so that when resizing from capacity 1 to 3, we can still have
@@ -1584,14 +1732,20 @@
 // Allowing till 16 would require additional store that can be avoided.
 constexpr size_t MaxSmallAfterSooCapacity() { return 7; }
 
-// Resizes empty non-allocated table to the new capacity.
+// Resizes empty non-allocated table to the capacity to fit new_size elements.
 // Requires:
 //   1. `c.capacity() == policy.soo_capacity`.
 //   2. `c.empty()`.
-//   3. `new_capacity > policy.soo_capacity`.
+//   3. `new_size > policy.soo_capacity`.
 // The table will be attempted to be sampled.
-void ResizeEmptyNonAllocatedTable(CommonFields& common, size_t new_capacity,
-                                  const PolicyFunctions& policy);
+void ReserveEmptyNonAllocatedTableToFitNewSize(CommonFields& common,
+                                               size_t new_size,
+                                               const PolicyFunctions& policy);
+
+// The same as ReserveEmptyNonAllocatedTableToFitNewSize, but resizes to the
+// next valid capacity after `bucket_count`.
+void ReserveEmptyNonAllocatedTableToFitBucketCount(
+    CommonFields& common, size_t bucket_count, const PolicyFunctions& policy);
 
 // Resizes empty non-allocated SOO table to NextCapacity(SooCapacity()) and
 // forces the table to be sampled.
@@ -1745,7 +1899,7 @@
     static_assert(SooSlotMemcpySize == 0);
     policy.transfer(&common, target_slot, common.soo_data(), 1);
   }
-  common.set_control(new_ctrl);
+  common.set_control</*kGenerateSeed=*/true>(new_ctrl);
   common.set_slots(new_slots);
 
   ResetGrowthLeft(common);
@@ -1757,11 +1911,12 @@
 void GrowFullSooTableToNextCapacityForceSampling(CommonFields& common,
                                                  const PolicyFunctions& policy);
 
-// Resizes table with allocated slots. Tables with SOO enabled must have
-// capacity > policy.soo_capacity.
+// Resizes table with allocated slots and change the table seed.
+// Tables with SOO enabled must have capacity > policy.soo_capacity.
 // No sampling will be performed since table is already allocated.
-void ResizeAllocatedTable(CommonFields& common, size_t new_capacity,
-                          const PolicyFunctions& policy);
+void ResizeAllocatedTableWithSeedChange(CommonFields& common,
+                                        size_t new_capacity,
+                                        const PolicyFunctions& policy);
 
 inline void PrepareInsertCommon(CommonFields& common) {
   common.increment_size();
@@ -2125,8 +2280,8 @@
       : settings_(CommonFields::CreateDefault<SooEnabled()>(), hash, eq,
                   alloc) {
     if (bucket_count > DefaultCapacity()) {
-      ResizeEmptyNonAllocatedTable(common(), NormalizeCapacity(bucket_count),
-                                   GetPolicyFunctions());
+      ReserveEmptyNonAllocatedTableToFitBucketCount(common(), bucket_count,
+                                                    GetPolicyFunctions());
     }
   }
 
@@ -2262,7 +2417,7 @@
     //     `capacity + 1` (2^N).
     size_t offset = cap;
     const size_t shift =
-        is_single_group(cap) ? (PerTableSalt(control()) | 1) : 0;
+        is_single_group(cap) ? (common().seed().seed() | 1) : 0;
     IterateOverFullSlots(
         that.common(), sizeof(slot_type),
         [&](const ctrl_t* that_ctrl, void* that_slot_void) {
@@ -2295,7 +2450,7 @@
       // RecordInsert requires hash, but it is unknown for small tables.
       infoz().RecordStorageChanged(size, cap);
     }
-    common().set_size(size);
+    common().increment_size(size);
     growth_info().OverwriteManyEmptyAsFull(size);
   }
 
@@ -2402,9 +2557,7 @@
     ABSL_ASSUME(cap >= kDefaultCapacity);
     return cap;
   }
-  size_t max_size() const {
-    return CapacityToGrowth(MaxValidCapacity(sizeof(slot_type)));
-  }
+  size_t max_size() const { return MaxValidSize(sizeof(slot_type)); }
 
   ABSL_ATTRIBUTE_REINITIALIZES void clear() {
     if (SwisstableGenerationsEnabled() &&
@@ -2813,16 +2966,10 @@
       ReserveAllocatedTable(common(), n, GetPolicyFunctions());
     } else {
       if (ABSL_PREDICT_TRUE(n > DefaultCapacity())) {
-        ResizeEmptyNonAllocatedTable(
-            common(), NormalizeCapacity(GrowthToLowerboundCapacity(n)),
-            GetPolicyFunctions());
-        // This is after resize, to ensure that we have completed the allocation
-        // and have potentially sampled the hashtable.
-        infoz().RecordReservation(n);
+        ReserveEmptyNonAllocatedTableToFitNewSize(common(), n,
+                                                  GetPolicyFunctions());
       }
     }
-    common().reset_reserved_growth(n);
-    common().set_reservation_size(n);
   }
 
   // Extension API: support for heterogeneous keys.
@@ -3214,7 +3361,8 @@
     }
     common().increment_generation();
     if (!empty() && common().should_rehash_for_bug_detection_on_move()) {
-      ResizeAllocatedTable(common(), capacity(), GetPolicyFunctions());
+      ResizeAllocatedTableWithSeedChange(common(), capacity(),
+                                         GetPolicyFunctions());
     }
   }
 
@@ -3307,7 +3455,7 @@
       auto mask_empty = g.MaskEmpty();
       if (ABSL_PREDICT_TRUE(mask_empty)) {
         size_t target = seq.offset(
-            GetInsertionOffset(mask_empty, capacity(), hash, control()));
+            GetInsertionOffset(mask_empty, capacity(), hash, common().seed()));
         return {iterator_at(PrepareInsertNonSoo(common(), hash,
                                                 GetPolicyFunctions(),
                                                 FindInfo{target, seq.index()})),
@@ -3558,10 +3706,15 @@
   }
 
   static const PolicyFunctions& GetPolicyFunctions() {
-    static_assert(sizeof(slot_type) <= (std::numeric_limits<uint32_t>::max)());
-    static_assert(alignof(slot_type) <= (std::numeric_limits<uint16_t>::max)());
-    static_assert(sizeof(key_type) <= (std::numeric_limits<uint32_t>::max)());
-    static_assert(sizeof(value_type) <= (std::numeric_limits<uint32_t>::max)());
+    static_assert(sizeof(slot_type) <= (std::numeric_limits<uint32_t>::max)(),
+                  "Slot size is too large. Use std::unique_ptr for value type "
+                  "or use absl::node_hash_{map,set}.");
+    static_assert(alignof(slot_type) <=
+                  size_t{(std::numeric_limits<uint16_t>::max)()});
+    static_assert(sizeof(key_type) <=
+                  size_t{(std::numeric_limits<uint32_t>::max)()});
+    static_assert(sizeof(value_type) <=
+                  size_t{(std::numeric_limits<uint32_t>::max)()});
     static constexpr size_t kBackingArrayAlignment =
         BackingArrayAlignment(alignof(slot_type));
     static constexpr PolicyFunctions value = {
@@ -3721,6 +3874,21 @@
 };
 
 }  // namespace hashtable_debug_internal
+
+// Extern template instantiations reduce binary size and linker input size.
+extern template void GrowFullSooTableToNextCapacity<0, false>(
+    CommonFields&, size_t, const PolicyFunctions&);
+extern template void GrowFullSooTableToNextCapacity<1, true>(
+    CommonFields&, size_t, const PolicyFunctions&);
+extern template void GrowFullSooTableToNextCapacity<4, true>(
+    CommonFields&, size_t, const PolicyFunctions&);
+extern template void GrowFullSooTableToNextCapacity<8, true>(
+    CommonFields&, size_t, const PolicyFunctions&);
+#if UINTPTR_MAX == UINT64_MAX
+extern template void GrowFullSooTableToNextCapacity<16, true>(
+    CommonFields&, size_t, const PolicyFunctions&);
+#endif
+
 }  // namespace container_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 2cd9be2..7a5edd6 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -19,6 +19,7 @@
 #include <algorithm>
 #include <array>
 #include <atomic>
+#include <bitset>
 #include <cmath>
 #include <cstddef>
 #include <cstdint>
@@ -68,6 +69,7 @@
 #include "absl/log/log.h"
 #include "absl/memory/memory.h"
 #include "absl/meta/type_traits.h"
+#include "absl/numeric/int128.h"
 #include "absl/strings/str_cat.h"
 #include "absl/strings/string_view.h"
 #include "absl/types/optional.h"
@@ -928,16 +930,16 @@
   static_assert(std::is_empty<std::allocator<int>>::value, "");
 
   struct MockTable {
+    size_t capacity;
+    uint64_t size;
     void* ctrl;
     void* slots;
-    size_t size;
-    size_t capacity;
   };
   struct StatelessHash {
     size_t operator()(absl::string_view) const { return 0; }
   };
   struct StatefulHash : StatelessHash {
-    size_t dummy;
+    uint64_t dummy;
   };
 
   struct GenerationData {
@@ -1550,14 +1552,14 @@
 
 TYPED_TEST(SooTest, InsertEraseStressTest) {
   TypeParam t;
-  const size_t kMinElementCount = 250;
+  const size_t kMinElementCount = 50;
   std::deque<int> keys;
   size_t i = 0;
   for (; i < MaxDensitySize(kMinElementCount); ++i) {
     t.emplace(static_cast<int64_t>(i));
     keys.push_back(i);
   }
-  const size_t kNumIterations = 1000000;
+  const size_t kNumIterations = 20000;
   for (; i < kNumIterations; ++i) {
     ASSERT_EQ(1, t.erase(keys.front()));
     keys.pop_front();
@@ -1579,8 +1581,8 @@
 
 TYPED_TEST(SooTest, LargeTable) {
   TypeParam t;
-  for (int64_t i = 0; i != 100000; ++i) t.emplace(i << 40);
-  for (int64_t i = 0; i != 100000; ++i)
+  for (int64_t i = 0; i != 10000; ++i) t.emplace(i << 40);
+  for (int64_t i = 0; i != 10000; ++i)
     ASSERT_EQ(i << 40, static_cast<int64_t>(*t.find(i << 40)));
 }
 
@@ -2699,38 +2701,41 @@
   return res;
 }
 
+// Generate a few irrelevant seeds so that successive attempts are not
+// stuck with the same low bits of the seed.
+void GenerateIrrelevantSeeds(int cnt) {
+  for (int i = cnt % 17; i > 0; --i) {
+    NextSeedBaseNumber();
+  }
+}
+
 // These IterationOrderChanges tests depend on non-deterministic behavior.
-// We are injecting non-determinism from the pointer of the table, but do so in
-// a way that only the page matters. We have to retry enough times to make sure
-// we are touching different memory pages to cause the ordering to change.
-// We also need to keep the old tables around to avoid getting the same memory
-// blocks over and over.
+// We are injecting non-determinism into the table.
+// We have to retry enough times to make sure that the seed changes in bits
+// that matter for the iteration order.
 TYPED_TEST(SooTest, IterationOrderChangesByInstance) {
+  DisableSampling();  // We do not want test to pass only because of sampling.
   for (bool do_reserve : {false, true}) {
     for (size_t size : {2u, 6u, 12u, 20u}) {
       SCOPED_TRACE(absl::StrCat("size: ", size, " do_reserve: ", do_reserve));
       const auto reference_table = MakeSimpleTable<TypeParam>(size, do_reserve);
       const auto reference = OrderOfIteration(reference_table);
 
-      std::vector<TypeParam> tables;
       bool found_difference = false;
-      for (int i = 0; !found_difference && i < 5000; ++i) {
-        tables.push_back(MakeSimpleTable<TypeParam>(size, do_reserve));
-        found_difference = OrderOfIteration(tables.back()) != reference;
+      for (int i = 0; !found_difference && i < 500; ++i) {
+        auto new_table = MakeSimpleTable<TypeParam>(size, do_reserve);
+        found_difference = OrderOfIteration(new_table) != reference;
+        GenerateIrrelevantSeeds(i);
       }
       if (!found_difference) {
-        FAIL() << "Iteration order remained the same across many attempts with "
-                  "size "
-               << size;
+        FAIL() << "Iteration order remained the same across many attempts.";
       }
     }
   }
 }
 
 TYPED_TEST(SooTest, IterationOrderChangesOnRehash) {
-#ifdef ABSL_HAVE_ADDRESS_SANITIZER
-  GTEST_SKIP() << "Hash quality is lower in asan mode, causing flakiness.";
-#endif
+  DisableSampling();  // We do not want test to pass only because of sampling.
 
   // We test different sizes with many small numbers, because small table
   // resize has a different codepath.
@@ -2743,11 +2748,17 @@
            }) {
         SCOPED_TRACE(absl::StrCat("size: ", size, " rehash_size: ", rehash_size,
                                   " do_reserve: ", do_reserve));
-        std::vector<TypeParam> garbage;
         bool ok = false;
-        for (int i = 0; i < 5000; ++i) {
-          auto t = MakeSimpleTable<TypeParam>(size, do_reserve);
-          const auto reference = OrderOfIteration(t);
+        auto t = MakeSimpleTable<TypeParam>(size, do_reserve);
+        const size_t original_capacity = t.capacity();
+        auto reference = OrderOfIteration(t);
+        for (int i = 0; i < 500; ++i) {
+          if (i > 0 && rehash_size != 0) {
+            // Rehash back to original size.
+            t.rehash(0);
+            ASSERT_EQ(t.capacity(), original_capacity);
+            reference = OrderOfIteration(t);
+          }
           // Force rehash.
           t.rehash(rehash_size);
           auto trial = OrderOfIteration(t);
@@ -2756,7 +2767,7 @@
             ok = true;
             break;
           }
-          garbage.push_back(std::move(t));
+          GenerateIrrelevantSeeds(i);
         }
         EXPECT_TRUE(ok)
             << "Iteration order remained the same across many attempts " << size
@@ -3119,7 +3130,7 @@
   start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });
 
   std::vector<CustomAllocIntTable> tables;
-  for (int i = 0; i < 1000000; ++i) {
+  for (int i = 0; i < 100000; ++i) {
     tables.emplace_back();
     tables.back().insert(1);
   }
@@ -3177,28 +3188,27 @@
   // first slot when alignof(value_type) is 1. We test repeated
   // insertions/erases and verify that the behavior is correct.
   TypeParam t;
-  std::unordered_set<uint8_t> verifier;  // NOLINT
+  std::bitset<256> verifier;
 
   // Do repeated insertions/erases from the table.
-  for (int64_t i = 0; i < 100000; ++i) {
+  for (int64_t i = 0; i < 10000; ++i) {
     SCOPED_TRACE(i);
     const uint8_t u = (i * -i) & 0xFF;
     auto it = t.find(u);
-    auto verifier_it = verifier.find(u);
     if (it == t.end()) {
-      ASSERT_EQ(verifier_it, verifier.end());
+      ASSERT_FALSE(verifier.test(u));
       t.insert(u);
-      verifier.insert(u);
+      verifier.set(u);
     } else {
-      ASSERT_NE(verifier_it, verifier.end());
+      ASSERT_TRUE(verifier.test(u));
       t.erase(it);
-      verifier.erase(verifier_it);
+      verifier.reset(u);
     }
   }
 
-  EXPECT_EQ(t.size(), verifier.size());
+  EXPECT_EQ(t.size(), verifier.count());
   for (uint8_t u : t) {
-    EXPECT_EQ(verifier.count(u), 1);
+    ASSERT_TRUE(verifier.test(u));
   }
 }
 
@@ -4039,12 +4049,58 @@
   }
 }
 
+TEST(Table, MaxValidSize) {
+  IntTable t;
+  EXPECT_EQ(MaxValidSize(sizeof(IntTable::value_type)), t.max_size());
+  if constexpr (sizeof(size_t) == 8) {
+    for (size_t i = 0; i < 35; ++i) {
+      SCOPED_TRACE(i);
+      size_t slot_size = size_t{1} << i;
+      size_t max_size = MaxValidSize(slot_size);
+      ASSERT_FALSE(IsAboveValidSize(max_size, slot_size));
+      ASSERT_TRUE(IsAboveValidSize(max_size + 1, slot_size));
+      ASSERT_LT(max_size, uint64_t{1} << 60);
+      // For non-gigantic slot sizes we expect max size to be at least 2^40.
+      if (i <= 22) {
+        ASSERT_FALSE(IsAboveValidSize(size_t{1} << 40, slot_size));
+        ASSERT_GE(max_size, uint64_t{1} << 40);
+      }
+      ASSERT_LT(NormalizeCapacity(GrowthToLowerboundCapacity(max_size)),
+                uint64_t{1} << HashtableSize::kSizeBitCount);
+      ASSERT_LT(absl::uint128(max_size) * slot_size, uint64_t{1} << 63);
+    }
+  }
+  EXPECT_LT(MaxValidSize</*kSizeOfSizeT=*/4>(1), 1 << 30);
+  EXPECT_LT(MaxValidSize</*kSizeOfSizeT=*/4>(2), 1 << 29);
+  for (size_t i = 0; i < 29; ++i) {
+    size_t slot_size = size_t{1} << i;
+    size_t max_size = MaxValidSize</*kSizeOfSizeT=*/4>(slot_size);
+    ASSERT_FALSE(IsAboveValidSize</*kSizeOfSizeT=*/4>(max_size, slot_size));
+    ASSERT_TRUE(IsAboveValidSize</*kSizeOfSizeT=*/4>(max_size + 1, slot_size));
+    ASSERT_LT(max_size, 1 << 30);
+    size_t max_capacity =
+        NormalizeCapacity(GrowthToLowerboundCapacity(max_size));
+    ASSERT_LT(max_capacity, (size_t{1} << 31) / slot_size);
+    ASSERT_GT(max_capacity, (1 << 29) / slot_size);
+    ASSERT_LT(max_capacity * slot_size, size_t{1} << 31);
+  }
+}
+
 TEST(Table, MaxSizeOverflow) {
   size_t overflow = (std::numeric_limits<size_t>::max)();
   EXPECT_DEATH_IF_SUPPORTED(IntTable t(overflow), "Hash table size overflow");
   IntTable t;
   EXPECT_DEATH_IF_SUPPORTED(t.reserve(overflow), "Hash table size overflow");
   EXPECT_DEATH_IF_SUPPORTED(t.rehash(overflow), "Hash table size overflow");
+  size_t slightly_overflow = MaxValidSize(sizeof(IntTable::value_type)) + 1;
+  size_t slightly_overflow_capacity =
+      NextCapacity(NormalizeCapacity(slightly_overflow));
+  EXPECT_DEATH_IF_SUPPORTED(IntTable t2(slightly_overflow_capacity - 10),
+                            "Hash table size overflow");
+  EXPECT_DEATH_IF_SUPPORTED(t.reserve(slightly_overflow),
+                            "Hash table size overflow");
+  EXPECT_DEATH_IF_SUPPORTED(t.rehash(slightly_overflow),
+                            "Hash table size overflow");
 }
 
 // TODO(b/397453582): Remove support for const hasher and ermove this test.
diff --git a/absl/log/BUILD.bazel b/absl/log/BUILD.bazel
index 2a9a66d..62ece45 100644
--- a/absl/log/BUILD.bazel
+++ b/absl/log/BUILD.bazel
@@ -491,6 +491,7 @@
         ":check",
         ":log",
         ":scoped_mock_log",
+        "//absl/base:config",
         "//absl/log/internal:test_matchers",
         "//absl/strings",
         "//absl/strings:str_format",
diff --git a/absl/log/CMakeLists.txt b/absl/log/CMakeLists.txt
index 01e2b28..949d442 100644
--- a/absl/log/CMakeLists.txt
+++ b/absl/log/CMakeLists.txt
@@ -1005,6 +1005,7 @@
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
     absl::check
+    absl::config
     absl::log
     absl::log_internal_test_matchers
     absl::optional
diff --git a/absl/log/log_format_test.cc b/absl/log/log_format_test.cc
index 7a75ca4..ecd6968 100644
--- a/absl/log/log_format_test.cc
+++ b/absl/log/log_format_test.cc
@@ -690,12 +690,21 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << value;
 
+  // https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2021/p1147r1.html
+#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 202302L
+  EXPECT_CALL(
+      test_sink,
+      Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                 TextMessage(AnyOf(Eq("(nil)"), Eq("0"), Eq("0x0"),
+                                   Eq("00000000"), Eq("0000000000000000"))))));
+#else
   EXPECT_CALL(
       test_sink,
       Send(AllOf(
           TextMessage(MatchesOstream(comparison_stream)),
           TextMessage(Eq("false")),
           ENCODED_MESSAGE(HasValues(ElementsAre(ValueWithStr(Eq("false"))))))));
+#endif
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
@@ -708,12 +717,23 @@
   auto comparison_stream = ComparisonStream();
   comparison_stream << value;
 
+  // https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2021/p1147r1.html
+#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 202302L
+  EXPECT_CALL(test_sink,
+              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
+                         TextMessage(AnyOf(Eq("0xdeadbeef"), Eq("DEADBEEF"),
+                                           Eq("00000000DEADBEEF"))),
+                         ENCODED_MESSAGE(HasValues(ElementsAre(
+                             AnyOf(ValueWithStr(Eq("0xdeadbeef")),
+                                   ValueWithStr(Eq("00000000DEADBEEF")))))))));
+#else
   EXPECT_CALL(
       test_sink,
       Send(AllOf(
           TextMessage(MatchesOstream(comparison_stream)),
           TextMessage(Eq("true")),
           ENCODED_MESSAGE(HasValues(ElementsAre(ValueWithStr(Eq("true"))))))));
+#endif
 
   test_sink.StartCapturingLogs();
   LOG(INFO) << value;
diff --git a/absl/numeric/BUILD.bazel b/absl/numeric/BUILD.bazel
index 7b92eed..4503a15 100644
--- a/absl/numeric/BUILD.bazel
+++ b/absl/numeric/BUILD.bazel
@@ -41,6 +41,7 @@
     deps = [
         "//absl/base:config",
         "//absl/base:core_headers",
+        "//absl/base:endian",
     ],
 )
 
diff --git a/absl/numeric/CMakeLists.txt b/absl/numeric/CMakeLists.txt
index da3b6ef..68446b0 100644
--- a/absl/numeric/CMakeLists.txt
+++ b/absl/numeric/CMakeLists.txt
@@ -23,7 +23,9 @@
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
+    absl::config
     absl::core_headers
+    absl::endian
   PUBLIC
 )
 
diff --git a/absl/numeric/bits.h b/absl/numeric/bits.h
index 2871ca8..9a0c229 100644
--- a/absl/numeric/bits.h
+++ b/absl/numeric/bits.h
@@ -27,6 +27,10 @@
 //  http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p1355r2.html
 // P1956R1:
 //  http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p1956r1.pdf
+// P0463R1:
+//  https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2017/p0463r1.html
+// P1272R4:
+//  https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2021/p1272r4.html
 //
 // When using a standard library that implements these functions, we use the
 // standard library's implementation.
@@ -45,6 +49,7 @@
 #endif
 
 #include "absl/base/attributes.h"
+#include "absl/base/internal/endian.h"
 #include "absl/numeric/internal/bits.h"
 
 namespace absl {
@@ -190,6 +195,67 @@
 
 #endif
 
+#if defined(__cpp_lib_endian) && __cpp_lib_endian >= 201907L
+
+// https://en.cppreference.com/w/cpp/types/endian
+//
+// Indicates the endianness of all scalar types:
+//   * If all scalar types are little-endian, `absl::endian::native` equals
+//     absl::endian::little.
+//   * If all scalar types are big-endian, `absl::endian::native` equals
+//     `absl::endian::big`.
+//   * Platforms that use anything else are unsupported.
+using std::endian;
+
+#else
+
+enum class endian {
+  little,
+  big,
+#if defined(ABSL_IS_LITTLE_ENDIAN)
+  native = little
+#elif defined(ABSL_IS_BIG_ENDIAN)
+  native = big
+#else
+#error "Endian detection needs to be set up for this platform"
+#endif
+};
+
+#endif  // defined(__cpp_lib_endian) && __cpp_lib_endian >= 201907L
+
+#if defined(__cpp_lib_byteswap) && __cpp_lib_byteswap >= 202110L
+
+// https://en.cppreference.com/w/cpp/numeric/byteswap
+//
+// Reverses the bytes in the given integer value `x`.
+//
+// `absl::byteswap` participates in overload resolution only if `T` satisfies
+// integral, i.e., `T` is an integer type. The program is ill-formed if `T` has
+// padding bits.
+using std::byteswap;
+
+#else
+
+template <class T>
+[[nodiscard]] constexpr T byteswap(T x) noexcept {
+  static_assert(std::is_integral_v<T>,
+                "byteswap requires an integral argument");
+  static_assert(
+      sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 || sizeof(T) == 8,
+      "byteswap works only with 8, 16, 32, or 64-bit integers");
+  if constexpr (sizeof(T) == 1) {
+    return x;
+  } else if constexpr (sizeof(T) == 2) {
+    return static_cast<T>(gbswap_16(static_cast<uint16_t>(x)));
+  } else if constexpr (sizeof(T) == 4) {
+    return static_cast<T>(gbswap_32(static_cast<uint32_t>(x)));
+  } else if constexpr (sizeof(T) == 8) {
+    return static_cast<T>(gbswap_64(static_cast<uint64_t>(x)));
+  }
+}
+
+#endif  // defined(__cpp_lib_byteswap) && __cpp_lib_byteswap >= 202110L
+
 ABSL_NAMESPACE_END
 }  // namespace absl
 
diff --git a/absl/numeric/bits_test.cc b/absl/numeric/bits_test.cc
index 14955eb..3b71ccc 100644
--- a/absl/numeric/bits_test.cc
+++ b/absl/numeric/bits_test.cc
@@ -14,6 +14,7 @@
 
 #include "absl/numeric/bits.h"
 
+#include <cstdint>
 #include <limits>
 #include <type_traits>
 
@@ -636,6 +637,61 @@
 static_assert(ABSL_INTERNAL_HAS_CONSTEXPR_CTZ, "ctz should be constexpr");
 #endif
 
+TEST(Endian, Comparison) {
+#if defined(ABSL_IS_LITTLE_ENDIAN)
+  static_assert(absl::endian::native == absl::endian::little);
+  static_assert(absl::endian::native != absl::endian::big);
+#endif
+#if defined(ABSL_IS_BIG_ENDIAN)
+  static_assert(absl::endian::native != absl::endian::little);
+  static_assert(absl::endian::native == absl::endian::big);
+#endif
+}
+
+TEST(Byteswap, Constexpr) {
+  static_assert(absl::byteswap<int8_t>(0x12) == 0x12);
+  static_assert(absl::byteswap<int16_t>(0x1234) == 0x3412);
+  static_assert(absl::byteswap<int32_t>(0x12345678) == 0x78563412);
+  static_assert(absl::byteswap<int64_t>(0x123456789abcdef0) ==
+                static_cast<int64_t>(0xf0debc9a78563412));
+  static_assert(absl::byteswap<uint8_t>(0x21) == 0x21);
+  static_assert(absl::byteswap<uint16_t>(0x4321) == 0x2143);
+  static_assert(absl::byteswap<uint32_t>(0x87654321) == 0x21436587);
+  static_assert(absl::byteswap<uint64_t>(0xfedcba9876543210) ==
+                static_cast<uint64_t>(0x1032547698badcfe));
+  static_assert(absl::byteswap<int32_t>(static_cast<int32_t>(0xdeadbeef)) ==
+                static_cast<int32_t>(0xefbeadde));
+}
+
+TEST(Byteswap, NotConstexpr) {
+  int8_t a = 0x12;
+  int16_t b = 0x1234;
+  int32_t c = 0x12345678;
+  int64_t d = 0x123456789abcdef0;
+  uint8_t e = 0x21;
+  uint16_t f = 0x4321;
+  uint32_t g = 0x87654321;
+  uint64_t h = 0xfedcba9876543210;
+  EXPECT_EQ(absl::byteswap<int8_t>(a), 0x12);
+  EXPECT_EQ(absl::byteswap<int16_t>(b), 0x3412);
+  EXPECT_EQ(absl::byteswap(c), 0x78563412);
+  EXPECT_EQ(absl::byteswap(d), 0xf0debc9a78563412);
+  EXPECT_EQ(absl::byteswap<uint8_t>(e), 0x21);
+  EXPECT_EQ(absl::byteswap<uint16_t>(f), 0x2143);
+  EXPECT_EQ(absl::byteswap(g), 0x21436587);
+  EXPECT_EQ(absl::byteswap(h), 0x1032547698badcfe);
+  EXPECT_EQ(absl::byteswap(absl::byteswap<int8_t>(a)), a);
+  EXPECT_EQ(absl::byteswap(absl::byteswap<int16_t>(b)), b);
+  EXPECT_EQ(absl::byteswap(absl::byteswap(c)), c);
+  EXPECT_EQ(absl::byteswap(absl::byteswap(d)), d);
+  EXPECT_EQ(absl::byteswap(absl::byteswap<uint8_t>(e)), e);
+  EXPECT_EQ(absl::byteswap(absl::byteswap<uint16_t>(f)), f);
+  EXPECT_EQ(absl::byteswap(absl::byteswap(g)), g);
+  EXPECT_EQ(absl::byteswap(absl::byteswap(h)), h);
+  EXPECT_EQ(absl::byteswap<uint32_t>(0xdeadbeef), 0xefbeadde);
+  EXPECT_EQ(absl::byteswap<const uint32_t>(0xdeadbeef), 0xefbeadde);
+}
+
 }  // namespace
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/absl/random/BUILD.bazel b/absl/random/BUILD.bazel
index 5ba670e..887ab0f 100644
--- a/absl/random/BUILD.bazel
+++ b/absl/random/BUILD.bazel
@@ -447,6 +447,7 @@
     deps = [
         ":bit_gen_ref",
         ":random",
+        "//absl/base:config",
         "//absl/base:fast_type_id",
         "//absl/random/internal:sequence_urbg",
         "@googletest//:gtest",
diff --git a/absl/random/CMakeLists.txt b/absl/random/CMakeLists.txt
index f9e01b4..e0294a1 100644
--- a/absl/random/CMakeLists.txt
+++ b/absl/random/CMakeLists.txt
@@ -59,6 +59,7 @@
   LINKOPTS
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
+    absl::config
     absl::random_bit_gen_ref
     absl::random_random
     absl::random_internal_sequence_urbg
diff --git a/absl/random/bit_gen_ref.h b/absl/random/bit_gen_ref.h
index 40e7b60..fa36eb7 100644
--- a/absl/random/bit_gen_ref.h
+++ b/absl/random/bit_gen_ref.h
@@ -88,7 +88,7 @@
 //
 class BitGenRef {
   // SFINAE to detect whether the URBG type includes a member matching
-  // bool InvokeMock(base_internal::FastTypeIdType, void*, void*).
+  // bool InvokeMock(key_id, args_tuple*, result*).
   //
   // These live inside BitGenRef so that they have friend access
   // to MockingBitGen. (see similar methods in DistributionCaller).
@@ -158,19 +158,19 @@
 
   // Get a type-erased InvokeMock pointer.
   template <typename URBG>
-  static bool MockCall(uintptr_t gen_ptr, base_internal::FastTypeIdType type,
+  static bool MockCall(uintptr_t gen_ptr, base_internal::FastTypeIdType key_id,
                        void* result, void* arg_tuple) {
-    return reinterpret_cast<URBG*>(gen_ptr)->InvokeMock(type, result,
+    return reinterpret_cast<URBG*>(gen_ptr)->InvokeMock(key_id, result,
                                                         arg_tuple);
   }
   static bool NotAMock(uintptr_t, base_internal::FastTypeIdType, void*, void*) {
     return false;
   }
 
-  inline bool InvokeMock(base_internal::FastTypeIdType type, void* args_tuple,
+  inline bool InvokeMock(base_internal::FastTypeIdType key_id, void* args_tuple,
                          void* result) {
     if (mock_call_ == NotAMock) return false;  // avoids an indirect call.
-    return mock_call_(t_erased_gen_ptr_, type, args_tuple, result);
+    return mock_call_(t_erased_gen_ptr_, key_id, args_tuple, result);
   }
 
   uintptr_t t_erased_gen_ptr_;
diff --git a/absl/random/bit_gen_ref_test.cc b/absl/random/bit_gen_ref_test.cc
index 1135cf2..00876d9 100644
--- a/absl/random/bit_gen_ref_test.cc
+++ b/absl/random/bit_gen_ref_test.cc
@@ -15,8 +15,13 @@
 //
 #include "absl/random/bit_gen_ref.h"
 
+#include <cstdint>
+#include <random>
+#include <vector>
+
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
+#include "absl/base/config.h"
 #include "absl/base/internal/fast_type_id.h"
 #include "absl/random/internal/sequence_urbg.h"
 #include "absl/random/random.h"
@@ -34,7 +39,7 @@
   result_type operator()() { return 1; }
 
   // InvokeMock method
-  bool InvokeMock(base_internal::FastTypeIdType index, void*, void* result) {
+  bool InvokeMock(base_internal::FastTypeIdType, void*, void* result) {
     *static_cast<int*>(result) = 42;
     return true;
   }
diff --git a/absl/random/internal/distribution_caller.h b/absl/random/internal/distribution_caller.h
index 2534ca9..bfe8fac 100644
--- a/absl/random/internal/distribution_caller.h
+++ b/absl/random/internal/distribution_caller.h
@@ -38,7 +38,7 @@
   static_assert(!std::is_pointer<URBG>::value,
                 "You must pass a reference, not a pointer.");
   // SFINAE to detect whether the URBG type includes a member matching
-  // bool InvokeMock(base_internal::FastTypeIdType, void*, void*).
+  // bool InvokeMock(key_id, args_tuple*, result*).
   //
   // These live inside BitGenRef so that they have friend access
   // to MockingBitGen. (see similar methods in DistributionCaller).
@@ -50,8 +50,8 @@
 
   template <class T>
   using invoke_mock_t = decltype(std::declval<T*>()->InvokeMock(
-      std::declval<::absl::base_internal::FastTypeIdType>(),
-      std::declval<void*>(), std::declval<void*>()));
+      std::declval<base_internal::FastTypeIdType>(), std::declval<void*>(),
+      std::declval<void*>()));
 
   using HasInvokeMock = typename detector<invoke_mock_t, void, URBG>::type;
 
@@ -74,7 +74,7 @@
 
     ArgTupleT arg_tuple(std::forward<Args>(args)...);
     ResultT result;
-    if (!urbg->InvokeMock(::absl::base_internal::FastTypeId<KeyT>(), &arg_tuple,
+    if (!urbg->InvokeMock(base_internal::FastTypeId<KeyT>(), &arg_tuple,
                           &result)) {
       auto dist = absl::make_from_tuple<DistrT>(arg_tuple);
       result = dist(*urbg);
diff --git a/absl/random/internal/mock_helpers.h b/absl/random/internal/mock_helpers.h
index 19d0561..b78b251 100644
--- a/absl/random/internal/mock_helpers.h
+++ b/absl/random/internal/mock_helpers.h
@@ -82,7 +82,7 @@
                                                 Args&&... args) {
     ArgTupleT arg_tuple(std::forward<Args>(args)...);
     ReturnT result;
-    if (urbg->InvokeMock(::absl::base_internal::FastTypeId<KeyT>(), &arg_tuple,
+    if (urbg->InvokeMock(base_internal::FastTypeId<KeyT>(), &arg_tuple,
                          &result)) {
       return result;
     }
@@ -92,9 +92,9 @@
  public:
   // InvokeMock is private; this provides access for some specialized use cases.
   template <typename URBG>
-  static inline bool PrivateInvokeMock(URBG* urbg, IdType type,
+  static inline bool PrivateInvokeMock(URBG* urbg, IdType key_id,
                                        void* args_tuple, void* result) {
-    return urbg->InvokeMock(type, args_tuple, result);
+    return urbg->InvokeMock(key_id, args_tuple, result);
   }
 
   // Invoke a mock for the KeyT (may or may not be a signature).
diff --git a/absl/random/mocking_bit_gen.h b/absl/random/mocking_bit_gen.h
index ba7ceae..7cecd88 100644
--- a/absl/random/mocking_bit_gen.h
+++ b/absl/random/mocking_bit_gen.h
@@ -177,7 +177,7 @@
             typename ValidatorT>
   auto RegisterMock(SelfT&, base_internal::FastTypeIdType type, ValidatorT)
       -> decltype(GetMockFnType(std::declval<ResultT>(),
-                                std::declval<ArgTupleT>())) & {
+                                std::declval<ArgTupleT>()))& {
     using MockFnType = decltype(GetMockFnType(std::declval<ResultT>(),
                                               std::declval<ArgTupleT>()));
 
@@ -203,8 +203,8 @@
 
   // MockingBitGen::InvokeMock
   //
-  // InvokeMock(FastTypeIdType, args, result) is the entrypoint for invoking
-  // mocks registered on MockingBitGen.
+  // bool InvokeMock(key_id, args_tuple*, result*) is the entrypoint
+  // for invoking mocks registered on MockingBitGen.
   //
   // When no mocks are registered on the provided FastTypeIdType, returns false.
   // Otherwise attempts to invoke the mock function ResultT(Args...) that
@@ -212,10 +212,10 @@
   // Requires tuple_args to point to a ArgTupleT, which is a std::tuple<Args...>
   // used to invoke the mock function.
   // Requires result to point to a ResultT, which is the result of the call.
-  inline bool InvokeMock(base_internal::FastTypeIdType type, void* args_tuple,
+  inline bool InvokeMock(base_internal::FastTypeIdType key_id, void* args_tuple,
                          void* result) {
     // Trigger a mock, if there exists one that matches `param`.
-    auto it = mocks_.find(type);
+    auto it = mocks_.find(key_id);
     if (it == mocks_.end()) return false;
     it->second->Apply(args_tuple, result);
     return true;
diff --git a/absl/strings/BUILD.bazel b/absl/strings/BUILD.bazel
index b81d6f6..cb8d900 100644
--- a/absl/strings/BUILD.bazel
+++ b/absl/strings/BUILD.bazel
@@ -605,9 +605,7 @@
     hdrs = ["internal/cordz_handle.h"],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    visibility = [
-        "//absl:__subpackages__",
-    ],
+    visibility = ["//absl:__subpackages__"],
     deps = [
         "//absl/base:config",
         "//absl/base:no_destructor",
@@ -648,9 +646,7 @@
     hdrs = ["internal/cordz_update_scope.h"],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    visibility = [
-        "//absl:__subpackages__",
-    ],
+    visibility = ["//absl:__subpackages__"],
     deps = [
         ":cord_internal",
         ":cordz_info",
diff --git a/absl/types/CMakeLists.txt b/absl/types/CMakeLists.txt
index 817606b..f48accf 100644
--- a/absl/types/CMakeLists.txt
+++ b/absl/types/CMakeLists.txt
@@ -151,3 +151,27 @@
     absl::compare
     GTest::gmock_main
 )
+
+# Deprecated empty library.
+# Clients should remove this dependency.
+absl_cc_library(
+  NAME
+    bad_any_cast
+  PUBLIC
+)
+
+# Deprecated empty library.
+# Clients should remove this dependency.
+absl_cc_library(
+  NAME
+    bad_optional_access
+  PUBLIC
+)
+
+# Deprecated empty library.
+# Clients should remove this dependency.
+absl_cc_library(
+  NAME
+    bad_variant_access
+  PUBLIC
+)