diff --git a/absl/base/internal/hardening.h b/absl/base/internal/hardening.h
index 31638a4..6ea2833 100644
--- a/absl/base/internal/hardening.h
+++ b/absl/base/internal/hardening.h
@@ -52,7 +52,7 @@
 // Prefer a more specific assertion function over this more general one,
 // as assertion functions which perform the comparison themselves
 // can have the cost of the comparison attributed to them.
-inline void HardeningAssert(bool cond) {
+constexpr void HardeningAssert(bool cond) {
   ABSL_ASSERT(cond);
 #if (ABSL_OPTION_HARDENED == 1 || ABSL_OPTION_HARDENED == 2) && defined(NDEBUG)
   if (ABSL_PREDICT_FALSE(!cond)) {
@@ -66,7 +66,7 @@
 //
 // When `NDEBUG` is not defined, `HardeningAssertSlow`'s behavior is identical
 // to `ABSL_ASSERT`.
-inline void HardeningAssertSlow(bool cond) {
+constexpr void HardeningAssertSlow(bool cond) {
   ABSL_ASSERT(cond);
 #if (ABSL_OPTION_HARDENED == 1) && defined(NDEBUG)
   if (ABSL_PREDICT_FALSE(!cond)) {
@@ -76,7 +76,7 @@
 }
 
 template <typename T>
-inline void HardeningAssertGT(T val1, T val2) {
+constexpr void HardeningAssertGT(T val1, T val2) {
   ABSL_ASSERT(val1 > val2);
 #if (ABSL_OPTION_HARDENED == 1 || ABSL_OPTION_HARDENED == 2) && defined(NDEBUG)
   if (!ABSL_PREDICT_TRUE(val1 > val2)) {
@@ -86,7 +86,7 @@
 }
 
 template <typename T>
-inline void HardeningAssertGE(T val1, T val2) {
+constexpr void HardeningAssertGE(T val1, T val2) {
   ABSL_ASSERT(val1 >= val2);
 #if (ABSL_OPTION_HARDENED == 1 || ABSL_OPTION_HARDENED == 2) && defined(NDEBUG)
   if (!ABSL_PREDICT_TRUE(val1 >= val2)) {
@@ -96,7 +96,7 @@
 }
 
 template <typename T>
-inline void HardeningAssertLT(T val1, T val2) {
+constexpr void HardeningAssertLT(T val1, T val2) {
   ABSL_ASSERT(val1 < val2);
 #if (ABSL_OPTION_HARDENED == 1 || ABSL_OPTION_HARDENED == 2) && defined(NDEBUG)
   if (!ABSL_PREDICT_TRUE(val1 < val2)) {
@@ -106,7 +106,7 @@
 }
 
 template <typename T>
-inline void HardeningAssertLE(T val1, T val2) {
+constexpr void HardeningAssertLE(T val1, T val2) {
   ABSL_ASSERT(val1 <= val2);
 #if (ABSL_OPTION_HARDENED == 1 || ABSL_OPTION_HARDENED == 2) && defined(NDEBUG)
   if (!ABSL_PREDICT_TRUE(val1 <= val2)) {
@@ -115,12 +115,12 @@
 #endif
 }
 
-inline void HardeningAssertInBounds(size_t index, size_t size) {
+constexpr void HardeningAssertInBounds(size_t index, size_t size) {
   HardeningAssertLT(index, size);
 }
 
 template <typename T>
-inline void HardeningAssertNonEmpty(const T& container) {
+constexpr void HardeningAssertNonEmpty(const T& container) {
   ABSL_ASSERT(!container.empty());
 #if (ABSL_OPTION_HARDENED == 1 || ABSL_OPTION_HARDENED == 2) && defined(NDEBUG)
   if (ABSL_PREDICT_FALSE(container.empty())) {
@@ -130,7 +130,7 @@
 }
 
 template <typename T>
-inline void HardeningAssertNonNull(T ptr) {
+constexpr void HardeningAssertNonNull(T ptr) {
   ABSL_ASSERT(ptr != nullptr);
 #if (ABSL_OPTION_HARDENED == 1 || ABSL_OPTION_HARDENED == 2) && defined(NDEBUG)
   if (ABSL_PREDICT_FALSE(ptr == nullptr)) {
diff --git a/absl/base/internal/sysinfo.cc b/absl/base/internal/sysinfo.cc
index a62dd31..cd08e51 100644
--- a/absl/base/internal/sysinfo.cc
+++ b/absl/base/internal/sysinfo.cc
@@ -244,8 +244,7 @@
   int rc = clock_gettime(CLOCK_MONOTONIC, &t);
 #endif
   if (rc != 0) {
-    ABSL_INTERNAL_LOG(
-        FATAL, "clock_gettime() failed: (" + std::to_string(errno) + ")");
+    ABSL_RAW_LOG(FATAL, "clock_gettime() failed: (%d)", errno);
   }
   return int64_t{t.tv_sec} * 1000000000 + t.tv_nsec;
 }
diff --git a/absl/base/internal/unscaledcycleclock.h b/absl/base/internal/unscaledcycleclock.h
index bfd9887..94c4bea 100644
--- a/absl/base/internal/unscaledcycleclock.h
+++ b/absl/base/internal/unscaledcycleclock.h
@@ -47,6 +47,10 @@
 
 #if ABSL_USE_UNSCALED_CYCLECLOCK
 
+namespace gloop_do_not_use {
+class UnscaledCycleClockWrapperForPerCpuTest;
+}  // namespace gloop_do_not_use
+
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace time_internal {
diff --git a/absl/container/internal/raw_hash_set.cc b/absl/container/internal/raw_hash_set.cc
index 048d2ee..d7379ef 100644
--- a/absl/container/internal/raw_hash_set.cc
+++ b/absl/container/internal/raw_hash_set.cc
@@ -135,7 +135,7 @@
 
 // Must be defined out-of-line to avoid MSVC error C2482 on some platforms,
 // which is caused by non-constexpr initialization.
-uint16_t HashtableSize::NextSeed() {
+uint16_t HashtableInlineData::NextSeed() {
   static_assert(PerTableSeed::kBitCount == 16);
   thread_local uint16_t seed =
       static_cast<uint16_t>(reinterpret_cast<uintptr_t>(&seed));
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index 11fa31a..3372ea4 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -374,18 +374,6 @@
 
 ABSL_DLL extern ctrl_t kDefaultIterControl;
 
-// We use these sentinel capacity values in debug mode to indicate different
-// classes of bugs.
-enum InvalidCapacity : size_t {
-  kAboveMaxValidCapacity = ~size_t{} - 100,
-  kReentrance,
-  kDestroyed,
-
-  // These two must be last because we use `>= kMovedFrom` to mean moved-from.
-  kMovedFrom,
-  kSelfMovedFrom,
-};
-
 // Returns a pointer to a control byte that can be used by default-constructed
 // iterators. We don't expect this pointer to be dereferenced.
 inline ctrl_t* DefaultIterControl() { return &kDefaultIterControl; }
@@ -432,6 +420,142 @@
 // Sentinel value to indicate creation of an empty table without a seed.
 struct no_seed_empty_tag_t {};
 
+// Returns whether `n` is a valid capacity (i.e., number of slots).
+//
+// A valid capacity is a non-zero integer `2^m - 1`.
+constexpr bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
+
+// Whether a table is small enough that we don't need to hash any keys.
+constexpr bool IsSmallCapacity(size_t capacity) { return capacity <= 1; }
+
+// Converts `n` into the next valid capacity, per `IsValidCapacity`.
+constexpr size_t NormalizeCapacity(size_t n) {
+  return n ? ~size_t{} >> countl_zero(n) : 1;
+}
+
+// Returns the next valid capacity after `n`.
+constexpr size_t NextCapacity(size_t n) {
+  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(n) || n == 0);
+  return n * 2 + 1;
+}
+
+// Returns the previous valid capacity before `n`.
+constexpr size_t PreviousCapacity(size_t n) {
+  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(n));
+  return n / 2;
+}
+
+// General notes on capacity/growth methods below:
+// - We use 7/8th as maximum load factor. For 16-wide groups, that gives an
+//   average of two empty slots per group.
+// - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity.
+// - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we
+//   never need to probe (the whole table fits in one group) so we don't need a
+//   load factor less than 1.
+
+// Given `capacity`, applies the load factor; i.e., it returns the maximum
+// number of values we should put into the table before a resizing rehash.
+constexpr size_t CapacityToGrowth(size_t capacity) {
+  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(capacity));
+  // `capacity*7/8`
+  if (Group::kWidth == 8 && capacity == 7) {
+    // x-x/8 does not work when x==7.
+    return 6;
+  }
+  return capacity - capacity / 8;
+}
+
+// Given `size`, "unapplies" the load factor to find how large the capacity
+// should be to stay within the load factor.
+//
+// For size == 0, returns 0.
+// For other values, returns the same as `NormalizeCapacity(size*8/7)`.
+constexpr size_t SizeToCapacity(size_t size) {
+  if (size == 0) {
+    return 0;
+  }
+  // The minimum possible capacity is NormalizeCapacity(size).
+  // Shifting right `~size_t{}` by `leading_zeros` yields
+  // NormalizeCapacity(size).
+  int leading_zeros = absl::countl_zero(size);
+  constexpr size_t kLast3Bits = size_t{7} << (sizeof(size_t) * 8 - 3);
+  // max_size_for_next_capacity = max_load_factor * next_capacity
+  //                            = (7/8) * (~size_t{} >> leading_zeros)
+  //                            = (7/8*~size_t{}) >> leading_zeros
+  //                            = kLast3Bits >> leading_zeros
+  size_t max_size_for_next_capacity = kLast3Bits >> leading_zeros;
+  // Decrease shift if size is too big for the minimum capacity.
+  leading_zeros -= static_cast<int>(size > max_size_for_next_capacity);
+  if constexpr (Group::kWidth == 8) {
+    // Formula doesn't work when size==7 for 8-wide groups.
+    leading_zeros -= (size == 7);
+  }
+  return (~size_t{}) >> leading_zeros;
+}
+
+// The number of slots in the backing array. This is always 2^N-1 for an
+// integer N.
+// NOTE: this class exists to simplify experiments with different ways to store
+// capacity within size.
+// NOTE: we tried experimenting with compressing the capacity and storing it
+// together with size_: (a) using 6 bits to store the corresponding power (N in
+// 2^N-1), and (b) storing 2^N as the most significant bit of size_ and storing
+// size in the low bits. Both of these experiments were regressions, presumably
+// because we need capacity to do find operations.
+class HashtableCapacity {
+ public:
+  static constexpr HashtableCapacity CreateDestroyed() {
+    return HashtableCapacity(kDestroyed);
+  }
+  static constexpr HashtableCapacity CreateReentrance() {
+    return HashtableCapacity(kReentrance);
+  }
+  static constexpr HashtableCapacity CreateMovedFrom() {
+    return HashtableCapacity(kMovedFrom);
+  }
+  static constexpr HashtableCapacity CreateSelfMovedFrom() {
+    return HashtableCapacity(kSelfMovedFrom);
+  }
+
+  explicit HashtableCapacity(uninitialized_tag_t) {}
+  explicit constexpr HashtableCapacity(size_t capacity) : capacity_(capacity) {
+    ABSL_SWISSTABLE_ASSERT(capacity == 0 || IsValidCapacity(capacity));
+  }
+
+  constexpr bool IsValid() const { return capacity_ <= kAboveMaxValidCapacity; }
+
+  constexpr bool IsDestroyed() const { return capacity_ == kDestroyed; }
+  constexpr bool IsReentrance() const { return capacity_ == kReentrance; }
+  // Returns true if the table is moved-from including self moved-from.
+  constexpr bool IsMovedFrom() const { return capacity_ >= kMovedFrom; }
+  constexpr bool IsSelfMovedFrom() const { return capacity_ == kSelfMovedFrom; }
+
+  constexpr size_t capacity() const {
+    ABSL_SWISSTABLE_ASSERT(IsValid());
+    return capacity_;
+  }
+
+ private:
+  // We use these sentinel capacity values in debug mode to indicate different
+  // classes of bugs.
+  enum InvalidCapacity : size_t {
+    kAboveMaxValidCapacity = ~size_t{} - 100,
+    kReentrance,
+    kDestroyed,
+
+    // These two must be last because we use `>= kMovedFrom` to mean moved-from.
+    kMovedFrom,
+    kSelfMovedFrom,
+  };
+
+  explicit constexpr HashtableCapacity(InvalidCapacity capacity)
+      : capacity_(capacity) {
+    ABSL_SWISSTABLE_ASSERT(capacity_ > kAboveMaxValidCapacity);
+  }
+
+  size_t capacity_;
+};
+
 // Per table hash salt. This gets mixed into H1 to randomize iteration order
 // per-table.
 // The seed is needed to ensure non-determinism of iteration order.
@@ -449,7 +573,7 @@
   size_t seed() const { return seed_; }
 
  private:
-  friend class HashtableSize;
+  friend class HashtableInlineData;
   explicit PerTableSeed(uint16_t seed) : seed_(seed) {}
 
   // The most significant bit of the seed is always 1 when there is a non-zero
@@ -457,20 +581,29 @@
   const uint16_t seed_;
 };
 
-// The size and also has additionally
+// Stores the capacity, the size, and additionally
 // 1) one bit that stores whether we have infoz.
 // 2) PerTableSeed::kBitCount bits for the seed. (For SOO tables, the lowest
 //    bit of the seed is repurposed to track if sampling has been tried).
-class HashtableSize {
+class HashtableInlineData {
  public:
   static constexpr size_t kSizeBitCount = 64 - PerTableSeed::kBitCount - 1;
 
-  explicit HashtableSize(uninitialized_tag_t) {}
-  explicit HashtableSize(no_seed_empty_tag_t) : data_(0) {}
-  HashtableSize(full_soo_tag_t, bool has_tried_sampling)
-      : data_(kSizeOneNoMetadata |
+  explicit HashtableInlineData(uninitialized_tag_t)
+      : capacity_(uninitialized_tag_t{}) {}
+  explicit HashtableInlineData(HashtableCapacity capacity, no_seed_empty_tag_t)
+      : capacity_(capacity), data_(0) {}
+  HashtableInlineData(HashtableCapacity capacity, full_soo_tag_t,
+                      bool has_tried_sampling)
+      : capacity_(capacity),
+        data_(kSizeOneNoMetadata |
               (has_tried_sampling ? kSooHasTriedSamplingMask : 0)) {}
 
+  size_t capacity() const { return capacity_.capacity(); }
+  HashtableCapacity maybe_invalid_capacity() const { return capacity_; }
+  void set_capacity(HashtableCapacity c) { capacity_ = c; }
+  void set_capacity(size_t c) { set_capacity(HashtableCapacity(c)); }
+
   // Returns actual size of the table.
   size_t size() const { return static_cast<size_t>(data_ >> kSizeShift); }
   void increment_size() { data_ += kSizeOneNoMetadata; }
@@ -538,6 +671,8 @@
   // For SOO tables, the seed is unused, and bit 0 is repurposed to track
   // whether the table has already queried should_sample_soo().
   static constexpr uint64_t kSooHasTriedSamplingMask = 1;
+  HashtableCapacity capacity_;
+  // Stores the size and metadata bits. See above for layout.
   uint64_t data_;
 };
 
@@ -782,14 +917,6 @@
 static_assert(sizeof(GrowthInfo) == sizeof(size_t), "");
 static_assert(alignof(GrowthInfo) == alignof(size_t), "");
 
-// Returns whether `n` is a valid capacity (i.e., number of slots).
-//
-// A valid capacity is a non-zero integer `2^m - 1`.
-constexpr bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
-
-// Whether a table is small enough that we don't need to hash any keys.
-constexpr bool IsSmallCapacity(size_t capacity) { return capacity <= 1; }
-
 // Returns the number of "cloned control bytes".
 //
 // This is the number of control bytes that are present both at the beginning
@@ -928,13 +1055,15 @@
 class CommonFields : public CommonFieldsGenerationInfo {
  public:
   explicit CommonFields(soo_tag_t)
-      : capacity_(SooCapacity()), size_(no_seed_empty_tag_t{}) {}
+      : inline_data_(HashtableCapacity(SooCapacity()), no_seed_empty_tag_t{}) {}
   explicit CommonFields(full_soo_tag_t, bool has_tried_sampling)
-      : capacity_(SooCapacity()), size_(full_soo_tag_t{}, has_tried_sampling) {}
+      : inline_data_(HashtableCapacity(SooCapacity()), full_soo_tag_t{},
+                     has_tried_sampling) {}
   explicit CommonFields(non_soo_tag_t)
-      : capacity_(0), size_(no_seed_empty_tag_t{}) {}
+      : inline_data_(HashtableCapacity(0), no_seed_empty_tag_t{}) {}
   // For use in swapping.
-  explicit CommonFields(uninitialized_tag_t) : size_(uninitialized_tag_t{}) {}
+  explicit CommonFields(uninitialized_tag_t)
+      : inline_data_(uninitialized_tag_t{}) {}
 
   // Not copyable
   CommonFields(const CommonFields&) = delete;
@@ -942,10 +1071,7 @@
 
   // Copy with guarantee that it is not SOO.
   CommonFields(non_soo_tag_t, const CommonFields& that)
-      : capacity_(that.capacity_),
-        size_(that.size_),
-        heap_or_soo_(that.heap_or_soo_) {
-  }
+      : inline_data_(that.inline_data_), heap_or_soo_(that.heap_or_soo_) {}
 
   // Movable
   CommonFields(CommonFields&& that) = default;
@@ -983,35 +1109,39 @@
   void set_slots(void* s) { heap_or_soo_.slot_array().set(s); }
 
   // The number of filled slots.
-  size_t size() const { return size_.size(); }
+  size_t size() const { return inline_data_.size(); }
   // Sets the size to zero, but keeps hashinfoz bit and seed.
-  void set_size_to_zero() { size_.set_size_keep_metadata(0); }
+  void set_size_to_zero() { inline_data_.set_size_keep_metadata(0); }
   void set_empty_soo() {
     AssertInSooMode();
-    size_.set_size_keep_metadata(0);
+    inline_data_.set_size_keep_metadata(0);
   }
   void set_full_soo() {
     AssertInSooMode();
-    size_.set_size_keep_metadata(1);
+    inline_data_.set_size_keep_metadata(1);
   }
   void increment_size() {
     ABSL_SWISSTABLE_ASSERT(size() < capacity());
-    size_.increment_size();
+    inline_data_.increment_size();
   }
   void increment_size(size_t n) {
     ABSL_SWISSTABLE_ASSERT(size() + n <= capacity());
-    size_.increment_size(n);
+    inline_data_.increment_size(n);
   }
   void decrement_size() {
     ABSL_SWISSTABLE_ASSERT(!empty());
-    size_.decrement_size();
+    inline_data_.decrement_size();
   }
-  bool empty() const { return size_.empty(); }
-  void set_soo_has_tried_sampling() { size_.set_soo_has_tried_sampling(); }
-  bool soo_has_tried_sampling() const { return size_.soo_has_tried_sampling(); }
+  bool empty() const { return inline_data_.empty(); }
+  void set_soo_has_tried_sampling() {
+    inline_data_.set_soo_has_tried_sampling();
+  }
+  bool soo_has_tried_sampling() const {
+    return inline_data_.soo_has_tried_sampling();
+  }
 
   // The seed used for the hash function.
-  PerTableSeed seed() const { return size_.seed(); }
+  PerTableSeed seed() const { return inline_data_.seed(); }
   // Generates a new seed the hash function.
   // The table will be invalidated if `!empty()` because hash is being changed.
   // In such cases, we will need to rehash the table.
@@ -1019,22 +1149,23 @@
     // Note: we can't use has_infoz() here because we set has_infoz later than
     // we generate the seed.
     if (ABSL_PREDICT_FALSE(has_infoz)) {
-      size_.set_sampled_seed();
+      inline_data_.set_sampled_seed();
       return;
     }
-    size_.generate_new_seed();
+    inline_data_.generate_new_seed();
   }
-  void set_no_seed_for_testing() { size_.set_no_seed_for_testing(); }
+  void set_no_seed_for_testing() { inline_data_.set_no_seed_for_testing(); }
 
   // The total number of available slots.
-  size_t capacity() const { return capacity_; }
-  void set_capacity(size_t c) {
-    // We allow setting above the max valid capacity for debugging purposes.
-    ABSL_SWISSTABLE_ASSERT(c == 0 || IsValidCapacity(c) ||
-                           c > kAboveMaxValidCapacity);
-    capacity_ = c;
+  size_t capacity() const { return inline_data_.capacity(); }
+  HashtableCapacity maybe_invalid_capacity() const {
+    return inline_data_.maybe_invalid_capacity();
   }
-  bool is_small() const { return IsSmallCapacity(capacity_); }
+  void set_capacity(HashtableCapacity c) { inline_data_.set_capacity(c); }
+  void set_capacity(size_t c) {
+    set_capacity(HashtableCapacity(c));
+  }
+  bool is_small() const { return IsSmallCapacity(capacity()); }
 
   // The number of slots we can still fill without needing to rehash.
   // This is stored in the heap allocation before the control bytes.
@@ -1050,10 +1181,10 @@
     return const_cast<CommonFields*>(this)->growth_info();
   }
 
-  bool has_infoz() const { return size_.has_infoz(); }
+  bool has_infoz() const { return inline_data_.has_infoz(); }
   void set_has_infoz() {
-    ABSL_SWISSTABLE_ASSERT(size_.is_sampled_seed());
-    size_.set_has_infoz();
+    ABSL_SWISSTABLE_ASSERT(inline_data_.is_sampled_seed());
+    inline_data_.set_has_infoz();
   }
 
   HashtablezInfoHandle* infoz_ptr() const {
@@ -1101,8 +1232,7 @@
   void move_non_heap_or_soo_fields(CommonFields& that) {
     static_cast<CommonFieldsGenerationInfo&>(*this) =
         std::move(static_cast<CommonFieldsGenerationInfo&>(that));
-    capacity_ = that.capacity_;
-    size_ = that.size_;
+    inline_data_ = that.inline_data_;
   }
 
   // Returns the number of control bytes set to kDeleted. For testing only.
@@ -1119,8 +1249,8 @@
     f();
     return;
 #endif
-    const size_t cap = capacity();
-    set_capacity(InvalidCapacity::kReentrance);
+    const HashtableCapacity cap = maybe_invalid_capacity();
+    set_capacity(HashtableCapacity::CreateReentrance());
     f();
     set_capacity(cap);
   }
@@ -1139,18 +1269,10 @@
     ABSL_SWISSTABLE_ASSERT(!has_infoz());
   }
 
-  // The number of slots in the backing array. This is always 2^N-1 for an
-  // integer N. NOTE: we tried experimenting with compressing the capacity and
-  // storing it together with size_: (a) using 6 bits to store the corresponding
-  // power (N in 2^N-1), and (b) storing 2^N as the most significant bit of
-  // size_ and storing size in the low bits. Both of these experiments were
-  // regressions, presumably because we need capacity to do find operations.
-  size_t capacity_;
-
   // TODO(b/289225379): we could put size_ into HeapOrSoo and make capacity_
   // encode the size in SOO case. We would be making size()/capacity() more
   // expensive in order to have more SOO space.
-  HashtableSize size_;
+  HashtableInlineData inline_data_;
 
   // Either the control/slots pointers or the SOO slot.
   HeapOrSoo heap_or_soo_;
@@ -1159,18 +1281,6 @@
 template <class Policy, class... Params>
 class raw_hash_set;
 
-// Returns the next valid capacity after `n`.
-constexpr size_t NextCapacity(size_t n) {
-  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(n) || n == 0);
-  return n * 2 + 1;
-}
-
-// Returns the previous valid capacity before `n`.
-constexpr size_t PreviousCapacity(size_t n) {
-  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(n));
-  return n / 2;
-}
-
 // Applies the following mapping to every byte in the control array:
 //   * kDeleted -> kEmpty
 //   * kEmpty -> kEmpty
@@ -1181,59 +1291,6 @@
 //   ctrl[i] != ctrl_t::kSentinel for all i < capacity
 void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
 
-// Converts `n` into the next valid capacity, per `IsValidCapacity`.
-constexpr size_t NormalizeCapacity(size_t n) {
-  return n ? ~size_t{} >> countl_zero(n) : 1;
-}
-
-// General notes on capacity/growth methods below:
-// - We use 7/8th as maximum load factor. For 16-wide groups, that gives an
-//   average of two empty slots per group.
-// - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity.
-// - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we
-//   never need to probe (the whole table fits in one group) so we don't need a
-//   load factor less than 1.
-
-// Given `capacity`, applies the load factor; i.e., it returns the maximum
-// number of values we should put into the table before a resizing rehash.
-constexpr size_t CapacityToGrowth(size_t capacity) {
-  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(capacity));
-  // `capacity*7/8`
-  if (Group::kWidth == 8 && capacity == 7) {
-    // x-x/8 does not work when x==7.
-    return 6;
-  }
-  return capacity - capacity / 8;
-}
-
-// Given `size`, "unapplies" the load factor to find how large the capacity
-// should be to stay within the load factor.
-//
-// For size == 0, returns 0.
-// For other values, returns the same as `NormalizeCapacity(size*8/7)`.
-constexpr size_t SizeToCapacity(size_t size) {
-  if (size == 0) {
-    return 0;
-  }
-  // The minimum possible capacity is NormalizeCapacity(size).
-  // Shifting right `~size_t{}` by `leading_zeros` yields
-  // NormalizeCapacity(size).
-  int leading_zeros = absl::countl_zero(size);
-  constexpr size_t kLast3Bits = size_t{7} << (sizeof(size_t) * 8 - 3);
-  // max_size_for_next_capacity = max_load_factor * next_capacity
-  //                            = (7/8) * (~size_t{} >> leading_zeros)
-  //                            = (7/8*~size_t{}) >> leading_zeros
-  //                            = kLast3Bits >> leading_zeros
-  size_t max_size_for_next_capacity = kLast3Bits >> leading_zeros;
-  // Decrease shift if size is too big for the minimum capacity.
-  leading_zeros -= static_cast<int>(size > max_size_for_next_capacity);
-  if constexpr (Group::kWidth == 8) {
-    // Formula doesn't work when size==7 for 8-wide groups.
-    leading_zeros -= (size == 7);
-  }
-  return (~size_t{}) >> leading_zeros;
-}
-
 template <class InputIter>
 size_t SelectBucketCountForIterRange(InputIter first, InputIter last,
                                      size_t bucket_count) {
@@ -1598,7 +1655,9 @@
 }
 
 constexpr size_t MaxStorableSize() {
-  return static_cast<size_t>(uint64_t{1} << HashtableSize::kSizeBitCount) - 1;
+  return static_cast<size_t>(uint64_t{1}
+                             << HashtableInlineData::kSizeBitCount) -
+         1;
 }
 
 // There are no more than 2^sizeof(key_type) unique key_types (and hashtable
@@ -1613,8 +1672,8 @@
 template <size_t kSizeOfSizeT = sizeof(size_t)>
 constexpr size_t MaxValidSizeForSlotSize(size_t slot_size) {
   if constexpr (kSizeOfSizeT == 8) {
-    // For small slot sizes we are limited by HashtableSize::kSizeBitCount.
-    if (slot_size < size_t{1} << (64 - HashtableSize::kSizeBitCount)) {
+    // For small slot sizes we are limited by HashtableInlineData::kSizeBitCount.
+    if (slot_size < size_t{1} << (64 - HashtableInlineData::kSizeBitCount)) {
       return MaxStorableSize();
     }
   }
@@ -1897,7 +1956,10 @@
     return SooEnabled() && size <= SooCapacity();
   }
   // Whether this table is in SOO mode or non-SOO mode.
-  bool is_soo() const { return fits_in_soo(maybe_invalid_capacity()); }
+  bool is_soo() const {
+    HashtableCapacity cap = maybe_invalid_capacity();
+    return cap.IsValid() && fits_in_soo(cap.capacity());
+  }
   bool is_full_soo() const { return is_soo() && !empty(); }
 
   bool is_small() const { return common().is_small(); }
@@ -2347,7 +2409,7 @@
   ~raw_hash_set() {
     destructor_impl();
     if constexpr (SwisstableAssertAccessToDestroyedTable()) {
-      common().set_capacity(InvalidCapacity::kDestroyed);
+      common().set_capacity(HashtableCapacity::CreateDestroyed());
     }
   }
 
@@ -2399,7 +2461,7 @@
 
   ABSL_ATTRIBUTE_REINITIALIZES void clear() {
     if (SwisstableGenerationsEnabled() &&
-        maybe_invalid_capacity() >= InvalidCapacity::kMovedFrom) {
+        maybe_invalid_capacity().IsMovedFrom()) {
       common().set_capacity(DefaultCapacity());
     }
     AssertNotDebugCapacity();
@@ -3044,7 +3106,9 @@
   // construction when the size can fit in SOO capacity.
   bool should_sample_soo() {
     ABSL_SWISSTABLE_ASSERT(is_soo());
-    if (!ShouldSampleHashtablezInfoForAlloc<CharAlloc>()) return false;
+    if constexpr (!ShouldSampleHashtablezInfoForAlloc<CharAlloc>()) {
+      return false;
+    }
     if (common().soo_has_tried_sampling()) {
       // Already evaluated sampling on this SOO table; do not re-evaluate
       // sampling each time it transitions from empty to full SOO state.
@@ -3070,7 +3134,7 @@
     };
     if constexpr (SwisstableAssertAccessToDestroyedTable()) {
       CommonFields common_copy(non_soo_tag_t{}, this->common());
-      common().set_capacity(InvalidCapacity::kDestroyed);
+      common().set_capacity(HashtableCapacity::CreateDestroyed());
       IterateOverFullSlots(common_copy, sizeof(slot_type), destroy_slot);
       common().set_capacity(common_copy.capacity());
     } else {
@@ -3091,7 +3155,7 @@
 
   void destructor_impl() {
     if (SwisstableGenerationsEnabled() &&
-        maybe_invalid_capacity() >= InvalidCapacity::kMovedFrom) {
+        maybe_invalid_capacity().IsMovedFrom()) {
       return;
     }
     if (capacity() == 0) return;
@@ -3186,12 +3250,13 @@
     // than using NDEBUG) to avoid issues in which NDEBUG is enabled in some
     // translation units but not in others.
     if (SwisstableGenerationsEnabled()) {
-      that.common().set_capacity(this == &that ? InvalidCapacity::kSelfMovedFrom
-                                               : InvalidCapacity::kMovedFrom);
+      that.common().set_capacity(this == &that
+                                     ? HashtableCapacity::CreateSelfMovedFrom()
+                                     : HashtableCapacity::CreateMovedFrom());
     }
     if (!SwisstableGenerationsEnabled() ||
-        maybe_invalid_capacity() == DefaultCapacity() ||
-        maybe_invalid_capacity() > kAboveMaxValidCapacity) {
+        !maybe_invalid_capacity().IsValid() ||
+        capacity() == DefaultCapacity()) {
       return;
     }
     common().increment_generation();
@@ -3360,21 +3425,21 @@
       return;
     }
 #endif
-    const size_t cap = maybe_invalid_capacity();
-    if (ABSL_PREDICT_TRUE(cap < InvalidCapacity::kAboveMaxValidCapacity)) {
+    const HashtableCapacity cap = maybe_invalid_capacity();
+    if (ABSL_PREDICT_TRUE(cap.IsValid())) {
       return;
     }
-    assert(cap != InvalidCapacity::kReentrance &&
+    assert(!cap.IsReentrance() &&
            "Reentrant container access during element construction/destruction "
            "is not allowed.");
     if constexpr (SwisstableAssertAccessToDestroyedTable()) {
-      if (cap == InvalidCapacity::kDestroyed) {
+      if (cap.IsDestroyed()) {
         ABSL_RAW_LOG(FATAL, "Use of destroyed hash table.");
       }
     }
     if (SwisstableGenerationsEnabled() &&
-        ABSL_PREDICT_FALSE(cap >= InvalidCapacity::kMovedFrom)) {
-      if (cap == InvalidCapacity::kSelfMovedFrom) {
+        ABSL_PREDICT_FALSE(cap.IsMovedFrom())) {
+      if (cap.IsSelfMovedFrom()) {
         // If this log triggers, then a hash table was move-assigned to itself
         // and then used again later without being reinitialized.
         ABSL_RAW_LOG(FATAL, "Use of self-move-assigned hash table.");
@@ -3502,9 +3567,11 @@
   CommonFields& common() { return settings_.template get<0>(); }
   const CommonFields& common() const { return settings_.template get<0>(); }
 
-  // For use when the capacity is potentially invalid so we shouldn't assume
-  // that the capacity is valid (which is done in `capacity()`).
-  size_t maybe_invalid_capacity() const { return common().capacity(); }
+  // For use when the capacity is potentially invalid: returns the
+  // HashtableCapacity wrapper directly rather than a raw size_t.
+  HashtableCapacity maybe_invalid_capacity() const {
+    return common().maybe_invalid_capacity();
+  }
   ctrl_t* control() const {
     ABSL_SWISSTABLE_ASSERT(!is_soo());
     return common().control();
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 4b847b9..d34a056 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -2602,7 +2602,7 @@
 // in seed.
 void GenerateIrrelevantSeeds(int cnt) {
   for (int i = cnt % 17; i > 0; --i) {
-    HashtableSize::NextSeed();
+    HashtableInlineData::NextSeed();
   }
 }
 
@@ -4128,7 +4128,7 @@
 TEST(HashtableSize, GenerateNewSeedDoesntChangeSize) {
   size_t size = 1;
   do {
-    HashtableSize hs(no_seed_empty_tag_t{});
+    HashtableInlineData hs(HashtableCapacity(15), no_seed_empty_tag_t{});
     hs.increment_size(size);
     EXPECT_EQ(hs.size(), size);
     hs.generate_new_seed();
@@ -4155,7 +4155,7 @@
       } else if (i <= 21) {
         ASSERT_GE(max_size, uint64_t{1} << 40);
       }
-      ASSERT_LE(max_size, uint64_t{1} << HashtableSize::kSizeBitCount);
+      ASSERT_LE(max_size, uint64_t{1} << HashtableInlineData::kSizeBitCount);
       ASSERT_LT(absl::uint128(max_size) * slot_size, uint64_t{1} << 63);
     }
   }
diff --git a/absl/strings/escaping.cc b/absl/strings/escaping.cc
index 1171e8d..ea5a958 100644
--- a/absl/strings/escaping.cc
+++ b/absl/strings/escaping.cc
@@ -30,6 +30,7 @@
 #include "absl/base/internal/unaligned_access.h"
 #include "absl/base/macros.h"
 #include "absl/base/nullability.h"
+#include "absl/base/optimization.h"
 #include "absl/strings/ascii.h"
 #include "absl/strings/charset.h"
 #include "absl/strings/internal/append_and_overwrite.h"
@@ -467,6 +468,165 @@
       });
 }
 
+// The two strings below map each 6-bit value (0 to 63) to its
+// base64-escaped character equivalent.
+// For the inverse case, see the kUn(WebSafe)Base64 maps later in this
+// file.
+constexpr char kBase64Chars[] =
+    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+
+constexpr char kWebSafeBase64Chars[] =
+    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";
+
+// ----------------------------------------------------------------------
+//   Take the input in groups of 4 characters and turn each
+//   character into a code 0 to 63 thus:
+//           A-Z map to 0 to 25
+//           a-z map to 26 to 51
+//           0-9 map to 52 to 61
+//           +(- for WebSafe) maps to 62
+//           /(_ for WebSafe) maps to 63
+//   There will be four numbers, all less than 64 which can be represented
+//   by a 6 digit binary number (aaaaaa, bbbbbb, cccccc, dddddd respectively).
+//   Arrange the 6 digit binary numbers into three bytes as such:
+//   aaaaaabb bbbbcccc ccdddddd
+//   Equals signs (one or two) are used at the end of the encoded block to
+//   indicate that the text was not an integer multiple of three bytes long.
+// ----------------------------------------------------------------------
+size_t Base64EscapeInternal(const unsigned char* src, size_t szsrc, char* dest,
+                            size_t szdest, const char* base64,
+                            bool do_padding) {
+  constexpr char kPad64 = '=';
+
+  constexpr size_t kMaxSize = (std::numeric_limits<size_t>::max() - 1) / 4 * 3;
+  if (ABSL_PREDICT_FALSE(szsrc > kMaxSize || szsrc * 4 > szdest * 3)) return 0;
+
+  char* cur_dest = dest;
+  const unsigned char* cur_src = src;
+
+  char* const limit_dest = dest + szdest;
+  const unsigned char* const limit_src = src + szsrc;
+
+  // (from https://tools.ietf.org/html/rfc3548)
+  // Special processing is performed if fewer than 24 bits are available
+  // at the end of the data being encoded.  A full encoding quantum is
+  // always completed at the end of a quantity.  When fewer than 24 input
+  // bits are available in an input group, zero bits are added (on the
+  // right) to form an integral number of 6-bit groups.
+  //
+  // If do_padding is true, padding at the end of the data is performed. This
+  // output padding uses the '=' character.
+
+  // Three bytes of data encodes to four characters of cyphertext.
+  // So we can pump through three-byte chunks atomically.
+  if (szsrc >= 3) {                    // "limit_src - 3" is UB if szsrc < 3.
+    while (cur_src < limit_src - 3) {  // While we have >= 32 bits.
+      uint32_t in = absl::big_endian::Load32(cur_src) >> 8;
+
+      cur_dest[0] = base64[in >> 18];
+      in &= 0x3FFFF;
+      cur_dest[1] = base64[in >> 12];
+      in &= 0xFFF;
+      cur_dest[2] = base64[in >> 6];
+      in &= 0x3F;
+      cur_dest[3] = base64[in];
+
+      cur_dest += 4;
+      cur_src += 3;
+    }
+  }
+  // To save time, we didn't update szdest or szsrc in the loop.  So do it now.
+  szdest = static_cast<size_t>(limit_dest - cur_dest);
+  szsrc = static_cast<size_t>(limit_src - cur_src);
+
+  /* now deal with the tail (<=3 bytes) */
+  switch (szsrc) {
+    case 0:
+      // Nothing left; nothing more to do.
+      break;
+    case 1: {
+      // One byte left: this encodes to two characters, and (optionally)
+      // two pad characters to round out the four-character cypherblock.
+      if (szdest < 2) return 0;
+      uint32_t in = cur_src[0];
+      cur_dest[0] = base64[in >> 2];
+      in &= 0x3;
+      cur_dest[1] = base64[in << 4];
+      cur_dest += 2;
+      szdest -= 2;
+      if (do_padding) {
+        if (szdest < 2) return 0;
+        cur_dest[0] = kPad64;
+        cur_dest[1] = kPad64;
+        cur_dest += 2;
+        szdest -= 2;
+      }
+      break;
+    }
+    case 2: {
+      // Two bytes left: this encodes to three characters, and (optionally)
+      // one pad character to round out the four-character cypherblock.
+      if (szdest < 3) return 0;
+      uint32_t in = absl::big_endian::Load16(cur_src);
+      cur_dest[0] = base64[in >> 10];
+      in &= 0x3FF;
+      cur_dest[1] = base64[in >> 4];
+      in &= 0x00F;
+      cur_dest[2] = base64[in << 2];
+      cur_dest += 3;
+      szdest -= 3;
+      if (do_padding) {
+        if (szdest < 1) return 0;
+        cur_dest[0] = kPad64;
+        cur_dest += 1;
+        szdest -= 1;
+      }
+      break;
+    }
+    case 3: {
+      // Three bytes left: same as in the big loop above.  We can't do this in
+      // the loop because the loop above always reads 4 bytes, and the fourth
+      // byte is past the end of the input.
+      if (szdest < 4) return 0;
+      uint32_t in =
+          (uint32_t{cur_src[0]} << 16) + absl::big_endian::Load16(cur_src + 1);
+      cur_dest[0] = base64[in >> 18];
+      in &= 0x3FFFF;
+      cur_dest[1] = base64[in >> 12];
+      in &= 0xFFF;
+      cur_dest[2] = base64[in >> 6];
+      in &= 0x3F;
+      cur_dest[3] = base64[in];
+      cur_dest += 4;
+      szdest -= 4;
+      break;
+    }
+    default:
+      // Should not be reached: blocks of 4 bytes are handled
+      // in the while loop before this switch statement.
+      ABSL_RAW_LOG(FATAL, "Logic problem? szsrc = %zu", szsrc);
+      break;
+  }
+  return static_cast<size_t>(cur_dest - dest);
+}
+
+std::string Base64EscapeToStringInternal(const unsigned char* src, size_t szsrc,
+                                         bool do_padding,
+                                         const char* base64_chars) {
+  std::string escaped;
+  const size_t calc_escaped_size =
+      strings_internal::CalculateBase64EscapedLenInternal(szsrc, do_padding);
+  StringResizeAndOverwrite(
+      escaped, calc_escaped_size,
+      [src, szsrc, base64_chars, do_padding](char* buf, size_t buf_size) {
+        const size_t escaped_len = Base64EscapeInternal(
+            src, szsrc, buf, buf_size, base64_chars, do_padding);
+        assert(escaped_len == buf_size);
+        return escaped_len;
+      });
+  return escaped;
+}
+
 // Reverses the mapping in Base64EscapeInternal; see that method's
 // documentation for details of the mapping.
 bool Base64UnescapeInternal(const char* absl_nullable src_param, size_t szsrc,
@@ -955,19 +1115,15 @@
 }
 
 std::string Base64Escape(absl::string_view src) {
-  std::string dest;
-  strings_internal::Base64EscapeInternal(
-      reinterpret_cast<const unsigned char*>(src.data()), src.size(), &dest,
-      true, strings_internal::kBase64Chars);
-  return dest;
+  return Base64EscapeToStringInternal(
+      reinterpret_cast<const unsigned char*>(src.data()), src.size(), true,
+      kBase64Chars);
 }
 
 std::string WebSafeBase64Escape(absl::string_view src) {
-  std::string dest;
-  strings_internal::Base64EscapeInternal(
-      reinterpret_cast<const unsigned char*>(src.data()), src.size(), &dest,
-      false, strings_internal::kWebSafeBase64Chars);
-  return dest;
+  return Base64EscapeToStringInternal(
+      reinterpret_cast<const unsigned char*>(src.data()), src.size(), false,
+      kWebSafeBase64Chars);
 }
 
 bool HexStringToBytes(absl::string_view hex, std::string* absl_nonnull bytes) {
diff --git a/absl/strings/internal/escaping.cc b/absl/strings/internal/escaping.cc
index d2abe66..a70b5dc 100644
--- a/absl/strings/internal/escaping.cc
+++ b/absl/strings/internal/escaping.cc
@@ -23,16 +23,6 @@
 ABSL_NAMESPACE_BEGIN
 namespace strings_internal {
 
-// The two strings below provide maps from normal 6-bit characters to their
-// base64-escaped equivalent.
-// For the inverse case, see kUn(WebSafe)Base64 in the external
-// escaping.cc.
-ABSL_CONST_INIT const char kBase64Chars[] =
-    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
-
-ABSL_CONST_INIT const char kWebSafeBase64Chars[] =
-    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";
-
 size_t CalculateBase64EscapedLenInternal(size_t input_len, bool do_padding) {
   // Base64 encodes three bytes of input at a time. If the input is not
   // divisible by three, we pad as appropriate.
@@ -73,137 +63,6 @@
   return len;
 }
 
-// ----------------------------------------------------------------------
-//   Take the input in groups of 4 characters and turn each
-//   character into a code 0 to 63 thus:
-//           A-Z map to 0 to 25
-//           a-z map to 26 to 51
-//           0-9 map to 52 to 61
-//           +(- for WebSafe) maps to 62
-//           /(_ for WebSafe) maps to 63
-//   There will be four numbers, all less than 64 which can be represented
-//   by a 6 digit binary number (aaaaaa, bbbbbb, cccccc, dddddd respectively).
-//   Arrange the 6 digit binary numbers into three bytes as such:
-//   aaaaaabb bbbbcccc ccdddddd
-//   Equals signs (one or two) are used at the end of the encoded block to
-//   indicate that the text was not an integer multiple of three bytes long.
-// ----------------------------------------------------------------------
-size_t Base64EscapeInternal(const unsigned char* src, size_t szsrc, char* dest,
-                            size_t szdest, const char* base64,
-                            bool do_padding) {
-  static const char kPad64 = '=';
-
-  if (szsrc * 4 > szdest * 3) return 0;
-
-  char* cur_dest = dest;
-  const unsigned char* cur_src = src;
-
-  char* const limit_dest = dest + szdest;
-  const unsigned char* const limit_src = src + szsrc;
-
-  // (from https://tools.ietf.org/html/rfc3548)
-  // Special processing is performed if fewer than 24 bits are available
-  // at the end of the data being encoded.  A full encoding quantum is
-  // always completed at the end of a quantity.  When fewer than 24 input
-  // bits are available in an input group, zero bits are added (on the
-  // right) to form an integral number of 6-bit groups.
-  //
-  // If do_padding is true, padding at the end of the data is performed. This
-  // output padding uses the '=' character.
-
-  // Three bytes of data encodes to four characters of cyphertext.
-  // So we can pump through three-byte chunks atomically.
-  if (szsrc >= 3) {                    // "limit_src - 3" is UB if szsrc < 3.
-    while (cur_src < limit_src - 3) {  // While we have >= 32 bits.
-      uint32_t in = absl::big_endian::Load32(cur_src) >> 8;
-
-      cur_dest[0] = base64[in >> 18];
-      in &= 0x3FFFF;
-      cur_dest[1] = base64[in >> 12];
-      in &= 0xFFF;
-      cur_dest[2] = base64[in >> 6];
-      in &= 0x3F;
-      cur_dest[3] = base64[in];
-
-      cur_dest += 4;
-      cur_src += 3;
-    }
-  }
-  // To save time, we didn't update szdest or szsrc in the loop.  So do it now.
-  szdest = static_cast<size_t>(limit_dest - cur_dest);
-  szsrc = static_cast<size_t>(limit_src - cur_src);
-
-  /* now deal with the tail (<=3 bytes) */
-  switch (szsrc) {
-    case 0:
-      // Nothing left; nothing more to do.
-      break;
-    case 1: {
-      // One byte left: this encodes to two characters, and (optionally)
-      // two pad characters to round out the four-character cypherblock.
-      if (szdest < 2) return 0;
-      uint32_t in = cur_src[0];
-      cur_dest[0] = base64[in >> 2];
-      in &= 0x3;
-      cur_dest[1] = base64[in << 4];
-      cur_dest += 2;
-      szdest -= 2;
-      if (do_padding) {
-        if (szdest < 2) return 0;
-        cur_dest[0] = kPad64;
-        cur_dest[1] = kPad64;
-        cur_dest += 2;
-        szdest -= 2;
-      }
-      break;
-    }
-    case 2: {
-      // Two bytes left: this encodes to three characters, and (optionally)
-      // one pad character to round out the four-character cypherblock.
-      if (szdest < 3) return 0;
-      uint32_t in = absl::big_endian::Load16(cur_src);
-      cur_dest[0] = base64[in >> 10];
-      in &= 0x3FF;
-      cur_dest[1] = base64[in >> 4];
-      in &= 0x00F;
-      cur_dest[2] = base64[in << 2];
-      cur_dest += 3;
-      szdest -= 3;
-      if (do_padding) {
-        if (szdest < 1) return 0;
-        cur_dest[0] = kPad64;
-        cur_dest += 1;
-        szdest -= 1;
-      }
-      break;
-    }
-    case 3: {
-      // Three bytes left: same as in the big loop above.  We can't do this in
-      // the loop because the loop above always reads 4 bytes, and the fourth
-      // byte is past the end of the input.
-      if (szdest < 4) return 0;
-      uint32_t in =
-          (uint32_t{cur_src[0]} << 16) + absl::big_endian::Load16(cur_src + 1);
-      cur_dest[0] = base64[in >> 18];
-      in &= 0x3FFFF;
-      cur_dest[1] = base64[in >> 12];
-      in &= 0xFFF;
-      cur_dest[2] = base64[in >> 6];
-      in &= 0x3F;
-      cur_dest[3] = base64[in];
-      cur_dest += 4;
-      szdest -= 4;
-      break;
-    }
-    default:
-      // Should not be reached: blocks of 4 bytes are handled
-      // in the while loop before this switch statement.
-      ABSL_RAW_LOG(FATAL, "Logic problem? szsrc = %zu", szsrc);
-      break;
-  }
-  return static_cast<size_t>(cur_dest - dest);
-}
-
 }  // namespace strings_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/absl/strings/internal/escaping.h b/absl/strings/internal/escaping.h
index b71fb7e..d936db6 100644
--- a/absl/strings/internal/escaping.h
+++ b/absl/strings/internal/escaping.h
@@ -15,43 +15,19 @@
 #ifndef ABSL_STRINGS_INTERNAL_ESCAPING_H_
 #define ABSL_STRINGS_INTERNAL_ESCAPING_H_
 
-#include <cassert>
+#include <cstddef>
 
-#include "absl/strings/resize_and_overwrite.h"
+#include "absl/base/config.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace strings_internal {
 
-ABSL_CONST_INIT extern const char kBase64Chars[];
-ABSL_CONST_INIT extern const char kWebSafeBase64Chars[];
-
 // Calculates the length of a Base64 encoding (RFC 4648) of a string of length
 // `input_len`, with or without padding per `do_padding`. Note that 'web-safe'
 // encoding (section 5 of the RFC) does not change this length.
 size_t CalculateBase64EscapedLenInternal(size_t input_len, bool do_padding);
 
-// Base64-encodes `src` using the alphabet provided in `base64` (which
-// determines whether to do web-safe encoding or not) and writes the result to
-// `dest`. If `do_padding` is true, `dest` is padded with '=' chars until its
-// length is a multiple of 3. Returns the length of `dest`.
-size_t Base64EscapeInternal(const unsigned char* src, size_t szsrc, char* dest,
-                            size_t szdest, const char* base64, bool do_padding);
-template <typename String>
-void Base64EscapeInternal(const unsigned char* src, size_t szsrc, String* dest,
-                          bool do_padding, const char* base64_chars) {
-  const size_t calc_escaped_size =
-      CalculateBase64EscapedLenInternal(szsrc, do_padding);
-  StringResizeAndOverwrite(
-      *dest, calc_escaped_size,
-      [src, szsrc, base64_chars, do_padding](char* buf, size_t buf_size) {
-        const size_t escaped_len = Base64EscapeInternal(
-            src, szsrc, buf, buf_size, base64_chars, do_padding);
-        assert(escaped_len == buf_size);
-        return escaped_len;
-      });
-}
-
 }  // namespace strings_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/absl/types/BUILD.bazel b/absl/types/BUILD.bazel
index 769f830..a3c038a 100644
--- a/absl/types/BUILD.bazel
+++ b/absl/types/BUILD.bazel
@@ -93,6 +93,7 @@
         "//absl/algorithm",
         "//absl/base:config",
         "//absl/base:core_headers",
+        "//absl/base:hardening",
         "//absl/base:nullability",
         "//absl/base:throw_delegate",
         "//absl/hash:weakly_mixed_integer",
@@ -133,6 +134,7 @@
     deps = [
         "//absl/base:config",
         "//absl/base:core_headers",
+        "//absl/base:hardening",
         "//absl/base:nullability",
         "//absl/base:raw_logging_internal",
         "//absl/base:throw_delegate",
@@ -258,6 +260,7 @@
     deps = [
         "//absl/base:config",
         "//absl/base:core_headers",
+        "//absl/base:hardening",
     ],
 )
 
diff --git a/absl/types/CMakeLists.txt b/absl/types/CMakeLists.txt
index fd2f92f..f00b574 100644
--- a/absl/types/CMakeLists.txt
+++ b/absl/types/CMakeLists.txt
@@ -39,6 +39,7 @@
   DEPS
     absl::config
     absl::core_headers
+    absl::hardening
     absl::nullability
     absl::raw_logging_internal
     absl::throw_delegate
@@ -78,6 +79,7 @@
     absl::algorithm
     absl::config
     absl::core_headers
+    absl::hardening
     absl::nullability
     absl::throw_delegate
     absl::type_traits
@@ -231,6 +233,7 @@
   DEPS
     absl::config
     absl::core_headers
+    absl::hardening
   PUBLIC
 )
 
diff --git a/absl/types/any_span.h b/absl/types/any_span.h
index 98ad4f9..d20f7bf 100644
--- a/absl/types/any_span.h
+++ b/absl/types/any_span.h
@@ -213,6 +213,7 @@
 
 #include "absl/base/attributes.h"
 #include "absl/base/config.h"
+#include "absl/base/internal/hardening.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/base/macros.h"
 #include "absl/base/nullability.h"
@@ -292,7 +293,7 @@
       "Iter must be a random access iterator.");
 
   Range(Iter begin, Iter end) {
-    ABSL_HARDENING_ASSERT(begin <= end);
+    absl::base_internal::HardeningAssertLE(begin, end);
     begin_ = begin;
     end_ = end;
   }
@@ -300,7 +301,7 @@
   std::size_t size() const { return end_ - begin_; }
 
   decltype(std::declval<Iter>()[0]) operator[](std::size_t i) const {
-    ABSL_HARDENING_ASSERT(i < (end_ - begin_));
+    absl::base_internal::HardeningAssertLT(i, size());
     return begin_[i];
   }
 
@@ -708,19 +709,22 @@
     if (len == AnySpan<T>::npos) {
       len = this_size - pos;
     }
-    ABSL_HARDENING_ASSERT(pos <= this_size && len <= this_size - pos);
+    absl::base_internal::HardeningAssertLE(pos, this_size);
+    absl::base_internal::HardeningAssertLE(
+        len,
+        static_cast<size_type>(this_size - pos));
     return AnySpan<T>(getter_.Offset(pos), len);
   }
 
   constexpr AnySpan subspan(size_type pos) const {
-    ABSL_HARDENING_ASSERT(pos <= size());
+    absl::base_internal::HardeningAssertLE(pos, size());
     return AnySpan(getter_.Offset(pos), size() - pos);
   }
 
   // Returns a `AnySpan` containing first `len` elements. Parameter `len`
   // must be non-negative and <= size().
   constexpr AnySpan first(size_type len) const {
-    ABSL_HARDENING_ASSERT(len != AnySpan<T>::npos);
+    absl::base_internal::HardeningAssert(len != AnySpan<T>::npos);
     return subspan(0, len);
   }
 
@@ -734,7 +738,7 @@
 
   // Element access.
   constexpr reference operator[](size_type index) const {
-    ABSL_HARDENING_ASSERT(index < size());
+    absl::base_internal::HardeningAssertLT(index, size());
     return getter_.Get(index);
   }
   constexpr reference at(size_type index) const {
@@ -744,11 +748,11 @@
     return getter_.Get(index);
   }
   constexpr reference front() const {
-    ABSL_HARDENING_ASSERT(size() > 0);
+    absl::base_internal::HardeningAssertGT(size(), size_type{0});
     return (*this)[0];
   }
   constexpr reference back() const {
-    ABSL_HARDENING_ASSERT(size() > 0);
+    absl::base_internal::HardeningAssertGT(size(), size_type{0});
     return (*this)[size() - 1];
   }
 
diff --git a/absl/types/optional_ref.h b/absl/types/optional_ref.h
index fb21333..920fe56 100644
--- a/absl/types/optional_ref.h
+++ b/absl/types/optional_ref.h
@@ -67,6 +67,7 @@
 
 #include "absl/base/attributes.h"
 #include "absl/base/config.h"
+#include "absl/base/internal/hardening.h"
 #include "absl/base/macros.h"
 #include "absl/base/optimization.h"
 
@@ -162,11 +163,11 @@
   // Accesses the underlying `T` value of an `optional_ref`. If the
   // `optional_ref` is empty, behavior is undefined.
   constexpr T& operator*() const {
-    ABSL_HARDENING_ASSERT(ptr_ != nullptr);
+    absl::base_internal::HardeningAssertNonNull(ptr_);
     return *ptr_;
   }
   constexpr T* operator->() const {
-    ABSL_HARDENING_ASSERT(ptr_ != nullptr);
+    absl::base_internal::HardeningAssertNonNull(ptr_);
     return ptr_;
   }
 
diff --git a/absl/types/span.h b/absl/types/span.h
index 8c5736c..2327962 100644
--- a/absl/types/span.h
+++ b/absl/types/span.h
@@ -64,6 +64,7 @@
 
 #include "absl/base/attributes.h"
 #include "absl/base/config.h"
+#include "absl/base/internal/hardening.h"
 #include "absl/base/macros.h"
 #include "absl/base/nullability.h"
 #include "absl/base/optimization.h"
@@ -335,7 +336,7 @@
   //
   // Returns a reference to the i'th element of this span.
   constexpr reference operator[](size_type i) const noexcept {
-    ABSL_HARDENING_ASSERT(i < size());
+    absl::base_internal::HardeningAssertLT(i, size());
     return ptr_[i];
   }
 
@@ -354,7 +355,7 @@
   // Returns a reference to the first element of this span. The span must not
   // be empty.
   constexpr reference front() const noexcept {
-    ABSL_HARDENING_ASSERT(size() > 0);
+    absl::base_internal::HardeningAssertGT(size(), static_cast<size_t>(0));
     return *data();
   }
 
@@ -363,7 +364,7 @@
   // Returns a reference to the last element of this span. The span must not
   // be empty.
   constexpr reference back() const noexcept {
-    ABSL_HARDENING_ASSERT(size() > 0);
+    absl::base_internal::HardeningAssertGT(size(), static_cast<size_t>(0));
     return *(data() + size() - 1);
   }
 
@@ -429,7 +430,7 @@
   //
   // Removes the first `n` elements from the span.
   void remove_prefix(size_type n) noexcept {
-    ABSL_HARDENING_ASSERT(size() >= n);
+    absl::base_internal::HardeningAssertGE(size(), n);
     ptr_ += n;
     len_ -= n;
   }
@@ -438,7 +439,7 @@
   //
   // Removes the last `n` elements from the span.
   void remove_suffix(size_type n) noexcept {
-    ABSL_HARDENING_ASSERT(size() >= n);
+    absl::base_internal::HardeningAssertGE(size(), n);
     len_ -= n;
   }
 
@@ -737,7 +738,7 @@
 template <int&... ExplicitArgumentBarrier, typename T>
 Span<T> MakeSpan(T* absl_nullable begin ABSL_ATTRIBUTE_LIFETIME_BOUND,
                  T* absl_nullable end) noexcept {
-  ABSL_HARDENING_ASSERT(begin <= end);
+  absl::base_internal::HardeningAssertLE(begin, end);
   return Span<T>(begin, static_cast<size_t>(end - begin));
 }
 
@@ -798,7 +799,7 @@
 Span<const T> MakeConstSpan(T* absl_nullable begin
                                 ABSL_ATTRIBUTE_LIFETIME_BOUND,
                             T* absl_nullable end) noexcept {
-  ABSL_HARDENING_ASSERT(begin <= end);
+  absl::base_internal::HardeningAssertLE(begin, end);
   return Span<const T>(begin, end - begin);
 }