diff --git a/CMake/AbseilDll.cmake b/CMake/AbseilDll.cmake
index df071af..9f43ff9 100644
--- a/CMake/AbseilDll.cmake
+++ b/CMake/AbseilDll.cmake
@@ -10,6 +10,7 @@
   "base/config.h"
   "base/const_init.h"
   "base/dynamic_annotations.h"
+  "base/fast_type_id.h"
   "base/internal/atomic_hook.h"
   "base/internal/cycleclock.cc"
   "base/internal/cycleclock.h"
@@ -18,7 +19,6 @@
   "base/internal/dynamic_annotations.h"
   "base/internal/endian.h"
   "base/internal/errno_saver.h"
-  "base/internal/fast_type_id.h"
   "base/internal/hide_ptr.h"
   "base/internal/identity.h"
   "base/internal/iterator_traits.h"
@@ -94,6 +94,7 @@
   "container/internal/raw_hash_map.h"
   "container/internal/raw_hash_set.cc"
   "container/internal/raw_hash_set.h"
+  "container/internal/raw_hash_set_resize_impl.h"
   "container/internal/tracked.h"
   "container/node_hash_map.h"
   "container/node_hash_set.h"
diff --git a/absl/base/BUILD.bazel b/absl/base/BUILD.bazel
index 83fd607..2054794 100644
--- a/absl/base/BUILD.bazel
+++ b/absl/base/BUILD.bazel
@@ -541,6 +541,9 @@
     ],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//absl:__subpackages__",
+    ],
     deps = [
         ":base",
         ":config",
@@ -804,12 +807,9 @@
 
 cc_library(
     name = "fast_type_id",
-    hdrs = ["internal/fast_type_id.h"],
+    hdrs = ["fast_type_id.h"],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    visibility = [
-        "//absl:__subpackages__",
-    ],
     deps = [
         ":config",
     ],
@@ -818,10 +818,11 @@
 cc_test(
     name = "fast_type_id_test",
     size = "small",
-    srcs = ["internal/fast_type_id_test.cc"],
+    srcs = ["fast_type_id_test.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
     deps = [
+        ":core_headers",
         ":fast_type_id",
         "@googletest//:gtest",
         "@googletest//:gtest_main",
diff --git a/absl/base/CMakeLists.txt b/absl/base/CMakeLists.txt
index c98a63e..59a9a95 100644
--- a/absl/base/CMakeLists.txt
+++ b/absl/base/CMakeLists.txt
@@ -669,12 +669,11 @@
     GTest::gtest_main
 )
 
-# Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
     fast_type_id
   HDRS
-    "internal/fast_type_id.h"
+    "fast_type_id.h"
   COPTS
     ${ABSL_DEFAULT_COPTS}
   LINKOPTS
@@ -687,10 +686,11 @@
   NAME
     fast_type_id_test
   SRCS
-    "internal/fast_type_id_test.cc"
+    "fast_type_id_test.cc"
   COPTS
     ${ABSL_TEST_COPTS}
   DEPS
+    absl::core_headers
     absl::fast_type_id
     GTest::gtest_main
 )
diff --git a/absl/base/call_once.h b/absl/base/call_once.h
index bd4b657..7bfd916 100644
--- a/absl/base/call_once.h
+++ b/absl/base/call_once.h
@@ -49,8 +49,8 @@
 class once_flag;
 
 namespace base_internal {
-absl::Nonnull<std::atomic<uint32_t>*> ControlWord(
-    absl::Nonnull<absl::once_flag*> flag);
+std::atomic<uint32_t>* absl_nonnull ControlWord(
+    absl::once_flag* absl_nonnull flag);
 }  // namespace base_internal
 
 // call_once()
@@ -93,8 +93,8 @@
   once_flag& operator=(const once_flag&) = delete;
 
  private:
-  friend absl::Nonnull<std::atomic<uint32_t>*> base_internal::ControlWord(
-      absl::Nonnull<once_flag*> flag);
+  friend std::atomic<uint32_t>* absl_nonnull base_internal::ControlWord(
+      once_flag* absl_nonnull flag);
   std::atomic<uint32_t> control_;
 };
 
@@ -108,7 +108,7 @@
 // Like call_once, but uses KERNEL_ONLY scheduling. Intended to be used to
 // initialize entities used by the scheduler implementation.
 template <typename Callable, typename... Args>
-void LowLevelCallOnce(absl::Nonnull<absl::once_flag*> flag, Callable&& fn,
+void LowLevelCallOnce(absl::once_flag* absl_nonnull flag, Callable&& fn,
                       Args&&... args);
 
 // Disables scheduling while on stack when scheduling mode is non-cooperative.
@@ -150,7 +150,7 @@
 
 template <typename Callable, typename... Args>
     void
-    CallOnceImpl(absl::Nonnull<std::atomic<uint32_t>*> control,
+    CallOnceImpl(std::atomic<uint32_t>* absl_nonnull control,
                  base_internal::SchedulingMode scheduling_mode, Callable&& fn,
                  Args&&... args) {
 #ifndef NDEBUG
@@ -190,13 +190,13 @@
   }  // else *control is already kOnceDone
 }
 
-inline absl::Nonnull<std::atomic<uint32_t>*> ControlWord(
-    absl::Nonnull<once_flag*> flag) {
+inline std::atomic<uint32_t>* absl_nonnull ControlWord(
+    once_flag* absl_nonnull flag) {
   return &flag->control_;
 }
 
 template <typename Callable, typename... Args>
-void LowLevelCallOnce(absl::Nonnull<absl::once_flag*> flag, Callable&& fn,
+void LowLevelCallOnce(absl::once_flag* absl_nonnull flag, Callable&& fn,
                       Args&&... args) {
   std::atomic<uint32_t>* once = base_internal::ControlWord(flag);
   uint32_t s = once->load(std::memory_order_acquire);
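
For reference, a minimal sketch (not part of this patch) contrasting the two nullability spellings this change migrates between. It assumes both the older `absl::Nonnull<T*>` alias and the `absl_nonnull` macro are available from "absl/base/nullability.h".

#include <cstdio>

#include "absl/base/nullability.h"

// Old spelling: a template alias that wraps the pointer type.
int DerefOld(absl::Nonnull<const int*> p) { return *p; }

// New spelling: a qualifier-like macro written after the pointer type,
// as used throughout this patch.
int DerefNew(const int* absl_nonnull p) { return *p; }

int main() {
  int x = 42;
  std::printf("%d %d\n", DerefOld(&x), DerefNew(&x));
  return 0;
}
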
diff --git a/absl/base/internal/fast_type_id.h b/absl/base/fast_type_id.h
similarity index 72%
rename from absl/base/internal/fast_type_id.h
rename to absl/base/fast_type_id.h
index 36372f5..ff25027 100644
--- a/absl/base/internal/fast_type_id.h
+++ b/absl/base/fast_type_id.h
@@ -1,4 +1,3 @@
-//
 // Copyright 2020 The Abseil Authors.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,32 +13,33 @@
 // limitations under the License.
 //
 
-#ifndef ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
-#define ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
+#ifndef ABSL_BASE_FAST_TYPE_ID_H_
+#define ABSL_BASE_FAST_TYPE_ID_H_
 
 #include "absl/base/config.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
-namespace base_internal {
 
+namespace base_internal {
 template <typename Type>
 struct FastTypeTag {
-  constexpr static char dummy_var = 0;
+  static constexpr char kDummyVar = 0;
 };
+}  // namespace base_internal
 
-// FastTypeId<Type>() evaluates at compile/link-time to a unique pointer for the
-// passed-in type. These are meant to be good match for keys into maps or
-// straight up comparisons.
+// The type returned by `absl::FastTypeId<T>()`.
 using FastTypeIdType = const void*;
 
+// `absl::FastTypeId<Type>()` evaluates at compile-time to a unique id for the
+// passed-in type. These are meant to be a good match for keys into maps or
+// straight up comparisons.
 template <typename Type>
-constexpr inline FastTypeIdType FastTypeId() {
-  return &FastTypeTag<Type>::dummy_var;
+constexpr FastTypeIdType FastTypeId() {
+  return &base_internal::FastTypeTag<Type>::kDummyVar;
 }
 
-}  // namespace base_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
 
-#endif  // ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
+#endif  // ABSL_BASE_FAST_TYPE_ID_H_
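
A small usage sketch (not part of this patch) of the now-public `absl::FastTypeId<T>()`, e.g. as a cheap type-keyed registry; the header path and names are taken from the hunk above.

#include <cstdio>
#include <map>
#include <string>

#include "absl/base/fast_type_id.h"

int main() {
  // FastTypeIdType is a pointer-sized id, so it works directly as a map key.
  std::map<absl::FastTypeIdType, std::string> names;
  names[absl::FastTypeId<int>()] = "int";
  names[absl::FastTypeId<double>()] = "double";

  // Ids are stable for the same type and distinct across types.
  std::printf("%s\n", names[absl::FastTypeId<int>()].c_str());  // prints "int"
  std::printf("%d\n",
              absl::FastTypeId<int>() == absl::FastTypeId<double>());  // prints 0
  return 0;
}
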
diff --git a/absl/base/fast_type_id_test.cc b/absl/base/fast_type_id_test.cc
new file mode 100644
index 0000000..3068e4b
--- /dev/null
+++ b/absl/base/fast_type_id_test.cc
@@ -0,0 +1,127 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/fast_type_id.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <map>
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/base/macros.h"
+
+namespace {
+
+// NOLINTBEGIN(runtime/int)
+#define PRIM_TYPES(A)   \
+  A(bool)               \
+  A(short)              \
+  A(unsigned short)     \
+  A(int)                \
+  A(unsigned int)       \
+  A(long)               \
+  A(unsigned long)      \
+  A(long long)          \
+  A(unsigned long long) \
+  A(float)              \
+  A(double)             \
+  A(long double)
+// NOLINTEND(runtime/int)
+
+TEST(FastTypeIdTest, PrimitiveTypes) {
+  // clang-format off
+  constexpr absl::FastTypeIdType kTypeIds[] = {
+#define A(T) absl::FastTypeId<T>(),
+    PRIM_TYPES(A)
+#undef A
+#define A(T) absl::FastTypeId<const T>(),
+    PRIM_TYPES(A)
+#undef A
+#define A(T) absl::FastTypeId<volatile T>(),
+    PRIM_TYPES(A)
+#undef A
+#define A(T) absl::FastTypeId<const volatile T>(),
+    PRIM_TYPES(A)
+#undef A
+  };
+  // clang-format on
+
+  for (size_t i = 0; i < ABSL_ARRAYSIZE(kTypeIds); ++i) {
+    EXPECT_EQ(kTypeIds[i], kTypeIds[i]);
+    for (size_t j = 0; j < i; ++j) {
+      EXPECT_NE(kTypeIds[i], kTypeIds[j]);
+    }
+  }
+}
+
+#define FIXED_WIDTH_TYPES(A) \
+  A(int8_t)                  \
+  A(uint8_t)                 \
+  A(int16_t)                 \
+  A(uint16_t)                \
+  A(int32_t)                 \
+  A(uint32_t)                \
+  A(int64_t)                 \
+  A(uint64_t)
+
+TEST(FastTypeIdTest, FixedWidthTypes) {
+  // clang-format off
+  constexpr absl::FastTypeIdType kTypeIds[] = {
+#define A(T) absl::FastTypeId<T>(),
+    FIXED_WIDTH_TYPES(A)
+#undef A
+#define A(T) absl::FastTypeId<const T>(),
+    FIXED_WIDTH_TYPES(A)
+#undef A
+#define A(T) absl::FastTypeId<volatile T>(),
+    FIXED_WIDTH_TYPES(A)
+#undef A
+#define A(T) absl::FastTypeId<const volatile T>(),
+    FIXED_WIDTH_TYPES(A)
+#undef A
+  };
+  // clang-format on
+
+  for (size_t i = 0; i < ABSL_ARRAYSIZE(kTypeIds); ++i) {
+    EXPECT_EQ(kTypeIds[i], kTypeIds[i]);
+    for (size_t j = 0; j < i; ++j) {
+      EXPECT_NE(kTypeIds[i], kTypeIds[j]);
+    }
+  }
+}
+
+TEST(FastTypeIdTest, AliasTypes) {
+  using int_alias = int;
+  EXPECT_EQ(absl::FastTypeId<int_alias>(), absl::FastTypeId<int>());
+}
+
+TEST(FastTypeIdTest, TemplateSpecializations) {
+  EXPECT_NE(absl::FastTypeId<std::vector<int>>(),
+            absl::FastTypeId<std::vector<long>>());  // NOLINT(runtime/int)
+
+  EXPECT_NE((absl::FastTypeId<std::map<int, float>>()),
+            (absl::FastTypeId<std::map<int, double>>()));
+}
+
+struct Base {};
+struct Derived : Base {};
+struct PDerived : private Base {};
+
+TEST(FastTypeIdTest, Inheritance) {
+  EXPECT_NE(absl::FastTypeId<Base>(), absl::FastTypeId<Derived>());
+  EXPECT_NE(absl::FastTypeId<Base>(), absl::FastTypeId<PDerived>());
+}
+
+}  // namespace
diff --git a/absl/base/internal/endian.h b/absl/base/internal/endian.h
index e1a67f5..fb38f60 100644
--- a/absl/base/internal/endian.h
+++ b/absl/base/internal/endian.h
@@ -157,27 +157,27 @@
 }
 
 // Functions to do unaligned loads and stores in little-endian order.
-inline uint16_t Load16(absl::Nonnull<const void *> p) {
+inline uint16_t Load16(const void* absl_nonnull p) {
   return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
 }
 
-inline void Store16(absl::Nonnull<void *> p, uint16_t v) {
+inline void Store16(void* absl_nonnull p, uint16_t v) {
   ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
 }
 
-inline uint32_t Load32(absl::Nonnull<const void *> p) {
+inline uint32_t Load32(const void* absl_nonnull p) {
   return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
 }
 
-inline void Store32(absl::Nonnull<void *> p, uint32_t v) {
+inline void Store32(void* absl_nonnull p, uint32_t v) {
   ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
 }
 
-inline uint64_t Load64(absl::Nonnull<const void *> p) {
+inline uint64_t Load64(const void* absl_nonnull p) {
   return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
 }
 
-inline void Store64(absl::Nonnull<void *> p, uint64_t v) {
+inline void Store64(void* absl_nonnull p, uint64_t v) {
   ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
 }
 
@@ -247,27 +247,27 @@
 }
 
 // Functions to do unaligned loads and stores in big-endian order.
-inline uint16_t Load16(absl::Nonnull<const void *> p) {
+inline uint16_t Load16(const void* absl_nonnull p) {
   return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
 }
 
-inline void Store16(absl::Nonnull<void *> p, uint16_t v) {
+inline void Store16(void* absl_nonnull p, uint16_t v) {
   ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
 }
 
-inline uint32_t Load32(absl::Nonnull<const void *> p) {
+inline uint32_t Load32(const void* absl_nonnull p) {
   return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
 }
 
-inline void Store32(absl::Nonnull<void *>p, uint32_t v) {
+inline void Store32(void* absl_nonnull p, uint32_t v) {
   ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
 }
 
-inline uint64_t Load64(absl::Nonnull<const void *> p) {
+inline uint64_t Load64(const void* absl_nonnull p) {
   return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
 }
 
-inline void Store64(absl::Nonnull<void *> p, uint64_t v) {
+inline void Store64(void* absl_nonnull p, uint64_t v) {
   ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
 }
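
An illustrative sketch (not part of this patch) of the unaligned load/store helpers this file provides, shown for the little-endian flavor. They live in the internal `absl::little_endian` namespace, so this is for exposition rather than end-user code.

#include <cstdint>
#include <cstdio>

#include "absl/base/internal/endian.h"

int main() {
  // Bytes 1..4 hold 0x12345678 in little-endian order; the load is unaligned.
  unsigned char buf[5] = {0x00, 0x78, 0x56, 0x34, 0x12};
  uint32_t v = absl::little_endian::Load32(buf + 1);
  // Prints 0x12345678 regardless of host endianness.
  std::printf("0x%08x\n", static_cast<unsigned>(v));
  // Store the value back in little-endian order at the same unaligned offset.
  absl::little_endian::Store32(buf + 1, v);
  return 0;
}
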
 
diff --git a/absl/base/internal/fast_type_id_test.cc b/absl/base/internal/fast_type_id_test.cc
deleted file mode 100644
index 16f3c14..0000000
--- a/absl/base/internal/fast_type_id_test.cc
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2020 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/base/internal/fast_type_id.h"
-
-#include <cstdint>
-#include <map>
-#include <vector>
-
-#include "gtest/gtest.h"
-
-namespace {
-namespace bi = absl::base_internal;
-
-// NOLINTNEXTLINE
-#define PRIM_TYPES(A)   \
-  A(bool)               \
-  A(short)              \
-  A(unsigned short)     \
-  A(int)                \
-  A(unsigned int)       \
-  A(long)               \
-  A(unsigned long)      \
-  A(long long)          \
-  A(unsigned long long) \
-  A(float)              \
-  A(double)             \
-  A(long double)
-
-TEST(FastTypeIdTest, PrimitiveTypes) {
-  bi::FastTypeIdType type_ids[] = {
-#define A(T) bi::FastTypeId<T>(),
-    PRIM_TYPES(A)
-#undef A
-#define A(T) bi::FastTypeId<const T>(),
-    PRIM_TYPES(A)
-#undef A
-#define A(T) bi::FastTypeId<volatile T>(),
-    PRIM_TYPES(A)
-#undef A
-#define A(T) bi::FastTypeId<const volatile T>(),
-    PRIM_TYPES(A)
-#undef A
-  };
-  size_t total_type_ids = sizeof(type_ids) / sizeof(bi::FastTypeIdType);
-
-  for (int i = 0; i < total_type_ids; ++i) {
-    EXPECT_EQ(type_ids[i], type_ids[i]);
-    for (int j = 0; j < i; ++j) {
-      EXPECT_NE(type_ids[i], type_ids[j]);
-    }
-  }
-}
-
-#define FIXED_WIDTH_TYPES(A) \
-  A(int8_t)                  \
-  A(uint8_t)                 \
-  A(int16_t)                 \
-  A(uint16_t)                \
-  A(int32_t)                 \
-  A(uint32_t)                \
-  A(int64_t)                 \
-  A(uint64_t)
-
-TEST(FastTypeIdTest, FixedWidthTypes) {
-  bi::FastTypeIdType type_ids[] = {
-#define A(T) bi::FastTypeId<T>(),
-    FIXED_WIDTH_TYPES(A)
-#undef A
-#define A(T) bi::FastTypeId<const T>(),
-    FIXED_WIDTH_TYPES(A)
-#undef A
-#define A(T) bi::FastTypeId<volatile T>(),
-    FIXED_WIDTH_TYPES(A)
-#undef A
-#define A(T) bi::FastTypeId<const volatile T>(),
-    FIXED_WIDTH_TYPES(A)
-#undef A
-  };
-  size_t total_type_ids = sizeof(type_ids) / sizeof(bi::FastTypeIdType);
-
-  for (int i = 0; i < total_type_ids; ++i) {
-    EXPECT_EQ(type_ids[i], type_ids[i]);
-    for (int j = 0; j < i; ++j) {
-      EXPECT_NE(type_ids[i], type_ids[j]);
-    }
-  }
-}
-
-TEST(FastTypeIdTest, AliasTypes) {
-  using int_alias = int;
-  EXPECT_EQ(bi::FastTypeId<int_alias>(), bi::FastTypeId<int>());
-}
-
-TEST(FastTypeIdTest, TemplateSpecializations) {
-  EXPECT_NE(bi::FastTypeId<std::vector<int>>(),
-            bi::FastTypeId<std::vector<long>>());
-
-  EXPECT_NE((bi::FastTypeId<std::map<int, float>>()),
-            (bi::FastTypeId<std::map<int, double>>()));
-}
-
-struct Base {};
-struct Derived : Base {};
-struct PDerived : private Base {};
-
-TEST(FastTypeIdTest, Inheritance) {
-  EXPECT_NE(bi::FastTypeId<Base>(), bi::FastTypeId<Derived>());
-  EXPECT_NE(bi::FastTypeId<Base>(), bi::FastTypeId<PDerived>());
-}
-
-}  // namespace
diff --git a/absl/base/internal/unaligned_access.h b/absl/base/internal/unaligned_access.h
index 4fea457..3f5dd6f 100644
--- a/absl/base/internal/unaligned_access.h
+++ b/absl/base/internal/unaligned_access.h
@@ -36,33 +36,33 @@
 ABSL_NAMESPACE_BEGIN
 namespace base_internal {
 
-inline uint16_t UnalignedLoad16(absl::Nonnull<const void *> p) {
+inline uint16_t UnalignedLoad16(const void* absl_nonnull p) {
   uint16_t t;
   memcpy(&t, p, sizeof t);
   return t;
 }
 
-inline uint32_t UnalignedLoad32(absl::Nonnull<const void *> p) {
+inline uint32_t UnalignedLoad32(const void* absl_nonnull p) {
   uint32_t t;
   memcpy(&t, p, sizeof t);
   return t;
 }
 
-inline uint64_t UnalignedLoad64(absl::Nonnull<const void *> p) {
+inline uint64_t UnalignedLoad64(const void* absl_nonnull p) {
   uint64_t t;
   memcpy(&t, p, sizeof t);
   return t;
 }
 
-inline void UnalignedStore16(absl::Nonnull<void *> p, uint16_t v) {
+inline void UnalignedStore16(void* absl_nonnull p, uint16_t v) {
   memcpy(p, &v, sizeof v);
 }
 
-inline void UnalignedStore32(absl::Nonnull<void *> p, uint32_t v) {
+inline void UnalignedStore32(void* absl_nonnull p, uint32_t v) {
   memcpy(p, &v, sizeof v);
 }
 
-inline void UnalignedStore64(absl::Nonnull<void *> p, uint64_t v) {
+inline void UnalignedStore64(void* absl_nonnull p, uint64_t v) {
   memcpy(p, &v, sizeof v);
 }
 
diff --git a/absl/base/no_destructor.h b/absl/base/no_destructor.h
index 2478d69..9d960ee 100644
--- a/absl/base/no_destructor.h
+++ b/absl/base/no_destructor.h
@@ -135,11 +135,11 @@
   // Pretend to be a smart pointer to T with deep constness.
   // Never returns a null pointer.
   T& operator*() { return *get(); }
-  absl::Nonnull<T*> operator->() { return get(); }
-  absl::Nonnull<T*> get() { return impl_.get(); }
+  T* absl_nonnull operator->() { return get(); }
+  T* absl_nonnull get() { return impl_.get(); }
   const T& operator*() const { return *get(); }
-  absl::Nonnull<const T*> operator->() const { return get(); }
-  absl::Nonnull<const T*> get() const { return impl_.get(); }
+  const T* absl_nonnull operator->() const { return get(); }
+  const T* absl_nonnull get() const { return impl_.get(); }
 
  private:
   class DirectImpl {
@@ -147,8 +147,8 @@
     template <typename... Args>
     explicit constexpr DirectImpl(Args&&... args)
         : value_(std::forward<Args>(args)...) {}
-    absl::Nonnull<const T*> get() const { return &value_; }
-    absl::Nonnull<T*> get() { return &value_; }
+    const T* absl_nonnull get() const { return &value_; }
+    T* absl_nonnull get() { return &value_; }
 
    private:
     T value_;
@@ -160,10 +160,10 @@
     explicit PlacementImpl(Args&&... args) {
       new (&space_) T(std::forward<Args>(args)...);
     }
-    absl::Nonnull<const T*> get() const {
+    const T* absl_nonnull get() const {
       return std::launder(reinterpret_cast<const T*>(&space_));
     }
-    absl::Nonnull<T*> get() {
+    T* absl_nonnull get() {
       return std::launder(reinterpret_cast<T*>(&space_));
     }
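
For context (not part of this patch), a typical `absl::NoDestructor` use; the accessors annotated above (`get`, `operator->`, `operator*`) are what make the returned pointers non-null by construction.

#include <cstdio>
#include <string>
#include <vector>

#include "absl/base/no_destructor.h"

// Function-local static that is constructed on first use and never destroyed,
// which avoids destruction-order issues at program exit.
const std::vector<std::string>& DefaultNames() {
  static const absl::NoDestructor<std::vector<std::string>> kNames(
      std::vector<std::string>{"alpha", "beta"});
  return *kNames;
}

int main() {
  std::printf("%zu\n", DefaultNames().size());  // prints 2
  return 0;
}
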
 
diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel
index ccaed1c..768b8ab 100644
--- a/absl/container/BUILD.bazel
+++ b/absl/container/BUILD.bazel
@@ -694,6 +694,27 @@
 )
 
 cc_library(
+    name = "raw_hash_set_resize_impl",
+    hdrs = ["internal/raw_hash_set_resize_impl.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = ["//absl/base:config"],
+)
+
+cc_test(
+    name = "raw_hash_set_resize_impl_test",
+    srcs = ["internal/raw_hash_set_resize_impl_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":raw_hash_set_resize_impl",
+        "//absl/base:config",
+        "@googletest//:gtest",
+        "@googletest//:gtest_main",
+    ],
+)
+
+cc_library(
     name = "raw_hash_set",
     srcs = ["internal/raw_hash_set.cc"],
     hdrs = ["internal/raw_hash_set.h"],
@@ -709,6 +730,7 @@
         ":hashtable_control_bytes",
         ":hashtable_debug_hooks",
         ":hashtablez_sampler",
+        ":raw_hash_set_resize_impl",
         "//absl/base:config",
         "//absl/base:core_headers",
         "//absl/base:dynamic_annotations",
@@ -746,6 +768,7 @@
         ":hashtablez_sampler",
         ":node_hash_set",
         ":raw_hash_set",
+        ":raw_hash_set_resize_impl",
         ":test_allocator",
         ":test_instance_tracker",
         "//absl/base",
diff --git a/absl/container/CMakeLists.txt b/absl/container/CMakeLists.txt
index 39ff083..66fe405 100644
--- a/absl/container/CMakeLists.txt
+++ b/absl/container/CMakeLists.txt
@@ -828,6 +828,32 @@
     GTest::gmock_main
 )
 
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    raw_hash_set_resize_impl
+  HDRS
+    "internal/raw_hash_set_resize_impl.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::config
+  PUBLIC
+)
+
+absl_cc_test(
+  NAME
+    raw_hash_set_resize_impl_test
+  SRCS
+    "internal/raw_hash_set_resize_impl_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::config
+    absl::raw_hash_set_resize_impl
+    GTest::gmock_main
+)
+
 absl_cc_test(
   NAME
     raw_hash_set_allocator_test
diff --git a/absl/container/internal/raw_hash_set.cc b/absl/container/internal/raw_hash_set.cc
index 13fd275..6d61872 100644
--- a/absl/container/internal/raw_hash_set.cc
+++ b/absl/container/internal/raw_hash_set.cc
@@ -29,6 +29,7 @@
 #include "absl/container/internal/container_memory.h"
 #include "absl/container/internal/hashtable_control_bytes.h"
 #include "absl/container/internal/hashtablez_sampler.h"
+#include "absl/container/internal/raw_hash_set_resize_impl.h"
 #include "absl/functional/function_ref.h"
 #include "absl/hash/hash.h"
 #include "absl/numeric/bits.h"
@@ -69,6 +70,15 @@
 
 namespace {
 
+#ifdef ABSL_SWISSTABLE_ASSERT
+#error ABSL_SWISSTABLE_ASSERT cannot be directly set
+#else
+// We use this macro for assertions that users may see when the table is in an
+// invalid state that sanitizers may help diagnose.
+#define ABSL_SWISSTABLE_ASSERT(CONDITION) \
+  assert((CONDITION) && "Try enabling sanitizers.")
+#endif
+
 [[noreturn]] ABSL_ATTRIBUTE_NOINLINE void HashTableSizeOverflow() {
   ABSL_RAW_LOG(FATAL, "Hash table size overflow");
 }
@@ -104,9 +114,9 @@
 // Find a non-deterministic hash for single group table.
 // Last two bits are used to find a position for a newly inserted element after
 // resize.
-// This function is mixing all bits of hash and seed to maximize entropy.
+// This function basically uses the last bits of H2 to save a shift operation.
 size_t SingleGroupTableH1(size_t hash, PerTableSeed seed) {
-  return static_cast<size_t>(absl::popcount(hash ^ seed.seed()));
+  return hash ^ seed.seed();
 }
 
 // Returns the address of the slot `i` iterations after `slot` assuming each
@@ -147,11 +157,44 @@
   return ShouldRehashForBugDetection(seed, capacity);
 }
 
-bool ShouldInsertBackwardsForDebug(size_t capacity, size_t hash,
-                                   PerTableSeed seed) {
-  // To avoid problems with weak hashes and single bit tests, we use % 13.
-  // TODO(kfm,sbenza): revisit after we do unconditional mixing
-  return !is_small(capacity) && (H1(hash, seed) ^ RandomSeed()) % 13 > 6;
+namespace {
+
+FindInfo find_first_non_full_from_h1(const ctrl_t* ctrl, size_t h1,
+                                     size_t capacity) {
+  auto seq = probe(h1, capacity);
+  if (IsEmptyOrDeleted(ctrl[seq.offset()])) {
+    return {seq.offset(), /*probe_length=*/0};
+  }
+  while (true) {
+    GroupFullEmptyOrDeleted g{ctrl + seq.offset()};
+    auto mask = g.MaskEmptyOrDeleted();
+    if (mask) {
+      return {seq.offset(mask.LowestBitSet()), seq.index()};
+    }
+    seq.next();
+    ABSL_SWISSTABLE_ASSERT(seq.index() <= capacity && "full table!");
+  }
+}
+
+// Whether a table is "small". A small table fits entirely into a probing
+// group, i.e., has a capacity < `Group::kWidth`.
+//
+// In small mode we are able to use the whole capacity. The extra control
+// bytes give us at least one "empty" control byte to stop the iteration.
+// This is important to make 1 a valid capacity.
+//
+// In small mode only the first `capacity` control bytes after the sentinel
+// are valid. The rest contain dummy ctrl_t::kEmpty values that do not
+// represent a real slot.
+constexpr bool is_small(size_t capacity) {
+  return capacity < Group::kWidth - 1;
+}
+
+}  // namespace
+
+FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
+  return find_first_non_full_from_h1(common.control(), H1(hash, common.seed()),
+                                     common.capacity());
 }
 
 void IterateOverFullSlots(const CommonFields& c, size_t slot_size,
@@ -167,8 +210,8 @@
     // Small tables capacity fits into portable group, where
     // GroupPortableImpl::MaskFull is more efficient for the
     // capacity <= GroupPortableImpl::kWidth.
-    assert(cap <= GroupPortableImpl::kWidth &&
-           "unexpectedly large small capacity");
+    ABSL_SWISSTABLE_ASSERT(cap <= GroupPortableImpl::kWidth &&
+                           "unexpectedly large small capacity");
     static_assert(Group::kWidth >= GroupPortableImpl::kWidth,
                   "unexpected group width");
     // Group starts from kSentinel slot, so indices in the mask will
@@ -185,38 +228,26 @@
   ABSL_ATTRIBUTE_UNUSED const size_t original_size_for_assert = remaining;
   while (remaining != 0) {
     for (uint32_t i : GroupFullEmptyOrDeleted(ctrl).MaskFull()) {
-      assert(IsFull(ctrl[i]) && "hash table was modified unexpectedly");
+      ABSL_SWISSTABLE_ASSERT(IsFull(ctrl[i]) &&
+                             "hash table was modified unexpectedly");
       cb(ctrl + i, SlotAddress(slot, i, slot_size));
       --remaining;
     }
     ctrl += Group::kWidth;
     slot = NextSlot(slot, slot_size, Group::kWidth);
-    assert((remaining == 0 || *(ctrl - 1) != ctrl_t::kSentinel) &&
-           "hash table was modified unexpectedly");
+    ABSL_SWISSTABLE_ASSERT(
+        (remaining == 0 || *(ctrl - 1) != ctrl_t::kSentinel) &&
+        "hash table was modified unexpectedly");
   }
   // NOTE: erasure of the current element is allowed in callback for
   // absl::erase_if specialization. So we use `>=`.
-  assert(original_size_for_assert >= c.size() &&
-         "hash table was modified unexpectedly");
-}
-
-size_t PrepareInsertAfterSoo(size_t hash, size_t slot_size,
-                             CommonFields& common) {
-  assert(common.capacity() == NextCapacity(SooCapacity()));
-  // After resize from capacity 1 to 3, we always have exactly the slot with
-  // index 1 occupied, so we need to insert either at index 0 or index 2.
-  static_assert(SooSlotIndex() == 1, "");
-  PrepareInsertCommon(common);
-  const size_t offset = SingleGroupTableH1(hash, common.seed()) & 2;
-  common.growth_info().OverwriteEmptyAsFull();
-  SetCtrlInSingleGroupTable(common, offset, H2(hash), slot_size);
-  common.infoz().RecordInsert(hash, /*distance_from_desired=*/0);
-  return offset;
+  ABSL_SWISSTABLE_ASSERT(original_size_for_assert >= c.size() &&
+                         "hash table was modified unexpectedly");
 }
 
 void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) {
-  assert(ctrl[capacity] == ctrl_t::kSentinel);
-  assert(IsValidCapacity(capacity));
+  ABSL_SWISSTABLE_ASSERT(ctrl[capacity] == ctrl_t::kSentinel);
+  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(capacity));
   for (ctrl_t* pos = ctrl; pos < ctrl + capacity; pos += Group::kWidth) {
     Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
   }
@@ -224,19 +255,15 @@
   std::memcpy(ctrl + capacity + 1, ctrl, NumClonedBytes());
   ctrl[capacity] = ctrl_t::kSentinel;
 }
-// Extern template instantiation for inline function.
-template FindInfo find_first_non_full(const CommonFields&, size_t);
-
-FindInfo find_first_non_full_outofline(const CommonFields& common,
-                                       size_t hash) {
-  return find_first_non_full(common, hash);
-}
 
 namespace {
 
+void ResetGrowthLeft(GrowthInfo& growth_info, size_t capacity, size_t size) {
+  growth_info.InitGrowthLeftNoDeleted(CapacityToGrowth(capacity) - size);
+}
+
 void ResetGrowthLeft(CommonFields& common) {
-  common.growth_info().InitGrowthLeftNoDeleted(
-      CapacityToGrowth(common.capacity()) - common.size());
+  ResetGrowthLeft(common.growth_info(), common.capacity(), common.size());
 }
 
 // Finds guaranteed to exists empty slot from the given position.
@@ -270,8 +297,8 @@
   void* set = &common;
   void* slot_array = common.slot_array();
   const size_t capacity = common.capacity();
-  assert(IsValidCapacity(capacity));
-  assert(!is_small(capacity));
+  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(capacity));
+  ABSL_SWISSTABLE_ASSERT(!is_single_group(capacity));
   // Algorithm:
   // - mark all DELETED slots as EMPTY
   // - mark all FULL slots as DELETED
@@ -305,7 +332,7 @@
 
   for (size_t i = 0; i != capacity;
        ++i, slot_ptr = NextSlot(slot_ptr, slot_size)) {
-    assert(slot_ptr == SlotAddress(slot_array, i, slot_size));
+    ABSL_SWISSTABLE_ASSERT(slot_ptr == SlotAddress(slot_array, i, slot_size));
     if (IsEmpty(ctrl[i])) {
       tmp_space_id = i;
       continue;
@@ -342,7 +369,7 @@
       // Initialize or change empty space id.
       tmp_space_id = i;
     } else {
-      assert(IsDeleted(ctrl[new_i]));
+      ABSL_SWISSTABLE_ASSERT(IsDeleted(ctrl[new_i]));
       SetCtrlInLargeTable(common, new_i, h2, slot_size);
       // Until we are done rehashing, DELETED marks previously FULL slots.
 
@@ -437,14 +464,19 @@
   absl::little_endian::Store64(new_ctrl, first_ctrl_bytes);
 }
 
-// Initializes control bytes after SOO to the next capacity.
+// Initializes control bytes for growing after SOO to the next capacity.
+// `soo_ctrl` is placed at position `SooSlotIndex()`.
+// `H2(new_hash)` is placed at position `new_offset`.
 // The table must be non-empty SOO.
 ABSL_ATTRIBUTE_ALWAYS_INLINE inline void
-InitializeThreeElementsControlBytesAfterSoo(size_t hash, ctrl_t* new_ctrl) {
+InitializeThreeElementsControlBytesAfterSoo(ctrl_t soo_ctrl, size_t new_hash,
+                                            size_t new_offset,
+                                            ctrl_t* new_ctrl) {
   static constexpr size_t kNewCapacity = NextCapacity(SooCapacity());
   static_assert(kNewCapacity == 3);
   static_assert(is_single_group(kNewCapacity));
   static_assert(SooSlotIndex() == 1);
+  ABSL_SWISSTABLE_ASSERT(new_offset == 0 || new_offset == 2);
 
   static constexpr uint64_t kEmptyXorSentinel =
       static_cast<uint8_t>(ctrl_t::kEmpty) ^
@@ -452,20 +484,33 @@
   static constexpr uint64_t kEmpty64 = static_cast<uint8_t>(ctrl_t::kEmpty);
   static constexpr size_t kMirroredSooSlotIndex =
       SooSlotIndex() + kNewCapacity + 1;
-  // The first 8 bytes, where present slot positions are replaced with 0.
+  // The first 8 bytes, where the SOO slot's original and mirrored positions
+  // are replaced with 0.
+  // The result will look like: E0ESE0EE
   static constexpr uint64_t kFirstCtrlBytesWithZeroes =
       k8EmptyBytes ^ (kEmpty64 << (8 * SooSlotIndex())) ^
       (kEmptyXorSentinel << (8 * kNewCapacity)) ^
       (kEmpty64 << (8 * kMirroredSooSlotIndex));
 
-  const uint64_t h2 = static_cast<uint64_t>(H2(hash));
-  // Fill the original 0th and mirrored 2nd bytes with the hash.
+  const uint64_t soo_h2 = static_cast<uint64_t>(soo_ctrl);
+  const uint64_t new_h2_xor_empty = static_cast<uint64_t>(
+      H2(new_hash) ^ static_cast<uint8_t>(ctrl_t::kEmpty));
+  // Fill the original and mirrored bytes for SOO slot.
   // Result will look like:
   // EHESEHEE
-  // Where H = h2, E = kEmpty, S = kSentinel.
-  const uint64_t first_ctrl_bytes =
-      ((h2 << (8 * SooSlotIndex())) | kFirstCtrlBytesWithZeroes) |
-      (h2 << (8 * kMirroredSooSlotIndex));
+  // Where H = soo_h2, E = kEmpty, S = kSentinel.
+  uint64_t first_ctrl_bytes =
+      ((soo_h2 << (8 * SooSlotIndex())) | kFirstCtrlBytesWithZeroes) |
+      (soo_h2 << (8 * kMirroredSooSlotIndex));
+  // Replace original and mirrored empty bytes for the new position.
+  // Result for new_offset 0 will look like:
+  // NHESNHEE
+  // Where H = soo_h2, N = H2(new_hash), E = kEmpty, S = kSentinel.
+  // Result for new_offset 2 will look like:
+  // EHNSEHNE
+  first_ctrl_bytes ^= (new_h2_xor_empty << (8 * new_offset));
+  size_t new_mirrored_offset = new_offset + kNewCapacity + 1;
+  first_ctrl_bytes ^= (new_h2_xor_empty << (8 * new_mirrored_offset));
 
   // Fill last bytes with kEmpty.
   std::memset(new_ctrl + kNewCapacity, static_cast<int8_t>(ctrl_t::kEmpty),
@@ -475,17 +520,24 @@
 
   // Example for group size 16:
   // new_ctrl after 1st memset =      ???EEEEEEEEEEEEEEEE
-  // new_ctrl after 2nd store  =      EHESEHEEEEEEEEEEEEE
+  // new_offset 0:
+  // new_ctrl after 2nd store  =      NHESNHEEEEEEEEEEEEE
+  // new_offset 2:
+  // new_ctrl after 2nd store  =      EHNSEHNEEEEEEEEEEEE
 
   // Example for group size 8:
   // new_ctrl after 1st memset =      ???EEEEEEEE
-  // new_ctrl after 2nd store  =      EHESEHEEEEE
+  // new_offset 0:
+  // new_ctrl after 2nd store  =      NHESNHEEEEE
+  // new_offset 2:
+  // new_ctrl after 2nd store  =      EHNSEHNEEEE
 }
 
 }  // namespace
 
 void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size) {
-  assert(IsFull(c.control()[index]) && "erasing a dangling iterator");
+  ABSL_SWISSTABLE_ASSERT(IsFull(c.control()[index]) &&
+                         "erasing a dangling iterator");
   c.decrement_size();
   c.infoz().RecordErase();
 
@@ -503,7 +555,7 @@
                        void* alloc, bool reuse, bool soo_enabled) {
   if (reuse) {
     c.set_size_to_zero();
-    assert(!soo_enabled || c.capacity() > SooCapacity());
+    ABSL_SWISSTABLE_ASSERT(!soo_enabled || c.capacity() > SooCapacity());
     ResetCtrl(c, policy.slot_size);
     ResetGrowthLeft(c);
     c.infoz().RecordStorageChanged(0, c.capacity());
@@ -521,27 +573,47 @@
 
 namespace {
 
-// Poisons empty slots. It is useful when slots are transferred via memcpy.
-// PRECONDITIONs: common.control() is fully initialized.
-void PoisonEmptySlots(CommonFields& c, size_t slot_size) {
-  for (size_t i = 0; i < c.capacity(); ++i) {
-    if (!IsFull(c.control()[i])) {
-      SanitizerPoisonMemoryRegion(SlotAddress(c.slot_array(), i, slot_size),
-                                  slot_size);
-    }
-  }
-}
-
 enum class ResizeNonSooMode {
   kGuaranteedEmpty,
   kGuaranteedAllocated,
 };
 
+// Iterates over full slots in the old table, finds new positions for them,
+// and transfers the slots.
+// This function is used for reserving or rehashing non-empty tables.
+// This use case is rare, so the function is type-erased.
+// Returns the total probe length.
+size_t FindNewPositionsAndTransferSlots(CommonFields& common,
+                                        const PolicyFunctions& policy,
+                                        ctrl_t* old_ctrl, void* old_slots,
+                                        size_t old_capacity) {
+  void* new_slots = common.slot_array();
+  const void* hash_fn = policy.hash_fn(common);
+  const size_t slot_size = policy.slot_size;
+
+  const auto insert_slot = [&](void* slot) {
+    size_t hash = policy.hash_slot(hash_fn, slot);
+    auto target = find_first_non_full(common, hash);
+    SetCtrl(common, target.offset, H2(hash), slot_size);
+    policy.transfer_n(&common, SlotAddress(new_slots, target.offset, slot_size),
+                      slot, 1);
+    return target.probe_length;
+  };
+  size_t total_probe_length = 0;
+  for (size_t i = 0; i < old_capacity; ++i) {
+    if (IsFull(old_ctrl[i])) {
+      total_probe_length += insert_slot(old_slots);
+    }
+    old_slots = NextSlot(old_slots, slot_size);
+  }
+  return total_probe_length;
+}
+
 template <ResizeNonSooMode kMode>
 void ResizeNonSooImpl(CommonFields& common, const PolicyFunctions& policy,
                       size_t new_capacity, HashtablezInfoHandle infoz) {
-  assert(IsValidCapacity(new_capacity));
-  assert(new_capacity > policy.soo_capacity);
+  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(new_capacity));
+  ABSL_SWISSTABLE_ASSERT(new_capacity > policy.soo_capacity);
 
   const size_t old_capacity = common.capacity();
   [[maybe_unused]] ctrl_t* old_ctrl = common.control();
@@ -560,23 +632,28 @@
       reinterpret_cast<GenerationType*>(mem + layout.generation_offset()));
   common.set_generation(NextGeneration(old_generation));
 
-  common.set_control</*kGenerateSeed=*/true>(
-      reinterpret_cast<ctrl_t*>(mem + layout.control_offset()));
+  ctrl_t* new_ctrl = reinterpret_cast<ctrl_t*>(mem + layout.control_offset());
+  common.set_control</*kGenerateSeed=*/true>(new_ctrl);
   common.set_slots(mem + layout.slot_offset());
 
   size_t total_probe_length = 0;
   ResetCtrl(common, slot_size);
-  assert(kMode != ResizeNonSooMode::kGuaranteedEmpty ||
-         old_capacity == policy.soo_capacity);
-  assert(kMode != ResizeNonSooMode::kGuaranteedAllocated || old_capacity > 0);
+  ABSL_SWISSTABLE_ASSERT(kMode != ResizeNonSooMode::kGuaranteedEmpty ||
+                         old_capacity == policy.soo_capacity);
+  ABSL_SWISSTABLE_ASSERT(kMode != ResizeNonSooMode::kGuaranteedAllocated ||
+                         old_capacity > 0);
   if constexpr (kMode == ResizeNonSooMode::kGuaranteedAllocated) {
-    total_probe_length = policy.find_new_positions_and_transfer_slots(
-        common, old_ctrl, old_slots, old_capacity);
+    total_probe_length = FindNewPositionsAndTransferSlots(
+        common, policy, old_ctrl, old_slots, old_capacity);
     (*policy.dealloc)(alloc, old_capacity, old_ctrl, slot_size, slot_align,
                       has_infoz);
+    ResetGrowthLeft(GetGrowthInfoFromControl(new_ctrl), new_capacity,
+                    common.size());
+  } else {
+    GetGrowthInfoFromControl(new_ctrl).InitGrowthLeftNoDeleted(
+        CapacityToGrowth(new_capacity));
   }
 
-  ResetGrowthLeft(common);
   if (has_infoz) {
     common.set_has_infoz();
     infoz.RecordStorageChanged(common.size(), new_capacity);
@@ -588,11 +665,11 @@
 void ResizeEmptyNonAllocatedTableImpl(CommonFields& common,
                                       const PolicyFunctions& policy,
                                       size_t new_capacity, bool force_infoz) {
-  assert(IsValidCapacity(new_capacity));
-  assert(new_capacity > policy.soo_capacity);
-  assert(!force_infoz || policy.soo_capacity > 0);
-  assert(common.capacity() <= policy.soo_capacity);
-  assert(common.empty());
+  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(new_capacity));
+  ABSL_SWISSTABLE_ASSERT(new_capacity > policy.soo_capacity);
+  ABSL_SWISSTABLE_ASSERT(!force_infoz || policy.soo_capacity > 0);
+  ABSL_SWISSTABLE_ASSERT(common.capacity() <= policy.soo_capacity);
+  ABSL_SWISSTABLE_ASSERT(common.empty());
   const size_t slot_size = policy.slot_size;
   HashtablezInfoHandle infoz;
   const bool should_sample =
@@ -613,8 +690,8 @@
                                                const PolicyFunctions& policy,
                                                size_t hash, ctrl_t* new_ctrl,
                                                void* new_slots) {
-  assert(c.size() == policy.soo_capacity);
-  assert(policy.soo_capacity == SooCapacity());
+  ABSL_SWISSTABLE_ASSERT(c.size() == policy.soo_capacity);
+  ABSL_SWISSTABLE_ASSERT(policy.soo_capacity == SooCapacity());
   size_t new_capacity = c.capacity();
 
   c.generate_new_seed();
@@ -639,9 +716,9 @@
 void ResizeFullSooTable(CommonFields& common, const PolicyFunctions& policy,
                         size_t new_capacity,
                         ResizeFullSooTableSamplingMode sampling_mode) {
-  assert(common.capacity() == policy.soo_capacity);
-  assert(common.size() == policy.soo_capacity);
-  assert(policy.soo_capacity == SooCapacity());
+  ABSL_SWISSTABLE_ASSERT(common.capacity() == policy.soo_capacity);
+  ABSL_SWISSTABLE_ASSERT(common.size() == policy.soo_capacity);
+  ABSL_SWISSTABLE_ASSERT(policy.soo_capacity == SooCapacity());
   const size_t slot_size = policy.slot_size;
   const size_t slot_align = policy.slot_align;
 
@@ -691,7 +768,7 @@
                                             size_t old_capacity,
                                             ctrl_t* __restrict new_ctrl,
                                             size_t new_capacity) {
-  assert(is_single_group(new_capacity));
+  ABSL_SWISSTABLE_ASSERT(is_single_group(new_capacity));
   constexpr size_t kHalfWidth = Group::kWidth / 2;
   ABSL_ASSUME(old_capacity < kHalfWidth);
   ABSL_ASSUME(old_capacity > 0);
@@ -729,7 +806,8 @@
 
   if (Group::kWidth == 8) {
     // With group size 8, we can grow with two write operations.
-    assert(old_capacity < 8 && "old_capacity is too large for group size 8");
+    ABSL_SWISSTABLE_ASSERT(old_capacity < 8 &&
+                           "old_capacity is too large for group size 8");
     absl::little_endian::Store64(new_ctrl, copied_bytes);
 
     static constexpr uint64_t kSentinal64 =
@@ -755,7 +833,7 @@
     return;
   }
 
-  assert(Group::kWidth == 16);
+  ABSL_SWISSTABLE_ASSERT(Group::kWidth == 16);
 
   // Fill the second half of the main control bytes with kEmpty.
   // For small capacity that may write into mirrored control bytes.
@@ -811,16 +889,335 @@
   // new_ctrl after 2nd store =  E0123456EEEEEEESE0123456EEEEEEE
 }
 
+// Size of the buffer we allocate on the stack for storing probed elements in
+// the GrowToNextCapacity algorithm.
+constexpr size_t kProbedElementsBufferSize = 512;
+
+// Decodes information about probed elements from contiguous memory.
+// Finds new position for each element and transfers it to the new slots.
+// Returns the total probe length.
+template <typename ProbedItem>
+ABSL_ATTRIBUTE_NOINLINE size_t DecodeAndInsertImpl(
+    CommonFields& c, const PolicyFunctions& policy, const ProbedItem* start,
+    const ProbedItem* end, void* old_slots) {
+  const size_t new_capacity = c.capacity();
+
+  void* new_slots = c.slot_array();
+  ctrl_t* new_ctrl = c.control();
+  size_t total_probe_length = 0;
+
+  const size_t slot_size = policy.slot_size;
+  auto transfer_n = policy.transfer_n;
+
+  for (; start < end; ++start) {
+    const FindInfo target = find_first_non_full_from_h1(
+        new_ctrl, static_cast<size_t>(start->h1), new_capacity);
+    total_probe_length += target.probe_length;
+    const size_t old_index = static_cast<size_t>(start->source_offset);
+    const size_t new_i = target.offset;
+    ABSL_SWISSTABLE_ASSERT(old_index < new_capacity / 2);
+    ABSL_SWISSTABLE_ASSERT(new_i < new_capacity);
+    ABSL_SWISSTABLE_ASSERT(IsEmpty(new_ctrl[new_i]));
+    void* src_slot = SlotAddress(old_slots, old_index, slot_size);
+    void* dst_slot = SlotAddress(new_slots, new_i, slot_size);
+    SanitizerUnpoisonMemoryRegion(dst_slot, slot_size);
+    transfer_n(&c, dst_slot, src_slot, 1);
+    SetCtrlInLargeTable(c, new_i, static_cast<h2_t>(start->h2), slot_size);
+  }
+  return total_probe_length;
+}
+
+// Sentinel value for the start of marked elements.
+// Signals that there are no marked elements.
+constexpr size_t kNoMarkedElementsSentinel = ~size_t{};
+
+// Processes probed elements that did not fit into the available buffers.
+// We marked them as kSentinel in the control bytes.
+// Hash recomputation and full probing are done here.
+// This use case should be extremely rare.
+ABSL_ATTRIBUTE_NOINLINE size_t
+ProcessProbedMarkedElements(CommonFields& c, const PolicyFunctions& policy,
+                            ctrl_t* old_ctrl, void* old_slots, size_t start) {
+  size_t old_capacity = PreviousCapacity(c.capacity());
+  const size_t slot_size = policy.slot_size;
+  void* new_slots = c.slot_array();
+  size_t total_probe_length = 0;
+  const void* hash_fn = policy.hash_fn(c);
+  auto hash_slot = policy.hash_slot;
+  auto transfer_n = policy.transfer_n;
+  for (size_t old_index = start; old_index < old_capacity; ++old_index) {
+    if (old_ctrl[old_index] != ctrl_t::kSentinel) {
+      continue;
+    }
+    void* src_slot = SlotAddress(old_slots, old_index, slot_size);
+    const size_t hash = hash_slot(hash_fn, src_slot);
+    const FindInfo target = find_first_non_full(c, hash);
+    total_probe_length += target.probe_length;
+    const size_t new_i = target.offset;
+    void* dst_slot = SlotAddress(new_slots, new_i, slot_size);
+    SetCtrlInLargeTable(c, new_i, H2(hash), slot_size);
+    transfer_n(&c, dst_slot, src_slot, 1);
+  }
+  return total_probe_length;
+}
+
+// The largest old capacity for which it is guaranteed that all probed elements
+// fit in ProbedItemEncoder's local buffer.
+// For such tables, `encode_probed_element` is trivial.
+constexpr size_t kMaxLocalBufferOldCapacity =
+    kProbedElementsBufferSize / sizeof(ProbedItem4Bytes) - 1;
+static_assert(IsValidCapacity(kMaxLocalBufferOldCapacity));
+constexpr size_t kMaxLocalBufferNewCapacity =
+    NextCapacity(kMaxLocalBufferOldCapacity);
+static_assert(kMaxLocalBufferNewCapacity <= ProbedItem4Bytes::kMaxNewCapacity);
+static_assert(NextCapacity(kMaxLocalBufferNewCapacity) <=
+              ProbedItem4Bytes::kMaxNewCapacity);
+
+// Initializes mirrored control bytes after
+// transfer_unprobed_elements_to_next_capacity.
+void InitializeMirroredControlBytes(ctrl_t* new_ctrl, size_t new_capacity) {
+  std::memcpy(new_ctrl + new_capacity,
+              // We own GrowthInfo just before control bytes. So it is ok
+              // to read one byte from it.
+              new_ctrl - 1, Group::kWidth);
+  new_ctrl[new_capacity] = ctrl_t::kSentinel;
+}
+
+// Encodes probed elements into available memory.
+// At first, a local (on stack) buffer is used. The size of the buffer is
+// kProbedElementsBufferSize bytes.
+// When the local buffer is full, we switch to the `control_` buffer. We are
+// allowed to overwrite the `control_` buffer up to the `source_offset` byte.
+// If we run out of space in the `control_` buffer, we fall back to a naive
+// algorithm for the rest of the probed elements: we mark those elements as
+// kSentinel in the control bytes and later process them fully. See
+// ProcessProbedMarkedElements for details. This should be extremely rare.
+template <typename ProbedItemType,
+          // If true, we only use the local buffer and never switch to the
+          // control buffer.
+          bool kGuaranteedFitToBuffer = false>
+class ProbedItemEncoder {
+ public:
+  using ProbedItem = ProbedItemType;
+  explicit ProbedItemEncoder(ctrl_t* control) : control_(control) {}
+
+  // Encode item into the best available location.
+  void EncodeItem(ProbedItem item) {
+    if (ABSL_PREDICT_FALSE(!kGuaranteedFitToBuffer && pos_ >= end_)) {
+      return ProcessEncodeWithOverflow(item);
+    }
+    ABSL_SWISSTABLE_ASSERT(pos_ < end_);
+    *pos_ = item;
+    ++pos_;
+  }
+
+  // Decodes information about probed elements from all available sources.
+  // Finds new position for each element and transfers it to the new slots.
+  // Returns the total probe length.
+  size_t DecodeAndInsertToTable(CommonFields& common,
+                                const PolicyFunctions& policy,
+                                void* old_slots) const {
+    if (pos_ == buffer_) {
+      return 0;
+    }
+    if constexpr (kGuaranteedFitToBuffer) {
+      return DecodeAndInsertImpl(common, policy, buffer_, pos_, old_slots);
+    }
+    size_t total_probe_length = DecodeAndInsertImpl(
+        common, policy, buffer_,
+        local_buffer_full_ ? buffer_ + kBufferSize : pos_, old_slots);
+    if (!local_buffer_full_) {
+      return total_probe_length;
+    }
+    total_probe_length +=
+        DecodeAndInsertToTableOverflow(common, policy, old_slots);
+    return total_probe_length;
+  }
+
+ private:
+  static ProbedItem* AlignToNextItem(void* ptr) {
+    return reinterpret_cast<ProbedItem*>(AlignUpTo(
+        reinterpret_cast<uintptr_t>(ptr), alignof(ProbedItem)));
+  }
+
+  ProbedItem* OverflowBufferStart() const {
+    // We reuse GrowthInfo memory as well.
+    return AlignToNextItem(control_ - ControlOffset(/*has_infoz=*/false));
+  }
+
+  // Encodes an item when the previously available buffer is full.
+  // The first time this happens is when the local buffer is full and we
+  // switch from the local buffer to the control buffer.
+  // Every time this function is called, the available buffer is extended up
+  // to the `item.source_offset` byte in the control buffer.
+  // After the buffer is extended, this function won't be called again until
+  // the buffer is exhausted.
+  //
+  // If there's no space left in the control buffer, we fall back to the naive
+  // algorithm and mark probed elements as kSentinel in the control buffer. In
+  // that case, we will call this function for every subsequent probed element.
+  ABSL_ATTRIBUTE_NOINLINE void ProcessEncodeWithOverflow(ProbedItem item) {
+    if (!local_buffer_full_) {
+      local_buffer_full_ = true;
+      pos_ = OverflowBufferStart();
+    }
+    const size_t source_offset = static_cast<size_t>(item.source_offset);
+    // We are in fallback mode, so we can't reuse the control buffer anymore.
+    // Probed elements are marked as kSentinel in the control buffer.
+    if (ABSL_PREDICT_FALSE(marked_elements_starting_position_ !=
+                           kNoMarkedElementsSentinel)) {
+      control_[source_offset] = ctrl_t::kSentinel;
+      return;
+    }
+    // Refresh the end pointer to the new available position.
+    // Invariant: if pos < end, then we have at least sizeof(ProbedItem) bytes
+    // to write.
+    end_ = control_ + source_offset + 1 - sizeof(ProbedItem);
+    if (ABSL_PREDICT_TRUE(pos_ < end_)) {
+      *pos_ = item;
+      ++pos_;
+      return;
+    }
+    control_[source_offset] = ctrl_t::kSentinel;
+    marked_elements_starting_position_ = source_offset;
+    // From now on, every EncodeItem call will fall through to `ProcessEncodeWithOverflow`.
+    ABSL_SWISSTABLE_ASSERT(pos_ >= end_);
+  }
+
+  // Decodes information about probed elements from the control buffer and
+  // processes marked elements.
+  // Finds new position for each element and transfers it to the new slots.
+  // Returns the total probe length.
+  ABSL_ATTRIBUTE_NOINLINE size_t DecodeAndInsertToTableOverflow(
+      CommonFields& common, const PolicyFunctions& policy,
+      void* old_slots) const {
+    ABSL_SWISSTABLE_ASSERT(local_buffer_full_ &&
+                           "must not be called when local buffer is not full");
+    size_t total_probe_length = DecodeAndInsertImpl(
+        common, policy, OverflowBufferStart(), pos_, old_slots);
+    if (ABSL_PREDICT_TRUE(marked_elements_starting_position_ ==
+                          kNoMarkedElementsSentinel)) {
+      return total_probe_length;
+    }
+    total_probe_length +=
+        ProcessProbedMarkedElements(common, policy, control_, old_slots,
+                                    marked_elements_starting_position_);
+    return total_probe_length;
+  }
+
+  static constexpr size_t kBufferSize =
+      kProbedElementsBufferSize / sizeof(ProbedItem);
+  ProbedItem buffer_[kBufferSize];
+  // If local_buffer_full_ is false, then pos_/end_ are in the local buffer,
+  // otherwise, they're in the overflow buffer.
+  ProbedItem* pos_ = buffer_;
+  const void* end_ = buffer_ + kBufferSize;
+  ctrl_t* const control_;
+  size_t marked_elements_starting_position_ = kNoMarkedElementsSentinel;
+  bool local_buffer_full_ = false;
+};
+
+// Grows to the next capacity with the specified encoder type.
+// The encoder is used to store probed elements that are processed later.
+// A different encoder is used depending on the capacity of the table.
+// Returns total probe length.
+template <typename Encoder>
+size_t GrowToNextCapacity(CommonFields& common, const PolicyFunctions& policy,
+                          ctrl_t* old_ctrl, void* old_slots) {
+  using ProbedItem = typename Encoder::ProbedItem;
+  ABSL_SWISSTABLE_ASSERT(common.capacity() <= ProbedItem::kMaxNewCapacity);
+  Encoder encoder(old_ctrl);
+  policy.transfer_unprobed_elements_to_next_capacity(
+      common, old_ctrl, old_slots, &encoder,
+      [](void* probed_storage, h2_t h2, size_t source_offset, size_t h1) {
+        auto encoder_ptr = static_cast<Encoder*>(probed_storage);
+        encoder_ptr->EncodeItem(ProbedItem(h2, source_offset, h1));
+      });
+  InitializeMirroredControlBytes(common.control(), common.capacity());
+  return encoder.DecodeAndInsertToTable(common, policy, old_slots);
+}
+
+// Grows to next capacity for relatively small tables so that even if all
+// elements are probed, we don't need to overflow the local buffer.
+// Returns total probe length.
+size_t GrowToNextCapacityThatFitsInLocalBuffer(CommonFields& common,
+                                               const PolicyFunctions& policy,
+                                               ctrl_t* old_ctrl,
+                                               void* old_slots) {
+  ABSL_SWISSTABLE_ASSERT(common.capacity() <= kMaxLocalBufferNewCapacity);
+  return GrowToNextCapacity<
+      ProbedItemEncoder<ProbedItem4Bytes, /*kGuaranteedFitToBuffer=*/true>>(
+      common, policy, old_ctrl, old_slots);
+}
+
+// Grows to next capacity with different encodings. Returns total probe length.
+// Keeping them as separate functions simplifies profile analysis.
+size_t GrowToNextCapacity4BytesEncoder(CommonFields& common,
+                                       const PolicyFunctions& policy,
+                                       ctrl_t* old_ctrl, void* old_slots) {
+  return GrowToNextCapacity<ProbedItemEncoder<ProbedItem4Bytes>>(
+      common, policy, old_ctrl, old_slots);
+}
+size_t GrowToNextCapacity8BytesEncoder(CommonFields& common,
+                                       const PolicyFunctions& policy,
+                                       ctrl_t* old_ctrl, void* old_slots) {
+  return GrowToNextCapacity<ProbedItemEncoder<ProbedItem8Bytes>>(
+      common, policy, old_ctrl, old_slots);
+}
+size_t GrowToNextCapacity16BytesEncoder(CommonFields& common,
+                                        const PolicyFunctions& policy,
+                                        ctrl_t* old_ctrl, void* old_slots) {
+  return GrowToNextCapacity<ProbedItemEncoder<ProbedItem16Bytes>>(
+      common, policy, old_ctrl, old_slots);
+}
+
+// Grows to next capacity for tables with a large enough capacity that we
+// can't guarantee that all probed elements fit in the local buffer. Returns
+// total probe length.
+size_t GrowToNextCapacityOverflowLocalBuffer(CommonFields& common,
+                                             const PolicyFunctions& policy,
+                                             ctrl_t* old_ctrl,
+                                             void* old_slots) {
+  const size_t new_capacity = common.capacity();
+  if (ABSL_PREDICT_TRUE(new_capacity <= ProbedItem4Bytes::kMaxNewCapacity)) {
+    return GrowToNextCapacity4BytesEncoder(common, policy, old_ctrl, old_slots);
+  }
+  if (ABSL_PREDICT_TRUE(new_capacity <= ProbedItem8Bytes::kMaxNewCapacity)) {
+    return GrowToNextCapacity8BytesEncoder(common, policy, old_ctrl, old_slots);
+  }
+  // 16 bytes encoding supports the maximum swisstable capacity.
+  return GrowToNextCapacity16BytesEncoder(common, policy, old_ctrl, old_slots);
+}
+
+// Dispatches to the appropriate `GrowToNextCapacity*` function based on the
+// capacity of the table. Returns total probe length.
+ABSL_ATTRIBUTE_NOINLINE
+size_t GrowToNextCapacityDispatch(CommonFields& common,
+                                  const PolicyFunctions& policy,
+                                  ctrl_t* old_ctrl, void* old_slots) {
+  const size_t new_capacity = common.capacity();
+  if (ABSL_PREDICT_TRUE(new_capacity <= kMaxLocalBufferNewCapacity)) {
+    return GrowToNextCapacityThatFitsInLocalBuffer(common, policy, old_ctrl,
+                                                   old_slots);
+  } else {
+    return GrowToNextCapacityOverflowLocalBuffer(common, policy, old_ctrl,
+                                                 old_slots);
+  }
+}
+
+// Grows to next capacity and prepares the insertion of the given new_hash.
+// Returns the offset of the new element.
 size_t GrowToNextCapacityAndPrepareInsert(CommonFields& common,
                                           const PolicyFunctions& policy,
                                           size_t new_hash) {
-  assert(common.growth_left() == 0);
+  ABSL_SWISSTABLE_ASSERT(common.growth_left() == 0);
   const size_t old_capacity = common.capacity();
-  assert(old_capacity == 0 || old_capacity > policy.soo_capacity);
+  ABSL_SWISSTABLE_ASSERT(old_capacity == 0 ||
+                         old_capacity > policy.soo_capacity);
 
   const size_t new_capacity = NextCapacity(old_capacity);
-  assert(IsValidCapacity(new_capacity));
-  assert(new_capacity > policy.soo_capacity);
+  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(new_capacity));
+  ABSL_SWISSTABLE_ASSERT(new_capacity > policy.soo_capacity);
 
   ctrl_t* old_ctrl = common.control();
   void* old_slots = common.slot_array();
@@ -853,6 +1250,7 @@
   void* new_slots = mem + layout.slot_offset();
   common.set_control</*kGenerateSeed=*/false>(new_ctrl);
   common.set_slots(new_slots);
+  SanitizerPoisonMemoryRegion(new_slots, new_capacity * slot_size);
 
   h2_t new_h2 = H2(new_hash);
   size_t total_probe_length = 0;
@@ -862,38 +1260,39 @@
     InitializeSingleElementControlBytes(new_h2, new_ctrl);
     common.generate_new_seed();
     find_info = FindInfo{0, 0};
+    SanitizerUnpoisonMemoryRegion(new_slots, slot_size);
   } else {
     if (ABSL_PREDICT_TRUE(is_single_group(new_capacity))) {
       GrowIntoSingleGroupShuffleControlBytes(old_ctrl, old_capacity, new_ctrl,
                                              new_capacity);
       // Single group tables have all slots full on resize. So we can transfer
       // all slots without checking the control bytes.
-      assert(common.size() == old_capacity);
-      policy.transfer_n(&common, NextSlot(new_slots, slot_size), old_slots,
-                        old_capacity);
-      PoisonEmptySlots(common, slot_size);
+      ABSL_SWISSTABLE_ASSERT(common.size() == old_capacity);
+      auto* target = NextSlot(new_slots, slot_size);
+      SanitizerUnpoisonMemoryRegion(target, old_capacity * slot_size);
+      policy.transfer_n(&common, target, old_slots, old_capacity);
       // We put the new element either at the beginning or at the end of the
       // table with approximately equal probability.
       size_t offset = SingleGroupTableH1(new_hash, common.seed()) & 1
                           ? 0
                           : new_capacity - 1;
 
-      assert(IsEmpty(new_ctrl[offset]));
+      ABSL_SWISSTABLE_ASSERT(IsEmpty(new_ctrl[offset]));
       SetCtrlInSingleGroupTable(common, offset, new_h2, policy.slot_size);
       find_info = FindInfo{offset, 0};
     } else {
-      ResetCtrl(common, slot_size);
-      total_probe_length = policy.find_new_positions_and_transfer_slots(
-          common, old_ctrl, old_slots, old_capacity);
+      total_probe_length =
+          GrowToNextCapacityDispatch(common, policy, old_ctrl, old_slots);
       find_info = find_first_non_full(common, new_hash);
       SetCtrlInLargeTable(common, find_info.offset, new_h2, policy.slot_size);
     }
-    assert(old_capacity > policy.soo_capacity);
+    ABSL_SWISSTABLE_ASSERT(old_capacity > policy.soo_capacity);
     (*policy.dealloc)(alloc, old_capacity, old_ctrl, slot_size, slot_align,
                       has_infoz);
   }
   PrepareInsertCommon(common);
-  ResetGrowthLeft(common);
+  ResetGrowthLeft(GetGrowthInfoFromControl(new_ctrl), new_capacity,
+                  common.size());
 
   if (ABSL_PREDICT_FALSE(has_infoz)) {
     common.set_has_infoz();
@@ -969,10 +1368,11 @@
 size_t PrepareInsertNonSooSlow(CommonFields& common,
                                const PolicyFunctions& policy, size_t hash) {
   const GrowthInfo growth_info = common.growth_info();
-  assert(!growth_info.HasNoDeletedAndGrowthLeft());
+  ABSL_SWISSTABLE_ASSERT(!growth_info.HasNoDeletedAndGrowthLeft());
   if (ABSL_PREDICT_TRUE(growth_info.HasNoGrowthLeftAndNoDeleted())) {
     // Table without deleted slots (>95% cases) that needs to be resized.
-    assert(growth_info.HasNoDeleted() && growth_info.GetGrowthLeft() == 0);
+    ABSL_SWISSTABLE_ASSERT(growth_info.HasNoDeleted() &&
+                           growth_info.GetGrowthLeft() == 0);
     return GrowToNextCapacityAndPrepareInsert(common, policy, hash);
   }
   if (ABSL_PREDICT_FALSE(growth_info.HasNoGrowthLeftAssumingMayHaveDeleted())) {
@@ -988,6 +1388,68 @@
   return target.offset;
 }
 
+
+// Resizes empty non-allocated SOO table to NextCapacity(SooCapacity()),
+// forces the table to be sampled and prepares the insert.
+// SOO tables need to switch from SOO to heap in order to store the infoz.
+// Requires:
+//   1. `c.capacity() == SooCapacity()`.
+//   2. `c.empty()`.
+ABSL_ATTRIBUTE_NOINLINE size_t
+GrowEmptySooTableToNextCapacityForceSamplingAndPrepareInsert(
+    CommonFields& common, const PolicyFunctions& policy, size_t new_hash) {
+  ResizeEmptyNonAllocatedTableImpl(common, policy, NextCapacity(SooCapacity()),
+                                   /*force_infoz=*/true);
+  PrepareInsertCommon(common);
+  common.growth_info().OverwriteEmptyAsFull();
+  SetCtrlInSingleGroupTable(common, SooSlotIndex(), H2(new_hash),
+                            policy.slot_size);
+  common.infoz().RecordInsert(new_hash, /*distance_from_desired=*/0);
+  return SooSlotIndex();
+}
+
+// Resizes empty non-allocated table to the capacity to fit new_size elements.
+// Requires:
+//   1. `c.capacity() == policy.soo_capacity`.
+//   2. `c.empty()`.
+//   3. `new_size > policy.soo_capacity`.
+// Sampling of the table will be attempted.
+void ReserveEmptyNonAllocatedTableToFitNewSize(CommonFields& common,
+                                               const PolicyFunctions& policy,
+                                               size_t new_size) {
+  ValidateMaxSize(new_size, policy.slot_size);
+  ABSL_ASSUME(new_size > 0);
+  ResizeEmptyNonAllocatedTableImpl(common, policy, SizeToCapacity(new_size),
+                                   /*force_infoz=*/false);
+  // This is after resize, to ensure that we have completed the allocation
+  // and have potentially sampled the hashtable.
+  common.infoz().RecordReservation(new_size);
+}
+
+// Type erased version of raw_hash_set::reserve for tables that have an
+// allocated backing array.
+//
+// Requires:
+//   1. `c.capacity() > policy.soo_capacity` OR `!c.empty()`.
+// Reserving already allocated tables is considered to be a rare case.
+ABSL_ATTRIBUTE_NOINLINE void ReserveAllocatedTable(
+    CommonFields& common, const PolicyFunctions& policy, size_t new_size) {
+  const size_t cap = common.capacity();
+  ValidateMaxSize(new_size, policy.slot_size);
+  ABSL_ASSUME(new_size > 0);
+  const size_t new_capacity = SizeToCapacity(new_size);
+  if (cap == policy.soo_capacity) {
+    ABSL_SWISSTABLE_ASSERT(!common.empty());
+    ResizeFullSooTable(common, policy, new_capacity,
+                       ResizeFullSooTableSamplingMode::kNoSampling);
+  } else {
+    ABSL_SWISSTABLE_ASSERT(cap > policy.soo_capacity);
+    // TODO(b/382423690): consider using GrowToNextCapacity, when applicable.
+    ResizeAllocatedTableWithSeedChange(common, policy, new_capacity);
+  }
+  common.infoz().RecordReservation(new_size);
+}
+
 }  // namespace
 
 void* GetRefForEmptyClass(CommonFields& common) {
@@ -1005,20 +1467,6 @@
       common, policy, new_capacity, common.infoz());
 }
 
-void ReserveEmptyNonAllocatedTableToFitNewSize(CommonFields& common,
-                                               const PolicyFunctions& policy,
-                                               size_t new_size) {
-  ValidateMaxSize(new_size, policy.slot_size);
-  ResizeEmptyNonAllocatedTableImpl(
-      common, policy, NormalizeCapacity(GrowthToLowerboundCapacity(new_size)),
-      /*force_infoz=*/false);
-  // This is after resize, to ensure that we have completed the allocation
-  // and have potentially sampled the hashtable.
-  common.infoz().RecordReservation(new_size);
-  common.reset_reserved_growth(new_size);
-  common.set_reservation_size(new_size);
-}
-
 void ReserveEmptyNonAllocatedTableToFitBucketCount(
     CommonFields& common, const PolicyFunctions& policy, size_t bucket_count) {
   size_t new_capacity = NormalizeCapacity(bucket_count);
@@ -1027,22 +1475,22 @@
                                    /*force_infoz=*/false);
 }
 
-void GrowEmptySooTableToNextCapacityForceSampling(
-    CommonFields& common, const PolicyFunctions& policy) {
-  ResizeEmptyNonAllocatedTableImpl(common, policy, NextCapacity(SooCapacity()),
-                                   /*force_infoz=*/true);
-}
-
 // Resizes a full SOO table to the NextCapacity(SooCapacity()).
 template <size_t SooSlotMemcpySize, bool TransferUsesMemcpy>
-void GrowFullSooTableToNextCapacity(CommonFields& common,
-                                    const PolicyFunctions& policy,
-                                    size_t soo_slot_hash) {
-  assert(common.capacity() == policy.soo_capacity);
-  assert(common.size() == policy.soo_capacity);
+size_t GrowSooTableToNextCapacityAndPrepareInsert(CommonFields& common,
+                                                  const PolicyFunctions& policy,
+                                                  size_t new_hash,
+                                                  ctrl_t soo_slot_ctrl) {
+  ABSL_SWISSTABLE_ASSERT(common.capacity() == policy.soo_capacity);
+  ABSL_SWISSTABLE_ASSERT(policy.soo_capacity == SooCapacity());
+  if (ABSL_PREDICT_FALSE(soo_slot_ctrl == ctrl_t::kEmpty)) {
+    // The table is empty, it is only used for forced sampling of SOO tables.
+    return GrowEmptySooTableToNextCapacityForceSamplingAndPrepareInsert(
+        common, policy, new_hash);
+  }
+  ABSL_SWISSTABLE_ASSERT(common.size() == policy.soo_capacity);
   static constexpr size_t kNewCapacity = NextCapacity(SooCapacity());
-  assert(kNewCapacity > policy.soo_capacity);
-  assert(policy.soo_capacity == SooCapacity());
+  ABSL_SWISSTABLE_ASSERT(kNewCapacity > policy.soo_capacity);
   const size_t slot_size = policy.slot_size;
   const size_t slot_align = policy.slot_align;
   common.set_capacity(kNewCapacity);
@@ -1063,7 +1511,17 @@
   ctrl_t* new_ctrl = reinterpret_cast<ctrl_t*>(mem + layout.control_offset());
   void* new_slots = mem + layout.slot_offset();
 
-  InitializeThreeElementsControlBytesAfterSoo(soo_slot_hash, new_ctrl);
+  PrepareInsertCommon(common);
+  ABSL_SWISSTABLE_ASSERT(common.size() == 2);
+  GetGrowthInfoFromControl(new_ctrl).InitGrowthLeftNoDeleted(kNewCapacity - 2);
+  common.generate_new_seed();
+
+  // After resize from capacity 1 to 3, we always have exactly the slot with
+  // index 1 occupied, so we need to insert either at index 0 or index 2.
+  static_assert(SooSlotIndex() == 1);
+  const size_t offset = SingleGroupTableH1(new_hash, common.seed()) & 2;
+  InitializeThreeElementsControlBytesAfterSoo(soo_slot_ctrl, new_hash, offset,
+                                              new_ctrl);
 
   SanitizerPoisonMemoryRegion(new_slots, slot_size * kNewCapacity);
   void* target_slot = SlotAddress(new_slots, SooSlotIndex(), slot_size);
@@ -1075,8 +1533,8 @@
     static_assert(SooSlotIndex() == 1);
     static_assert(SooSlotMemcpySize > 0);
     static_assert(SooSlotMemcpySize <= MaxSooSlotSize());
-    assert(SooSlotMemcpySize <= 2 * slot_size);
-    assert(SooSlotMemcpySize >= slot_size);
+    ABSL_SWISSTABLE_ASSERT(SooSlotMemcpySize <= 2 * slot_size);
+    ABSL_SWISSTABLE_ASSERT(SooSlotMemcpySize >= slot_size);
     void* next_slot = SlotAddress(target_slot, 1, slot_size);
     SanitizerUnpoisonMemoryRegion(next_slot, SooSlotMemcpySize - slot_size);
     std::memcpy(target_slot, common.soo_data(), SooSlotMemcpySize);
@@ -1085,17 +1543,21 @@
     static_assert(SooSlotMemcpySize == 0);
     policy.transfer_n(&common, target_slot, common.soo_data(), 1);
   }
-  common.set_control</*kGenerateSeed=*/true>(new_ctrl);
+  // Seed was already generated above.
+  common.set_control</*kGenerateSeed=*/false>(new_ctrl);
   common.set_slots(new_slots);
 
-  ResetGrowthLeft(common);
+  common.infoz().RecordInsert(new_hash, /*distance_from_desired=*/0);
+  SanitizerUnpoisonMemoryRegion(SlotAddress(new_slots, offset, slot_size),
+                                slot_size);
+  return offset;
 }
 
 void GrowFullSooTableToNextCapacityForceSampling(
     CommonFields& common, const PolicyFunctions& policy) {
-  assert(common.capacity() == policy.soo_capacity);
-  assert(common.size() == policy.soo_capacity);
-  assert(policy.soo_capacity == SooCapacity());
+  ABSL_SWISSTABLE_ASSERT(common.capacity() == policy.soo_capacity);
+  ABSL_SWISSTABLE_ASSERT(common.size() == policy.soo_capacity);
+  ABSL_SWISSTABLE_ASSERT(policy.soo_capacity == SooCapacity());
   ResizeFullSooTable(
       common, policy, NextCapacity(SooCapacity()),
       ResizeFullSooTableSamplingMode::kForceSampleNoResizeIfUnsampled);
@@ -1127,11 +1589,11 @@
                                              kInitialSampledCapacity);
         }
         // This asserts that we didn't lose sampling coverage in `resize`.
-        assert(common.infoz().IsSampled());
+        ABSL_SWISSTABLE_ASSERT(common.infoz().IsSampled());
         return;
       }
-      assert(slot_size <= sizeof(HeapOrSoo));
-      assert(policy.slot_align <= alignof(HeapOrSoo));
+      ABSL_SWISSTABLE_ASSERT(slot_size <= sizeof(HeapOrSoo));
+      ABSL_SWISSTABLE_ASSERT(policy.slot_align <= alignof(HeapOrSoo));
       HeapOrSoo tmp_slot(uninitialized_tag_t{});
       size_t begin_offset = FindFirstFullSlot(0, cap, common.control());
       policy.transfer_n(
@@ -1144,11 +1606,11 @@
     }
   }
 
+  ValidateMaxSize(n, policy.slot_size);
   // bitor is a faster way of doing `max` here. We will round up to the next
   // power-of-2-minus-1, so bitor is good enough.
-  size_t new_size = n | GrowthToLowerboundCapacity(common.size());
-  ValidateMaxSize(n, policy.slot_size);
-  const size_t new_capacity = NormalizeCapacity(new_size);
+  const size_t new_capacity =
+      NormalizeCapacity(n | SizeToCapacity(common.size()));
   // n == 0 unconditionally rehashes as per the standard.
   if (n == 0 || new_capacity > cap) {
     if (cap == policy.soo_capacity) {
@@ -1168,31 +1630,25 @@
   }
 }
 
-void ReserveAllocatedTable(CommonFields& common, const PolicyFunctions& policy,
-                           size_t n) {
-  common.reset_reserved_growth(n);
-  common.set_reservation_size(n);
-
+void ReserveTableToFitNewSize(CommonFields& common,
+                              const PolicyFunctions& policy, size_t new_size) {
+  common.reset_reserved_growth(new_size);
+  common.set_reservation_size(new_size);
+  ABSL_SWISSTABLE_ASSERT(new_size > policy.soo_capacity);
   const size_t cap = common.capacity();
-  assert(!common.empty() || cap > policy.soo_capacity);
-  assert(cap > 0);
+  if (ABSL_PREDICT_TRUE(common.empty() && cap <= policy.soo_capacity)) {
+    return ReserveEmptyNonAllocatedTableToFitNewSize(common, policy, new_size);
+  }
+
+  ABSL_SWISSTABLE_ASSERT(!common.empty() || cap > policy.soo_capacity);
+  ABSL_SWISSTABLE_ASSERT(cap > 0);
   const size_t max_size_before_growth =
       cap <= policy.soo_capacity ? policy.soo_capacity
                                  : common.size() + common.growth_left();
-  if (n <= max_size_before_growth) {
+  if (new_size <= max_size_before_growth) {
     return;
   }
-  ValidateMaxSize(n, policy.slot_size);
-  const size_t new_capacity = NormalizeCapacity(GrowthToLowerboundCapacity(n));
-  if (cap == policy.soo_capacity) {
-    assert(!common.empty());
-    ResizeFullSooTable(common, policy, new_capacity,
-                       ResizeFullSooTableSamplingMode::kNoSampling);
-  } else {
-    assert(cap > policy.soo_capacity);
-    ResizeAllocatedTableWithSeedChange(common, policy, new_capacity);
-  }
-  common.infoz().RecordReservation(n);
+  ReserveAllocatedTable(common, policy, new_size);
 }
 
 size_t PrepareInsertNonSoo(CommonFields& common, const PolicyFunctions& policy,
@@ -1248,32 +1704,41 @@
 }
 }  // namespace
 
+// Extern template instantiation for inline function.
+template size_t TryFindNewIndexWithoutProbing(size_t h1, size_t old_index,
+                                              size_t old_capacity,
+                                              ctrl_t* new_ctrl,
+                                              size_t new_capacity);
+
 // We need to instantiate ALL possible template combinations because we define
 // the function in the cc file.
-template void GrowFullSooTableToNextCapacity<0, false>(CommonFields&,
-                                                       const PolicyFunctions&,
-                                                       size_t);
-template void
-GrowFullSooTableToNextCapacity<OptimalMemcpySizeForSooSlotTransfer(1), true>(
-    CommonFields&, const PolicyFunctions&, size_t);
+template size_t GrowSooTableToNextCapacityAndPrepareInsert<0, false>(
+    CommonFields&, const PolicyFunctions&, size_t, ctrl_t);
+template size_t GrowSooTableToNextCapacityAndPrepareInsert<
+    OptimalMemcpySizeForSooSlotTransfer(1), true>(CommonFields&,
+                                                  const PolicyFunctions&,
+                                                  size_t, ctrl_t);
 
 static_assert(VerifyOptimalMemcpySizeForSooSlotTransferRange(2, 3));
-template void
-GrowFullSooTableToNextCapacity<OptimalMemcpySizeForSooSlotTransfer(3), true>(
-    CommonFields&, const PolicyFunctions&, size_t);
+template size_t GrowSooTableToNextCapacityAndPrepareInsert<
+    OptimalMemcpySizeForSooSlotTransfer(3), true>(CommonFields&,
+                                                  const PolicyFunctions&,
+                                                  size_t, ctrl_t);
 
 static_assert(VerifyOptimalMemcpySizeForSooSlotTransferRange(4, 8));
-template void
-GrowFullSooTableToNextCapacity<OptimalMemcpySizeForSooSlotTransfer(8), true>(
-    CommonFields&, const PolicyFunctions&, size_t);
+template size_t GrowSooTableToNextCapacityAndPrepareInsert<
+    OptimalMemcpySizeForSooSlotTransfer(8), true>(CommonFields&,
+                                                  const PolicyFunctions&,
+                                                  size_t, ctrl_t);
 
 #if UINTPTR_MAX == UINT32_MAX
 static_assert(MaxSooSlotSize() == 8);
 #else
 static_assert(VerifyOptimalMemcpySizeForSooSlotTransferRange(9, 16));
-template void
-GrowFullSooTableToNextCapacity<OptimalMemcpySizeForSooSlotTransfer(16), true>(
-    CommonFields&, const PolicyFunctions&, size_t);
+template size_t GrowSooTableToNextCapacityAndPrepareInsert<
+    OptimalMemcpySizeForSooSlotTransfer(16), true>(CommonFields&,
+                                                   const PolicyFunctions&,
+                                                   size_t, ctrl_t);
 static_assert(MaxSooSlotSize() == 16);
 #endif
 
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index f22deea..bccbc66 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -465,16 +465,12 @@
   const size_t seed_;
 };
 
-// Returns next seed base number that is used for generating per-table seeds.
-// Only the lowest PerTableSeed::kBitCount bits are used for actual hash table
-// seed.
-inline size_t NextSeedBaseNumber() {
-  thread_local size_t seed = reinterpret_cast<uintptr_t>(&seed);
-  if constexpr (sizeof(size_t) == 4) {
-    seed += uint32_t{0xcc9e2d51};
-  } else {
-    seed += uint64_t{0xdcb22ca68cb134ed};
-  }
+// Returns next per-table seed.
+inline uint16_t NextSeed() {
+  static_assert(PerTableSeed::kBitCount == 16);
+  thread_local uint16_t seed =
+      static_cast<uint16_t>(reinterpret_cast<uintptr_t>(&seed));
+  seed += uint16_t{0xad53};
   return seed;
 }
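One property of the chosen increment, noted editorially (not part of the patch): 0xad53 is odd and therefore coprime with 2^16, so the thread-local counter cycles through all 65536 seed values before repeating. A small self-contained check, assuming C++14 relaxed constexpr:

    #include <cstdint>

    constexpr bool OddStepHasFullPeriod(uint16_t step) {
      uint16_t s = 0;
      for (int i = 0; i < 65535; ++i) {
        s = static_cast<uint16_t>(s + step);
        if (s == 0) return false;  // cycled back too early
      }
      return static_cast<uint16_t>(s + step) == 0;  // back to the start after 2^16 steps
    }

    static_assert(OddStepHasFullPeriod(0xad53), "odd step covers all 16-bit seeds");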
 
@@ -506,8 +502,7 @@
   }
 
   void generate_new_seed() {
-    size_t seed = NextSeedBaseNumber();
-    data_ = (data_ & ~kSeedMask) | (seed & kSeedMask);
+    data_ = (data_ & ~kSeedMask) ^ uint64_t{NextSeed()};
   }
 
   // Returns true if the table has infoz.
@@ -539,40 +534,6 @@
 // These are used as an occupied control byte.
 inline h2_t H2(size_t hash) { return hash & 0x7F; }
 
-// Mixes a randomly generated per-process seed with `hash` and `ctrl` to
-// randomize insertion order within groups.
-bool ShouldInsertBackwardsForDebug(size_t capacity, size_t hash,
-                                   PerTableSeed seed);
-
-ABSL_ATTRIBUTE_ALWAYS_INLINE inline bool ShouldInsertBackwards(
-    ABSL_ATTRIBUTE_UNUSED size_t capacity, ABSL_ATTRIBUTE_UNUSED size_t hash,
-    ABSL_ATTRIBUTE_UNUSED PerTableSeed seed) {
-#if defined(NDEBUG)
-  return false;
-#else
-  return ShouldInsertBackwardsForDebug(capacity, hash, seed);
-#endif
-}
-
-// Returns insert position for the given mask.
-// We want to add entropy even when ASLR is not enabled.
-// In debug build we will randomly insert in either the front or back of
-// the group.
-// TODO(kfm,sbenza): revisit after we do unconditional mixing
-template <class Mask>
-ABSL_ATTRIBUTE_ALWAYS_INLINE inline auto GetInsertionOffset(
-    Mask mask, ABSL_ATTRIBUTE_UNUSED size_t capacity,
-    ABSL_ATTRIBUTE_UNUSED size_t hash,
-    ABSL_ATTRIBUTE_UNUSED PerTableSeed seed) {
-#if defined(NDEBUG)
-  return mask.LowestBitSet();
-#else
-  return ShouldInsertBackwardsForDebug(capacity, hash, seed)
-             ? mask.HighestBitSet()
-             : mask.LowestBitSet();
-#endif
-}
-
 // When there is an insertion with no reserved growth, we rehash with
 // probability `min(1, RehashProbabilityConstant() / capacity())`. Using a
 // constant divided by capacity ensures that inserting N elements is still O(N)
@@ -835,6 +796,12 @@
   return (has_infoz ? sizeof(HashtablezInfoHandle) : 0) + sizeof(GrowthInfo);
 }
 
+// Returns `offset` rounded up to the nearest multiple of `align`, i.e. the
+// offset of the next `align`-aligned item. `align` must be a power of two.
+constexpr size_t AlignUpTo(size_t offset, size_t align) {
+  return (offset + align - 1) & (~align + 1);
+}
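A couple of worked values for the mask trick above (editorial sketch, not part of the patch): `~align + 1` equals `-align`, which for a power of two clears the low bits of the rounded-up sum.

    static_assert(AlignUpTo(13, 8) == 16);  // (13 + 7) & ~7
    static_assert(AlignUpTo(16, 8) == 16);  // already-aligned offsets are unchanged
    static_assert(AlignUpTo(0, 16) == 0);
    static_assert(AlignUpTo(1, 1) == 1);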
+
 // Helper class for computing offsets and allocation size of hash set fields.
 class RawHashSetLayout {
  public:
@@ -843,8 +810,7 @@
       : control_offset_(ControlOffset(has_infoz)),
         generation_offset_(control_offset_ + NumControlBytes(capacity)),
         slot_offset_(
-            (generation_offset_ + NumGenerationBytes() + slot_align - 1) &
-            (~slot_align + 1)),
+            AlignUpTo(generation_offset_ + NumGenerationBytes(), slot_align)),
         alloc_size_(slot_offset_ + capacity * slot_size) {
     ABSL_SWISSTABLE_ASSERT(IsValidCapacity(capacity));
     ABSL_SWISSTABLE_ASSERT(
@@ -955,6 +921,15 @@
   unsigned char soo_data[MaxSooSlotSize()];
 };
 
+// Returns a reference to the GrowthInfo object stored immediately before
+// `control`.
+inline GrowthInfo& GetGrowthInfoFromControl(ctrl_t* control) {
+  auto* gl_ptr = reinterpret_cast<GrowthInfo*>(control) - 1;
+  ABSL_SWISSTABLE_ASSERT(
+      reinterpret_cast<uintptr_t>(gl_ptr) % alignof(GrowthInfo) == 0);
+  return *gl_ptr;
+}
+
 // CommonFields hold the fields in raw_hash_set that do not depend
 // on template parameters. This allows us to conveniently pass all
 // of this state to helper functions as a single argument.
@@ -1078,10 +1053,7 @@
   size_t growth_left() const { return growth_info().GetGrowthLeft(); }
 
   GrowthInfo& growth_info() {
-    auto* gl_ptr = reinterpret_cast<GrowthInfo*>(control()) - 1;
-    ABSL_SWISSTABLE_ASSERT(
-        reinterpret_cast<uintptr_t>(gl_ptr) % alignof(GrowthInfo) == 0);
-    return *gl_ptr;
+    return GetGrowthInfoFromControl(control());
   }
   GrowthInfo growth_info() const {
     return const_cast<CommonFields*>(this)->growth_info();
@@ -1192,6 +1164,12 @@
   return n * 2 + 1;
 }
 
+// Returns the previous valid capacity before `n`.
+constexpr size_t PreviousCapacity(size_t n) {
+  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(n));
+  return n / 2;
+}
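Since every valid capacity has the form 2^k - 1, this is the exact inverse of `NextCapacity` (n * 2 + 1, defined just above). A tiny editorial check, not part of the patch, assuming both functions remain usable in constant expressions:

    static_assert(PreviousCapacity(NextCapacity(1)) == 1);    // 1 -> 3 -> 1
    static_assert(PreviousCapacity(NextCapacity(15)) == 15);  // 15 -> 31 -> 15
    static_assert(NextCapacity(PreviousCapacity(63)) == 63);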
+
 // Applies the following mapping to every byte in the control array:
 //   * kDeleted -> kEmpty
 //   * kEmpty -> kEmpty
@@ -1227,18 +1205,28 @@
   return capacity - capacity / 8;
 }
 
-// Given `growth`, "unapplies" the load factor to find how large the capacity
+// Given `size`, "unapplies" the load factor to find how large the capacity
 // should be to stay within the load factor.
 //
-// This might not be a valid capacity and `NormalizeCapacity()` should be
-// called on this.
-constexpr size_t GrowthToLowerboundCapacity(size_t growth) {
-  // `growth*8/7`
-  if (Group::kWidth == 8 && growth == 7) {
-    // x+(x-1)/7 does not work when x==7.
-    return 8;
+// For size == 0, returns 0.
+// For other values, returns the same as `NormalizeCapacity(size*8/7)`.
+constexpr size_t SizeToCapacity(size_t size) {
+  if (size == 0) {
+    return 0;
   }
-  return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
+  // The minimum possible capacity is NormalizeCapacity(size).
+  // Shifting right `~size_t{}` by `leading_zeros` yields
+  // NormalizeCapacity(size).
+  int leading_zeros = absl::countl_zero(size);
+  constexpr size_t kLast3Bits = size_t{7} << (sizeof(size_t) * 8 - 3);
+  size_t max_size_for_next_capacity = kLast3Bits >> leading_zeros;
+  // Decrease shift if size is too big for the minimum capacity.
+  leading_zeros -= static_cast<int>(size > max_size_for_next_capacity);
+  if constexpr (Group::kWidth == 8) {
+    // Formula doesn't work when size==7 for 8-wide groups.
+    leading_zeros -= (size == 7);
+  }
+  return (~size_t{}) >> leading_zeros;
 }
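An editorial sketch (not part of the patch) replicating the bit trick above as a standalone function, checked against the values exercised in raw_hash_set_test.cc later in this patch; it omits the 8-wide-group special case for size == 7:

    #include <bit>
    #include <cstddef>

    constexpr std::size_t SizeToCapacitySketch(std::size_t size) {
      if (size == 0) return 0;
      int leading_zeros = std::countl_zero(size);
      constexpr std::size_t kLast3Bits = std::size_t{7} << (sizeof(std::size_t) * 8 - 3);
      const std::size_t max_size_for_next_capacity = kLast3Bits >> leading_zeros;
      leading_zeros -= static_cast<int>(size > max_size_for_next_capacity);
      return (~std::size_t{}) >> leading_zeros;
    }

    static_assert(SizeToCapacitySketch(1) == 1);
    static_assert(SizeToCapacitySketch(2) == 3);
    static_assert(SizeToCapacitySketch(3) == 3);
    static_assert(SizeToCapacitySketch(7) == 7);    // the 8-wide-group special case would bump this to 15
    static_assert(SizeToCapacitySketch(8) == 15);   // the smallest valid capacity >= 8 is 15
    static_assert(SizeToCapacitySketch(14) == 15);  // CapacityToGrowth(15) == 14
    static_assert(SizeToCapacitySketch(15) == 31);  // 15 > 14, so bump to the next capacity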
 
 template <class InputIter>
@@ -1249,8 +1237,7 @@
   }
   if (base_internal::IsAtLeastIterator<std::random_access_iterator_tag,
                                        InputIter>()) {
-    return GrowthToLowerboundCapacity(
-        static_cast<size_t>(std::distance(first, last)));
+    return SizeToCapacity(static_cast<size_t>(std::distance(first, last)));
   }
   return 0;
 }
@@ -1323,7 +1310,7 @@
           FATAL, "Invalid iterator comparison. The element was likely erased.");
     }
   } else {
-    ABSL_HARDENING_ASSERT(
+    ABSL_HARDENING_ASSERT_SLOW(
         ctrl_is_valid_for_comparison &&
         "Invalid iterator comparison. The element might have been erased or "
         "the table might have rehashed. Consider running with --config=asan to "
@@ -1421,22 +1408,6 @@
   size_t probe_length;
 };
 
-// Whether a table is "small". A small table fits entirely into a probing
-// group, i.e., has a capacity < `Group::kWidth`.
-//
-// In small mode we are able to use the whole capacity. The extra control
-// bytes give us at least one "empty" control byte to stop the iteration.
-// This is important to make 1 a valid capacity.
-//
-// In small mode only the first `capacity` control bytes after the sentinel
-// are valid. The rest contain dummy ctrl_t::kEmpty values that do not
-// represent a real slot. This is important to take into account on
-// `find_first_non_full()`, where we never try
-// `ShouldInsertBackwards()` for small tables.
-constexpr bool is_small(size_t capacity) {
-  return capacity < Group::kWidth - 1;
-}
-
 // Whether a table fits entirely into a probing group.
 // Arbitrary order of elements in such tables is correct.
 constexpr bool is_single_group(size_t capacity) {
@@ -1444,9 +1415,12 @@
 }
 
 // Begins a probing operation on `common.control`, using `hash`.
-inline probe_seq<Group::kWidth> probe(PerTableSeed seed, const size_t capacity,
+inline probe_seq<Group::kWidth> probe(size_t h1, size_t capacity) {
+  return probe_seq<Group::kWidth>(h1, capacity);
+}
+inline probe_seq<Group::kWidth> probe(PerTableSeed seed, size_t capacity,
                                       size_t hash) {
-  return probe_seq<Group::kWidth>(H1(hash, seed), capacity);
+  return probe(H1(hash, seed), capacity);
 }
 inline probe_seq<Group::kWidth> probe(const CommonFields& common, size_t hash) {
   return probe(common.seed(), common.capacity(), hash);
@@ -1459,36 +1433,60 @@
 //
 // NOTE: this function must work with tables having both empty and deleted
 // slots in the same group. Such tables appear during `erase()`.
+FindInfo find_first_non_full(const CommonFields& common, size_t hash);
+
+constexpr size_t kProbedElementIndexSentinel = ~size_t{};
+
+// Implementation detail of transfer_unprobed_elements_to_next_capacity_fn.
+// Tries to find the new index for an element whose hash corresponds to
+// `h1` for growth to the next capacity.
+// Returns kProbedElementIndexSentinel if full probing is required.
+//
+// If the element was located in the first probing group of the table before
+// growth, returns one of two positions: `old_index` or
+// `old_index + old_capacity + 1`.
+//
+// Otherwise, we try to insert it into the first probe group of the new table,
+// but only if that group is already initialized.
 template <typename = void>
-inline FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
-  auto seq = probe(common, hash);
-  const ctrl_t* ctrl = common.control();
-  const PerTableSeed seed = common.seed();
-  if (IsEmptyOrDeleted(ctrl[seq.offset()]) &&
-      !ShouldInsertBackwards(common.capacity(), hash, seed)) {
-    return {seq.offset(), /*probe_length=*/0};
+inline size_t TryFindNewIndexWithoutProbing(size_t h1, size_t old_index,
+                                            size_t old_capacity,
+                                            ctrl_t* new_ctrl,
+                                            size_t new_capacity) {
+  size_t in_floating_group_index = (old_index - h1) & (Group::kWidth - 1);
+  size_t new_index = (h1 + in_floating_group_index) & new_capacity;
+  ABSL_ASSUME(new_index != kProbedElementIndexSentinel);
+  if (ABSL_PREDICT_TRUE((new_index & old_capacity) == old_index)) {
+    return new_index;
   }
-  while (true) {
-    GroupFullEmptyOrDeleted g{ctrl + seq.offset()};
-    auto mask = g.MaskEmptyOrDeleted();
-    if (mask) {
-      return {
-          seq.offset(GetInsertionOffset(mask, common.capacity(), hash, seed)),
-          seq.index()};
-    }
-    seq.next();
-    ABSL_SWISSTABLE_ASSERT(seq.index() <= common.capacity() && "full table!");
+  ABSL_SWISSTABLE_ASSERT(((old_index - h1) & old_capacity) >= Group::kWidth);
+  // Try to insert the element into the first probe group.
+  // new_ctrl is not yet fully initialized, so we can't use the regular search
+  // via find_first_non_full.
+
+  // We can search in the first probe group only if it lies in the already
+  // initialized part of the table.
+  if (ABSL_PREDICT_FALSE((h1 & old_capacity) >= old_index)) {
+    return kProbedElementIndexSentinel;
   }
+  size_t offset = h1 & new_capacity;
+  Group new_g(new_ctrl + offset);
+  if (auto mask = new_g.MaskNonFull(); ABSL_PREDICT_TRUE(mask)) {
+    size_t result = offset + mask.LowestBitSet();
+    ABSL_ASSUME(result != kProbedElementIndexSentinel);
+    return result;
+  }
+  return kProbedElementIndexSentinel;
 }
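A worked example of the unprobed fast path above (editorial sketch, not part of the patch), assuming Group::kWidth == 16: an element that sat in its first probe group of the old table lands either at its old index or at old_index + old_capacity + 1, as the function comment describes.

    #include <cstddef>

    constexpr std::size_t kSketchGroupWidth = 16;  // assumption for the sketch

    constexpr std::size_t NewIndexNoProbingSketch(std::size_t h1, std::size_t old_index,
                                                  std::size_t new_capacity) {
      const std::size_t in_group = (old_index - h1) & (kSketchGroupWidth - 1);
      return (h1 + in_group) & new_capacity;
    }

    // old_capacity = 15, new_capacity = 31; the element sits at offset 4 of its
    // first probe group, i.e. old_index = 9.
    static_assert(NewIndexNoProbingSketch(/*h1=*/5, /*old_index=*/9, /*new_capacity=*/31) == 9);
    static_assert(NewIndexNoProbingSketch(/*h1=*/21, /*old_index=*/9, /*new_capacity=*/31) == 25);  // 9 + 15 + 1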
 
-// Extern template for inline function keep possibility of inlining.
+// Extern template for an inline function keeps the possibility of inlining.
 // When compiler decided to not inline, no symbols will be added to the
 // corresponding translation unit.
-extern template FindInfo find_first_non_full(const CommonFields&, size_t);
-
-// Non-inlined version of find_first_non_full for use in less
-// performance critical routines.
-FindInfo find_first_non_full_outofline(const CommonFields&, size_t);
+extern template size_t TryFindNewIndexWithoutProbing(size_t h1,
+                                                     size_t old_index,
+                                                     size_t old_capacity,
+                                                     ctrl_t* new_ctrl,
+                                                     size_t new_capacity);
 
 // Sets sanitizer poisoning for slot corresponding to control byte being set.
 inline void DoSanitizeOnSetCtrl(const CommonFields& c, size_t i, ctrl_t h,
@@ -1651,13 +1649,21 @@
   void (*dealloc)(void* alloc, size_t capacity, ctrl_t* ctrl, size_t slot_size,
                   size_t slot_align, bool had_infoz);
 
-  // Iterates over full slots in old table, finds new positions
-  // for them and transfers the slots.
-  // Returns the total probe length.
-  size_t (*find_new_positions_and_transfer_slots)(CommonFields& common,
-                                                  ctrl_t* old_ctrl,
-                                                  void* old_slots,
-                                                  size_t old_capacity);
+  // Implementation detail of GrowToNextCapacity.
+  // Iterates over all full slots and transfers unprobed elements.
+  // Initializes the new control bytes except mirrored bytes and kSentinel.
+  // The caller must finish the initialization.
+  // All slots corresponding to full control bytes are transferred.
+  // Probed elements are reported via the `encode_probed_element` callback.
+  // encode_probed_element may overwrite the old_ctrl buffer up to source_offset.
+  // A different encoding is used depending on the capacity of the table.
+  // See the ProbedItem*Bytes classes for details.
+  void (*transfer_unprobed_elements_to_next_capacity)(
+      CommonFields& common, const ctrl_t* old_ctrl, void* old_slots,
+      // TODO(b/382423690): Try to use absl::FunctionRef here.
+      void* probed_storage,
+      void (*encode_probed_element)(void* probed_storage, h2_t h2,
+                                    size_t source_offset, size_t h1));
 };
 
 // Returns the maximum valid size for a table with 1-byte slots.
@@ -1719,42 +1725,23 @@
 // Allowing till 16 would require additional store that can be avoided.
 constexpr size_t MaxSmallAfterSooCapacity() { return 7; }
 
-// Resizes empty non-allocated table to the capacity to fit new_size elements.
-// Requires:
+// Type erased version of raw_hash_set::reserve.
+// Requires: `new_size > policy.soo_capacity`.
+void ReserveTableToFitNewSize(CommonFields& common,
+                              const PolicyFunctions& policy, size_t new_size);
+
+// Resizes an empty non-allocated table to the smallest valid capacity that is
+// at least `bucket_count`. Requires:
 //   1. `c.capacity() == policy.soo_capacity`.
 //   2. `c.empty()`.
 //   3. `new_size > policy.soo_capacity`.
 // The table will be attempted to be sampled.
-void ReserveEmptyNonAllocatedTableToFitNewSize(CommonFields& common,
-                                               const PolicyFunctions& policy,
-                                               size_t new_size);
-
-// The same as ReserveEmptyNonAllocatedTableToFitNewSize, but resizes to the
-// next valid capacity after `bucket_count`.
 void ReserveEmptyNonAllocatedTableToFitBucketCount(
     CommonFields& common, const PolicyFunctions& policy, size_t bucket_count);
 
-// Resizes empty non-allocated SOO table to NextCapacity(SooCapacity()) and
-// forces the table to be sampled.
-// SOO tables need to switch from SOO to heap in order to store the infoz.
-// Requires:
-//   1. `c.capacity() == SooCapacity()`.
-//   2. `c.empty()`.
-void GrowEmptySooTableToNextCapacityForceSampling(
-    CommonFields& common, const PolicyFunctions& policy);
-
 // Type erased version of raw_hash_set::rehash.
 void Rehash(CommonFields& common, const PolicyFunctions& policy, size_t n);
 
-// Type erased version of raw_hash_set::reserve for tables that have an
-// allocated backing array.
-//
-// Requires:
-//   1. `c.capacity() > policy.soo_capacity` OR `!c.empty()`.
-// Reserving already allocated tables is considered to be a rare case.
-void ReserveAllocatedTable(CommonFields& common, const PolicyFunctions& policy,
-                           size_t n);
-
 // Returns the optimal size for memcpy when transferring SOO slot.
 // Otherwise, returns the optimal size for memcpy SOO slot transfer
 // to SooSlotIndex().
@@ -1790,13 +1777,19 @@
   return 24;
 }
 
-// Resizes a full SOO table to the NextCapacity(SooCapacity()).
+// Resizes the SOO table to NextCapacity(SooCapacity()) and prepares the insert
+// for the given new_hash. Returns the offset of the new element.
+// `soo_slot_ctrl` is the control byte of the SOO slot.
+// If soo_slot_ctrl is kEmpty:
+//   1. The table must be empty.
+//   2. The table will be forced to be sampled.
 // All possible template combinations are defined in cc file to improve
 // compilation time.
 template <size_t SooSlotMemcpySize, bool TransferUsesMemcpy>
-void GrowFullSooTableToNextCapacity(CommonFields& common,
-                                    const PolicyFunctions& policy,
-                                    size_t soo_slot_hash);
+size_t GrowSooTableToNextCapacityAndPrepareInsert(CommonFields& common,
+                                                  const PolicyFunctions& policy,
+                                                  size_t new_hash,
+                                                  ctrl_t soo_slot_ctrl);
 
 // As `ResizeFullSooTableToNextCapacity`, except that we also force the SOO
 // table to be sampled. SOO tables need to switch from SOO to heap in order to
@@ -1816,10 +1809,6 @@
   common.maybe_increment_generation_on_insert();
 }
 
-// Like prepare_insert, but for the case of inserting into a full SOO table.
-size_t PrepareInsertAfterSoo(size_t hash, size_t slot_size,
-                             CommonFields& common);
-
 // ClearBackingArray clears the backing array, either modifying it in place,
 // or creating a new one based on the value of "reuse".
 // REQUIRES: c.capacity > 0
@@ -2278,7 +2267,7 @@
                                allocator_type(that.char_alloc_ref()))) {}
 
   raw_hash_set(const raw_hash_set& that, const allocator_type& a)
-      : raw_hash_set(GrowthToLowerboundCapacity(that.size()), that.hash_ref(),
+      : raw_hash_set(SizeToCapacity(that.size()), that.hash_ref(),
                      that.eq_ref(), a) {
     that.AssertNotDebugCapacity();
     const size_t size = that.size();
@@ -2321,7 +2310,7 @@
             // a full `insert`.
             const size_t hash = PolicyTraits::apply(
                 HashElement{hash_ref()}, PolicyTraits::element(that_slot));
-            FindInfo target = find_first_non_full_outofline(common(), hash);
+            FindInfo target = find_first_non_full(common(), hash);
             infoz().RecordInsert(hash, target.probe_length);
             offset = target.offset;
           } else {
@@ -2852,17 +2841,8 @@
   void rehash(size_t n) { Rehash(common(), GetPolicyFunctions(), n); }
 
   void reserve(size_t n) {
-    const size_t cap = capacity();
-    if (ABSL_PREDICT_TRUE(cap > DefaultCapacity() ||
-                          // !SooEnabled() implies empty(), so we can skip the
-                          // check for optimization.
-                          (SooEnabled() && !empty()))) {
-      ReserveAllocatedTable(common(), GetPolicyFunctions(), n);
-    } else {
-      if (ABSL_PREDICT_TRUE(n > DefaultCapacity())) {
-        ReserveEmptyNonAllocatedTableToFitNewSize(common(),
-                                                  GetPolicyFunctions(), n);
-      }
+    if (ABSL_PREDICT_TRUE(n > DefaultCapacity())) {
+      ReserveTableToFitNewSize(common(), GetPolicyFunctions(), n);
     }
   }
 
@@ -3189,20 +3169,6 @@
                                PolicyTraits::element(slot));
   }
 
-  void resize_full_soo_table_to_next_capacity() {
-    ABSL_SWISSTABLE_ASSERT(SooEnabled());
-    ABSL_SWISSTABLE_ASSERT(capacity() == SooCapacity());
-    ABSL_SWISSTABLE_ASSERT(!empty());
-    if constexpr (SooEnabled()) {
-      GrowFullSooTableToNextCapacity<PolicyTraits::transfer_uses_memcpy()
-                                         ? OptimalMemcpySizeForSooSlotTransfer(
-                                               sizeof(slot_type))
-                                         : 0,
-                                     PolicyTraits::transfer_uses_memcpy()>(
-          common(), GetPolicyFunctions(), hash_of(soo_slot()));
-    }
-  }
-
   // Casting directly from e.g. char* to slot_type* can cause compilation errors
   // on objective-C. This function converts to void* first, avoiding the issue.
   static slot_type* to_slot(void* buf) { return static_cast<slot_type*>(buf); }
@@ -3313,22 +3279,25 @@
 
   template <class K>
   std::pair<iterator, bool> find_or_prepare_insert_soo(const K& key) {
+    ctrl_t soo_slot_ctrl;
     if (empty()) {
-      if (should_sample_soo()) {
-        GrowEmptySooTableToNextCapacityForceSampling(common(),
-                                                     GetPolicyFunctions());
-      } else {
+      if (!should_sample_soo()) {
         common().set_full_soo();
         return {soo_iterator(), true};
       }
+      soo_slot_ctrl = ctrl_t::kEmpty;
     } else if (PolicyTraits::apply(EqualElement<K>{key, eq_ref()},
                                    PolicyTraits::element(soo_slot()))) {
       return {soo_iterator(), false};
     } else {
-      resize_full_soo_table_to_next_capacity();
+      soo_slot_ctrl = static_cast<ctrl_t>(H2(hash_of(soo_slot())));
     }
-    const size_t index =
-        PrepareInsertAfterSoo(hash_ref()(key), sizeof(slot_type), common());
+    constexpr bool kUseMemcpy =
+        PolicyTraits::transfer_uses_memcpy() && SooEnabled();
+    size_t index = GrowSooTableToNextCapacityAndPrepareInsert<
+        kUseMemcpy ? OptimalMemcpySizeForSooSlotTransfer(sizeof(slot_type)) : 0,
+        kUseMemcpy>(common(), GetPolicyFunctions(), hash_ref()(key),
+                    soo_slot_ctrl);
     return {iterator_at(index), true};
   }
 
@@ -3350,8 +3319,7 @@
       }
       auto mask_empty = g.MaskEmpty();
       if (ABSL_PREDICT_TRUE(mask_empty)) {
-        size_t target = seq.offset(
-            GetInsertionOffset(mask_empty, capacity(), hash, common().seed()));
+        size_t target = seq.offset(mask_empty.LowestBitSet());
         return {iterator_at(PrepareInsertNonSoo(common(), GetPolicyFunctions(),
                                                 hash,
                                                 FindInfo{target, seq.index()})),
@@ -3577,29 +3545,60 @@
     }
   }
 
-  // TODO(b/382423690): Type erase by GetKey + Hash for memcpyable types.
-  static size_t find_new_positions_and_transfer_slots_fn(CommonFields& common,
-                                                         ctrl_t* old_ctrl,
-                                                         void* old_slots,
-                                                         size_t old_capacity) {
+  // TODO(b/382423690): Try to type erase entire function or at least type erase
+  // by GetKey + Hash for memcpyable types.
+  // TODO(b/382423690): Try to type erase for big slots: sizeof(slot_type) > 16.
+  static void transfer_unprobed_elements_to_next_capacity_fn(
+      CommonFields& common, const ctrl_t* old_ctrl, void* old_slots,
+      void* probed_storage,
+      void (*encode_probed_element)(void* probed_storage, h2_t h2,
+                                    size_t source_offset, size_t h1)) {
+    const size_t new_capacity = common.capacity();
+    const size_t old_capacity = PreviousCapacity(new_capacity);
+    ABSL_SWISSTABLE_ASSERT(old_capacity + 1 >= Group::kWidth);
+    ABSL_SWISSTABLE_ASSERT((old_capacity + 1) % Group::kWidth == 0);
+
     auto* set = reinterpret_cast<raw_hash_set*>(&common);
-    slot_type* new_slots = set->slot_array();
     slot_type* old_slots_ptr = to_slot(old_slots);
-    const auto insert_slot = [&](slot_type* slot) {
-      size_t hash = PolicyTraits::apply(HashElement{set->hash_ref()},
-                                        PolicyTraits::element(slot));
-      auto target = find_first_non_full(common, hash);
-      SetCtrl(common, target.offset, H2(hash), sizeof(slot_type));
-      set->transfer(new_slots + target.offset, slot);
-      return target.probe_length;
-    };
-    size_t total_probe_length = 0;
-    for (size_t i = 0; i < old_capacity; ++i) {
-      if (IsFull(old_ctrl[i])) {
-        total_probe_length += insert_slot(old_slots_ptr + i);
+    ctrl_t* new_ctrl = common.control();
+    slot_type* new_slots = set->slot_array();
+
+    const PerTableSeed seed = common.seed();
+
+    for (size_t group_index = 0; group_index < old_capacity;
+         group_index += Group::kWidth) {
+      Group old_g(old_ctrl + group_index);
+      std::memset(new_ctrl + group_index, static_cast<int8_t>(ctrl_t::kEmpty),
+                  Group::kWidth);
+      std::memset(new_ctrl + group_index + old_capacity + 1,
+                  static_cast<int8_t>(ctrl_t::kEmpty), Group::kWidth);
+      // TODO(b/382423690): try to type erase everything outside of the loop.
+      // We will share a lot of code at the expense of one function call per group.
+      for (auto in_fixed_group_index : old_g.MaskFull()) {
+        size_t old_index = group_index + in_fixed_group_index;
+        slot_type* old_slot = old_slots_ptr + old_index;
+        // TODO(b/382423690): try to avoid entire hash calculation since we need
+        // only one new bit of h1.
+        size_t hash = set->hash_of(old_slot);
+        size_t h1 = H1(hash, seed);
+        h2_t h2 = H2(hash);
+        size_t new_index = TryFindNewIndexWithoutProbing(
+            h1, old_index, old_capacity, new_ctrl, new_capacity);
+        // Note that encode_probed_element is allowed to use the old_ctrl buffer
+        // up to and including old_index.
+        if (ABSL_PREDICT_FALSE(new_index == kProbedElementIndexSentinel)) {
+          encode_probed_element(probed_storage, h2, old_index, h1);
+          continue;
+        }
+        ABSL_SWISSTABLE_ASSERT((new_index & old_capacity) <= old_index);
+        ABSL_SWISSTABLE_ASSERT(IsEmpty(new_ctrl[new_index]));
+        new_ctrl[new_index] = static_cast<ctrl_t>(h2);
+        auto* new_slot = new_slots + new_index;
+        SanitizerUnpoisonMemoryRegion(new_slot, sizeof(slot_type));
+        set->transfer(new_slot, old_slot);
+        SanitizerPoisonMemoryRegion(old_slot, sizeof(slot_type));
       }
     }
-    return total_probe_length;
   }
 
   static const PolicyFunctions& GetPolicyFunctions() {
@@ -3633,7 +3632,7 @@
                                : &raw_hash_set::get_char_alloc_ref_fn,
         &AllocateBackingArray<kBackingArrayAlignment, CharAlloc>,
         &DeallocateBackingArray<kBackingArrayAlignment, CharAlloc>,
-        &raw_hash_set::find_new_positions_and_transfer_slots_fn};
+        &raw_hash_set::transfer_unprobed_elements_to_next_capacity_fn};
     return value;
   }
 
@@ -3775,17 +3774,17 @@
 
 // Extern template instantiations reduce binary size and linker input size.
 // Function definition is in raw_hash_set.cc.
-extern template void GrowFullSooTableToNextCapacity<0, false>(
-    CommonFields&, const PolicyFunctions&, size_t);
-extern template void GrowFullSooTableToNextCapacity<1, true>(
-    CommonFields&, const PolicyFunctions&, size_t);
-extern template void GrowFullSooTableToNextCapacity<4, true>(
-    CommonFields&, const PolicyFunctions&, size_t);
-extern template void GrowFullSooTableToNextCapacity<8, true>(
-    CommonFields&, const PolicyFunctions&, size_t);
+extern template size_t GrowSooTableToNextCapacityAndPrepareInsert<0, false>(
+    CommonFields&, const PolicyFunctions&, size_t, ctrl_t);
+extern template size_t GrowSooTableToNextCapacityAndPrepareInsert<1, true>(
+    CommonFields&, const PolicyFunctions&, size_t, ctrl_t);
+extern template size_t GrowSooTableToNextCapacityAndPrepareInsert<4, true>(
+    CommonFields&, const PolicyFunctions&, size_t, ctrl_t);
+extern template size_t GrowSooTableToNextCapacityAndPrepareInsert<8, true>(
+    CommonFields&, const PolicyFunctions&, size_t, ctrl_t);
 #if UINTPTR_MAX == UINT64_MAX
-extern template void GrowFullSooTableToNextCapacity<16, true>(
-    CommonFields&, const PolicyFunctions&, size_t);
+extern template size_t GrowSooTableToNextCapacityAndPrepareInsert<16, true>(
+    CommonFields&, const PolicyFunctions&, size_t, ctrl_t);
 #endif
 
 }  // namespace container_internal
diff --git a/absl/container/internal/raw_hash_set_resize_impl.h b/absl/container/internal/raw_hash_set_resize_impl.h
new file mode 100644
index 0000000..149d9e8
--- /dev/null
+++ b/absl/container/internal/raw_hash_set_resize_impl.h
@@ -0,0 +1,80 @@
+// Copyright 2025 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This is a private implementation detail of the resize algorithm of
+// raw_hash_set. It is exposed in a separate file for testing purposes.
+
+#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_RESIZE_IMPL_H_
+#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_RESIZE_IMPL_H_
+
+#include <cstddef>
+#include <cstdint>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// Encoding for probed elements used for smaller tables.
+// Data is encoded into a single integer.
+// Storage format for 4 bytes:
+// - 7 bits for h2
+// - 12 bits for source_offset
+// - 13 bits for h1
+// Storage format for 8 bytes:
+// - 7 bits for h2
+// - 28 bits for source_offset
+// - 29 bits for h1
+// Storage format for 16 bytes:
+// - 7 bits for h2
+// - 57 bits for source_offset
+// - 58 bits for h1
+template <typename IntType, size_t kTotalBits>
+struct ProbedItemImpl {
+  static constexpr IntType kH2Bits = 7;
+
+  static constexpr IntType kMaxOldBits = (kTotalBits - kH2Bits) / 2;
+  static constexpr IntType kMaxOldCapacity = (IntType{1} << kMaxOldBits) - 1;
+
+  // We always have one bit more for h1.
+  static constexpr IntType kMaxNewBits = kMaxOldBits + 1;
+  static constexpr IntType kMaxNewCapacity = (IntType{1} << kMaxNewBits) - 1;
+
+  static constexpr IntType kH2Shift = (kTotalBits - kH2Bits);
+  static_assert(kMaxNewBits + kMaxOldBits + kH2Bits == kTotalBits);
+
+  ProbedItemImpl() = default;
+  ProbedItemImpl(uint8_t h2_arg, size_t source_offset_arg, size_t h1_arg)
+      : h2(h2_arg),
+        source_offset(static_cast<IntType>(source_offset_arg)),
+        h1(static_cast<IntType>(h1_arg)) {}
+
+  IntType h2 : kH2Bits;
+  IntType source_offset : kMaxOldBits;
+  IntType h1 : kMaxNewBits;
+};
+
+using ProbedItem4Bytes = ProbedItemImpl<uint32_t, 32>;
+static_assert(sizeof(ProbedItem4Bytes) == 4);
+using ProbedItem8Bytes = ProbedItemImpl<uint64_t, 64>;
+static_assert(sizeof(ProbedItem8Bytes) == 8);
+using ProbedItem16Bytes = ProbedItemImpl<uint64_t, 7 + 57 + 58>;
+static_assert(sizeof(ProbedItem16Bytes) == 16);
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_RESIZE_IMPL_H_
diff --git a/absl/container/internal/raw_hash_set_resize_impl_test.cc b/absl/container/internal/raw_hash_set_resize_impl_test.cc
new file mode 100644
index 0000000..5020fca
--- /dev/null
+++ b/absl/container/internal/raw_hash_set_resize_impl_test.cc
@@ -0,0 +1,66 @@
+// Copyright 2025 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/raw_hash_set_resize_impl.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <limits>
+
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+constexpr size_t kSmallSourceOffset = 17;
+constexpr size_t kSmallH1 = 25;
+
+template <class Encoder>
+class EncoderTest : public testing::Test {};
+
+using ProbedItemTypes =
+    ::testing::Types<ProbedItem4Bytes, ProbedItem8Bytes, ProbedItem16Bytes>;
+TYPED_TEST_SUITE(EncoderTest, ProbedItemTypes);
+
+TYPED_TEST(EncoderTest, EncodeDecodeSmall) {
+  using ProbedItem = TypeParam;
+  for (uint8_t h2 = 0; h2 < 128; ++h2) {
+    ProbedItem item(h2, kSmallSourceOffset, kSmallH1);
+    EXPECT_EQ(item.h2, h2);
+    EXPECT_EQ(item.source_offset, kSmallSourceOffset);
+    EXPECT_EQ(item.h1 & ProbedItem::kMaxNewCapacity, kSmallH1);
+  }
+}
+
+TYPED_TEST(EncoderTest, EncodeDecodeMax) {
+  using ProbedItem = TypeParam;
+  for (uint8_t h2 = 0; h2 < 128; ++h2) {
+    size_t source_offset = static_cast<size_t>(std::min<uint64_t>(
+        ProbedItem::kMaxOldCapacity, (std::numeric_limits<size_t>::max)()));
+    size_t h1 = static_cast<size_t>(std::min<uint64_t>(
+        ProbedItem::kMaxNewCapacity, (std::numeric_limits<size_t>::max)()));
+    ProbedItem item(h2, source_offset, h1);
+    EXPECT_EQ(item.h2, h2);
+    EXPECT_EQ(item.source_offset, source_offset);
+    EXPECT_EQ(item.h1 & ProbedItem::kMaxNewCapacity, h1);
+  }
+}
+
+}  // namespace
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 41cc6ec..2359269 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -14,8 +14,6 @@
 
 #include "absl/container/internal/raw_hash_set.h"
 
-#include <sys/types.h>
-
 #include <algorithm>
 #include <array>
 #include <atomic>
@@ -60,6 +58,7 @@
 #include "absl/container/internal/hashtable_control_bytes.h"
 #include "absl/container/internal/hashtable_debug.h"
 #include "absl/container/internal/hashtablez_sampler.h"
+#include "absl/container/internal/raw_hash_set_resize_impl.h"
 #include "absl/container/internal/test_allocator.h"
 #include "absl/container/internal/test_instance_tracker.h"
 #include "absl/container/node_hash_set.h"
@@ -80,8 +79,8 @@
 
 struct RawHashSetTestOnlyAccess {
   template <typename C>
-  static auto GetCommon(const C& c) -> decltype(c.common()) {
-    return c.common();
+  static auto GetCommon(C&& c) -> decltype(std::forward<C>(c).common()) {
+    return std::forward<C>(c).common();
   }
   template <typename C>
   static auto GetSlots(const C& c) -> decltype(c.slot_array()) {
@@ -282,9 +281,14 @@
 TEST(Util, GrowthAndCapacity) {
   // Verify that GrowthToCapacity gives the minimum capacity that has enough
   // growth.
-  for (size_t growth = 0; growth < 10000; ++growth) {
+  EXPECT_EQ(SizeToCapacity(0), 0);
+  EXPECT_EQ(SizeToCapacity(1), 1);
+  EXPECT_EQ(SizeToCapacity(2), 3);
+  EXPECT_EQ(SizeToCapacity(3), 3);
+  for (size_t growth = 1; growth < 10000; ++growth) {
     SCOPED_TRACE(growth);
-    size_t capacity = NormalizeCapacity(GrowthToLowerboundCapacity(growth));
+    size_t capacity = SizeToCapacity(growth);
+    ASSERT_TRUE(IsValidCapacity(capacity));
     // The capacity is large enough for `growth`.
     EXPECT_THAT(CapacityToGrowth(capacity), Ge(growth));
     // For (capacity+1) < kWidth, growth should equal capacity.
@@ -304,8 +308,8 @@
     SCOPED_TRACE(capacity);
     size_t growth = CapacityToGrowth(capacity);
     EXPECT_THAT(growth, Lt(capacity));
-    EXPECT_LE(GrowthToLowerboundCapacity(growth), capacity);
-    EXPECT_EQ(NormalizeCapacity(GrowthToLowerboundCapacity(growth)), capacity);
+    EXPECT_EQ(SizeToCapacity(growth), capacity);
+    EXPECT_EQ(NormalizeCapacity(SizeToCapacity(growth)), capacity);
   }
 }
 
@@ -2710,7 +2714,7 @@
 // in seed.
 void GenerateIrrelevantSeeds(int cnt) {
   for (int i = cnt % 17; i > 0; --i) {
-    NextSeedBaseNumber();
+    NextSeed();
   }
 }
 
@@ -3177,6 +3181,20 @@
   }
 }
 
+TYPED_TEST(SanitizerTest, PoisoningUnusedOnGrowth) {
+  TypeParam t;
+  for (int64_t i = 0; i < 100; ++i) {
+    t.insert(i);
+
+    int64_t* slots = RawHashSetTestOnlyAccess::GetSlots(t);
+    int poisoned = 0;
+    for (size_t i = 0; i < t.capacity(); ++i) {
+      poisoned += static_cast<int>(__asan_address_is_poisoned(slots + i));
+    }
+    ASSERT_EQ(poisoned, t.capacity() - t.size());
+  }
+}
+
 // TODO(b/289225379): poison inline space when empty SOO.
 TEST(Sanitizer, PoisoningOnErase) {
   NonSooIntTable t;
@@ -4088,7 +4106,7 @@
         ASSERT_FALSE(IsAboveValidSize(size_t{1} << 40, slot_size));
         ASSERT_GE(max_size, uint64_t{1} << 40);
       }
-      ASSERT_LT(NormalizeCapacity(GrowthToLowerboundCapacity(max_size)),
+      ASSERT_LT(SizeToCapacity(max_size),
                 uint64_t{1} << HashtableSize::kSizeBitCount);
       ASSERT_LT(absl::uint128(max_size) * slot_size, uint64_t{1} << 63);
     }
@@ -4101,8 +4119,7 @@
     ASSERT_FALSE(IsAboveValidSize</*kSizeOfSizeT=*/4>(max_size, slot_size));
     ASSERT_TRUE(IsAboveValidSize</*kSizeOfSizeT=*/4>(max_size + 1, slot_size));
     ASSERT_LT(max_size, 1 << 30);
-    size_t max_capacity =
-        NormalizeCapacity(GrowthToLowerboundCapacity(max_size));
+    size_t max_capacity = SizeToCapacity(max_size);
     ASSERT_LT(max_capacity, (size_t{1} << 31) / slot_size);
     ASSERT_GT(max_capacity, (1 << 29) / slot_size);
     ASSERT_LT(max_capacity * slot_size, size_t{1} << 31);
@@ -4124,9 +4141,13 @@
                             "Hash table size overflow");
   EXPECT_DEATH_IF_SUPPORTED(t.rehash(slightly_overflow),
                             "Hash table size overflow");
+  IntTable non_empty_table;
+  non_empty_table.insert(0);
+  EXPECT_DEATH_IF_SUPPORTED(non_empty_table.reserve(slightly_overflow),
+                            "Hash table size overflow");
 }
 
-// TODO(b/397453582): Remove support for const hasher and ermove this test.
+// TODO(b/397453582): Remove support for const hasher and remove this test.
 TEST(Table, ConstLambdaHash) {
   int64_t multiplier = 17;
   // Make sure that code compiles and work OK with non-empty hasher with const
@@ -4147,6 +4168,64 @@
   EXPECT_EQ(t.find(3), t.end());
 }
 
+struct ConstUint8Hash {
+  size_t operator()(uint8_t) const { return *value; }
+  size_t* value;
+};
+
+// This test imitates the growth of a very big table and exercises all buffer
+// overflow codepaths.
+// We try to insert all elements into the first probe group.
+// So the resize codepath in the test does the following:
+// 1. Insert 16 elements into the first probe group. No other elements will be
+//    inserted into the first probe group.
+// 2. There will be enough elements to fill up the local buffer even for the
+//    4-byte encoding.
+// 3. After the local buffer is full, we will fill up the control buffer up to
+//    some point.
+// 4. Then we will extend the control buffer end a few times.
+// 5. Finally we will catch up and go to the overflow codepath.
+TEST(Table, GrowExtremelyLargeTable) {
+  constexpr size_t kTargetCapacity =
+#if defined(__wasm__) || defined(__asmjs__)
+      NextCapacity(ProbedItem4Bytes::kMaxNewCapacity);  // OOMs on WASM.
+#else
+      NextCapacity(ProbedItem8Bytes::kMaxNewCapacity);
+#endif
+
+  size_t hash = 0;
+  // In order to save memory we use a 1-byte slot.
+  // There are not enough distinct values to reach a big capacity, so we
+  // artificially update the growth info to force resizes.
+  absl::flat_hash_set<uint8_t, ConstUint8Hash> t(63, ConstUint8Hash{&hash});
+  CommonFields& common = RawHashSetTestOnlyAccess::GetCommon(t);
+  // Derive the hash value from the seed so that H1 is always 0.
+  // That helps to test all buffer overflow codepaths in GrowToNextCapacity.
+  hash = common.seed().seed() << 7;
+  ASSERT_EQ(H1(t.hash_function()(75), common.seed()), 0);
+  uint8_t inserted_till = 210;
+  for (uint8_t i = 0; i < inserted_till; ++i) {
+    t.insert(i);
+  }
+  for (uint8_t i = 0; i < inserted_till; ++i) {
+    ASSERT_TRUE(t.contains(i));
+  }
+
+  for (size_t cap = t.capacity(); cap < kTargetCapacity;
+       cap = NextCapacity(cap)) {
+    ASSERT_EQ(t.capacity(), cap);
+    // Update growth info to force resize on the next insert.
+    common.growth_info().OverwriteManyEmptyAsFull(CapacityToGrowth(cap) -
+                                                  t.size());
+    t.insert(inserted_till++);
+    ASSERT_EQ(t.capacity(), NextCapacity(cap));
+    for (uint8_t i = 0; i < inserted_till; ++i) {
+      ASSERT_TRUE(t.contains(i));
+    }
+  }
+  EXPECT_EQ(t.capacity(), kTargetCapacity);
+}
+
 }  // namespace
 }  // namespace container_internal
 ABSL_NAMESPACE_END
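
The updated GrowthAndCapacity hunk above checks that SizeToCapacity returns the smallest valid capacity whose growth bound covers a requested size. The following is a minimal standalone sketch of that invariant, assuming capacities of the form 2^k - 1 and a roughly 7/8 max load factor; the helper names are hypothetical and this is not the library implementation.

#include <cassert>
#include <cstddef>

constexpr size_t GrowthFor(size_t capacity) {
  // Assumption: small tables can fill completely; larger ones keep ~1/8 free.
  return capacity < 15 ? capacity : capacity - capacity / 8;
}

constexpr size_t SmallestCapacityFor(size_t size) {
  size_t capacity = 1;  // valid capacities: 1, 3, 7, 15, ... (2^k - 1)
  while (GrowthFor(capacity) < size) capacity = capacity * 2 + 1;
  return capacity;
}

int main() {
  for (size_t n = 1; n < 10000; ++n) {
    size_t c = SmallestCapacityFor(n);
    assert(GrowthFor(c) >= n);               // large enough for n elements
    assert(c == 1 || GrowthFor(c / 2) < n);  // minimal among 2^k - 1
  }
}
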
diff --git a/absl/debugging/internal/decode_rust_punycode.cc b/absl/debugging/internal/decode_rust_punycode.cc
index 43b46bf..6652dc2 100644
--- a/absl/debugging/internal/decode_rust_punycode.cc
+++ b/absl/debugging/internal/decode_rust_punycode.cc
@@ -172,7 +172,7 @@
 
 }  // namespace
 
-absl::Nullable<char*> DecodeRustPunycode(DecodeRustPunycodeOptions options) {
+char* absl_nullable DecodeRustPunycode(DecodeRustPunycodeOptions options) {
   const char* punycode_begin = options.punycode_begin;
   const char* const punycode_end = options.punycode_end;
   char* const out_begin = options.out_begin;
diff --git a/absl/debugging/internal/decode_rust_punycode.h b/absl/debugging/internal/decode_rust_punycode.h
index 0ae53ff..b1b1c97 100644
--- a/absl/debugging/internal/decode_rust_punycode.h
+++ b/absl/debugging/internal/decode_rust_punycode.h
@@ -46,7 +46,7 @@
 // DecodeRustPunycode is async-signal-safe with bounded runtime and a small
 // stack footprint, making it suitable for use in demangling Rust symbol names
 // from a signal handler.
-absl::Nullable<char*> DecodeRustPunycode(DecodeRustPunycodeOptions options);
+char* absl_nullable DecodeRustPunycode(DecodeRustPunycodeOptions options);
 
 }  // namespace debugging_internal
 ABSL_NAMESPACE_END
diff --git a/absl/debugging/internal/stacktrace_win32-inl.inc b/absl/debugging/internal/stacktrace_win32-inl.inc
index 513a392..f57c187 100644
--- a/absl/debugging/internal/stacktrace_win32-inl.inc
+++ b/absl/debugging/internal/stacktrace_win32-inl.inc
@@ -37,39 +37,20 @@
 #ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_WIN32_INL_H_
 #define ABSL_DEBUGGING_INTERNAL_STACKTRACE_WIN32_INL_H_
 
-#include <windows.h>    // for GetProcAddress and GetModuleHandle
+#include <windows.h>  // CaptureStackBackTrace
+
 #include <cassert>
 
-typedef USHORT NTAPI RtlCaptureStackBackTrace_Function(
-    IN ULONG frames_to_skip,
-    IN ULONG frames_to_capture,
-    OUT PVOID *backtrace,
-    OUT PULONG backtrace_hash);
-
-// It is not possible to load RtlCaptureStackBackTrace at static init time in
-// UWP. CaptureStackBackTrace is the public version of RtlCaptureStackBackTrace
-#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) && \
-    !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
-static RtlCaptureStackBackTrace_Function* const RtlCaptureStackBackTrace_fn =
-    &::CaptureStackBackTrace;
-#else
-// Load the function we need at static init time, where we don't have
-// to worry about someone else holding the loader's lock.
-static RtlCaptureStackBackTrace_Function* const RtlCaptureStackBackTrace_fn =
-    (RtlCaptureStackBackTrace_Function*)GetProcAddress(
-        GetModuleHandleA("ntdll.dll"), "RtlCaptureStackBackTrace");
-#endif  // WINAPI_PARTITION_APP && !WINAPI_PARTITION_DESKTOP
-
 template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
 static int UnwindImpl(void** result, uintptr_t* frames, int* sizes,
                       int max_depth, int skip_count, const void*,
                       int* min_dropped_frames) {
   USHORT n = 0;
-  if (!RtlCaptureStackBackTrace_fn || skip_count < 0 || max_depth < 0) {
+  if (skip_count < 0 || max_depth < 0) {
     // can't get a stacktrace with no function/invalid args
   } else {
-    n = RtlCaptureStackBackTrace_fn(static_cast<ULONG>(skip_count) + 2,
-                                    static_cast<ULONG>(max_depth), result, 0);
+    n = CaptureStackBackTrace(static_cast<ULONG>(skip_count) + 2,
+                              static_cast<ULONG>(max_depth), result, 0);
   }
   if (IS_STACK_FRAMES) {
     // No implementation for finding out the stack frames yet.
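
For reference, a minimal usage sketch of the CaptureStackBackTrace call the simplified implementation now relies on (Windows-only; the buffer size and skip count below are illustrative):

#include <windows.h>

#include <cstdio>

int main() {
  void* frames[64];
  // Skip no frames, capture at most 64; the hash output parameter is optional.
  USHORT n = CaptureStackBackTrace(/*FramesToSkip=*/0, /*FramesToCapture=*/64,
                                   frames, /*BackTraceHash=*/nullptr);
  for (USHORT i = 0; i < n; ++i) std::printf("%p\n", frames[i]);
}
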
diff --git a/absl/flags/BUILD.bazel b/absl/flags/BUILD.bazel
index 5e9bec4..620af2b 100644
--- a/absl/flags/BUILD.bazel
+++ b/absl/flags/BUILD.bazel
@@ -192,6 +192,7 @@
         ":private_handle_accessor",
         "//absl/base:config",
         "//absl/base:core_headers",
+        "//absl/base:fast_type_id",
         "//absl/base:no_destructor",
         "//absl/container:flat_hash_map",
         "//absl/strings",
@@ -221,6 +222,7 @@
         "//absl/base:config",
         "//absl/base:core_headers",
         "//absl/base:dynamic_annotations",
+        "//absl/base:fast_type_id",
         "//absl/base:no_destructor",
         "//absl/memory",
         "//absl/meta:type_traits",
diff --git a/absl/flags/CMakeLists.txt b/absl/flags/CMakeLists.txt
index f995957..d0ea452 100644
--- a/absl/flags/CMakeLists.txt
+++ b/absl/flags/CMakeLists.txt
@@ -165,6 +165,7 @@
     ${ABSL_DEFAULT_LINKOPTS}
   DEPS
     absl::config
+    absl::fast_type_id
     absl::flags_commandlineflag
     absl::flags_private_handle_accessor
     absl::flags_config
@@ -190,6 +191,7 @@
   DEPS
     absl::base
     absl::config
+    absl::fast_type_id
     absl::flags_commandlineflag
     absl::flags_commandlineflag_internal
     absl::flags_config
diff --git a/absl/flags/commandlineflag.h b/absl/flags/commandlineflag.h
index a9ffd02..9098b4c 100644
--- a/absl/flags/commandlineflag.h
+++ b/absl/flags/commandlineflag.h
@@ -30,7 +30,7 @@
 #include <string>
 
 #include "absl/base/config.h"
-#include "absl/base/internal/fast_type_id.h"
+#include "absl/base/fast_type_id.h"
 #include "absl/flags/internal/commandlineflag.h"
 #include "absl/strings/string_view.h"
 #include "absl/types/optional.h"
@@ -80,7 +80,7 @@
   // Return true iff flag has type T.
   template <typename T>
   inline bool IsOfType() const {
-    return TypeId() == base_internal::FastTypeId<T>();
+    return TypeId() == FastTypeId<T>();
   }
 
   // absl::CommandLineFlag::TryGet()
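
IsOfType above compares the flag's stored type id against FastTypeId<T>(). A minimal sketch of the idea behind such an RTTI-free type id, assuming one unique static address per instantiated type (hypothetical names, not the library implementation):

#include <cassert>

using TypeIdSketch = const void*;

template <typename T>
TypeIdSketch FastTypeIdSketch() {
  static const char kTag = 0;  // one distinct address per type T
  return &kTag;
}

int main() {
  assert(FastTypeIdSketch<int>() == FastTypeIdSketch<int>());
  assert(FastTypeIdSketch<int>() != FastTypeIdSketch<double>());
}
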
diff --git a/absl/flags/internal/commandlineflag.h b/absl/flags/internal/commandlineflag.h
index ebfe81b..daef4e3 100644
--- a/absl/flags/internal/commandlineflag.h
+++ b/absl/flags/internal/commandlineflag.h
@@ -17,7 +17,7 @@
 #define ABSL_FLAGS_INTERNAL_COMMANDLINEFLAG_H_
 
 #include "absl/base/config.h"
-#include "absl/base/internal/fast_type_id.h"
+#include "absl/base/fast_type_id.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -28,7 +28,7 @@
 // cases this id is enough to uniquely identify the flag's value type. In a few
 // cases we'll have to resort to using actual RTTI implementation if it is
 // available.
-using FlagFastTypeId = absl::base_internal::FastTypeIdType;
+using FlagFastTypeId = absl::FastTypeIdType;
 
 // Options that control SetCommandLineOptionWithMode.
 enum FlagSettingMode {
diff --git a/absl/flags/internal/flag.cc b/absl/flags/internal/flag.cc
index ccd2667..37f6ef1 100644
--- a/absl/flags/internal/flag.cc
+++ b/absl/flags/internal/flag.cc
@@ -34,6 +34,7 @@
 #include "absl/base/config.h"
 #include "absl/base/const_init.h"
 #include "absl/base/dynamic_annotations.h"
+#include "absl/base/fast_type_id.h"
 #include "absl/base/no_destructor.h"
 #include "absl/base/optimization.h"
 #include "absl/base/thread_annotations.h"
@@ -59,7 +60,7 @@
 // Currently we only validate flag values for user-defined flag types.
 bool ShouldValidateFlagValue(FlagFastTypeId flag_type_id) {
 #define DONT_VALIDATE(T, _) \
-  if (flag_type_id == base_internal::FastTypeId<T>()) return false;
+  if (flag_type_id == absl::FastTypeId<T>()) return false;
   ABSL_FLAGS_INTERNAL_SUPPORTED_TYPES(DONT_VALIDATE)
 #undef DONT_VALIDATE
 
diff --git a/absl/flags/internal/flag.h b/absl/flags/internal/flag.h
index ef739e7..b61a247 100644
--- a/absl/flags/internal/flag.h
+++ b/absl/flags/internal/flag.h
@@ -828,7 +828,7 @@
     U u;
 
 #if !defined(NDEBUG)
-    impl_.AssertValidType(base_internal::FastTypeId<T>(), &GenRuntimeTypeId<T>);
+    impl_.AssertValidType(absl::FastTypeId<T>(), &GenRuntimeTypeId<T>);
 #endif
 
     if (ABSL_PREDICT_FALSE(!value_.Get(impl_.seq_lock_, u.value))) {
@@ -837,7 +837,7 @@
     return std::move(u.value);
   }
   void Set(const T& v) {
-    impl_.AssertValidType(base_internal::FastTypeId<T>(), &GenRuntimeTypeId<T>);
+    impl_.AssertValidType(absl::FastTypeId<T>(), &GenRuntimeTypeId<T>);
     impl_.Write(&v);
   }
 
@@ -902,7 +902,7 @@
     case FlagOp::kSizeof:
       return reinterpret_cast<void*>(static_cast<uintptr_t>(sizeof(T)));
     case FlagOp::kFastTypeId:
-      return const_cast<void*>(base_internal::FastTypeId<T>());
+      return const_cast<void*>(absl::FastTypeId<T>());
     case FlagOp::kRuntimeTypeId:
       return const_cast<std::type_info*>(GenRuntimeTypeId<T>());
     case FlagOp::kParse: {
diff --git a/absl/flags/internal/registry.h b/absl/flags/internal/registry.h
index a57ba3c..be9aacc 100644
--- a/absl/flags/internal/registry.h
+++ b/absl/flags/internal/registry.h
@@ -19,6 +19,7 @@
 #include <functional>
 
 #include "absl/base/config.h"
+#include "absl/base/fast_type_id.h"
 #include "absl/flags/commandlineflag.h"
 #include "absl/flags/internal/commandlineflag.h"
 #include "absl/strings/string_view.h"
@@ -83,7 +84,7 @@
 class RetiredFlag {
  public:
   void Retire(const char* flag_name) {
-    flags_internal::Retire(flag_name, base_internal::FastTypeId<T>(), buf_);
+    flags_internal::Retire(flag_name, absl::FastTypeId<T>(), buf_);
   }
 
  private:
diff --git a/absl/hash/BUILD.bazel b/absl/hash/BUILD.bazel
index 84911c9..30f78d4 100644
--- a/absl/hash/BUILD.bazel
+++ b/absl/hash/BUILD.bazel
@@ -86,6 +86,7 @@
         ":hash_testing",
         ":spy_hash_state",
         "//absl/base:config",
+        "//absl/container:flat_hash_map",
         "//absl/container:flat_hash_set",
         "//absl/memory",
         "//absl/meta:type_traits",
diff --git a/absl/hash/CMakeLists.txt b/absl/hash/CMakeLists.txt
index 4b43787..cc46aab 100644
--- a/absl/hash/CMakeLists.txt
+++ b/absl/hash/CMakeLists.txt
@@ -70,6 +70,7 @@
   DEPS
     absl::bits
     absl::cord_test_helpers
+    absl::flat_hash_map
     absl::flat_hash_set
     absl::hash
     absl::hash_testing
diff --git a/absl/hash/hash_test.cc b/absl/hash/hash_test.cc
index e25c0ac..b751a16 100644
--- a/absl/hash/hash_test.cc
+++ b/absl/hash/hash_test.cc
@@ -38,6 +38,7 @@
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
 #include "absl/base/config.h"
+#include "absl/container/flat_hash_map.h"
 #include "absl/container/flat_hash_set.h"
 #include "absl/hash/hash_testing.h"
 #include "absl/hash/internal/hash_test.h"
@@ -400,7 +401,7 @@
   EXPECT_TRUE((is_hashable<__int128_t>::value));
   EXPECT_TRUE((is_hashable<__uint128_t>::value));
 
-  absl::flat_hash_set<size_t> hashes;
+  absl::flat_hash_map<size_t, int> hash_to_index;
   std::vector<__uint128_t> values;
   for (int i = 0; i < 128; ++i) {
     // Some arbitrary pattern to check if changing each bit changes the hash.
@@ -411,13 +412,14 @@
     const __int128_t as_signed = static_cast<__int128_t>(value);
 
     values.push_back(value);
-    hashes.insert(absl::Hash<__uint128_t>{}(value));
+    auto [it, inserted] =
+        hash_to_index.insert({absl::Hash<__uint128_t>{}(value), i});
+    ASSERT_TRUE(inserted) << "Duplicate hash: " << i << " vs " << it->second;
 
     // Verify that the fast-path for MixingHashState does not break the hash.
     EXPECT_EQ(absl::HashOf(value), absl::Hash<__uint128_t>{}(value));
     EXPECT_EQ(absl::HashOf(as_signed), absl::Hash<__int128_t>{}(as_signed));
   }
-  EXPECT_THAT(hashes, SizeIs(128));
 
   EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(values));
   EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
@@ -706,7 +708,9 @@
   //
   // This test is run on a buffer that is a multiple of the stride size, and one
   // that isn't.
-  for (size_t big_buffer_size : {1024u * 2 + 512u, 1024u * 3}) {
+  const size_t kChunkSize = absl::hash_internal::PiecewiseChunkSize();
+  for (size_t big_buffer_size :
+       {2 * kChunkSize + kChunkSize / 2, 3 * kChunkSize}) {
     SCOPED_TRACE(big_buffer_size);
     std::string big_buffer;
     for (size_t i = 0; i < big_buffer_size; ++i) {
@@ -716,8 +720,15 @@
     auto big_buffer_hash = hash(PiecewiseHashTester(big_buffer));
 
     const int possible_breaks = 9;
-    size_t breaks[possible_breaks] = {1,    512,  1023, 1024, 1025,
-                                      1536, 2047, 2048, 2049};
+    size_t breaks[possible_breaks] = {1,
+                                      kChunkSize / 2,
+                                      kChunkSize - 1,
+                                      kChunkSize,
+                                      kChunkSize + 1,
+                                      kChunkSize + kChunkSize / 2,
+                                      2 * kChunkSize - 1,
+                                      2 * kChunkSize,
+                                      2 * kChunkSize + 1};
     for (unsigned test_mask = 0; test_mask < (1u << possible_breaks);
          ++test_mask) {
       SCOPED_TRACE(test_mask);
@@ -727,7 +738,7 @@
           break_locations.insert(breaks[j]);
         }
       }
-      EXPECT_EQ(
+      ASSERT_EQ(
           hash(PiecewiseHashTester(big_buffer, std::move(break_locations))),
           big_buffer_hash);
     }
diff --git a/absl/hash/internal/hash.h b/absl/hash/internal/hash.h
index 4db7f0f..3a34bde 100644
--- a/absl/hash/internal/hash.h
+++ b/absl/hash/internal/hash.h
@@ -1222,8 +1222,8 @@
                                                size_t len);
 
   // Reads 9 to 16 bytes from p.
-  // The least significant 8 bytes are in .first, the rest (zero padded) bytes
-  // are in .second.
+  // The least significant 8 bytes are in .first, and the rest of the bytes are
+  // in .second, along with duplicated bytes from .first if len < 16.
   static std::pair<uint64_t, uint64_t> Read9To16(const unsigned char* p,
                                                  size_t len) {
     uint64_t low_mem = Read8(p);
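
A standalone sketch of the overlapping-read behavior the updated comment describes, assuming the usual two-load trick for 9-16 byte inputs (hypothetical helpers, not the library implementation): the second load starts at p + len - 8, so when len < 16 it re-reads bytes already covered by the first load.

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <utility>

uint64_t Load8(const unsigned char* p) {
  uint64_t v;
  std::memcpy(&v, p, sizeof(v));  // unaligned-safe load
  return v;
}

std::pair<uint64_t, uint64_t> Read9To16Sketch(const unsigned char* p,
                                              size_t len) {
  // First 8 bytes and last 8 bytes; the two windows overlap when len < 16.
  return {Load8(p), Load8(p + len - 8)};
}
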
diff --git a/absl/log/internal/BUILD.bazel b/absl/log/internal/BUILD.bazel
index 5d54b6f..c052105 100644
--- a/absl/log/internal/BUILD.bazel
+++ b/absl/log/internal/BUILD.bazel
@@ -68,9 +68,7 @@
     hdrs = ["check_op.h"],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    visibility = [
-        "//absl/log:__pkg__",
-    ],
+    visibility = ["//absl/log:__pkg__"],
     deps = [
         ":nullguard",
         ":nullstream",
diff --git a/absl/log/internal/check_op.cc b/absl/log/internal/check_op.cc
index bda8c09..23db63b 100644
--- a/absl/log/internal/check_op.cc
+++ b/absl/log/internal/check_op.cc
@@ -36,8 +36,8 @@
 namespace log_internal {
 
 #define ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(x) \
-  template absl::Nonnull<const char*> MakeCheckOpString( \
-      x, x, absl::Nonnull<const char*>)
+  template const char* absl_nonnull MakeCheckOpString(   \
+      x, x, const char* absl_nonnull)
 ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(bool);
 ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(int64_t);
 ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING(uint64_t);
@@ -54,7 +54,7 @@
 #undef ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING
 
 CheckOpMessageBuilder::CheckOpMessageBuilder(
-    absl::Nonnull<const char*> exprtext) {
+    const char* absl_nonnull exprtext) {
   stream_ << exprtext << " (";
 }
 
@@ -63,7 +63,7 @@
   return stream_;
 }
 
-absl::Nonnull<const char*> CheckOpMessageBuilder::NewString() {
+const char* absl_nonnull CheckOpMessageBuilder::NewString() {
   stream_ << ")";
   // There's no need to free this string since the process is crashing.
   return absl::IgnoreLeak(new std::string(std::move(stream_).str()))->c_str();
@@ -103,9 +103,9 @@
 
 // Helper functions for string comparisons.
 #define DEFINE_CHECK_STROP_IMPL(name, func, expected)                          \
-  absl::Nullable<const char*> Check##func##expected##Impl(                     \
-      absl::Nullable<const char*> s1, absl::Nullable<const char*> s2,          \
-      absl::Nonnull<const char*> exprtext) {                                   \
+  const char* absl_nullable Check##func##expected##Impl(                       \
+      const char* absl_nullable s1, const char* absl_nullable s2,              \
+      const char* absl_nonnull exprtext) {                                     \
     bool equal = s1 == s2 || (s1 && s2 && !func(s1, s2));                      \
     if (equal == expected) {                                                   \
       return nullptr;                                                          \
diff --git a/absl/log/internal/check_op.h b/absl/log/internal/check_op.h
index 8d7ade4..dc7d19e 100644
--- a/absl/log/internal/check_op.h
+++ b/absl/log/internal/check_op.h
@@ -64,49 +64,48 @@
 #endif
 
 #define ABSL_LOG_INTERNAL_CHECK_OP(name, op, val1, val1_text, val2, val2_text) \
-  while (absl::Nullable<const char*> absl_log_internal_check_op_result         \
-             [[maybe_unused]] =                 \
-                 ::absl::log_internal::name##Impl(                             \
-                     ::absl::log_internal::GetReferenceableValue(val1),        \
-                     ::absl::log_internal::GetReferenceableValue(val2),        \
-                     ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(                   \
-                         val1_text " " #op " " val2_text)))                    \
+  while (const char* absl_nullable absl_log_internal_check_op_result           \
+         [[maybe_unused]] = ::absl::log_internal::name##Impl(                  \
+             ::absl::log_internal::GetReferenceableValue(val1),                \
+             ::absl::log_internal::GetReferenceableValue(val2),                \
+             ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(val1_text " " #op          \
+                                                              " " val2_text))) \
     ABSL_LOG_INTERNAL_CONDITION_FATAL(STATELESS, true)                         \
-  ABSL_LOG_INTERNAL_CHECK(absl::implicit_cast<absl::Nonnull<const char*>>(     \
+  ABSL_LOG_INTERNAL_CHECK(::absl::implicit_cast<const char* absl_nonnull>(     \
                               absl_log_internal_check_op_result))              \
       .InternalStream()
 #define ABSL_LOG_INTERNAL_QCHECK_OP(name, op, val1, val1_text, val2,        \
                                     val2_text)                              \
-  while (absl::Nullable<const char*> absl_log_internal_qcheck_op_result =   \
+  while (const char* absl_nullable absl_log_internal_qcheck_op_result =     \
              ::absl::log_internal::name##Impl(                              \
                  ::absl::log_internal::GetReferenceableValue(val1),         \
                  ::absl::log_internal::GetReferenceableValue(val2),         \
                  ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(                    \
                      val1_text " " #op " " val2_text)))                     \
     ABSL_LOG_INTERNAL_CONDITION_QFATAL(STATELESS, true)                     \
-  ABSL_LOG_INTERNAL_QCHECK(absl::implicit_cast<absl::Nonnull<const char*>>( \
+  ABSL_LOG_INTERNAL_QCHECK(::absl::implicit_cast<const char* absl_nonnull>( \
                                absl_log_internal_qcheck_op_result))         \
       .InternalStream()
 #define ABSL_LOG_INTERNAL_CHECK_STROP(func, op, expected, s1, s1_text, s2,     \
                                       s2_text)                                 \
-  while (absl::Nullable<const char*> absl_log_internal_check_strop_result =    \
+  while (const char* absl_nullable absl_log_internal_check_strop_result =      \
              ::absl::log_internal::Check##func##expected##Impl(                \
                  (s1), (s2),                                                   \
                  ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(s1_text " " #op        \
                                                                 " " s2_text))) \
     ABSL_LOG_INTERNAL_CONDITION_FATAL(STATELESS, true)                         \
-  ABSL_LOG_INTERNAL_CHECK(absl::implicit_cast<absl::Nonnull<const char*>>(     \
+  ABSL_LOG_INTERNAL_CHECK(::absl::implicit_cast<const char* absl_nonnull>(     \
                               absl_log_internal_check_strop_result))           \
       .InternalStream()
 #define ABSL_LOG_INTERNAL_QCHECK_STROP(func, op, expected, s1, s1_text, s2,    \
                                        s2_text)                                \
-  while (absl::Nullable<const char*> absl_log_internal_qcheck_strop_result =   \
+  while (const char* absl_nullable absl_log_internal_qcheck_strop_result =     \
              ::absl::log_internal::Check##func##expected##Impl(                \
                  (s1), (s2),                                                   \
                  ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(s1_text " " #op        \
                                                                 " " s2_text))) \
     ABSL_LOG_INTERNAL_CONDITION_QFATAL(STATELESS, true)                        \
-  ABSL_LOG_INTERNAL_QCHECK(absl::implicit_cast<absl::Nonnull<const char*>>(    \
+  ABSL_LOG_INTERNAL_QCHECK(::absl::implicit_cast<const char* absl_nonnull>(    \
                                absl_log_internal_qcheck_strop_result))         \
       .InternalStream()
 
@@ -135,8 +134,8 @@
 //   strip the call to stringify the non-ok `Status` as long as we don't log it;
 //   dropping the `Status`'s message text is out of scope.
 #define ABSL_LOG_INTERNAL_CHECK_OK(val, val_text)                          \
-  for (::std::pair<absl::Nonnull<const ::absl::Status*>,                   \
-                   absl::Nullable<const char*>>                            \
+  for (::std::pair<const ::absl::Status* absl_nonnull,                     \
+                   const char* absl_nullable>                              \
            absl_log_internal_check_ok_goo;                                 \
        absl_log_internal_check_ok_goo.first =                              \
            ::absl::log_internal::AsStatus(val),                            \
@@ -149,12 +148,12 @@
                                                             " is OK")),    \
        !ABSL_PREDICT_TRUE(absl_log_internal_check_ok_goo.first->ok());)    \
     ABSL_LOG_INTERNAL_CONDITION_FATAL(STATELESS, true)                     \
-  ABSL_LOG_INTERNAL_CHECK(absl::implicit_cast<absl::Nonnull<const char*>>( \
+  ABSL_LOG_INTERNAL_CHECK(::absl::implicit_cast<const char* absl_nonnull>( \
                               absl_log_internal_check_ok_goo.second))      \
       .InternalStream()
 #define ABSL_LOG_INTERNAL_QCHECK_OK(val, val_text)                          \
-  for (::std::pair<absl::Nonnull<const ::absl::Status*>,                    \
-                   absl::Nullable<const char*>>                             \
+  for (::std::pair<const ::absl::Status* absl_nonnull,                      \
+                   const char* absl_nullable>                               \
            absl_log_internal_qcheck_ok_goo;                                 \
        absl_log_internal_qcheck_ok_goo.first =                              \
            ::absl::log_internal::AsStatus(val),                             \
@@ -167,7 +166,7 @@
                                                             " is OK")),     \
        !ABSL_PREDICT_TRUE(absl_log_internal_qcheck_ok_goo.first->ok());)    \
     ABSL_LOG_INTERNAL_CONDITION_QFATAL(STATELESS, true)                     \
-  ABSL_LOG_INTERNAL_QCHECK(absl::implicit_cast<absl::Nonnull<const char*>>( \
+  ABSL_LOG_INTERNAL_QCHECK(::absl::implicit_cast<const char* absl_nonnull>( \
                                absl_log_internal_qcheck_ok_goo.second))     \
       .InternalStream()
 
@@ -179,9 +178,8 @@
 class StatusOr;
 
 namespace status_internal {
-ABSL_ATTRIBUTE_PURE_FUNCTION absl::Nonnull<const char*> MakeCheckFailString(
-    absl::Nonnull<const absl::Status*> status,
-    absl::Nonnull<const char*> prefix);
+ABSL_ATTRIBUTE_PURE_FUNCTION const char* absl_nonnull MakeCheckFailString(
+    const absl::Status* absl_nonnull status, const char* absl_nonnull prefix);
 }  // namespace status_internal
 
 namespace log_internal {
@@ -189,11 +187,11 @@
 // Convert a Status or a StatusOr to its underlying status value.
 //
 // (This implementation does not require a dep on absl::Status to work.)
-inline absl::Nonnull<const absl::Status*> AsStatus(const absl::Status& s) {
+inline const absl::Status* absl_nonnull AsStatus(const absl::Status& s) {
   return &s;
 }
 template <typename T>
-absl::Nonnull<const absl::Status*> AsStatus(const absl::StatusOr<T>& s) {
+const absl::Status* absl_nonnull AsStatus(const absl::StatusOr<T>& s) {
   return &s.status();
 }
 
@@ -202,14 +200,14 @@
 class CheckOpMessageBuilder final {
  public:
   // Inserts `exprtext` and ` (` to the stream.
-  explicit CheckOpMessageBuilder(absl::Nonnull<const char*> exprtext);
+  explicit CheckOpMessageBuilder(const char* absl_nonnull exprtext);
   ~CheckOpMessageBuilder() = default;
   // For inserting the first variable.
   std::ostream& ForVar1() { return stream_; }
   // For inserting the second variable (adds an intermediate ` vs. `).
   std::ostream& ForVar2();
   // Get the result (inserts the closing `)`).
-  absl::Nonnull<const char*> NewString();
+  const char* absl_nonnull NewString();
 
  private:
   std::ostringstream stream_;
@@ -352,12 +350,12 @@
 
 // Build the error message string.  Specify no inlining for code size.
 template <typename T1, typename T2>
-ABSL_ATTRIBUTE_RETURNS_NONNULL absl::Nonnull<const char*> MakeCheckOpString(
-    T1 v1, T2 v2, absl::Nonnull<const char*> exprtext) ABSL_ATTRIBUTE_NOINLINE;
+ABSL_ATTRIBUTE_RETURNS_NONNULL const char* absl_nonnull MakeCheckOpString(
+    T1 v1, T2 v2, const char* absl_nonnull exprtext) ABSL_ATTRIBUTE_NOINLINE;
 
 template <typename T1, typename T2>
-absl::Nonnull<const char*> MakeCheckOpString(
-    T1 v1, T2 v2, absl::Nonnull<const char*> exprtext) {
+const char* absl_nonnull MakeCheckOpString(T1 v1, T2 v2,
+                                           const char* absl_nonnull exprtext) {
   CheckOpMessageBuilder comb(exprtext);
   MakeCheckOpValueString(comb.ForVar1(), v1);
   MakeCheckOpValueString(comb.ForVar2(), v2);
@@ -367,8 +365,8 @@
 // Add a few commonly used instantiations as extern to reduce size of objects
 // files.
 #define ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(x) \
-  extern template absl::Nonnull<const char*> MakeCheckOpString( \
-      x, x, absl::Nonnull<const char*>)
+  extern template const char* absl_nonnull MakeCheckOpString(   \
+      x, x, const char* absl_nonnull)
 ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(bool);
 ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(int64_t);
 ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(uint64_t);
@@ -404,8 +402,8 @@
 // type.
 #define ABSL_LOG_INTERNAL_CHECK_OP_IMPL(name, op)                          \
   template <typename T1, typename T2>                                      \
-  inline constexpr absl::Nullable<const char*> name##Impl(                 \
-      const T1& v1, const T2& v2, absl::Nonnull<const char*> exprtext) {   \
+  inline constexpr const char* absl_nullable name##Impl(                   \
+      const T1& v1, const T2& v2, const char* absl_nonnull exprtext) {     \
     using U1 = CheckOpStreamType<T1>;                                      \
     using U2 = CheckOpStreamType<T2>;                                      \
     return ABSL_PREDICT_TRUE(v1 op v2)                                     \
@@ -413,8 +411,8 @@
                : ABSL_LOG_INTERNAL_CHECK_OP_IMPL_RESULT(U1, U2, U1(v1),    \
                                                         U2(v2), exprtext); \
   }                                                                        \
-  inline constexpr absl::Nullable<const char*> name##Impl(                 \
-      int v1, int v2, absl::Nonnull<const char*> exprtext) {               \
+  inline constexpr const char* absl_nullable name##Impl(                   \
+      int v1, int v2, const char* absl_nonnull exprtext) {                 \
     return name##Impl<int, int>(v1, v2, exprtext);                         \
   }
 
@@ -427,18 +425,18 @@
 #undef ABSL_LOG_INTERNAL_CHECK_OP_IMPL_RESULT
 #undef ABSL_LOG_INTERNAL_CHECK_OP_IMPL
 
-absl::Nullable<const char*> CheckstrcmptrueImpl(
-    absl::Nullable<const char*> s1, absl::Nullable<const char*> s2,
-    absl::Nonnull<const char*> exprtext);
-absl::Nullable<const char*> CheckstrcmpfalseImpl(
-    absl::Nullable<const char*> s1, absl::Nullable<const char*> s2,
-    absl::Nonnull<const char*> exprtext);
-absl::Nullable<const char*> CheckstrcasecmptrueImpl(
-    absl::Nullable<const char*> s1, absl::Nullable<const char*> s2,
-    absl::Nonnull<const char*> exprtext);
-absl::Nullable<const char*> CheckstrcasecmpfalseImpl(
-    absl::Nullable<const char*> s1, absl::Nullable<const char*> s2,
-    absl::Nonnull<const char*> exprtext);
+const char* absl_nullable CheckstrcmptrueImpl(
+    const char* absl_nullable s1, const char* absl_nullable s2,
+    const char* absl_nonnull exprtext);
+const char* absl_nullable CheckstrcmpfalseImpl(
+    const char* absl_nullable s1, const char* absl_nullable s2,
+    const char* absl_nonnull exprtext);
+const char* absl_nullable CheckstrcasecmptrueImpl(
+    const char* absl_nullable s1, const char* absl_nullable s2,
+    const char* absl_nonnull exprtext);
+const char* absl_nullable CheckstrcasecmpfalseImpl(
+    const char* absl_nullable s1, const char* absl_nullable s2,
+    const char* absl_nonnull exprtext);
 
 // `CHECK_EQ` and friends want to pass their arguments by reference, however
 // this winds up exposing lots of cases where people have defined and
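
The CheckOpMessageBuilder comments above spell out the message shape a failing comparison produces. Below is a minimal standalone sketch of that flow, assuming only the documented exprtext / " vs. " formatting (simplified; not the library implementation).

#include <iostream>
#include <sstream>
#include <string>

std::string MakeCheckOpStringSketch(int v1, int v2, const char* exprtext) {
  std::ostringstream stream;
  stream << exprtext << " (";  // constructor: exprtext and " ("
  stream << v1;                // ForVar1()
  stream << " vs. " << v2;     // ForVar2() adds the separator
  stream << ")";               // NewString() closes the message
  return stream.str();
}

int main() {
  // A failing CHECK_EQ(x, y) with x = 1 and y = 2 would report roughly:
  std::cout << MakeCheckOpStringSketch(1, 2, "x == y") << "\n";
  // prints: x == y (1 vs. 2)
}
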
diff --git a/absl/log/internal/log_message.cc b/absl/log/internal/log_message.cc
index 8dc5722..aaaaf03 100644
--- a/absl/log/internal/log_message.cc
+++ b/absl/log/internal/log_message.cc
@@ -147,7 +147,7 @@
 }  // namespace
 
 struct LogMessage::LogMessageData final {
-  LogMessageData(absl::Nonnull<const char*> file, int line,
+  LogMessageData(const char* absl_nonnull file, int line,
                  absl::LogSeverity severity, absl::Time timestamp);
   LogMessageData(const LogMessageData&) = delete;
   LogMessageData& operator=(const LogMessageData&) = delete;
@@ -163,7 +163,7 @@
   bool is_perror;
 
   // Extra `LogSink`s to log to, in addition to `global_sinks`.
-  absl::InlinedVector<absl::Nonnull<absl::LogSink*>, 16> extra_sinks;
+  absl::InlinedVector<absl::LogSink* absl_nonnull, 16> extra_sinks;
   // If true, log to `extra_sinks` but not to `global_sinks` or hardcoded
   // non-sink targets (e.g. stderr, log files).
   bool extra_sinks_only;
@@ -199,7 +199,7 @@
   void FinalizeEncodingAndFormat();
 };
 
-LogMessage::LogMessageData::LogMessageData(absl::Nonnull<const char*> file,
+LogMessage::LogMessageData::LogMessageData(const char* absl_nonnull file,
                                            int line, absl::LogSeverity severity,
                                            absl::Time timestamp)
     : extra_sinks_only(false), manipulated(nullptr) {
@@ -270,7 +270,7 @@
       absl::MakeSpan(string_buf).subspan(0, chars_written);
 }
 
-LogMessage::LogMessage(absl::Nonnull<const char*> file, int line,
+LogMessage::LogMessage(const char* absl_nonnull file, int line,
                        absl::LogSeverity severity)
     : data_(absl::make_unique<LogMessageData>(file, line, severity,
                                               absl::Now())) {
@@ -284,11 +284,11 @@
   LogBacktraceIfNeeded();
 }
 
-LogMessage::LogMessage(absl::Nonnull<const char*> file, int line, InfoTag)
+LogMessage::LogMessage(const char* absl_nonnull file, int line, InfoTag)
     : LogMessage(file, line, absl::LogSeverity::kInfo) {}
-LogMessage::LogMessage(absl::Nonnull<const char*> file, int line, WarningTag)
+LogMessage::LogMessage(const char* absl_nonnull file, int line, WarningTag)
     : LogMessage(file, line, absl::LogSeverity::kWarning) {}
-LogMessage::LogMessage(absl::Nonnull<const char*> file, int line, ErrorTag)
+LogMessage::LogMessage(const char* absl_nonnull file, int line, ErrorTag)
     : LogMessage(file, line, absl::LogSeverity::kError) {}
 
 // This cannot go in the header since LogMessageData is defined in this file.
@@ -343,13 +343,13 @@
   return *this;
 }
 
-LogMessage& LogMessage::ToSinkAlso(absl::Nonnull<absl::LogSink*> sink) {
+LogMessage& LogMessage::ToSinkAlso(absl::LogSink* absl_nonnull sink) {
   ABSL_INTERNAL_CHECK(sink, "null LogSink*");
   data_->extra_sinks.push_back(sink);
   return *this;
 }
 
-LogMessage& LogMessage::ToSinkOnly(absl::Nonnull<absl::LogSink*> sink) {
+LogMessage& LogMessage::ToSinkOnly(absl::LogSink* absl_nonnull sink) {
   ABSL_INTERNAL_CHECK(sink, "null LogSink*");
   data_->extra_sinks.clear();
   data_->extra_sinks.push_back(sink);
@@ -673,11 +673,11 @@
 #pragma warning(disable : 4722)
 #endif
 
-LogMessageFatal::LogMessageFatal(absl::Nonnull<const char*> file, int line)
+LogMessageFatal::LogMessageFatal(const char* absl_nonnull file, int line)
     : LogMessage(file, line, absl::LogSeverity::kFatal) {}
 
-LogMessageFatal::LogMessageFatal(absl::Nonnull<const char*> file, int line,
-                                 absl::Nonnull<const char*> failure_msg)
+LogMessageFatal::LogMessageFatal(const char* absl_nonnull file, int line,
+                                 const char* absl_nonnull failure_msg)
     : LogMessage(file, line, absl::LogSeverity::kFatal) {
   *this << "Check failed: " << failure_msg << " ";
 }
@@ -686,7 +686,7 @@
   FailWithoutStackTrace();
 }
 
-LogMessageDebugFatal::LogMessageDebugFatal(absl::Nonnull<const char*> file,
+LogMessageDebugFatal::LogMessageDebugFatal(const char* absl_nonnull file,
                                            int line)
     : LogMessage(file, line, absl::LogSeverity::kFatal) {}
 
@@ -695,7 +695,7 @@
 }
 
 LogMessageQuietlyDebugFatal::LogMessageQuietlyDebugFatal(
-    absl::Nonnull<const char*> file, int line)
+    const char* absl_nonnull file, int line)
     : LogMessage(file, line, absl::LogSeverity::kFatal) {
   SetFailQuietly();
 }
@@ -704,15 +704,15 @@
   FailQuietly();
 }
 
-LogMessageQuietlyFatal::LogMessageQuietlyFatal(absl::Nonnull<const char*> file,
+LogMessageQuietlyFatal::LogMessageQuietlyFatal(const char* absl_nonnull file,
                                                int line)
     : LogMessage(file, line, absl::LogSeverity::kFatal) {
   SetFailQuietly();
 }
 
 LogMessageQuietlyFatal::LogMessageQuietlyFatal(
-    absl::Nonnull<const char*> file, int line,
-    absl::Nonnull<const char*> failure_msg)
+    const char* absl_nonnull file, int line,
+    const char* absl_nonnull failure_msg)
     : LogMessageQuietlyFatal(file, line) {
   *this << "Check failed: " << failure_msg << " ";
 }
diff --git a/absl/log/internal/log_message.h b/absl/log/internal/log_message.h
index 8acc4a1..de9fb8f 100644
--- a/absl/log/internal/log_message.h
+++ b/absl/log/internal/log_message.h
@@ -62,15 +62,15 @@
   struct ErrorTag {};
 
   // Used for `LOG`.
-  LogMessage(absl::Nonnull<const char*> file, int line,
+  LogMessage(const char* absl_nonnull file, int line,
              absl::LogSeverity severity) ABSL_ATTRIBUTE_COLD;
   // These constructors are slightly smaller/faster to call; the severity is
   // curried into the function pointer.
-  LogMessage(absl::Nonnull<const char*> file, int line,
+  LogMessage(const char* absl_nonnull file, int line,
              InfoTag) ABSL_ATTRIBUTE_COLD ABSL_ATTRIBUTE_NOINLINE;
-  LogMessage(absl::Nonnull<const char*> file, int line,
+  LogMessage(const char* absl_nonnull file, int line,
              WarningTag) ABSL_ATTRIBUTE_COLD ABSL_ATTRIBUTE_NOINLINE;
-  LogMessage(absl::Nonnull<const char*> file, int line,
+  LogMessage(const char* absl_nonnull file, int line,
              ErrorTag) ABSL_ATTRIBUTE_COLD ABSL_ATTRIBUTE_NOINLINE;
   LogMessage(const LogMessage&) = delete;
   LogMessage& operator=(const LogMessage&) = delete;
@@ -102,9 +102,9 @@
   LogMessage& WithPerror();
   // Sends this message to `*sink` in addition to whatever other sinks it would
   // otherwise have been sent to.
-  LogMessage& ToSinkAlso(absl::Nonnull<absl::LogSink*> sink);
+  LogMessage& ToSinkAlso(absl::LogSink* absl_nonnull sink);
   // Sends this message to `*sink` and no others.
-  LogMessage& ToSinkOnly(absl::Nonnull<absl::LogSink*> sink);
+  LogMessage& ToSinkOnly(absl::LogSink* absl_nonnull sink);
 
   // Don't call this method from outside this library.
   LogMessage& InternalStream() { return *this; }
@@ -141,10 +141,10 @@
   LogMessage& operator<<(unsigned long long v) {
     return operator<< <unsigned long long>(v);
   }
-  LogMessage& operator<<(absl::Nullable<void*> v) {
+  LogMessage& operator<<(void* absl_nullable v) {
     return operator<< <void*>(v);
   }
-  LogMessage& operator<<(absl::Nullable<const void*> v) {
+  LogMessage& operator<<(const void* absl_nullable v) {
     return operator<< <const void*>(v);
   }
   LogMessage& operator<<(float v) { return operator<< <float>(v); }
@@ -271,7 +271,7 @@
 
   // We keep the data in a separate struct so that each instance of `LogMessage`
   // uses less stack space.
-  absl::Nonnull<std::unique_ptr<LogMessageData>> data_;
+  absl_nonnull std::unique_ptr<LogMessageData> data_;
 };
 
 // Helper class so that `AbslStringify()` can modify the LogMessage.
@@ -289,7 +289,7 @@
   }
 
   // For types that implement `AbslStringify` using `absl::Format()`.
-  friend void AbslFormatFlush(absl::Nonnull<StringifySink*> sink,
+  friend void AbslFormatFlush(StringifySink* absl_nonnull sink,
                               absl::string_view v) {
     sink->Append(v);
   }
@@ -341,9 +341,9 @@
 extern template LogMessage& LogMessage::operator<<(const long long& v);
 extern template LogMessage& LogMessage::operator<<(const unsigned long long& v);
 extern template LogMessage& LogMessage::operator<<(
-    absl::Nullable<void*> const& v);
+    void* absl_nullable const& v);
 extern template LogMessage& LogMessage::operator<<(
-    absl::Nullable<const void*> const& v);
+    const void* absl_nullable const& v);
 extern template LogMessage& LogMessage::operator<<(const float& v);
 extern template LogMessage& LogMessage::operator<<(const double& v);
 extern template LogMessage& LogMessage::operator<<(const bool& v);
@@ -364,10 +364,9 @@
 // message.
 class LogMessageFatal final : public LogMessage {
  public:
-  LogMessageFatal(absl::Nonnull<const char*> file,
-                  int line) ABSL_ATTRIBUTE_COLD;
-  LogMessageFatal(absl::Nonnull<const char*> file, int line,
-                  absl::Nonnull<const char*> failure_msg) ABSL_ATTRIBUTE_COLD;
+  LogMessageFatal(const char* absl_nonnull file, int line) ABSL_ATTRIBUTE_COLD;
+  LogMessageFatal(const char* absl_nonnull file, int line,
+                  const char* absl_nonnull failure_msg) ABSL_ATTRIBUTE_COLD;
   [[noreturn]] ~LogMessageFatal();
 };
 
@@ -376,7 +375,7 @@
 // for DLOG(FATAL) variants.
 class LogMessageDebugFatal final : public LogMessage {
  public:
-  LogMessageDebugFatal(absl::Nonnull<const char*> file,
+  LogMessageDebugFatal(const char* absl_nonnull file,
                        int line) ABSL_ATTRIBUTE_COLD;
   ~LogMessageDebugFatal();
 };
@@ -386,7 +385,7 @@
   // DLOG(QFATAL) calls this instead of LogMessageQuietlyFatal to make sure the
   // destructor is not [[noreturn]] even if this is always FATAL as this is only
   // invoked when DLOG() is enabled.
-  LogMessageQuietlyDebugFatal(absl::Nonnull<const char*> file,
+  LogMessageQuietlyDebugFatal(const char* absl_nonnull file,
                               int line) ABSL_ATTRIBUTE_COLD;
   ~LogMessageQuietlyDebugFatal();
 };
@@ -394,10 +393,10 @@
 // Used for LOG(QFATAL) to make sure it's properly understood as [[noreturn]].
 class LogMessageQuietlyFatal final : public LogMessage {
  public:
-  LogMessageQuietlyFatal(absl::Nonnull<const char*> file,
+  LogMessageQuietlyFatal(const char* absl_nonnull file,
                          int line) ABSL_ATTRIBUTE_COLD;
-  LogMessageQuietlyFatal(absl::Nonnull<const char*> file, int line,
-                         absl::Nonnull<const char*> failure_msg)
+  LogMessageQuietlyFatal(const char* absl_nonnull file, int line,
+                         const char* absl_nonnull failure_msg)
       ABSL_ATTRIBUTE_COLD;
   [[noreturn]] ~LogMessageQuietlyFatal();
 };
diff --git a/absl/log/log_sink_registry.h b/absl/log/log_sink_registry.h
index 3aa3bf6..a3fa9a3 100644
--- a/absl/log/log_sink_registry.h
+++ b/absl/log/log_sink_registry.h
@@ -44,10 +44,10 @@
 // sink instead which writes them to `stderr`.
 //
 // Do not call these inside `absl::LogSink::Send`.
-inline void AddLogSink(absl::Nonnull<absl::LogSink*> sink) {
+inline void AddLogSink(absl::LogSink* absl_nonnull sink) {
   log_internal::AddLogSink(sink);
 }
-inline void RemoveLogSink(absl::Nonnull<absl::LogSink*> sink) {
+inline void RemoveLogSink(absl::LogSink* absl_nonnull sink) {
   log_internal::RemoveLogSink(sink);
 }
 
diff --git a/absl/random/bit_gen_ref.h b/absl/random/bit_gen_ref.h
index fa36eb7..dfce2c4 100644
--- a/absl/random/bit_gen_ref.h
+++ b/absl/random/bit_gen_ref.h
@@ -31,7 +31,7 @@
 
 #include "absl/base/attributes.h"
 #include "absl/base/config.h"
-#include "absl/base/internal/fast_type_id.h"
+#include "absl/base/fast_type_id.h"
 #include "absl/meta/type_traits.h"
 #include "absl/random/internal/distribution_caller.h"
 #include "absl/random/internal/fast_uniform_bits.h"
@@ -100,7 +100,7 @@
 
   template <class T>
   using invoke_mock_t = decltype(std::declval<T*>()->InvokeMock(
-      std::declval<base_internal::FastTypeIdType>(), std::declval<void*>(),
+      std::declval<FastTypeIdType>(), std::declval<void*>(),
       std::declval<void*>()));
 
   template <typename T>
@@ -145,8 +145,7 @@
 
  private:
   using impl_fn = result_type (*)(uintptr_t);
-  using mock_call_fn = bool (*)(uintptr_t, base_internal::FastTypeIdType, void*,
-                                void*);
+  using mock_call_fn = bool (*)(uintptr_t, FastTypeIdType, void*, void*);
 
   template <typename URBG>
   static result_type ImplFn(uintptr_t ptr) {
@@ -158,16 +157,16 @@
 
   // Get a type-erased InvokeMock pointer.
   template <typename URBG>
-  static bool MockCall(uintptr_t gen_ptr, base_internal::FastTypeIdType key_id,
-                       void* result, void* arg_tuple) {
+  static bool MockCall(uintptr_t gen_ptr, FastTypeIdType key_id, void* result,
+                       void* arg_tuple) {
     return reinterpret_cast<URBG*>(gen_ptr)->InvokeMock(key_id, result,
                                                         arg_tuple);
   }
-  static bool NotAMock(uintptr_t, base_internal::FastTypeIdType, void*, void*) {
+  static bool NotAMock(uintptr_t, FastTypeIdType, void*, void*) {
     return false;
   }
 
-  inline bool InvokeMock(base_internal::FastTypeIdType key_id, void* args_tuple,
+  inline bool InvokeMock(FastTypeIdType key_id, void* args_tuple,
                          void* result) {
     if (mock_call_ == NotAMock) return false;  // avoids an indirect call.
     return mock_call_(t_erased_gen_ptr_, key_id, args_tuple, result);
diff --git a/absl/random/bit_gen_ref_test.cc b/absl/random/bit_gen_ref_test.cc
index 00876d9..d581352 100644
--- a/absl/random/bit_gen_ref_test.cc
+++ b/absl/random/bit_gen_ref_test.cc
@@ -22,7 +22,7 @@
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
 #include "absl/base/config.h"
-#include "absl/base/internal/fast_type_id.h"
+#include "absl/base/fast_type_id.h"
 #include "absl/random/internal/sequence_urbg.h"
 #include "absl/random/random.h"
 
@@ -39,7 +39,7 @@
   result_type operator()() { return 1; }
 
   // InvokeMock method
-  bool InvokeMock(base_internal::FastTypeIdType, void*, void* result) {
+  bool InvokeMock(FastTypeIdType, void*, void* result) {
     *static_cast<int*>(result) = 42;
     return true;
   }
diff --git a/absl/random/internal/distribution_caller.h b/absl/random/internal/distribution_caller.h
index bfe8fac..e84ec8c 100644
--- a/absl/random/internal/distribution_caller.h
+++ b/absl/random/internal/distribution_caller.h
@@ -22,7 +22,7 @@
 #include <utility>
 
 #include "absl/base/config.h"
-#include "absl/base/internal/fast_type_id.h"
+#include "absl/base/fast_type_id.h"
 #include "absl/meta/type_traits.h"
 #include "absl/utility/utility.h"
 
@@ -50,7 +50,7 @@
 
   template <class T>
   using invoke_mock_t = decltype(std::declval<T*>()->InvokeMock(
-      std::declval<base_internal::FastTypeIdType>(), std::declval<void*>(),
+      std::declval<FastTypeIdType>(), std::declval<void*>(),
       std::declval<void*>()));
 
   using HasInvokeMock = typename detector<invoke_mock_t, void, URBG>::type;
@@ -74,8 +74,7 @@
 
     ArgTupleT arg_tuple(std::forward<Args>(args)...);
     ResultT result;
-    if (!urbg->InvokeMock(base_internal::FastTypeId<KeyT>(), &arg_tuple,
-                          &result)) {
+    if (!urbg->InvokeMock(FastTypeId<KeyT>(), &arg_tuple, &result)) {
       auto dist = absl::make_from_tuple<DistrT>(arg_tuple);
       result = dist(*urbg);
     }
diff --git a/absl/random/internal/mock_helpers.h b/absl/random/internal/mock_helpers.h
index b78b251..85f7387 100644
--- a/absl/random/internal/mock_helpers.h
+++ b/absl/random/internal/mock_helpers.h
@@ -19,7 +19,7 @@
 #include <utility>
 
 #include "absl/base/config.h"
-#include "absl/base/internal/fast_type_id.h"
+#include "absl/base/fast_type_id.h"
 #include "absl/types/optional.h"
 
 namespace absl {
@@ -48,7 +48,7 @@
 //   result_type(args...)
 //
 class MockHelpers {
-  using IdType = ::absl::base_internal::FastTypeIdType;
+  using IdType = ::absl::FastTypeIdType;
 
   // Given a key signature type used to index the mock, extract the components.
   // KeyT is expected to have the form:
@@ -82,8 +82,7 @@
                                                 Args&&... args) {
     ArgTupleT arg_tuple(std::forward<Args>(args)...);
     ReturnT result;
-    if (urbg->InvokeMock(base_internal::FastTypeId<KeyT>(), &arg_tuple,
-                         &result)) {
+    if (urbg->InvokeMock(FastTypeId<KeyT>(), &arg_tuple, &result)) {
       return result;
     }
     return absl::nullopt;
@@ -138,7 +137,7 @@
           m, std::declval<IdType>(), ValidatorT())) {
     return m.template RegisterMock<typename KeySignature<KeyT>::result_type,
                                    typename KeySignature<KeyT>::arg_tuple_type>(
-        m, ::absl::base_internal::FastTypeId<KeyT>(), ValidatorT());
+        m, ::absl::FastTypeId<KeyT>(), ValidatorT());
   }
 
   // Acquire a mock for the KeyT (may or may not be a signature).
diff --git a/absl/random/mocking_bit_gen.h b/absl/random/mocking_bit_gen.h
index 7cecd88..1680ff4 100644
--- a/absl/random/mocking_bit_gen.h
+++ b/absl/random/mocking_bit_gen.h
@@ -35,7 +35,7 @@
 
 #include "gmock/gmock.h"
 #include "absl/base/config.h"
-#include "absl/base/internal/fast_type_id.h"
+#include "absl/base/fast_type_id.h"
 #include "absl/container/flat_hash_map.h"
 #include "absl/meta/type_traits.h"
 #include "absl/random/internal/mock_helpers.h"
@@ -175,7 +175,7 @@
   // distribution parameters of the expectation.
   template <typename ResultT, typename ArgTupleT, typename SelfT,
             typename ValidatorT>
-  auto RegisterMock(SelfT&, base_internal::FastTypeIdType type, ValidatorT)
+  auto RegisterMock(SelfT&, FastTypeIdType type, ValidatorT)
       -> decltype(GetMockFnType(std::declval<ResultT>(),
                                 std::declval<ArgTupleT>()))& {
     using MockFnType = decltype(GetMockFnType(std::declval<ResultT>(),
@@ -212,7 +212,7 @@
   // Requires tuple_args to point to a ArgTupleT, which is a std::tuple<Args...>
   // used to invoke the mock function.
   // Requires result to point to a ResultT, which is the result of the call.
-  inline bool InvokeMock(base_internal::FastTypeIdType key_id, void* args_tuple,
+  inline bool InvokeMock(FastTypeIdType key_id, void* args_tuple,
                          void* result) {
     // Trigger a mock, if there exists one that matches `param`.
     auto it = mocks_.find(key_id);
@@ -221,9 +221,7 @@
     return true;
   }
 
-  absl::flat_hash_map<base_internal::FastTypeIdType,
-                      std::unique_ptr<FunctionHolder>>
-      mocks_;
+  absl::flat_hash_map<FastTypeIdType, std::unique_ptr<FunctionHolder>> mocks_;
   absl::BitGen gen_;
 
   template <typename>
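
The InvokeMock plumbing above dispatches on a FastTypeIdType key and falls back to the real distribution when no mock is registered. A rough standalone sketch of that shape (hypothetical types; the real registry stores gMock functors and validators):

#include <functional>
#include <unordered_map>

using TypeIdSketch = const void*;

struct MockRegistrySketch {
  // Type-erased handlers keyed by the mocked call signature's type id.
  std::unordered_map<TypeIdSketch, std::function<void(void*, void*)>> mocks;

  // Mirrors InvokeMock's contract: returns false when nothing matches so the
  // caller runs the real distribution instead.
  bool InvokeMock(TypeIdSketch key_id, void* args_tuple, void* result) {
    auto it = mocks.find(key_id);
    if (it == mocks.end()) return false;
    it->second(args_tuple, result);
    return true;
  }
};
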
diff --git a/absl/status/internal/status_internal.cc b/absl/status/internal/status_internal.cc
index 99bf8fa..9884189 100644
--- a/absl/status/internal/status_internal.cc
+++ b/absl/status/internal/status_internal.cc
@@ -189,7 +189,7 @@
   return true;
 }
 
-absl::Nonnull<StatusRep*> StatusRep::CloneAndUnref() const {
+StatusRep* absl_nonnull StatusRep::CloneAndUnref() const {
   // Optimization: no need to create a clone if we already have a refcount of 1.
   if (ref_.load(std::memory_order_acquire) == 1) {
     // All StatusRep instances are heap allocated and mutable, therefore this
@@ -235,9 +235,8 @@
   }
 }
 
-absl::Nonnull<const char*> MakeCheckFailString(
-    absl::Nonnull<const absl::Status*> status,
-    absl::Nonnull<const char*> prefix) {
+const char* absl_nonnull MakeCheckFailString(
+    const absl::Status* absl_nonnull status, const char* absl_nonnull prefix) {
   // There's no need to free this string since the process is crashing.
   return absl::IgnoreLeak(
              new std::string(absl::StrCat(
diff --git a/absl/status/internal/status_internal.h b/absl/status/internal/status_internal.h
index fe335b0..45b90f3 100644
--- a/absl/status/internal/status_internal.h
+++ b/absl/status/internal/status_internal.h
@@ -100,7 +100,7 @@
   // Returns an equivalent heap allocated StatusRep with refcount 1.
   //
   // `this` is not safe to be used after calling as it may have been deleted.
-  absl::Nonnull<StatusRep*> CloneAndUnref() const;
+  StatusRep* absl_nonnull CloneAndUnref() const;
 
  private:
   mutable std::atomic<int32_t> ref_;
@@ -120,9 +120,8 @@
 //
 // This is an internal implementation detail for Abseil logging.
 ABSL_ATTRIBUTE_PURE_FUNCTION
-absl::Nonnull<const char*> MakeCheckFailString(
-    absl::Nonnull<const absl::Status*> status,
-    absl::Nonnull<const char*> prefix);
+const char* absl_nonnull MakeCheckFailString(
+    const absl::Status* absl_nonnull status, const char* absl_nonnull prefix);
 
 }  // namespace status_internal
 
diff --git a/absl/status/internal/statusor_internal.h b/absl/status/internal/statusor_internal.h
index 6760315..ca7c550 100644
--- a/absl/status/internal/statusor_internal.h
+++ b/absl/status/internal/statusor_internal.h
@@ -185,7 +185,7 @@
 class Helper {
  public:
   // Move type-agnostic error handling to the .cc.
-  static void HandleInvalidStatusCtorArg(absl::Nonnull<Status*>);
+  static void HandleInvalidStatusCtorArg(Status* absl_nonnull);
   [[noreturn]] static void Crash(const absl::Status& status);
 };
 
@@ -194,7 +194,7 @@
 // This abstraction is here mostly for the gcc performance fix.
 template <typename T, typename... Args>
 ABSL_ATTRIBUTE_NONNULL(1)
-void PlacementNew(absl::Nonnull<void*> p, Args&&... args) {
+void PlacementNew(void* absl_nonnull p, Args&&... args) {
   new (p) T(std::forward<Args>(args)...);
 }
 
diff --git a/absl/status/status.cc b/absl/status/status.cc
index 72a2526..963dab6 100644
--- a/absl/status/status.cc
+++ b/absl/status/status.cc
@@ -91,12 +91,12 @@
   return os << StatusCodeToString(code);
 }
 
-absl::Nonnull<const std::string*> Status::EmptyString() {
+const std::string* absl_nonnull Status::EmptyString() {
   static const absl::NoDestructor<std::string> kEmpty;
   return kEmpty.get();
 }
 
-absl::Nonnull<const std::string*> Status::MovedFromString() {
+const std::string* absl_nonnull Status::MovedFromString() {
   static const absl::NoDestructor<std::string> kMovedFrom(kMovedFromString);
   return kMovedFrom.get();
 }
@@ -108,7 +108,7 @@
   }
 }
 
-absl::Nonnull<status_internal::StatusRep*> Status::PrepareToModify(
+status_internal::StatusRep* absl_nonnull Status::PrepareToModify(
     uintptr_t rep) {
   if (IsInlined(rep)) {
     return new status_internal::StatusRep(InlinedRepToCode(rep),
@@ -406,7 +406,7 @@
                 MessageForErrnoToStatus(error_number, message));
 }
 
-absl::Nonnull<const char*> StatusMessageAsCStr(const Status& status) {
+const char* absl_nonnull StatusMessageAsCStr(const Status& status) {
   // As an internal implementation detail, we guarantee that if status.message()
   // is non-empty, then the resulting string_view is null terminated.
   auto sv_message = status.message();
diff --git a/absl/status/status.h b/absl/status/status.h
index 02fd296..4516822 100644
--- a/absl/status/status.h
+++ b/absl/status/status.h
@@ -623,15 +623,15 @@
 
   // REQUIRES: !ok()
   // Ensures rep is not inlined or shared with any other Status.
-  static absl::Nonnull<status_internal::StatusRep*> PrepareToModify(
+  static status_internal::StatusRep* absl_nonnull PrepareToModify(
       uintptr_t rep);
 
   // MSVC 14.0 limitation requires the const.
   static constexpr const char kMovedFromString[] =
       "Status accessed after move.";
 
-  static absl::Nonnull<const std::string*> EmptyString();
-  static absl::Nonnull<const std::string*> MovedFromString();
+  static const std::string* absl_nonnull EmptyString();
+  static const std::string* absl_nonnull MovedFromString();
 
   // Returns whether rep contains an inlined representation.
   // See rep_ for details.
@@ -649,8 +649,8 @@
 
   // Converts between StatusRep* and the external uintptr_t representation used
   // by rep_. See rep_ for details.
-  static uintptr_t PointerToRep(absl::Nonnull<status_internal::StatusRep*> r);
-  static absl::Nonnull<const status_internal::StatusRep*> RepToPointer(
+  static uintptr_t PointerToRep(status_internal::StatusRep* absl_nonnull r);
+  static const status_internal::StatusRep* absl_nonnull RepToPointer(
       uintptr_t r);
 
   static std::string ToStringSlow(uintptr_t rep, StatusToStringMode mode);
@@ -902,14 +902,14 @@
   return CodeToInlinedRep(absl::StatusCode::kInternal) | 2;
 }
 
-inline absl::Nonnull<const status_internal::StatusRep*> Status::RepToPointer(
+inline const status_internal::StatusRep* absl_nonnull Status::RepToPointer(
     uintptr_t rep) {
   assert(!IsInlined(rep));
   return reinterpret_cast<const status_internal::StatusRep*>(rep);
 }
 
 inline uintptr_t Status::PointerToRep(
-    absl::Nonnull<status_internal::StatusRep*> rep) {
+    status_internal::StatusRep* absl_nonnull rep) {
   return reinterpret_cast<uintptr_t>(rep);
 }
 
@@ -934,7 +934,7 @@
 // If the status's message is empty, the empty string is returned.
 //
 // StatusMessageAsCStr exists for C support. Use `status.message()` in C++.
-absl::Nonnull<const char*> StatusMessageAsCStr(
+const char* absl_nonnull StatusMessageAsCStr(
     const Status& status ABSL_ATTRIBUTE_LIFETIME_BOUND);
 
 ABSL_NAMESPACE_END
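
A minimal usage sketch of `StatusMessageAsCStr()`, whose return annotation changes in the hunk above (the `LogStatus` helper is hypothetical, not part of the patch):

#include <cstdio>

#include "absl/status/status.h"

// Hypothetical helper: pass a Status message to a C-style API.
// StatusMessageAsCStr() is guaranteed to return a non-null, null-terminated
// string, which is exactly what the absl_nonnull annotation documents.
void LogStatus(const absl::Status& status) {
  std::printf("status: %s\n", absl::StatusMessageAsCStr(status));
}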
diff --git a/absl/status/statusor.cc b/absl/status/statusor.cc
index 7e6b334..d8f66a6 100644
--- a/absl/status/statusor.cc
+++ b/absl/status/statusor.cc
@@ -55,7 +55,7 @@
 BadStatusOrAccess::BadStatusOrAccess(BadStatusOrAccess&& other)
     : status_(std::move(other.status_)) {}
 
-absl::Nonnull<const char*> BadStatusOrAccess::what() const noexcept {
+const char* absl_nonnull BadStatusOrAccess::what() const noexcept {
   InitWhat();
   return what_.c_str();
 }
@@ -70,7 +70,7 @@
 
 namespace internal_statusor {
 
-void Helper::HandleInvalidStatusCtorArg(absl::Nonnull<absl::Status*> status) {
+void Helper::HandleInvalidStatusCtorArg(absl::Status* absl_nonnull status) {
   const char* kMessage =
       "An OK status is not a valid constructor argument to StatusOr<T>";
 #ifdef NDEBUG
diff --git a/absl/status/statusor.h b/absl/status/statusor.h
index 322d448..5257af0 100644
--- a/absl/status/statusor.h
+++ b/absl/status/statusor.h
@@ -93,7 +93,7 @@
   //
   // The pointer of this string is guaranteed to be valid until any non-const
   // function is invoked on the exception object.
-  absl::Nonnull<const char*> what() const noexcept override;
+  const char* absl_nonnull what() const noexcept override;
 
   // BadStatusOrAccess::status()
   //
@@ -756,13 +756,13 @@
 }
 
 template <typename T>
-absl::Nonnull<const T*> StatusOr<T>::operator->() const {
+const T* absl_nonnull StatusOr<T>::operator->() const {
   this->EnsureOk();
   return &this->data_;
 }
 
 template <typename T>
-absl::Nonnull<T*> StatusOr<T>::operator->() {
+T* absl_nonnull StatusOr<T>::operator->() {
   this->EnsureOk();
   return &this->data_;
 }
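
For the `StatusOr<T>::operator->` overloads touched above, a short illustrative sketch (`Demo` is an invented name): the returned pointer can carry the non-null annotation because `EnsureOk()` aborts before any bad access, so the address of the contained value is always valid.

#include <cstddef>
#include <string>

#include "absl/status/statusor.h"

void Demo() {
  absl::StatusOr<std::string> name = std::string("abseil");
  if (name.ok()) {
    // operator->() now returns `const std::string* absl_nonnull`.
    std::size_t n = name->size();
    (void)n;
  }
}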
diff --git a/absl/strings/ascii.cc b/absl/strings/ascii.cc
index d15e424..4cd9ff9 100644
--- a/absl/strings/ascii.cc
+++ b/absl/strings/ascii.cc
@@ -183,8 +183,8 @@
 }
 
 template <bool ToUpper, bool Naive>
-constexpr void AsciiStrCaseFoldImpl(absl::Nonnull<char*> dst,
-                                    absl::Nullable<const char*> src,
+constexpr void AsciiStrCaseFoldImpl(char* absl_nonnull dst,
+                                    const char* absl_nullable src,
                                     size_t size) {
   // The upper- and lowercase versions of ASCII characters differ by only 1 bit.
   // When we need to flip the case, we can xor with this bit to achieve the
@@ -211,18 +211,18 @@
 // strings it's not important).
 // `src` may be null iff `size` is zero.
 template <bool ToUpper>
-constexpr void AsciiStrCaseFold(absl::Nonnull<char*> dst,
-                                absl::Nullable<const char*> src, size_t size) {
+constexpr void AsciiStrCaseFold(char* absl_nonnull dst,
+                                const char* absl_nullable src, size_t size) {
   size < 16 ? AsciiStrCaseFoldImpl<ToUpper, /*Naive=*/true>(dst, src, size)
             : AsciiStrCaseFoldImpl<ToUpper, /*Naive=*/false>(dst, src, size);
 }
 
-void AsciiStrToLower(absl::Nonnull<char*> dst, absl::Nullable<const char*> src,
+void AsciiStrToLower(char* absl_nonnull dst, const char* absl_nullable src,
                      size_t n) {
   return AsciiStrCaseFold<false>(dst, src, n);
 }
 
-void AsciiStrToUpper(absl::Nonnull<char*> dst, absl::Nullable<const char*> src,
+void AsciiStrToUpper(char* absl_nonnull dst, const char* absl_nullable src,
                      size_t n) {
   return AsciiStrCaseFold<true>(dst, src, n);
 }
@@ -253,17 +253,17 @@
 
 }  // namespace ascii_internal
 
-void AsciiStrToLower(absl::Nonnull<std::string*> s) {
+void AsciiStrToLower(std::string* absl_nonnull s) {
   char* p = &(*s)[0];
   return ascii_internal::AsciiStrCaseFold<false>(p, p, s->size());
 }
 
-void AsciiStrToUpper(absl::Nonnull<std::string*> s) {
+void AsciiStrToUpper(std::string* absl_nonnull s) {
   char* p = &(*s)[0];
   return ascii_internal::AsciiStrCaseFold<true>(p, p, s->size());
 }
 
-void RemoveExtraAsciiWhitespace(absl::Nonnull<std::string*> str) {
+void RemoveExtraAsciiWhitespace(std::string* absl_nonnull str) {
   auto stripped = StripAsciiWhitespace(*str);
 
   if (stripped.empty()) {
diff --git a/absl/strings/ascii.h b/absl/strings/ascii.h
index bf60bae..ca0747e 100644
--- a/absl/strings/ascii.h
+++ b/absl/strings/ascii.h
@@ -76,10 +76,10 @@
 // Declaration for the array of characters to lower-case characters.
 ABSL_DLL extern const char kToLower[256];
 
-void AsciiStrToLower(absl::Nonnull<char*> dst, absl::Nullable<const char*> src,
+void AsciiStrToLower(char* absl_nonnull dst, const char* absl_nullable src,
                      size_t n);
 
-void AsciiStrToUpper(absl::Nonnull<char*> dst, absl::Nullable<const char*> src,
+void AsciiStrToUpper(char* absl_nonnull dst, const char* absl_nullable src,
                      size_t n);
 
 }  // namespace ascii_internal
@@ -185,7 +185,7 @@
 }
 
 // Converts the characters in `s` to lowercase, changing the contents of `s`.
-void AsciiStrToLower(absl::Nonnull<std::string*> s);
+void AsciiStrToLower(std::string* absl_nonnull s);
 
 // Creates a lowercase string from a given absl::string_view.
 [[nodiscard]] inline std::string AsciiStrToLower(absl::string_view s) {
@@ -214,7 +214,7 @@
 }
 
 // Converts the characters in `s` to uppercase, changing the contents of `s`.
-void AsciiStrToUpper(absl::Nonnull<std::string*> s);
+void AsciiStrToUpper(std::string* absl_nonnull s);
 
 // Creates an uppercase string from a given absl::string_view.
 [[nodiscard]] inline std::string AsciiStrToUpper(absl::string_view s) {
@@ -243,7 +243,7 @@
 }
 
 // Strips in place whitespace from the beginning of the given string.
-inline void StripLeadingAsciiWhitespace(absl::Nonnull<std::string*> str) {
+inline void StripLeadingAsciiWhitespace(std::string* absl_nonnull str) {
   auto it = std::find_if_not(str->begin(), str->end(), absl::ascii_isspace);
   str->erase(str->begin(), it);
 }
@@ -257,7 +257,7 @@
 }
 
 // Strips in place whitespace from the end of the given string
-inline void StripTrailingAsciiWhitespace(absl::Nonnull<std::string*> str) {
+inline void StripTrailingAsciiWhitespace(std::string* absl_nonnull str) {
   auto it = std::find_if_not(str->rbegin(), str->rend(), absl::ascii_isspace);
   str->erase(static_cast<size_t>(str->rend() - it));
 }
@@ -270,13 +270,13 @@
 }
 
 // Strips in place whitespace from both ends of the given string
-inline void StripAsciiWhitespace(absl::Nonnull<std::string*> str) {
+inline void StripAsciiWhitespace(std::string* absl_nonnull str) {
   StripTrailingAsciiWhitespace(str);
   StripLeadingAsciiWhitespace(str);
 }
 
 // Removes leading, trailing, and consecutive internal whitespace.
-void RemoveExtraAsciiWhitespace(absl::Nonnull<std::string*> str);
+void RemoveExtraAsciiWhitespace(std::string* absl_nonnull str);
 
 ABSL_NAMESPACE_END
 }  // namespace absl
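
A brief sketch of the in-place overloads whose parameter annotations change above; `NormalizeToken` is an invented name, while the two calls are the existing public API.

#include <string>

#include "absl/strings/ascii.h"

void NormalizeToken(std::string& token) {
  // Both overloads take `std::string* absl_nonnull`: the pointer must not be
  // null, and the string is modified in place.
  absl::StripAsciiWhitespace(&token);
  absl::AsciiStrToLower(&token);
}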
diff --git a/absl/strings/charconv.cc b/absl/strings/charconv.cc
index 681eb93..6f36773 100644
--- a/absl/strings/charconv.cc
+++ b/absl/strings/charconv.cc
@@ -120,7 +120,7 @@
   // Parsing a smaller N will produce something finite.
   static constexpr int kEiselLemireMaxExclusiveExp10 = 309;
 
-  static double MakeNan(absl::Nonnull<const char*> tagp) {
+  static double MakeNan(const char* absl_nonnull tagp) {
 #if ABSL_HAVE_BUILTIN(__builtin_nan)
     // Use __builtin_nan() if available since it has a fix for
     // https://bugs.llvm.org/show_bug.cgi?id=37778
@@ -193,7 +193,7 @@
   static constexpr int kEiselLemireMinInclusiveExp10 = -46 - 18;
   static constexpr int kEiselLemireMaxExclusiveExp10 = 39;
 
-  static float MakeNan(absl::Nonnull<const char*> tagp) {
+  static float MakeNan(const char* absl_nonnull tagp) {
 #if ABSL_HAVE_BUILTIN(__builtin_nanf)
     // Use __builtin_nanf() if available since it has a fix for
     // https://bugs.llvm.org/show_bug.cgi?id=37778
@@ -345,7 +345,7 @@
 // `value` must be wider than the requested bit width.
 //
 // Returns the number of bits shifted.
-int TruncateToBitWidth(int bit_width, absl::Nonnull<uint128*> value) {
+int TruncateToBitWidth(int bit_width, uint128* absl_nonnull value) {
   const int current_bit_width = BitWidth(*value);
   const int shift = current_bit_width - bit_width;
   *value >>= shift;
@@ -357,7 +357,7 @@
 // the appropriate double, and returns true.
 template <typename FloatType>
 bool HandleEdgeCase(const strings_internal::ParsedFloat& input, bool negative,
-                    absl::Nonnull<FloatType*> value) {
+                    FloatType* absl_nonnull value) {
   if (input.type == strings_internal::FloatType::kNan) {
     // A bug in gcc would cause the compiler to optimize away the buffer we are
     // building below.  Declaring the buffer volatile avoids the issue, and has
@@ -403,8 +403,8 @@
 // number is stored in *value.
 template <typename FloatType>
 void EncodeResult(const CalculatedFloat& calculated, bool negative,
-                  absl::Nonnull<absl::from_chars_result*> result,
-                  absl::Nonnull<FloatType*> value) {
+                  absl::from_chars_result* absl_nonnull result,
+                  FloatType* absl_nonnull value) {
   if (calculated.exponent == kOverflow) {
     result->ec = std::errc::result_out_of_range;
     *value = negative ? -std::numeric_limits<FloatType>::max()
@@ -450,7 +450,7 @@
 // Zero and negative values of `shift` are accepted, in which case the word is
 // shifted left, as necessary.
 uint64_t ShiftRightAndRound(uint128 value, int shift, bool input_exact,
-                            absl::Nonnull<bool*> output_exact) {
+                            bool* absl_nonnull output_exact) {
   if (shift <= 0) {
     *output_exact = input_exact;
     return static_cast<uint64_t>(value << -shift);
@@ -684,8 +684,7 @@
 // this function returns false) is both fast and correct.
 template <typename FloatType>
 bool EiselLemire(const strings_internal::ParsedFloat& input, bool negative,
-                 absl::Nonnull<FloatType*> value,
-                 absl::Nonnull<std::errc*> ec) {
+                 FloatType* absl_nonnull value, std::errc* absl_nonnull ec) {
   uint64_t man = input.mantissa;
   int exp10 = input.exponent;
   if (exp10 < FloatTraits<FloatType>::kEiselLemireMinInclusiveExp10) {
@@ -858,9 +857,9 @@
 }
 
 template <typename FloatType>
-from_chars_result FromCharsImpl(absl::Nonnull<const char*> first,
-                                absl::Nonnull<const char*> last,
-                                FloatType& value, chars_format fmt_flags) {
+from_chars_result FromCharsImpl(const char* absl_nonnull first,
+                                const char* absl_nonnull last, FloatType& value,
+                                chars_format fmt_flags) {
   from_chars_result result;
   result.ptr = first;  // overwritten on successful parse
   result.ec = std::errc();
@@ -945,14 +944,14 @@
 }
 }  // namespace
 
-from_chars_result from_chars(absl::Nonnull<const char*> first,
-                             absl::Nonnull<const char*> last, double& value,
+from_chars_result from_chars(const char* absl_nonnull first,
+                             const char* absl_nonnull last, double& value,
                              chars_format fmt) {
   return FromCharsImpl(first, last, value, fmt);
 }
 
-from_chars_result from_chars(absl::Nonnull<const char*> first,
-                             absl::Nonnull<const char*> last, float& value,
+from_chars_result from_chars(const char* absl_nonnull first,
+                             const char* absl_nonnull last, float& value,
                              chars_format fmt) {
   return FromCharsImpl(first, last, value, fmt);
 }
diff --git a/absl/strings/charconv.h b/absl/strings/charconv.h
index be25090..e5733f8 100644
--- a/absl/strings/charconv.h
+++ b/absl/strings/charconv.h
@@ -45,7 +45,7 @@
 // characters that were successfully parsed.  If none was found, `ptr` is set
 // to the `first` argument to from_chars.
 struct from_chars_result {
-  absl::Nonnull<const char*> ptr;
+  const char* absl_nonnull ptr;
   std::errc ec;
 };
 
@@ -77,13 +77,13 @@
 // format that strtod() accepts, except that a "0x" prefix is NOT matched.
 // (In particular, in `hex` mode, the input "0xff" results in the largest
 // matching pattern "0".)
-absl::from_chars_result from_chars(absl::Nonnull<const char*> first,
-                                   absl::Nonnull<const char*> last,
+absl::from_chars_result from_chars(const char* absl_nonnull first,
+                                   const char* absl_nonnull last,
                                    double& value,  // NOLINT
                                    chars_format fmt = chars_format::general);
 
-absl::from_chars_result from_chars(absl::Nonnull<const char*> first,
-                                   absl::Nonnull<const char*> last,
+absl::from_chars_result from_chars(const char* absl_nonnull first,
+                                   const char* absl_nonnull last,
                                    float& value,  // NOLINT
                                    chars_format fmt = chars_format::general);
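
A hedged usage sketch for `absl::from_chars` as declared above (`ParseDemo` is illustrative only): `result.ptr` carries the non-null annotation because it always points either just past the parsed number or back at `first`.

#include <string>
#include <system_error>

#include "absl/strings/charconv.h"

void ParseDemo(const std::string& text) {
  double value = 0.0;
  absl::from_chars_result result =
      absl::from_chars(text.data(), text.data() + text.size(), value);
  if (result.ec == std::errc()) {
    // Parse succeeded; `result.ptr` points one past the last consumed char.
    (void)value;
  }
}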
 
diff --git a/absl/strings/cord.cc b/absl/strings/cord.cc
index 287458a..e53f914 100644
--- a/absl/strings/cord.cc
+++ b/absl/strings/cord.cc
@@ -75,21 +75,19 @@
 using ::absl::cord_internal::kInlinedVectorSize;
 using ::absl::cord_internal::kMaxBytesToCopy;
 
-static void DumpNode(absl::Nonnull<CordRep*> nonnull_rep, bool include_data,
-                     absl::Nonnull<std::ostream*> os, int indent = 0);
-static bool VerifyNode(absl::Nonnull<CordRep*> root,
-                       absl::Nonnull<CordRep*> start_node);
+static void DumpNode(CordRep* absl_nonnull nonnull_rep, bool include_data,
+                     std::ostream* absl_nonnull os, int indent = 0);
+static bool VerifyNode(CordRep* absl_nonnull root,
+                       CordRep* absl_nonnull start_node);
 
-static inline absl::Nullable<CordRep*> VerifyTree(
-    absl::Nullable<CordRep*> node) {
+static inline CordRep* absl_nullable VerifyTree(CordRep* absl_nullable node) {
   assert(node == nullptr || VerifyNode(node, node));
   static_cast<void>(&VerifyNode);
   return node;
 }
 
-static absl::Nonnull<CordRepFlat*> CreateFlat(absl::Nonnull<const char*> data,
-                                              size_t length,
-                                              size_t alloc_hint) {
+static CordRepFlat* absl_nonnull CreateFlat(const char* absl_nonnull data,
+                                            size_t length, size_t alloc_hint) {
   CordRepFlat* flat = CordRepFlat::New(length + alloc_hint);
   flat->length = length;
   memcpy(flat->Data(), data, length);
@@ -98,8 +96,8 @@
 
 // Creates a new flat or Btree out of the specified array.
 // The returned node has a refcount of 1.
-static absl::Nonnull<CordRep*> NewBtree(absl::Nonnull<const char*> data,
-                                        size_t length, size_t alloc_hint) {
+static CordRep* absl_nonnull NewBtree(const char* absl_nonnull data,
+                                      size_t length, size_t alloc_hint) {
   if (length <= kMaxFlatLength) {
     return CreateFlat(data, length, alloc_hint);
   }
@@ -112,8 +110,8 @@
 
 // Create a new tree out of the specified array.
 // The returned node has a refcount of 1.
-static absl::Nullable<CordRep*> NewTree(absl::Nullable<const char*> data,
-                                        size_t length, size_t alloc_hint) {
+static CordRep* absl_nullable NewTree(const char* absl_nullable data,
+                                      size_t length, size_t alloc_hint) {
   if (length == 0) return nullptr;
   return NewBtree(data, length, alloc_hint);
 }
@@ -121,7 +119,7 @@
 namespace cord_internal {
 
 void InitializeCordRepExternal(absl::string_view data,
-                               absl::Nonnull<CordRepExternal*> rep) {
+                               CordRepExternal* absl_nonnull rep) {
   assert(!data.empty());
   rep->length = data.size();
   rep->tag = EXTERNAL;
@@ -135,7 +133,7 @@
 // and not wasteful, we move the string into an external cord rep, preserving
 // the already allocated string contents.
 // Requires the provided string length to be larger than `kMaxInline`.
-static absl::Nonnull<CordRep*> CordRepFromString(std::string&& src) {
+static CordRep* absl_nonnull CordRepFromString(std::string&& src) {
   assert(src.length() > cord_internal::kMaxInline);
   if (
       // String is short: copy data to avoid external block overhead.
@@ -163,13 +161,12 @@
 // --------------------------------------------------------------------
 // Cord::InlineRep functions
 
-inline void Cord::InlineRep::set_data(absl::Nonnull<const char*> data,
-                                      size_t n) {
+inline void Cord::InlineRep::set_data(const char* absl_nonnull data, size_t n) {
   static_assert(kMaxInline == 15, "set_data is hard-coded for a length of 15");
   data_.set_inline_data(data, n);
 }
 
-inline absl::Nonnull<char*> Cord::InlineRep::set_data(size_t n) {
+inline char* absl_nonnull Cord::InlineRep::set_data(size_t n) {
   assert(n <= kMaxInline);
   ResetToEmpty();
   set_inline_size(n);
@@ -193,13 +190,13 @@
 
 // Returns `rep` converted into a CordRepBtree.
 // Directly returns `rep` if `rep` is already a CordRepBtree.
-static absl::Nonnull<CordRepBtree*> ForceBtree(CordRep* rep) {
+static CordRepBtree* absl_nonnull ForceBtree(CordRep* rep) {
   return rep->IsBtree()
              ? rep->btree()
              : CordRepBtree::Create(cord_internal::RemoveCrcNode(rep));
 }
 
-void Cord::InlineRep::AppendTreeToInlined(absl::Nonnull<CordRep*> tree,
+void Cord::InlineRep::AppendTreeToInlined(CordRep* absl_nonnull tree,
                                           MethodIdentifier method) {
   assert(!is_tree());
   if (!data_.is_empty()) {
@@ -209,7 +206,7 @@
   EmplaceTree(tree, method);
 }
 
-void Cord::InlineRep::AppendTreeToTree(absl::Nonnull<CordRep*> tree,
+void Cord::InlineRep::AppendTreeToTree(CordRep* absl_nonnull tree,
                                        MethodIdentifier method) {
   assert(is_tree());
   const CordzUpdateScope scope(data_.cordz_info(), method);
@@ -217,7 +214,7 @@
   SetTree(tree, scope);
 }
 
-void Cord::InlineRep::AppendTree(absl::Nonnull<CordRep*> tree,
+void Cord::InlineRep::AppendTree(CordRep* absl_nonnull tree,
                                  MethodIdentifier method) {
   assert(tree != nullptr);
   assert(tree->length != 0);
@@ -229,7 +226,7 @@
   }
 }
 
-void Cord::InlineRep::PrependTreeToInlined(absl::Nonnull<CordRep*> tree,
+void Cord::InlineRep::PrependTreeToInlined(CordRep* absl_nonnull tree,
                                            MethodIdentifier method) {
   assert(!is_tree());
   if (!data_.is_empty()) {
@@ -239,7 +236,7 @@
   EmplaceTree(tree, method);
 }
 
-void Cord::InlineRep::PrependTreeToTree(absl::Nonnull<CordRep*> tree,
+void Cord::InlineRep::PrependTreeToTree(CordRep* absl_nonnull tree,
                                         MethodIdentifier method) {
   assert(is_tree());
   const CordzUpdateScope scope(data_.cordz_info(), method);
@@ -247,7 +244,7 @@
   SetTree(tree, scope);
 }
 
-void Cord::InlineRep::PrependTree(absl::Nonnull<CordRep*> tree,
+void Cord::InlineRep::PrependTree(CordRep* absl_nonnull tree,
                                   MethodIdentifier method) {
   assert(tree != nullptr);
   assert(tree->length != 0);
@@ -263,9 +260,10 @@
 // suitable leaf is found, the function will update the length field for all
 // nodes to account for the size increase. The append region address will be
 // written to region and the actual size increase will be written to size.
-static inline bool PrepareAppendRegion(
-    absl::Nonnull<CordRep*> root, absl::Nonnull<absl::Nullable<char*>*> region,
-    absl::Nonnull<size_t*> size, size_t max_length) {
+static inline bool PrepareAppendRegion(CordRep* absl_nonnull root,
+                                       char* absl_nullable* absl_nonnull region,
+                                       size_t* absl_nonnull size,
+                                       size_t max_length) {
   if (root->IsBtree() && root->refcount.IsOne()) {
     Span<char> span = root->btree()->GetAppendBuffer(max_length);
     if (!span.empty()) {
@@ -468,11 +466,11 @@
   CommitTree(root, rep, scope, method);
 }
 
-inline absl::Nonnull<CordRep*> Cord::TakeRep() const& {
+inline CordRep* absl_nonnull Cord::TakeRep() const& {
   return CordRep::Ref(contents_.tree());
 }
 
-inline absl::Nonnull<CordRep*> Cord::TakeRep() && {
+inline CordRep* absl_nonnull Cord::TakeRep() && {
   CordRep* rep = contents_.tree();
   contents_.clear();
   return rep;
@@ -530,7 +528,7 @@
   contents_.AppendTree(rep, CordzUpdateTracker::kAppendCord);
 }
 
-static CordRep::ExtractResult ExtractAppendBuffer(absl::Nonnull<CordRep*> rep,
+static CordRep::ExtractResult ExtractAppendBuffer(CordRep* absl_nonnull rep,
                                                   size_t min_capacity) {
   switch (rep->tag) {
     case cord_internal::BTREE:
@@ -777,9 +775,9 @@
   return static_cast<int>(memcmp_res > 0) - static_cast<int>(memcmp_res < 0);
 }
 
-int CompareChunks(absl::Nonnull<absl::string_view*> lhs,
-                  absl::Nonnull<absl::string_view*> rhs,
-                  absl::Nonnull<size_t*> size_to_compare) {
+int CompareChunks(absl::string_view* absl_nonnull lhs,
+                  absl::string_view* absl_nonnull rhs,
+                  size_t* absl_nonnull size_to_compare) {
   size_t compared_size = std::min(lhs->size(), rhs->size());
   assert(*size_to_compare >= compared_size);
   *size_to_compare -= compared_size;
@@ -877,7 +875,7 @@
   SetCrcCordState(std::move(state));
 }
 
-absl::Nullable<const crc_internal::CrcCordState*> Cord::MaybeGetCrcCordState()
+const crc_internal::CrcCordState* absl_nullable Cord::MaybeGetCrcCordState()
     const {
   if (!contents_.is_tree() || !contents_.tree()->IsCrc()) {
     return nullptr;
@@ -895,8 +893,8 @@
 
 inline int Cord::CompareSlowPath(absl::string_view rhs, size_t compared_size,
                                  size_t size_to_compare) const {
-  auto advance = [](absl::Nonnull<Cord::ChunkIterator*> it,
-                    absl::Nonnull<absl::string_view*> chunk) {
+  auto advance = [](Cord::ChunkIterator* absl_nonnull it,
+                    absl::string_view* absl_nonnull chunk) {
     if (!chunk->empty()) return true;
     ++*it;
     if (it->bytes_remaining_ == 0) return false;
@@ -926,8 +924,8 @@
 
 inline int Cord::CompareSlowPath(const Cord& rhs, size_t compared_size,
                                  size_t size_to_compare) const {
-  auto advance = [](absl::Nonnull<Cord::ChunkIterator*> it,
-                    absl::Nonnull<absl::string_view*> chunk) {
+  auto advance = [](Cord::ChunkIterator* absl_nonnull it,
+                    absl::string_view* absl_nonnull chunk) {
     if (!chunk->empty()) return true;
     ++*it;
     if (it->bytes_remaining_ == 0) return false;
@@ -1051,7 +1049,7 @@
   return s;
 }
 
-void CopyCordToString(const Cord& src, absl::Nonnull<std::string*> dst) {
+void CopyCordToString(const Cord& src, std::string* absl_nonnull dst) {
   if (!src.contents_.is_tree()) {
     src.contents_.CopyTo(dst);
   } else {
@@ -1060,7 +1058,7 @@
   }
 }
 
-void AppendCordToString(const Cord& src, absl::Nonnull<std::string*> dst) {
+void AppendCordToString(const Cord& src, std::string* absl_nonnull dst) {
   const size_t cur_dst_size = dst->size();
   const size_t new_dst_size = cur_dst_size + src.size();
   absl::strings_internal::STLStringResizeUninitializedAmortized(dst,
@@ -1069,7 +1067,7 @@
   src.CopyToArrayImpl(append_ptr);
 }
 
-void Cord::CopyToArraySlowPath(absl::Nonnull<char*> dst) const {
+void Cord::CopyToArraySlowPath(char* absl_nonnull dst) const {
   assert(contents_.is_tree());
   absl::string_view fragment;
   if (GetFlatAux(contents_.tree(), &fragment) && !fragment.empty()) {
@@ -1395,8 +1393,8 @@
   return absl::string_view(new_buffer, total_size);
 }
 
-/* static */ bool Cord::GetFlatAux(absl::Nonnull<CordRep*> rep,
-                                   absl::Nonnull<absl::string_view*> fragment) {
+/* static */ bool Cord::GetFlatAux(CordRep* absl_nonnull rep,
+                                   absl::string_view* absl_nonnull fragment) {
   assert(rep != nullptr);
   if (rep->length == 0) {
     *fragment = absl::string_view();
@@ -1430,7 +1428,7 @@
 }
 
 /* static */ void Cord::ForEachChunkAux(
-    absl::Nonnull<absl::cord_internal::CordRep*> rep,
+    absl::cord_internal::CordRep* absl_nonnull rep,
     absl::FunctionRef<void(absl::string_view)> callback) {
   assert(rep != nullptr);
   if (rep->length == 0) return;
@@ -1455,8 +1453,8 @@
   }
 }
 
-static void DumpNode(absl::Nonnull<CordRep*> nonnull_rep, bool include_data,
-                     absl::Nonnull<std::ostream*> os, int indent) {
+static void DumpNode(CordRep* absl_nonnull nonnull_rep, bool include_data,
+                     std::ostream* absl_nonnull os, int indent) {
   CordRep* rep = nonnull_rep;
   const int kIndentStep = 1;
   for (;;) {
@@ -1502,17 +1500,17 @@
   }
 }
 
-static std::string ReportError(absl::Nonnull<CordRep*> root,
-                               absl::Nonnull<CordRep*> node) {
+static std::string ReportError(CordRep* absl_nonnull root,
+                               CordRep* absl_nonnull node) {
   std::ostringstream buf;
   buf << "Error at node " << node << " in:";
   DumpNode(root, true, &buf);
   return buf.str();
 }
 
-static bool VerifyNode(absl::Nonnull<CordRep*> root,
-                       absl::Nonnull<CordRep*> start_node) {
-  absl::InlinedVector<absl::Nonnull<CordRep*>, 2> worklist;
+static bool VerifyNode(CordRep* absl_nonnull root,
+                       CordRep* absl_nonnull start_node) {
+  absl::InlinedVector<CordRep* absl_nonnull, 2> worklist;
   worklist.push_back(start_node);
   do {
     CordRep* node = worklist.back();
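
One detail from the `PrepareAppendRegion` signature above: with the qualifier spelling, nested annotations read inside-out. A small sketch under that reading (`GetBuffer` and `Caller` are invented for illustration):

#include "absl/base/nullability.h"

// `char* absl_nullable* absl_nonnull out` means the pointer `out` itself is
// never null, while the char pointer it refers to may be null.
void GetBuffer(char* absl_nullable* absl_nonnull out) { *out = nullptr; }

void Caller() {
  char* buffer = nullptr;  // the pointee may legitimately stay null
  GetBuffer(&buffer);      // `&buffer` is never null
  if (buffer != nullptr) {
    // ... use buffer ...
  }
}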
diff --git a/absl/strings/cord.h b/absl/strings/cord.h
index 1f8aafb..b50150f 100644
--- a/absl/strings/cord.h
+++ b/absl/strings/cord.h
@@ -102,8 +102,8 @@
 class CordTestPeer;
 template <typename Releaser>
 Cord MakeCordFromExternal(absl::string_view, Releaser&&);
-void CopyCordToString(const Cord& src, absl::Nonnull<std::string*> dst);
-void AppendCordToString(const Cord& src, absl::Nonnull<std::string*> dst);
+void CopyCordToString(const Cord& src, std::string* absl_nonnull dst);
+void AppendCordToString(const Cord& src, std::string* absl_nonnull dst);
 
 // Cord memory accounting modes
 enum class CordMemoryAccounting {
@@ -417,8 +417,7 @@
   // guarantee that pointers previously returned by `dst->data()` remain valid
   // even if `*dst` had enough capacity to hold `src`. If `*dst` is a new
   // object, prefer to simply use the conversion operator to `std::string`.
-  friend void CopyCordToString(const Cord& src,
-                               absl::Nonnull<std::string*> dst);
+  friend void CopyCordToString(const Cord& src, std::string* absl_nonnull dst);
 
   // AppendCordToString()
   //
@@ -430,7 +429,7 @@
   // `dst->data()`. If `*dst` is a new object, prefer to simply use the
   // conversion operator to `std::string`.
   friend void AppendCordToString(const Cord& src,
-                                 absl::Nonnull<std::string*> dst);
+                                 std::string* absl_nonnull dst);
 
   class CharIterator;
 
@@ -467,7 +466,7 @@
     using iterator_category = std::input_iterator_tag;
     using value_type = absl::string_view;
     using difference_type = ptrdiff_t;
-    using pointer = absl::Nonnull<const value_type*>;
+    using pointer = const value_type* absl_nonnull;
     using reference = value_type;
 
     ChunkIterator() = default;
@@ -488,13 +487,13 @@
     using CordRepBtreeReader = absl::cord_internal::CordRepBtreeReader;
 
     // Constructs a `begin()` iterator from `tree`.
-    explicit ChunkIterator(absl::Nonnull<cord_internal::CordRep*> tree);
+    explicit ChunkIterator(cord_internal::CordRep* absl_nonnull tree);
 
     // Constructs a `begin()` iterator from `cord`.
-    explicit ChunkIterator(absl::Nonnull<const Cord*> cord);
+    explicit ChunkIterator(const Cord* absl_nonnull cord);
 
     // Initializes this instance from a tree. Invoked by constructors.
-    void InitTree(absl::Nonnull<cord_internal::CordRep*> tree);
+    void InitTree(cord_internal::CordRep* absl_nonnull tree);
 
     // Removes `n` bytes from `current_chunk_`. Expects `n` to be smaller than
     // `current_chunk_.size()`.
@@ -512,7 +511,7 @@
     // The current leaf, or `nullptr` if the iterator points to short data.
     // If the current chunk is a substring node, current_leaf_ points to the
     // underlying flat or external node.
-    absl::Nullable<absl::cord_internal::CordRep*> current_leaf_ = nullptr;
+    absl::cord_internal::CordRep* absl_nullable current_leaf_ = nullptr;
     // The number of bytes left in the `Cord` over which we are iterating.
     size_t bytes_remaining_ = 0;
 
@@ -569,13 +568,13 @@
     using iterator = ChunkIterator;
     using const_iterator = ChunkIterator;
 
-    explicit ChunkRange(absl::Nonnull<const Cord*> cord) : cord_(cord) {}
+    explicit ChunkRange(const Cord* absl_nonnull cord) : cord_(cord) {}
 
     ChunkIterator begin() const;
     ChunkIterator end() const;
 
    private:
-    absl::Nonnull<const Cord*> cord_;
+    const Cord* absl_nonnull cord_;
   };
 
   // Cord::Chunks()
@@ -628,7 +627,7 @@
     using iterator_category = std::input_iterator_tag;
     using value_type = char;
     using difference_type = ptrdiff_t;
-    using pointer = absl::Nonnull<const char*>;
+    using pointer = const char* absl_nonnull;
     using reference = const char&;
 
     CharIterator() = default;
@@ -642,7 +641,7 @@
     friend Cord;
 
    private:
-    explicit CharIterator(absl::Nonnull<const Cord*> cord)
+    explicit CharIterator(const Cord* absl_nonnull cord)
         : chunk_iterator_(cord) {}
 
     ChunkIterator chunk_iterator_;
@@ -654,14 +653,14 @@
   // advanced as a separate `Cord`. `n_bytes` must be less than or equal to the
   // number of bytes within the Cord; otherwise, behavior is undefined. It is
   // valid to pass `char_end()` and `0`.
-  static Cord AdvanceAndRead(absl::Nonnull<CharIterator*> it, size_t n_bytes);
+  static Cord AdvanceAndRead(CharIterator* absl_nonnull it, size_t n_bytes);
 
   // Cord::Advance()
   //
   // Advances the `Cord::CharIterator` by `n_bytes`. `n_bytes` must be less than
   // or equal to the number of bytes remaining within the Cord; otherwise,
   // behavior is undefined. It is valid to pass `char_end()` and `0`.
-  static void Advance(absl::Nonnull<CharIterator*> it, size_t n_bytes);
+  static void Advance(CharIterator* absl_nonnull it, size_t n_bytes);
 
   // Cord::ChunkRemaining()
   //
@@ -670,6 +669,13 @@
   // `it` must be dereferenceable.
   static absl::string_view ChunkRemaining(const CharIterator& it);
 
+  // Cord::Distance()
+  //
+  // Returns the distance between `first` and `last`, as if
+  // `std::distance(first, last)` was called.
+  static ptrdiff_t Distance(const CharIterator& first,
+                            const CharIterator& last);
+
   // Cord::char_begin()
   //
   // Returns an iterator to the first character of the `Cord`.
@@ -710,13 +716,13 @@
     using iterator = CharIterator;
     using const_iterator = CharIterator;
 
-    explicit CharRange(absl::Nonnull<const Cord*> cord) : cord_(cord) {}
+    explicit CharRange(const Cord* absl_nonnull cord) : cord_(cord) {}
 
     CharIterator begin() const;
     CharIterator end() const;
 
    private:
-    absl::Nonnull<const Cord*> cord_;
+    const Cord* absl_nonnull cord_;
   };
 
   // Cord::Chars()
@@ -775,7 +781,7 @@
   CharIterator Find(const absl::Cord& needle) const;
 
   // Supports absl::Cord as a sink object for absl::Format().
-  friend void AbslFormatFlush(absl::Nonnull<absl::Cord*> cord,
+  friend void AbslFormatFlush(absl::Cord* absl_nonnull cord,
                               absl::string_view part) {
     cord->Append(part);
   }
@@ -878,7 +884,7 @@
   }
 #endif
 
-  friend absl::Nullable<const CordzInfo*> GetCordzInfoForTesting(
+  friend const CordzInfo* absl_nullable GetCordzInfoForTesting(
       const Cord& cord);
 
   // Calls the provided function once for each cord chunk, in order.  Unlike
@@ -907,21 +913,21 @@
     InlineRep& operator=(InlineRep&& src) noexcept;
 
     explicit constexpr InlineRep(absl::string_view sv,
-                                 absl::Nullable<CordRep*> rep);
+                                 CordRep* absl_nullable rep);
 
-    void Swap(absl::Nonnull<InlineRep*> rhs);
+    void Swap(InlineRep* absl_nonnull rhs);
     size_t size() const;
     // Returns nullptr if holding pointer
-    absl::Nullable<const char*> data() const;
+    const char* absl_nullable data() const;
     // Discards pointer, if any
-    void set_data(absl::Nonnull<const char*> data, size_t n);
-    absl::Nonnull<char*> set_data(size_t n);  // Write data to the result
+    void set_data(const char* absl_nonnull data, size_t n);
+    char* absl_nonnull set_data(size_t n);  // Write data to the result
     // Returns nullptr if holding bytes
-    absl::Nullable<absl::cord_internal::CordRep*> tree() const;
-    absl::Nonnull<absl::cord_internal::CordRep*> as_tree() const;
-    absl::Nonnull<const char*> as_chars() const;
+    absl::cord_internal::CordRep* absl_nullable tree() const;
+    absl::cord_internal::CordRep* absl_nonnull as_tree() const;
+    const char* absl_nonnull as_chars() const;
     // Returns non-null iff was holding a pointer
-    absl::Nullable<absl::cord_internal::CordRep*> clear();
+    absl::cord_internal::CordRep* absl_nullable clear();
     // Converts to pointer if necessary.
     void reduce_size(size_t n);    // REQUIRES: holding data
     void remove_prefix(size_t n);  // REQUIRES: holding data
@@ -930,58 +936,56 @@
 
     // Creates a CordRepFlat instance from the current inlined data with `extra'
     // bytes of desired additional capacity.
-    absl::Nonnull<CordRepFlat*> MakeFlatWithExtraCapacity(size_t extra);
+    CordRepFlat* absl_nonnull MakeFlatWithExtraCapacity(size_t extra);
 
     // Sets the tree value for this instance. `rep` must not be null.
     // Requires the current instance to hold a tree, and a lock to be held on
     // any CordzInfo referenced by this instance. The latter is enforced through
     // the CordzUpdateScope argument. If the current instance is sampled, then
     // the CordzInfo instance is updated to reference the new `rep` value.
-    void SetTree(absl::Nonnull<CordRep*> rep, const CordzUpdateScope& scope);
+    void SetTree(CordRep* absl_nonnull rep, const CordzUpdateScope& scope);
 
     // Identical to SetTree(), except that `rep` is allowed to be null, in
     // which case the current instance is reset to an empty value.
-    void SetTreeOrEmpty(absl::Nullable<CordRep*> rep,
+    void SetTreeOrEmpty(CordRep* absl_nullable rep,
                         const CordzUpdateScope& scope);
 
     // Sets the tree value for this instance, and randomly samples this cord.
     // This function disregards existing contents in `data_`, and should be
     // called when a Cord is 'promoted' from an 'uninitialized' or 'inlined'
     // value to a non-inlined (tree / ring) value.
-    void EmplaceTree(absl::Nonnull<CordRep*> rep, MethodIdentifier method);
+    void EmplaceTree(CordRep* absl_nonnull rep, MethodIdentifier method);
 
     // Identical to EmplaceTree, except that it copies the parent stack from
     // the provided `parent` data if the parent is sampled.
-    void EmplaceTree(absl::Nonnull<CordRep*> rep, const InlineData& parent,
+    void EmplaceTree(CordRep* absl_nonnull rep, const InlineData& parent,
                      MethodIdentifier method);
 
     // Commits the change of a newly created, or updated `rep` root value into
     // this cord. `old_rep` indicates the old (inlined or tree) value of the
     // cord, and determines if the commit invokes SetTree() or EmplaceTree().
-    void CommitTree(absl::Nullable<const CordRep*> old_rep,
-                    absl::Nonnull<CordRep*> rep, const CordzUpdateScope& scope,
+    void CommitTree(const CordRep* absl_nullable old_rep,
+                    CordRep* absl_nonnull rep, const CordzUpdateScope& scope,
                     MethodIdentifier method);
 
-    void AppendTreeToInlined(absl::Nonnull<CordRep*> tree,
+    void AppendTreeToInlined(CordRep* absl_nonnull tree,
                              MethodIdentifier method);
-    void AppendTreeToTree(absl::Nonnull<CordRep*> tree,
-                          MethodIdentifier method);
-    void AppendTree(absl::Nonnull<CordRep*> tree, MethodIdentifier method);
-    void PrependTreeToInlined(absl::Nonnull<CordRep*> tree,
+    void AppendTreeToTree(CordRep* absl_nonnull tree, MethodIdentifier method);
+    void AppendTree(CordRep* absl_nonnull tree, MethodIdentifier method);
+    void PrependTreeToInlined(CordRep* absl_nonnull tree,
                               MethodIdentifier method);
-    void PrependTreeToTree(absl::Nonnull<CordRep*> tree,
-                           MethodIdentifier method);
-    void PrependTree(absl::Nonnull<CordRep*> tree, MethodIdentifier method);
+    void PrependTreeToTree(CordRep* absl_nonnull tree, MethodIdentifier method);
+    void PrependTree(CordRep* absl_nonnull tree, MethodIdentifier method);
 
     bool IsSame(const InlineRep& other) const { return data_ == other.data_; }
 
     // Copies the inline contents into `dst`. Assumes the cord is not empty.
-    void CopyTo(absl::Nonnull<std::string*> dst) const {
+    void CopyTo(std::string* absl_nonnull dst) const {
       data_.CopyInlineToString(dst);
     }
 
     // Copies the inline contents into `dst`. Assumes the cord is not empty.
-    void CopyToArray(absl::Nonnull<char*> dst) const;
+    void CopyToArray(char* absl_nonnull dst) const;
 
     bool is_tree() const { return data_.is_tree(); }
 
@@ -994,12 +998,12 @@
     }
 
     // Returns the profiled CordzInfo, or nullptr if not sampled.
-    absl::Nullable<absl::cord_internal::CordzInfo*> cordz_info() const {
+    absl::cord_internal::CordzInfo* absl_nullable cordz_info() const {
       return data_.cordz_info();
     }
 
     // Sets the profiled CordzInfo.
-    void set_cordz_info(absl::Nonnull<cord_internal::CordzInfo*> cordz_info) {
+    void set_cordz_info(cord_internal::CordzInfo* absl_nonnull cordz_info) {
       assert(cordz_info != nullptr);
       data_.set_cordz_info(cordz_info);
     }
@@ -1031,19 +1035,19 @@
   InlineRep contents_;
 
   // Helper for GetFlat() and TryFlat().
-  static bool GetFlatAux(absl::Nonnull<absl::cord_internal::CordRep*> rep,
-                         absl::Nonnull<absl::string_view*> fragment);
+  static bool GetFlatAux(absl::cord_internal::CordRep* absl_nonnull rep,
+                         absl::string_view* absl_nonnull fragment);
 
   // Helper for ForEachChunk().
   static void ForEachChunkAux(
-      absl::Nonnull<absl::cord_internal::CordRep*> rep,
+      absl::cord_internal::CordRep* absl_nonnull rep,
       absl::FunctionRef<void(absl::string_view)> callback);
 
   // The destructor for non-empty Cords.
   void DestroyCordSlow();
 
   // Out-of-line implementation of slower parts of logic.
-  void CopyToArraySlowPath(absl::Nonnull<char*> dst) const;
+  void CopyToArraySlowPath(char* absl_nonnull dst) const;
   int CompareSlowPath(absl::string_view rhs, size_t compared_size,
                       size_t size_to_compare) const;
   int CompareSlowPath(const Cord& rhs, size_t compared_size,
@@ -1060,8 +1064,8 @@
 
   // Returns a new reference to contents_.tree(), or steals an existing
   // reference if called on an rvalue.
-  absl::Nonnull<absl::cord_internal::CordRep*> TakeRep() const&;
-  absl::Nonnull<absl::cord_internal::CordRep*> TakeRep() &&;
+  absl::cord_internal::CordRep* absl_nonnull TakeRep() const&;
+  absl::cord_internal::CordRep* absl_nonnull TakeRep() &&;
 
   // Helper for Append().
   template <typename C>
@@ -1098,12 +1102,11 @@
 
   friend class CrcCord;
   void SetCrcCordState(crc_internal::CrcCordState state);
-  absl::Nullable<const crc_internal::CrcCordState*> MaybeGetCrcCordState()
-      const;
+  const crc_internal::CrcCordState* absl_nullable MaybeGetCrcCordState() const;
 
   CharIterator FindImpl(CharIterator it, absl::string_view needle) const;
 
-  void CopyToArrayImpl(absl::Nonnull<char*> dst) const;
+  void CopyToArrayImpl(char* absl_nonnull dst) const;
 };
 
 ABSL_NAMESPACE_END
@@ -1123,14 +1126,14 @@
 // Does non-template-specific `CordRepExternal` initialization.
 // Requires `data` to be non-empty.
 void InitializeCordRepExternal(absl::string_view data,
-                               absl::Nonnull<CordRepExternal*> rep);
+                               CordRepExternal* absl_nonnull rep);
 
 // Creates a new `CordRep` that owns `data` and `releaser` and returns a pointer
 // to it. Requires `data` to be non-empty.
 template <typename Releaser>
 // NOLINTNEXTLINE - suppress clang-tidy raw pointer return.
-absl::Nonnull<CordRep*> NewExternalRep(absl::string_view data,
-                                       Releaser&& releaser) {
+CordRep* absl_nonnull NewExternalRep(absl::string_view data,
+                                     Releaser&& releaser) {
   assert(!data.empty());
   using ReleaserType = absl::decay_t<Releaser>;
   CordRepExternal* rep = new CordRepExternalImpl<ReleaserType>(
@@ -1142,7 +1145,7 @@
 // Overload for function reference types that dispatches using a function
 // pointer because there are no `alignof()` or `sizeof()` a function reference.
 // NOLINTNEXTLINE - suppress clang-tidy raw pointer return.
-inline absl::Nonnull<CordRep*> NewExternalRep(
+inline CordRep* absl_nonnull NewExternalRep(
     absl::string_view data, void (&releaser)(absl::string_view)) {
   return NewExternalRep(data, &releaser);
 }
@@ -1166,7 +1169,7 @@
 }
 
 constexpr Cord::InlineRep::InlineRep(absl::string_view sv,
-                                     absl::Nullable<CordRep*> rep)
+                                     CordRep* absl_nullable rep)
     : data_(sv, rep) {}
 
 inline Cord::InlineRep::InlineRep(const Cord::InlineRep& src)
@@ -1205,7 +1208,7 @@
   return *this;
 }
 
-inline void Cord::InlineRep::Swap(absl::Nonnull<Cord::InlineRep*> rhs) {
+inline void Cord::InlineRep::Swap(Cord::InlineRep* absl_nonnull rhs) {
   if (rhs == this) {
     return;
   }
@@ -1213,22 +1216,22 @@
   swap(data_, rhs->data_);
 }
 
-inline absl::Nullable<const char*> Cord::InlineRep::data() const {
+inline const char* absl_nullable Cord::InlineRep::data() const {
   return is_tree() ? nullptr : data_.as_chars();
 }
 
-inline absl::Nonnull<const char*> Cord::InlineRep::as_chars() const {
+inline const char* absl_nonnull Cord::InlineRep::as_chars() const {
   assert(!data_.is_tree());
   return data_.as_chars();
 }
 
-inline absl::Nonnull<absl::cord_internal::CordRep*> Cord::InlineRep::as_tree()
+inline absl::cord_internal::CordRep* absl_nonnull Cord::InlineRep::as_tree()
     const {
   assert(data_.is_tree());
   return data_.as_tree();
 }
 
-inline absl::Nullable<absl::cord_internal::CordRep*> Cord::InlineRep::tree()
+inline absl::cord_internal::CordRep* absl_nullable Cord::InlineRep::tree()
     const {
   if (is_tree()) {
     return as_tree();
@@ -1241,7 +1244,7 @@
   return is_tree() ? as_tree()->length : inline_size();
 }
 
-inline absl::Nonnull<cord_internal::CordRepFlat*>
+inline cord_internal::CordRepFlat* absl_nonnull
 Cord::InlineRep::MakeFlatWithExtraCapacity(size_t extra) {
   static_assert(cord_internal::kMinFlatLength >= sizeof(data_), "");
   size_t len = data_.inline_size();
@@ -1251,21 +1254,21 @@
   return result;
 }
 
-inline void Cord::InlineRep::EmplaceTree(absl::Nonnull<CordRep*> rep,
+inline void Cord::InlineRep::EmplaceTree(CordRep* absl_nonnull rep,
                                          MethodIdentifier method) {
   assert(rep);
   data_.make_tree(rep);
   CordzInfo::MaybeTrackCord(data_, method);
 }
 
-inline void Cord::InlineRep::EmplaceTree(absl::Nonnull<CordRep*> rep,
+inline void Cord::InlineRep::EmplaceTree(CordRep* absl_nonnull rep,
                                          const InlineData& parent,
                                          MethodIdentifier method) {
   data_.make_tree(rep);
   CordzInfo::MaybeTrackCord(data_, parent, method);
 }
 
-inline void Cord::InlineRep::SetTree(absl::Nonnull<CordRep*> rep,
+inline void Cord::InlineRep::SetTree(CordRep* absl_nonnull rep,
                                      const CordzUpdateScope& scope) {
   assert(rep);
   assert(data_.is_tree());
@@ -1273,7 +1276,7 @@
   scope.SetCordRep(rep);
 }
 
-inline void Cord::InlineRep::SetTreeOrEmpty(absl::Nullable<CordRep*> rep,
+inline void Cord::InlineRep::SetTreeOrEmpty(CordRep* absl_nullable rep,
                                             const CordzUpdateScope& scope) {
   assert(data_.is_tree());
   if (rep) {
@@ -1284,8 +1287,8 @@
   scope.SetCordRep(rep);
 }
 
-inline void Cord::InlineRep::CommitTree(absl::Nullable<const CordRep*> old_rep,
-                                        absl::Nonnull<CordRep*> rep,
+inline void Cord::InlineRep::CommitTree(const CordRep* absl_nullable old_rep,
+                                        CordRep* absl_nonnull rep,
                                         const CordzUpdateScope& scope,
                                         MethodIdentifier method) {
   if (old_rep) {
@@ -1295,7 +1298,7 @@
   }
 }
 
-inline absl::Nullable<absl::cord_internal::CordRep*> Cord::InlineRep::clear() {
+inline absl::cord_internal::CordRep* absl_nullable Cord::InlineRep::clear() {
   if (is_tree()) {
     CordzInfo::MaybeUntrackCord(cordz_info());
   }
@@ -1304,7 +1307,7 @@
   return result;
 }
 
-inline void Cord::InlineRep::CopyToArray(absl::Nonnull<char*> dst) const {
+inline void Cord::InlineRep::CopyToArray(char* absl_nonnull dst) const {
   assert(!is_tree());
   size_t n = inline_size();
   assert(n != 0);
@@ -1488,7 +1491,7 @@
   return EqualsImpl(rhs, rhs_size);
 }
 
-inline void Cord::CopyToArrayImpl(absl::Nonnull<char*> dst) const {
+inline void Cord::CopyToArrayImpl(char* absl_nonnull dst) const {
   if (!contents_.is_tree()) {
     if (!empty()) contents_.CopyToArray(dst);
   } else {
@@ -1497,7 +1500,7 @@
 }
 
 inline void Cord::ChunkIterator::InitTree(
-    absl::Nonnull<cord_internal::CordRep*> tree) {
+    cord_internal::CordRep* absl_nonnull tree) {
   tree = cord_internal::SkipCrcNode(tree);
   if (tree->tag == cord_internal::BTREE) {
     current_chunk_ = btree_reader_.Init(tree->btree());
@@ -1508,12 +1511,12 @@
 }
 
 inline Cord::ChunkIterator::ChunkIterator(
-    absl::Nonnull<cord_internal::CordRep*> tree) {
+    cord_internal::CordRep* absl_nonnull tree) {
   bytes_remaining_ = tree->length;
   InitTree(tree);
 }
 
-inline Cord::ChunkIterator::ChunkIterator(absl::Nonnull<const Cord*> cord) {
+inline Cord::ChunkIterator::ChunkIterator(const Cord* absl_nonnull cord) {
   if (CordRep* tree = cord->contents_.tree()) {
     bytes_remaining_ = tree->length;
     if (ABSL_PREDICT_TRUE(bytes_remaining_ != 0)) {
@@ -1649,13 +1652,13 @@
   return *chunk_iterator_->data();
 }
 
-inline Cord Cord::AdvanceAndRead(absl::Nonnull<CharIterator*> it,
+inline Cord Cord::AdvanceAndRead(CharIterator* absl_nonnull it,
                                  size_t n_bytes) {
   assert(it != nullptr);
   return it->chunk_iterator_.AdvanceAndReadBytes(n_bytes);
 }
 
-inline void Cord::Advance(absl::Nonnull<CharIterator*> it, size_t n_bytes) {
+inline void Cord::Advance(CharIterator* absl_nonnull it, size_t n_bytes) {
   assert(it != nullptr);
   it->chunk_iterator_.AdvanceBytes(n_bytes);
 }
@@ -1664,6 +1667,12 @@
   return *it.chunk_iterator_;
 }
 
+inline ptrdiff_t Cord::Distance(const CharIterator& first,
+                                const CharIterator& last) {
+  return static_cast<ptrdiff_t>(first.chunk_iterator_.bytes_remaining_ -
+                                last.chunk_iterator_.bytes_remaining_);
+}
+
 inline Cord::CharIterator Cord::char_begin() const {
   return CharIterator(this);
 }
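
A short usage sketch for the new `Cord::Distance()` shown above; it mirrors what the updated test below exercises (`DistanceDemo` is an illustrative name).

#include <cassert>

#include "absl/strings/cord.h"

void DistanceDemo() {
  absl::Cord cord("hello world");  // 11 bytes
  absl::Cord::CharIterator it = cord.char_begin();
  absl::Cord::Advance(&it, 6);
  // Distance is derived from each iterator's remaining byte count, so it
  // behaves like std::distance over the character sequence.
  assert(absl::Cord::Distance(cord.char_begin(), it) == 6);
  assert(absl::Cord::Distance(it, cord.char_end()) == 5);
}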
diff --git a/absl/strings/cord_analysis.cc b/absl/strings/cord_analysis.cc
index 19b0fa4..dcbc826 100644
--- a/absl/strings/cord_analysis.cc
+++ b/absl/strings/cord_analysis.cc
@@ -39,15 +39,15 @@
 template <Mode mode>
 struct CordRepRef {
   // Instantiates a CordRepRef instance.
-  explicit CordRepRef(absl::Nonnull<const CordRep*> r) : rep(r) {}
+  explicit CordRepRef(const CordRep* absl_nonnull r) : rep(r) {}
 
   // Creates a child reference holding the provided child.
   // Overloaded to add cumulative reference count for kFairShare.
-  CordRepRef Child(absl::Nonnull<const CordRep*> child) const {
+  CordRepRef Child(const CordRep* absl_nonnull child) const {
     return CordRepRef(child);
   }
 
-  absl::Nonnull<const CordRep*> rep;
+  const CordRep* absl_nonnull rep;
 };
 
 // RawUsage holds the computed total number of bytes.
@@ -66,7 +66,7 @@
 struct RawUsage<Mode::kTotalMorePrecise> {
   size_t total = 0;
   // TODO(b/289250880): Replace this with a flat_hash_set.
-  std::unordered_set<absl::Nonnull<const CordRep*>> counted;
+  std::unordered_set<const CordRep* absl_nonnull> counted;
 
   void Add(size_t size, CordRepRef<Mode::kTotalMorePrecise> repref) {
     if (counted.insert(repref.rep).second) {
@@ -90,15 +90,15 @@
 template <>
 struct CordRepRef<Mode::kFairShare> {
   // Creates a CordRepRef with the provided rep and top (parent) fraction.
-  explicit CordRepRef(absl::Nonnull<const CordRep*> r, double frac = 1.0)
+  explicit CordRepRef(const CordRep* absl_nonnull r, double frac = 1.0)
       : rep(r), fraction(MaybeDiv(frac, r->refcount.Get())) {}
 
   // Returns a CordRepRef with a fraction of `this->fraction / child.refcount`
-  CordRepRef Child(absl::Nonnull<const CordRep*> child) const {
+  CordRepRef Child(const CordRep* absl_nonnull child) const {
     return CordRepRef(child, fraction);
   }
 
-  absl::Nonnull<const CordRep*> rep;
+  const CordRep* absl_nonnull rep;
   double fraction;
 };
 
@@ -150,7 +150,7 @@
 }
 
 template <Mode mode>
-size_t GetEstimatedUsage(absl::Nonnull<const CordRep*> rep) {
+size_t GetEstimatedUsage(const CordRep* absl_nonnull rep) {
   // Zero initialized memory usage totals.
   RawUsage<mode> raw_usage;
 
@@ -179,15 +179,15 @@
 
 }  // namespace
 
-size_t GetEstimatedMemoryUsage(absl::Nonnull<const CordRep*> rep) {
+size_t GetEstimatedMemoryUsage(const CordRep* absl_nonnull rep) {
   return GetEstimatedUsage<Mode::kTotal>(rep);
 }
 
-size_t GetEstimatedFairShareMemoryUsage(absl::Nonnull<const CordRep*> rep) {
+size_t GetEstimatedFairShareMemoryUsage(const CordRep* absl_nonnull rep) {
   return GetEstimatedUsage<Mode::kFairShare>(rep);
 }
 
-size_t GetMorePreciseMemoryUsage(absl::Nonnull<const CordRep*> rep) {
+size_t GetMorePreciseMemoryUsage(const CordRep* absl_nonnull rep) {
   return GetEstimatedUsage<Mode::kTotalMorePrecise>(rep);
 }
 
diff --git a/absl/strings/cord_analysis.h b/absl/strings/cord_analysis.h
index f8ce348..db50f3a 100644
--- a/absl/strings/cord_analysis.h
+++ b/absl/strings/cord_analysis.h
@@ -29,7 +29,7 @@
 // Returns the *approximate* number of bytes held in full or in part by this
 // Cord (which may not remain the same between invocations). Cords that share
 // memory could each be "charged" independently for the same shared memory.
-size_t GetEstimatedMemoryUsage(absl::Nonnull<const CordRep*> rep);
+size_t GetEstimatedMemoryUsage(const CordRep* absl_nonnull rep);
 
 // Returns the *approximate* number of bytes held in full or in part by this
 // Cord for the distinct memory held by this cord. This is similar to
@@ -47,13 +47,13 @@
 //
 // This is more expensive than `GetEstimatedMemoryUsage()` as it requires
 // deduplicating all memory references.
-size_t GetMorePreciseMemoryUsage(absl::Nonnull<const CordRep*> rep);
+size_t GetMorePreciseMemoryUsage(const CordRep* absl_nonnull rep);
 
 // Returns the *approximate* number of bytes held in full or in part by this
 // CordRep weighted by the sharing ratio of that data. For example, if some data
 // edge is shared by 4 different Cords, then each cord is attribute 1/4th of
 // the total memory usage as a 'fair share' of the total memory usage.
-size_t GetEstimatedFairShareMemoryUsage(absl::Nonnull<const CordRep*> rep);
+size_t GetEstimatedFairShareMemoryUsage(const CordRep* absl_nonnull rep);
 
 }  // namespace cord_internal
 ABSL_NAMESPACE_END
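
To make the "fair share" weighting described above concrete, here is a simplified model of the fraction propagation that `CordRepRef<Mode::kFairShare>` performs (the struct and function below are invented for the example, not the real types):

#include <cstddef>

// Each node's bytes are charged in proportion to 1 / (product of refcounts
// along the path from the root), which is what `fraction` accumulates.
struct FakeNode {
  std::size_t length;
  std::size_t refcount;
};

double FairShareBytes(const FakeNode& node, double parent_fraction) {
  double fraction = parent_fraction / static_cast<double>(node.refcount);
  return fraction * static_cast<double>(node.length);
}

// Example: a 4000-byte flat node shared by 4 cords is charged
// 4000 * 1/4 = 1000 bytes to each cord.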
diff --git a/absl/strings/cord_test.cc b/absl/strings/cord_test.cc
index 993e586..55007bb 100644
--- a/absl/strings/cord_test.cc
+++ b/absl/strings/cord_test.cc
@@ -2518,6 +2518,10 @@
   absl::Cord::CharRange range = cord.Chars();
   EXPECT_EQ(range.begin() == range.end(), cord.empty());
   EXPECT_EQ(range.begin() != range.end(), !cord.empty());
+  EXPECT_EQ(absl::Cord::Distance(range.begin(), range.end()),
+            static_cast<ptrdiff_t>(cord.size()));
+  EXPECT_EQ(absl::Cord::Distance(range.end(), range.begin()),
+            -static_cast<ptrdiff_t>(cord.size()));
 
   size_t i = 0;
   absl::Cord::CharIterator pre_iter = cord.char_begin();
@@ -2548,19 +2552,29 @@
     absl::Cord::CharIterator advance_iter = range.begin();
     absl::Cord::Advance(&advance_iter, i);
     EXPECT_EQ(pre_iter, advance_iter);
+    EXPECT_EQ(absl::Cord::Distance(range.begin(), advance_iter),
+              static_cast<ptrdiff_t>(i));
 
     advance_iter = range.begin();
     EXPECT_EQ(absl::Cord::AdvanceAndRead(&advance_iter, i), cord.Subcord(0, i));
     EXPECT_EQ(pre_iter, advance_iter);
+    EXPECT_EQ(absl::Cord::Distance(range.begin(), advance_iter),
+              static_cast<ptrdiff_t>(i));
 
     advance_iter = pre_iter;
     absl::Cord::Advance(&advance_iter, cord.size() - i);
     EXPECT_EQ(range.end(), advance_iter);
+    EXPECT_EQ(absl::Cord::Distance(range.begin(), advance_iter),
+              static_cast<ptrdiff_t>(cord.size()));
+    EXPECT_EQ(absl::Cord::Distance(advance_iter, range.end()), 0);
 
     advance_iter = pre_iter;
     EXPECT_EQ(absl::Cord::AdvanceAndRead(&advance_iter, cord.size() - i),
               cord.Subcord(i, cord.size() - i));
     EXPECT_EQ(range.end(), advance_iter);
+    EXPECT_EQ(absl::Cord::Distance(range.begin(), advance_iter),
+              static_cast<ptrdiff_t>(cord.size()));
+    EXPECT_EQ(absl::Cord::Distance(advance_iter, range.end()), 0);
 
     ++i;
     ++pre_iter;
@@ -2642,16 +2656,25 @@
 
   MaybeHarden(cord);
 
+
   for (size_t chunk_size :
        {kChunkSize1, kChunkSize2, kChunkSize3, kChunkSize4}) {
     absl::Cord::CharIterator it = cord.char_begin();
+    size_t it_remaining = cord.size();
+    size_t it_advanced = 0;
     size_t offset = 0;
     while (offset < data.length()) {
+      EXPECT_EQ(absl::Cord::Distance(it, cord.char_end()), it_remaining);
+      EXPECT_EQ(absl::Cord::Distance(cord.char_begin(), it), it_advanced);
       const size_t n = std::min<size_t>(data.length() - offset, chunk_size);
       absl::Cord chunk = cord.AdvanceAndRead(&it, n);
       ASSERT_EQ(chunk.size(), n);
       ASSERT_EQ(chunk.Compare(data.substr(offset, n)), 0);
       offset += n;
+      it_remaining -= n;
+      it_advanced += n;
+      EXPECT_EQ(absl::Cord::Distance(it, cord.char_end()), it_remaining);
+      EXPECT_EQ(absl::Cord::Distance(cord.char_begin(), it), it_advanced);
     }
   }
 }
diff --git a/absl/strings/cordz_test_helpers.h b/absl/strings/cordz_test_helpers.h
index 619f13c..9811709 100644
--- a/absl/strings/cordz_test_helpers.h
+++ b/absl/strings/cordz_test_helpers.h
@@ -34,7 +34,7 @@
 ABSL_NAMESPACE_BEGIN
 
 // Returns the CordzInfo for the cord, or nullptr if the cord is not sampled.
-inline absl::Nullable<const cord_internal::CordzInfo*> GetCordzInfoForTesting(
+inline const cord_internal::CordzInfo* absl_nullable GetCordzInfoForTesting(
     const Cord& cord) {
   if (!cord.contents_.is_tree()) return nullptr;
   return cord.contents_.cordz_info();
@@ -42,7 +42,7 @@
 
 // Returns true if the provided cordz_info is in the list of sampled cords.
 inline bool CordzInfoIsListed(
-    absl::Nonnull<const cord_internal::CordzInfo*> cordz_info,
+    const cord_internal::CordzInfo* absl_nonnull cordz_info,
     cord_internal::CordzSampleToken token = {}) {
   for (const cord_internal::CordzInfo& info : token) {
     if (cordz_info == &info) return true;
@@ -121,7 +121,7 @@
 
 // Wrapper struct managing a small CordRep `rep`
 struct TestCordRep {
-  absl::Nonnull<cord_internal::CordRepFlat*> rep;
+  cord_internal::CordRepFlat* absl_nonnull rep;
 
   TestCordRep() {
     rep = cord_internal::CordRepFlat::New(100);
diff --git a/absl/strings/escaping.cc b/absl/strings/escaping.cc
index b70c504..f1953b4 100644
--- a/absl/strings/escaping.cc
+++ b/absl/strings/escaping.cc
@@ -59,7 +59,7 @@
 }
 
 inline bool IsSurrogate(char32_t c, absl::string_view src,
-                        absl::Nullable<std::string*> error) {
+                        std::string* absl_nullable error) {
   if (c >= 0xD800 && c <= 0xDFFF) {
     if (error) {
       *error = absl::StrCat("invalid surrogate character (0xD800-DFFF): \\",
@@ -76,49 +76,49 @@
 //
 //    Unescapes C escape sequences and is the reverse of CEscape().
 //
-//    If 'source' is valid, stores the unescaped string and its size in
-//    'dest' and 'dest_len' respectively, and returns true. Otherwise
-//    returns false and optionally stores the error description in
-//    'error'. Set 'error' to nullptr to disable error reporting.
+//    If `src` is valid, stores the unescaped string in `dst` and returns
+//    true. Otherwise returns false and optionally stores the error
+//    description in `error`. Set `error` to nullptr to disable error
+//    reporting.
 //
-//    'dest' should point to a buffer that is at least as big as 'source'.
-//    'source' and 'dest' may be the same.
-//
-//     NOTE: any changes to this function must also be reflected in the older
-//     UnescapeCEscapeSequences().
+//    `src` and `dst` may use the same underlying buffer.
 // ----------------------------------------------------------------------
-bool CUnescapeInternal(absl::string_view source, bool leave_nulls_escaped,
-                       absl::Nonnull<char*> dest,
-                       absl::Nonnull<ptrdiff_t*> dest_len,
-                       absl::Nullable<std::string*> error) {
-  char* d = dest;
-  const char* p = source.data();
-  const char* end = p + source.size();
-  const char* last_byte = end - 1;
 
-  // Small optimization for case where source = dest and there's no escaping
-  while (p == d && p < end && *p != '\\') p++, d++;
+bool CUnescapeInternal(absl::string_view src, bool leave_nulls_escaped,
+                       std::string* absl_nonnull dst,
+                       std::string* absl_nullable error) {
+  strings_internal::STLStringResizeUninitialized(dst, src.size());
 
-  while (p < end) {
-    if (*p != '\\') {
-      *d++ = *p++;
+  absl::string_view::size_type p = 0;  // Current src position.
+  std::string::size_type d = 0;        // Current dst position.
+
+  // When unescaping in-place, skip any prefix that does not have escaping.
+  if (src.data() == dst->data()) {
+    while (p < src.size() && src[p] != '\\') p++, d++;
+  }
+
+  while (p < src.size()) {
+    if (src[p] != '\\') {
+      (*dst)[d++] = src[p++];
     } else {
-      if (++p > last_byte) {  // skip past the '\\'
-        if (error) *error = "String cannot end with \\";
+      if (++p >= src.size()) {  // skip past the '\\'
+        if (error != nullptr) {
+          *error = "String cannot end with \\";
+        }
         return false;
       }
-      switch (*p) {
-        case 'a':  *d++ = '\a';  break;
-        case 'b':  *d++ = '\b';  break;
-        case 'f':  *d++ = '\f';  break;
-        case 'n':  *d++ = '\n';  break;
-        case 'r':  *d++ = '\r';  break;
-        case 't':  *d++ = '\t';  break;
-        case 'v':  *d++ = '\v';  break;
-        case '\\': *d++ = '\\';  break;
-        case '?':  *d++ = '\?';  break;    // \?  Who knew?
-        case '\'': *d++ = '\'';  break;
-        case '"':  *d++ = '\"';  break;
+      switch (src[p]) {
+        case 'a':  (*dst)[d++] = '\a';  break;
+        case 'b':  (*dst)[d++] = '\b';  break;
+        case 'f':  (*dst)[d++] = '\f';  break;
+        case 'n':  (*dst)[d++] = '\n';  break;
+        case 'r':  (*dst)[d++] = '\r';  break;
+        case 't':  (*dst)[d++] = '\t';  break;
+        case 'v':  (*dst)[d++] = '\v';  break;
+        case '\\': (*dst)[d++] = '\\';  break;
+        case '?':  (*dst)[d++] = '\?';  break;
+        case '\'': (*dst)[d++] = '\'';  break;
+        case '"':  (*dst)[d++] = '\"';  break;
         case '0':
         case '1':
         case '2':
@@ -128,188 +128,170 @@
         case '6':
         case '7': {
           // octal digit: 1 to 3 digits
-          const char* octal_start = p;
-          unsigned int ch = static_cast<unsigned int>(*p - '0');  // digit 1
-          if (p < last_byte && is_octal_digit(p[1]))
-            ch = ch * 8 + static_cast<unsigned int>(*++p - '0');  // digit 2
-          if (p < last_byte && is_octal_digit(p[1]))
-            ch = ch * 8 + static_cast<unsigned int>(*++p - '0');  // digit 3
+          auto octal_start = p;
+          unsigned int ch = static_cast<unsigned int>(src[p] - '0');  // digit 1
+          if (p + 1 < src.size() && is_octal_digit(src[p + 1]))
+            ch = ch * 8 + static_cast<unsigned int>(src[++p] - '0');  // digit 2
+          if (p + 1 < src.size() && is_octal_digit(src[p + 1]))
+            ch = ch * 8 + static_cast<unsigned int>(src[++p] - '0');  // digit 3
           if (ch > 0xff) {
-            if (error) {
-              *error = "Value of \\" +
-                       std::string(octal_start,
-                                   static_cast<size_t>(p + 1 - octal_start)) +
-                       " exceeds 0xff";
+            if (error != nullptr) {
+              *error =
+                  "Value of \\" +
+                  std::string(src.substr(octal_start, p + 1 - octal_start)) +
+                  " exceeds 0xff";
             }
             return false;
           }
           if ((ch == 0) && leave_nulls_escaped) {
             // Copy the escape sequence for the null character
-            const size_t octal_size = static_cast<size_t>(p + 1 - octal_start);
-            *d++ = '\\';
-            memmove(d, octal_start, octal_size);
-            d += octal_size;
+            (*dst)[d++] = '\\';
+            while (octal_start <= p) {
+              (*dst)[d++] = src[octal_start++];
+            }
             break;
           }
-          *d++ = static_cast<char>(ch);
+          (*dst)[d++] = static_cast<char>(ch);
           break;
         }
         case 'x':
         case 'X': {
-          if (p >= last_byte) {
-            if (error) *error = "String cannot end with \\x";
+          if (p + 1 >= src.size()) {
+            if (error != nullptr) {
+              *error = "String cannot end with \\x";
+            }
             return false;
-          } else if (!absl::ascii_isxdigit(static_cast<unsigned char>(p[1]))) {
-            if (error) *error = "\\x cannot be followed by a non-hex digit";
+          } else if (!absl::ascii_isxdigit(
+              static_cast<unsigned char>(src[p + 1]))) {
+            if (error != nullptr) {
+              *error = "\\x cannot be followed by a non-hex digit";
+            }
             return false;
           }
           unsigned int ch = 0;
-          const char* hex_start = p;
-          while (p < last_byte &&
-                 absl::ascii_isxdigit(static_cast<unsigned char>(p[1])))
+          auto hex_start = p;
+          while (p + 1 < src.size() &&
+                 absl::ascii_isxdigit(static_cast<unsigned char>(src[p + 1]))) {
             // Arbitrarily many hex digits
-            ch = (ch << 4) + hex_digit_to_int(*++p);
+            ch = (ch << 4) + hex_digit_to_int(src[++p]);
+          }
           if (ch > 0xFF) {
-            if (error) {
+            if (error != nullptr) {
               *error = "Value of \\" +
-                       std::string(hex_start,
-                                   static_cast<size_t>(p + 1 - hex_start)) +
+                       std::string(src.substr(hex_start, p + 1 - hex_start)) +
                        " exceeds 0xff";
             }
             return false;
           }
           if ((ch == 0) && leave_nulls_escaped) {
             // Copy the escape sequence for the null character
-            const size_t hex_size = static_cast<size_t>(p + 1 - hex_start);
-            *d++ = '\\';
-            memmove(d, hex_start, hex_size);
-            d += hex_size;
+            (*dst)[d++] = '\\';
+            while (hex_start <= p) {
+              (*dst)[d++] = src[hex_start++];
+            }
             break;
           }
-          *d++ = static_cast<char>(ch);
+          (*dst)[d++] = static_cast<char>(ch);
           break;
         }
         case 'u': {
           // \uhhhh => convert 4 hex digits to UTF-8
           char32_t rune = 0;
-          const char* hex_start = p;
-          if (p + 4 >= end) {
-            if (error) {
-              *error = "\\u must be followed by 4 hex digits: \\" +
-                       std::string(hex_start,
-                                   static_cast<size_t>(p + 1 - hex_start));
+          auto hex_start = p;
+          if (p + 4 >= src.size()) {
+            if (error != nullptr) {
+              *error = "\\u must be followed by 4 hex digits";
             }
             return false;
           }
           for (int i = 0; i < 4; ++i) {
             // Look one char ahead.
-            if (absl::ascii_isxdigit(static_cast<unsigned char>(p[1]))) {
-              rune = (rune << 4) + hex_digit_to_int(*++p);  // Advance p.
+            if (absl::ascii_isxdigit(static_cast<unsigned char>(src[p + 1]))) {
+              rune = (rune << 4) + hex_digit_to_int(src[++p]);
             } else {
-              if (error) {
+              if (error != nullptr) {
                 *error = "\\u must be followed by 4 hex digits: \\" +
-                         std::string(hex_start,
-                                     static_cast<size_t>(p + 1 - hex_start));
+                         std::string(src.substr(hex_start, p + 1 - hex_start));
               }
               return false;
             }
           }
           if ((rune == 0) && leave_nulls_escaped) {
             // Copy the escape sequence for the null character
-            *d++ = '\\';
-            memmove(d, hex_start, 5);  // u0000
-            d += 5;
+            (*dst)[d++] = '\\';
+            while (hex_start <= p) {
+              (*dst)[d++] = src[hex_start++];
+            }
             break;
           }
-          if (IsSurrogate(rune, absl::string_view(hex_start, 5), error)) {
+          if (IsSurrogate(rune, src.substr(hex_start, 5), error)) {
             return false;
           }
-          d += strings_internal::EncodeUTF8Char(d, rune);
+          d += strings_internal::EncodeUTF8Char(dst->data() + d, rune);
           break;
         }
         case 'U': {
           // \Uhhhhhhhh => convert 8 hex digits to UTF-8
           char32_t rune = 0;
-          const char* hex_start = p;
-          if (p + 8 >= end) {
-            if (error) {
-              *error = "\\U must be followed by 8 hex digits: \\" +
-                       std::string(hex_start,
-                                   static_cast<size_t>(p + 1 - hex_start));
+          auto hex_start = p;
+          if (p + 8 >= src.size()) {
+            if (error != nullptr) {
+              *error = "\\U must be followed by 8 hex digits";
             }
             return false;
           }
           for (int i = 0; i < 8; ++i) {
             // Look one char ahead.
-            if (absl::ascii_isxdigit(static_cast<unsigned char>(p[1]))) {
+            if (absl::ascii_isxdigit(static_cast<unsigned char>(src[p + 1]))) {
               // Don't change rune until we're sure this
               // is within the Unicode limit, but do advance p.
-              uint32_t newrune = (rune << 4) + hex_digit_to_int(*++p);
+              uint32_t newrune = (rune << 4) + hex_digit_to_int(src[++p]);
               if (newrune > 0x10FFFF) {
-                if (error) {
-                  *error = "Value of \\" +
-                           std::string(hex_start,
-                                       static_cast<size_t>(p + 1 - hex_start)) +
-                           " exceeds Unicode limit (0x10FFFF)";
+                if (error != nullptr) {
+                  *error =
+                      "Value of \\" +
+                      std::string(src.substr(hex_start, p + 1 - hex_start)) +
+                      " exceeds Unicode limit (0x10FFFF)";
                 }
                 return false;
               } else {
                 rune = newrune;
               }
             } else {
-              if (error) {
+              if (error != nullptr) {
                 *error = "\\U must be followed by 8 hex digits: \\" +
-                         std::string(hex_start,
-                                     static_cast<size_t>(p + 1 - hex_start));
+                         std::string(src.substr(hex_start, p + 1 - hex_start));
               }
               return false;
             }
           }
           if ((rune == 0) && leave_nulls_escaped) {
             // Copy the escape sequence for the null character
-            *d++ = '\\';
-            memmove(d, hex_start, 9);  // U00000000
-            d += 9;
+            (*dst)[d++] = '\\';
+            // Copy "U" plus its 8 hex digits.
+            while (hex_start <= p) {
+              (*dst)[d++] = src[hex_start++];
+            }
             break;
           }
-          if (IsSurrogate(rune, absl::string_view(hex_start, 9), error)) {
+          if (IsSurrogate(rune, src.substr(hex_start, 9), error)) {
             return false;
           }
-          d += strings_internal::EncodeUTF8Char(d, rune);
+          d += strings_internal::EncodeUTF8Char(dst->data() + d, rune);
           break;
         }
         default: {
-          if (error) *error = std::string("Unknown escape sequence: \\") + *p;
+          if (error != nullptr) {
+            *error = std::string("Unknown escape sequence: \\") + src[p];
+          }
           return false;
         }
       }
-      p++;                                 // read past letter we escaped
+      p++;  // Read past letter we escaped.
     }
   }
-  *dest_len = d - dest;
-  return true;
-}
 
-// ----------------------------------------------------------------------
-// CUnescapeInternal()
-//
-//    Same as above but uses a std::string for output. 'source' and 'dest'
-//    may be the same.
-// ----------------------------------------------------------------------
-bool CUnescapeInternal(absl::string_view source, bool leave_nulls_escaped,
-                       absl::Nonnull<std::string*> dest,
-                       absl::Nullable<std::string*> error) {
-  strings_internal::STLStringResizeUninitialized(dest, source.size());
-
-  ptrdiff_t dest_size;
-  if (!CUnescapeInternal(source,
-                         leave_nulls_escaped,
-                         &(*dest)[0],
-                         &dest_size,
-                         error)) {
-    return false;
-  }
-  dest->erase(static_cast<size_t>(dest_size));
+  dst->erase(d);
   return true;
 }
 
@@ -450,7 +432,7 @@
 }
 
 void CEscapeAndAppendInternal(absl::string_view src,
-                              absl::Nonnull<std::string*> dest) {
+                              std::string* absl_nonnull dest) {
   size_t escaped_len = CEscapedLength(src);
   if (escaped_len == src.size()) {
     dest->append(src.data(), src.size());
@@ -479,10 +461,10 @@
 
 // Reverses the mapping in Base64EscapeInternal; see that method's
 // documentation for details of the mapping.
-bool Base64UnescapeInternal(absl::Nullable<const char*> src_param, size_t szsrc,
-                            absl::Nullable<char*> dest, size_t szdest,
+bool Base64UnescapeInternal(const char* absl_nullable src_param, size_t szsrc,
+                            char* absl_nullable dest, size_t szdest,
                             const std::array<signed char, 256>& unbase64,
-                            absl::Nonnull<size_t*> len) {
+                            size_t* absl_nonnull len) {
   static const char kPad64Equals = '=';
   static const char kPad64Dot = '.';
 
@@ -818,8 +800,8 @@
 /* clang-format on */
 
 template <typename String>
-bool Base64UnescapeInternal(absl::Nullable<const char*> src, size_t slen,
-                            absl::Nonnull<String*> dest,
+bool Base64UnescapeInternal(const char* absl_nullable src, size_t slen,
+                            String* absl_nonnull dest,
                             const std::array<signed char, 256>& unbase64) {
   // Determine the size of the output string.  Base64 encodes every 3 bytes into
   // 4 characters.  Any leftover chars are added directly for good measure.
@@ -888,7 +870,7 @@
 // or a string.  This works because we use the [] operator to access
 // individual characters at a time.
 template <typename T>
-void HexStringToBytesInternal(absl::Nullable<const char*> from, T to,
+void HexStringToBytesInternal(const char* absl_nullable from, T to,
                               size_t num) {
   for (size_t i = 0; i < num; i++) {
     to[i] = static_cast<char>(kHexValueLenient[from[i * 2] & 0xFF] << 4) +
@@ -915,8 +897,8 @@
 //
 // See CUnescapeInternal() for implementation details.
 // ----------------------------------------------------------------------
-bool CUnescape(absl::string_view source, absl::Nonnull<std::string*> dest,
-               absl::Nullable<std::string*> error) {
+bool CUnescape(absl::string_view source, std::string* absl_nonnull dest,
+               std::string* absl_nullable error) {
   return CUnescapeInternal(source, kUnescapeNulls, dest, error);
 }
 
@@ -938,23 +920,23 @@
   return CEscapeInternal(src, true, true);
 }
 
-bool Base64Unescape(absl::string_view src, absl::Nonnull<std::string*> dest) {
+bool Base64Unescape(absl::string_view src, std::string* absl_nonnull dest) {
   return Base64UnescapeInternal(src.data(), src.size(), dest, kUnBase64);
 }
 
 bool WebSafeBase64Unescape(absl::string_view src,
-                           absl::Nonnull<std::string*> dest) {
+                           std::string* absl_nonnull dest) {
   return Base64UnescapeInternal(src.data(), src.size(), dest, kUnWebSafeBase64);
 }
 
-void Base64Escape(absl::string_view src, absl::Nonnull<std::string*> dest) {
+void Base64Escape(absl::string_view src, std::string* absl_nonnull dest) {
   strings_internal::Base64EscapeInternal(
       reinterpret_cast<const unsigned char*>(src.data()), src.size(), dest,
       true, strings_internal::kBase64Chars);
 }
 
 void WebSafeBase64Escape(absl::string_view src,
-                         absl::Nonnull<std::string*> dest) {
+                         std::string* absl_nonnull dest) {
   strings_internal::Base64EscapeInternal(
       reinterpret_cast<const unsigned char*>(src.data()), src.size(), dest,
       false, strings_internal::kWebSafeBase64Chars);
@@ -976,8 +958,7 @@
   return dest;
 }
 
-bool HexStringToBytes(absl::string_view hex,
-                      absl::Nonnull<std::string*> bytes) {
+bool HexStringToBytes(absl::string_view hex, std::string* absl_nonnull bytes) {
   std::string output;
 
   size_t num_bytes = hex.size() / 2;
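
The rewrite above retires the char*/length overload of CUnescapeInternal() and
writes straight into the destination std::string, preserving the documented
guarantee that `src` and `dst` may share a buffer. A short sketch of what that
looks like through the public CUnescape() entry point (illustrative only, not
part of the patch):

    #include <iostream>
    #include <string>
    #include "absl/strings/escaping.h"

    int main() {
      std::string s = "first\\nsecond\\tend";
      std::string error;
      // Unescape the string into itself: the string_view argument aliases the
      // buffer of `s`, which the in-place fast path above explicitly supports.
      if (!absl::CUnescape(s, &s, &error)) {
        std::cerr << "unescape failed: " << error << "\n";
        return 1;
      }
      std::cout << s << "\n";
      return 0;
    }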
diff --git a/absl/strings/escaping.h b/absl/strings/escaping.h
index 08271d2..3aaf39c 100644
--- a/absl/strings/escaping.h
+++ b/absl/strings/escaping.h
@@ -71,12 +71,12 @@
 //     ...
 //   }
 //   EXPECT_EQ(unescaped_s, "foo\rbar\nbaz\t");
-bool CUnescape(absl::string_view source, absl::Nonnull<std::string*> dest,
-               absl::Nullable<std::string*> error);
+bool CUnescape(absl::string_view source, std::string* absl_nonnull dest,
+               std::string* absl_nullable error);
 
 // Overload of `CUnescape()` with no error reporting.
 inline bool CUnescape(absl::string_view source,
-                      absl::Nonnull<std::string*> dest) {
+                      std::string* absl_nonnull dest) {
   return CUnescape(source, dest, nullptr);
 }
 
@@ -126,7 +126,7 @@
 // Encodes a `src` string into a base64-encoded 'dest' string with padding
 // characters. This function conforms with RFC 4648 section 4 (base64) and RFC
 // 2045.
-void Base64Escape(absl::string_view src, absl::Nonnull<std::string*> dest);
+void Base64Escape(absl::string_view src, std::string* absl_nonnull dest);
 std::string Base64Escape(absl::string_view src);
 
 // WebSafeBase64Escape()
@@ -134,8 +134,7 @@
 // Encodes a `src` string into a base64 string, like Base64Escape() does, but
 // outputs '-' instead of '+' and '_' instead of '/', and does not pad 'dest'.
 // This function conforms with RFC 4648 section 5 (base64url).
-void WebSafeBase64Escape(absl::string_view src,
-                         absl::Nonnull<std::string*> dest);
+void WebSafeBase64Escape(absl::string_view src, std::string* absl_nonnull dest);
 std::string WebSafeBase64Escape(absl::string_view src);
 
 // Base64Unescape()
@@ -145,7 +144,7 @@
 // `src` contains invalid characters, `dest` is cleared and returns `false`.
 // If padding is included (note that `Base64Escape()` does produce it), it must
 // be correct. In the padding, '=' and '.' are treated identically.
-bool Base64Unescape(absl::string_view src, absl::Nonnull<std::string*> dest);
+bool Base64Unescape(absl::string_view src, std::string* absl_nonnull dest);
 
 // WebSafeBase64Unescape()
 //
@@ -155,7 +154,7 @@
 // included (note that `WebSafeBase64Escape()` does not produce it), it must be
 // correct. In the padding, '=' and '.' are treated identically.
 bool WebSafeBase64Unescape(absl::string_view src,
-                           absl::Nonnull<std::string*> dest);
+                           std::string* absl_nonnull dest);
 
 // HexStringToBytes()
 //
@@ -164,7 +163,7 @@
 // function returns false and leaves `bytes` in an unspecified state. Returns
 // true on success.
 [[nodiscard]] bool HexStringToBytes(absl::string_view hex,
-                                    absl::Nonnull<std::string*> bytes);
+                                    std::string* absl_nonnull bytes);
 
 // HexStringToBytes()
 //
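
These header hunks only reword the nullability qualifiers; the documented
behavior of the escaping functions is unchanged. For reference, a small
round-trip sketch of the APIs whose comments appear above (illustrative only):

    #include <iostream>
    #include <string>
    #include "absl/strings/escaping.h"

    int main() {
      const std::string plain = "hello, web";
      std::string b64;
      absl::Base64Escape(plain, &b64);  // RFC 4648 section 4, padded.

      std::string decoded;
      if (!absl::Base64Unescape(b64, &decoded) || decoded != plain) return 1;

      std::string bytes;
      // The bool-returning overload is [[nodiscard]] and fails (rather than
      // producing garbage) on odd-length or non-hex input.
      if (!absl::HexStringToBytes("68656c6c6f", &bytes)) return 1;

      std::cout << b64 << " / " << bytes << "\n";  // aGVsbG8sIHdlYg== / hello
      return 0;
    }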
diff --git a/absl/strings/escaping_test.cc b/absl/strings/escaping_test.cc
index 25cb685..4786c88 100644
--- a/absl/strings/escaping_test.cc
+++ b/absl/strings/escaping_test.cc
@@ -169,15 +169,25 @@
     EXPECT_TRUE(absl::CUnescape(val.escaped, &out));
     EXPECT_EQ(out, val.unescaped);
   }
-  std::string bad[] = {"\\u1",         // too short
-                       "\\U1",         // too short
-                       "\\Uffffff",    // exceeds 0x10ffff (largest Unicode)
-                       "\\U00110000",  // exceeds 0x10ffff (largest Unicode)
-                       "\\uD835",      // surrogate character (D800-DFFF)
-                       "\\U0000DD04",  // surrogate character (D800-DFFF)
-                       "\\777",        // exceeds 0xff
-                       "\\xABCD"};     // exceeds 0xff
-  for (const std::string& e : bad) {
+  constexpr absl::string_view bad[] = {
+      "\\u1",         // too short
+      "\\U1",         // too short
+      "\\Uffffff",    // exceeds 0x10ffff (largest Unicode)
+      "\\U00110000",  // exceeds 0x10ffff (largest Unicode)
+      "\\uD835",      // surrogate character (D800-DFFF)
+      "\\U0000DD04",  // surrogate character (D800-DFFF)
+      "\\777",        // exceeds 0xff
+      "\\xABCD",      // exceeds 0xff
+      "endswith\\",   // ends with "\"
+      "endswith\\x",  // ends with "\x"
+      "endswith\\X",  // ends with "\X"
+      "\\x.2345678",  // non-hex follows "\x"
+      "\\X.2345678",  // non-hex follows "\X"
+      "\\u.2345678",  // non-hex follows "\U"
+      "\\U.2345678",  // non-hex follows "\U"
+      "\\.unknown",   // unknown escape sequence
+  };
+  for (const auto e : bad) {
     std::string error;
     std::string out;
     EXPECT_FALSE(absl::CUnescape(e, &out, &error));
diff --git a/absl/strings/internal/cord_internal.h b/absl/strings/internal/cord_internal.h
index f2c0253..b55b412 100644
--- a/absl/strings/internal/cord_internal.h
+++ b/absl/strings/internal/cord_internal.h
@@ -635,7 +635,7 @@
     poison();
   }
 
-  void CopyInlineToString(absl::Nonnull<std::string*> dst) const {
+  void CopyInlineToString(std::string* absl_nonnull dst) const {
     assert(!is_tree());
     // As Cord can store only 15 bytes it is smaller than std::string's
     // small string optimization buffer size. Therefore we will always trigger
diff --git a/absl/strings/numbers.cc b/absl/strings/numbers.cc
index c7adaef..a83fd2c 100644
--- a/absl/strings/numbers.cc
+++ b/absl/strings/numbers.cc
@@ -47,7 +47,7 @@
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 
-bool SimpleAtof(absl::string_view str, absl::Nonnull<float*> out) {
+bool SimpleAtof(absl::string_view str, float* absl_nonnull out) {
   *out = 0.0;
   str = StripAsciiWhitespace(str);
   // std::from_chars doesn't accept an initial +, but SimpleAtof does, so if one
@@ -78,7 +78,7 @@
   return true;
 }
 
-bool SimpleAtod(absl::string_view str, absl::Nonnull<double*> out) {
+bool SimpleAtod(absl::string_view str, double* absl_nonnull out) {
   *out = 0.0;
   str = StripAsciiWhitespace(str);
   // std::from_chars doesn't accept an initial +, but SimpleAtod does, so if one
@@ -109,7 +109,7 @@
   return true;
 }
 
-bool SimpleAtob(absl::string_view str, absl::Nonnull<bool*> out) {
+bool SimpleAtob(absl::string_view str, bool* absl_nonnull out) {
   ABSL_RAW_CHECK(out != nullptr, "Output pointer must not be nullptr.");
   if (EqualsIgnoreCase(str, "true") || EqualsIgnoreCase(str, "t") ||
       EqualsIgnoreCase(str, "yes") || EqualsIgnoreCase(str, "y") ||
@@ -168,7 +168,7 @@
 constexpr uint64_t kDivisionBy100Div = 1 << 20;
 
 // Encode functions write the ASCII output of input `n` to `out_str`.
-inline char* EncodeHundred(uint32_t n, absl::Nonnull<char*> out_str) {
+inline char* EncodeHundred(uint32_t n, char* absl_nonnull out_str) {
   int num_digits = static_cast<int>(n - 10) >> 8;
   uint32_t div10 = (n * kDivisionBy10Mul) / kDivisionBy10Div;
   uint32_t mod10 = n - 10u * div10;
@@ -178,7 +178,7 @@
   return out_str + 2 + num_digits;
 }
 
-inline char* EncodeTenThousand(uint32_t n, absl::Nonnull<char*> out_str) {
+inline char* EncodeTenThousand(uint32_t n, char* absl_nonnull out_str) {
   // We split lower 2 digits and upper 2 digits of n into 2 byte consecutive
   // blocks. 123 ->  [\0\1][\0\23]. We divide by 10 both blocks
   // (it's 1 division + zeroing upper bits), and compute modulo 10 as well "in
@@ -234,8 +234,8 @@
   return tens;
 }
 
-inline ABSL_ATTRIBUTE_ALWAYS_INLINE absl::Nonnull<char*> EncodeFullU32(
-    uint32_t n, absl::Nonnull<char*> out_str) {
+inline ABSL_ATTRIBUTE_ALWAYS_INLINE char* absl_nonnull EncodeFullU32(
+    uint32_t n, char* absl_nonnull out_str) {
   if (n < 10) {
     *out_str = static_cast<char>('0' + n);
     return out_str + 1;
@@ -284,7 +284,7 @@
 
 }  // namespace
 
-void numbers_internal::PutTwoDigits(uint32_t i, absl::Nonnull<char*> buf) {
+void numbers_internal::PutTwoDigits(uint32_t i, char* absl_nonnull buf) {
   assert(i < 100);
   uint32_t base = kTwoZeroBytes;
   uint32_t div10 = (i * kDivisionBy10Mul) / kDivisionBy10Div;
@@ -293,15 +293,15 @@
   little_endian::Store16(buf, static_cast<uint16_t>(base));
 }
 
-absl::Nonnull<char*> numbers_internal::FastIntToBuffer(
-    uint32_t n, absl::Nonnull<char*> out_str) {
+char* absl_nonnull numbers_internal::FastIntToBuffer(
+    uint32_t n, char* absl_nonnull out_str) {
   out_str = EncodeFullU32(n, out_str);
   *out_str = '\0';
   return out_str;
 }
 
-absl::Nonnull<char*> numbers_internal::FastIntToBuffer(
-    int32_t i, absl::Nonnull<char*> buffer) {
+char* absl_nonnull numbers_internal::FastIntToBuffer(
+    int32_t i, char* absl_nonnull buffer) {
   uint32_t u = static_cast<uint32_t>(i);
   if (i < 0) {
     *buffer++ = '-';
@@ -315,15 +315,15 @@
   return buffer;
 }
 
-absl::Nonnull<char*> numbers_internal::FastIntToBuffer(
-    uint64_t i, absl::Nonnull<char*> buffer) {
+char* absl_nonnull numbers_internal::FastIntToBuffer(
+    uint64_t i, char* absl_nonnull buffer) {
   buffer = EncodeFullU64(i, buffer);
   *buffer = '\0';
   return buffer;
 }
 
-absl::Nonnull<char*> numbers_internal::FastIntToBuffer(
-    int64_t i, absl::Nonnull<char*> buffer) {
+char* absl_nonnull numbers_internal::FastIntToBuffer(
+    int64_t i, char* absl_nonnull buffer) {
   uint64_t u = static_cast<uint64_t>(i);
   if (i < 0) {
     *buffer++ = '-';
@@ -546,7 +546,7 @@
 // Helper function for fast formatting of floating-point.
 // The result is the same as "%g", a.k.a. "%.6g".
 size_t numbers_internal::SixDigitsToBuffer(double d,
-                                           absl::Nonnull<char*> const buffer) {
+                                           char* absl_nonnull const buffer) {
   static_assert(std::numeric_limits<float>::is_iec559,
                 "IEEE-754/IEC-559 support only");
 
@@ -694,9 +694,9 @@
 
 // Parse the sign and optional hex or oct prefix in text.
 inline bool safe_parse_sign_and_base(
-    absl::Nonnull<absl::string_view*> text /*inout*/,
-    absl::Nonnull<int*> base_ptr /*inout*/,
-    absl::Nonnull<bool*> negative_ptr /*output*/) {
+    absl::string_view* absl_nonnull text /*inout*/,
+    int* absl_nonnull base_ptr /*inout*/,
+    bool* absl_nonnull negative_ptr /*output*/) {
   if (text->data() == nullptr) {
     return false;
   }
@@ -981,7 +981,7 @@
 
 template <typename IntType>
 inline bool safe_parse_positive_int(absl::string_view text, int base,
-                                    absl::Nonnull<IntType*> value_p) {
+                                    IntType* absl_nonnull value_p) {
   IntType value = 0;
   const IntType vmax = std::numeric_limits<IntType>::max();
   assert(vmax > 0);
@@ -1018,7 +1018,7 @@
 
 template <typename IntType>
 inline bool safe_parse_negative_int(absl::string_view text, int base,
-                                    absl::Nonnull<IntType*> value_p) {
+                                    IntType* absl_nonnull value_p) {
   IntType value = 0;
   const IntType vmin = std::numeric_limits<IntType>::min();
   assert(vmin < 0);
@@ -1063,7 +1063,7 @@
 // http://pubs.opengroup.org/onlinepubs/9699919799/functions/strtol.html
 template <typename IntType>
 inline bool safe_int_internal(absl::string_view text,
-                              absl::Nonnull<IntType*> value_p, int base) {
+                              IntType* absl_nonnull value_p, int base) {
   *value_p = 0;
   bool negative;
   if (!safe_parse_sign_and_base(&text, &base, &negative)) {
@@ -1078,7 +1078,7 @@
 
 template <typename IntType>
 inline bool safe_uint_internal(absl::string_view text,
-                               absl::Nonnull<IntType*> value_p, int base) {
+                               IntType* absl_nonnull value_p, int base) {
   *value_p = 0;
   bool negative;
   if (!safe_parse_sign_and_base(&text, &base, &negative) || negative) {
@@ -1112,52 +1112,52 @@
     "e0e1e2e3e4e5e6e7e8e9eaebecedeeef"
     "f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff";
 
-bool safe_strto8_base(absl::string_view text, absl::Nonnull<int8_t*> value,
+bool safe_strto8_base(absl::string_view text, int8_t* absl_nonnull value,
                       int base) {
   return safe_int_internal<int8_t>(text, value, base);
 }
 
-bool safe_strto16_base(absl::string_view text, absl::Nonnull<int16_t*> value,
+bool safe_strto16_base(absl::string_view text, int16_t* absl_nonnull value,
                        int base) {
   return safe_int_internal<int16_t>(text, value, base);
 }
 
-bool safe_strto32_base(absl::string_view text, absl::Nonnull<int32_t*> value,
+bool safe_strto32_base(absl::string_view text, int32_t* absl_nonnull value,
                        int base) {
   return safe_int_internal<int32_t>(text, value, base);
 }
 
-bool safe_strto64_base(absl::string_view text, absl::Nonnull<int64_t*> value,
+bool safe_strto64_base(absl::string_view text, int64_t* absl_nonnull value,
                        int base) {
   return safe_int_internal<int64_t>(text, value, base);
 }
 
-bool safe_strto128_base(absl::string_view text, absl::Nonnull<int128*> value,
+bool safe_strto128_base(absl::string_view text, int128* absl_nonnull value,
                         int base) {
   return safe_int_internal<absl::int128>(text, value, base);
 }
 
-bool safe_strtou8_base(absl::string_view text, absl::Nonnull<uint8_t*> value,
+bool safe_strtou8_base(absl::string_view text, uint8_t* absl_nonnull value,
                        int base) {
   return safe_uint_internal<uint8_t>(text, value, base);
 }
 
-bool safe_strtou16_base(absl::string_view text, absl::Nonnull<uint16_t*> value,
+bool safe_strtou16_base(absl::string_view text, uint16_t* absl_nonnull value,
                         int base) {
   return safe_uint_internal<uint16_t>(text, value, base);
 }
 
-bool safe_strtou32_base(absl::string_view text, absl::Nonnull<uint32_t*> value,
+bool safe_strtou32_base(absl::string_view text, uint32_t* absl_nonnull value,
                         int base) {
   return safe_uint_internal<uint32_t>(text, value, base);
 }
 
-bool safe_strtou64_base(absl::string_view text, absl::Nonnull<uint64_t*> value,
+bool safe_strtou64_base(absl::string_view text, uint64_t* absl_nonnull value,
                         int base) {
   return safe_uint_internal<uint64_t>(text, value, base);
 }
 
-bool safe_strtou128_base(absl::string_view text, absl::Nonnull<uint128*> value,
+bool safe_strtou128_base(absl::string_view text, uint128* absl_nonnull value,
                          int base) {
   return safe_uint_internal<absl::uint128>(text, value, base);
 }
diff --git a/absl/strings/numbers.h b/absl/strings/numbers.h
index 5f4b661..9c67974 100644
--- a/absl/strings/numbers.h
+++ b/absl/strings/numbers.h
@@ -63,7 +63,7 @@
 // state.
 template <typename int_type>
 [[nodiscard]] bool SimpleAtoi(absl::string_view str,
-                              absl::Nonnull<int_type*> out);
+                              int_type* absl_nonnull out);
 
 // SimpleAtof()
 //
@@ -74,7 +74,7 @@
 // allowed formats for `str`, except SimpleAtof() is locale-independent and will
 // always use the "C" locale. If any errors are encountered, this function
 // returns `false`, leaving `out` in an unspecified state.
-[[nodiscard]] bool SimpleAtof(absl::string_view str, absl::Nonnull<float*> out);
+[[nodiscard]] bool SimpleAtof(absl::string_view str, float* absl_nonnull out);
 
 // SimpleAtod()
 //
@@ -85,8 +85,7 @@
 // allowed formats for `str`, except SimpleAtod is locale-independent and will
 // always use the "C" locale. If any errors are encountered, this function
 // returns `false`, leaving `out` in an unspecified state.
-[[nodiscard]] bool SimpleAtod(absl::string_view str,
-                              absl::Nonnull<double*> out);
+[[nodiscard]] bool SimpleAtod(absl::string_view str, double* absl_nonnull out);
 
 // SimpleAtob()
 //
@@ -96,7 +95,7 @@
 // are interpreted as boolean `false`: "false", "f", "no", "n", "0". If any
 // errors are encountered, this function returns `false`, leaving `out` in an
 // unspecified state.
-[[nodiscard]] bool SimpleAtob(absl::string_view str, absl::Nonnull<bool*> out);
+[[nodiscard]] bool SimpleAtob(absl::string_view str, bool* absl_nonnull out);
 
 // SimpleHexAtoi()
 //
@@ -110,13 +109,13 @@
 // `false`, leaving `out` in an unspecified state.
 template <typename int_type>
 [[nodiscard]] bool SimpleHexAtoi(absl::string_view str,
-                                 absl::Nonnull<int_type*> out);
+                                 int_type* absl_nonnull out);
 
 // Overloads of SimpleHexAtoi() for 128 bit integers.
 [[nodiscard]] inline bool SimpleHexAtoi(absl::string_view str,
-                                        absl::Nonnull<absl::int128*> out);
+                                        absl::int128* absl_nonnull out);
 [[nodiscard]] inline bool SimpleHexAtoi(absl::string_view str,
-                                        absl::Nonnull<absl::uint128*> out);
+                                        absl::uint128* absl_nonnull out);
 
 ABSL_NAMESPACE_END
 }  // namespace absl
@@ -150,30 +149,30 @@
 //   PutTwoDigits(42, buf);
 //   // buf[0] == '4'
 //   // buf[1] == '2'
-void PutTwoDigits(uint32_t i, absl::Nonnull<char*> buf);
+void PutTwoDigits(uint32_t i, char* absl_nonnull buf);
 
 // safe_strto?() functions for implementing SimpleAtoi()
 
-bool safe_strto8_base(absl::string_view text, absl::Nonnull<int8_t*> value,
+bool safe_strto8_base(absl::string_view text, int8_t* absl_nonnull value,
                       int base);
-bool safe_strto16_base(absl::string_view text, absl::Nonnull<int16_t*> value,
+bool safe_strto16_base(absl::string_view text, int16_t* absl_nonnull value,
                        int base);
-bool safe_strto32_base(absl::string_view text, absl::Nonnull<int32_t*> value,
+bool safe_strto32_base(absl::string_view text, int32_t* absl_nonnull value,
                        int base);
-bool safe_strto64_base(absl::string_view text, absl::Nonnull<int64_t*> value,
+bool safe_strto64_base(absl::string_view text, int64_t* absl_nonnull value,
                        int base);
 bool safe_strto128_base(absl::string_view text,
-                        absl::Nonnull<absl::int128*> value, int base);
-bool safe_strtou8_base(absl::string_view text, absl::Nonnull<uint8_t*> value,
+                        absl::int128* absl_nonnull value, int base);
+bool safe_strtou8_base(absl::string_view text, uint8_t* absl_nonnull value,
                        int base);
-bool safe_strtou16_base(absl::string_view text, absl::Nonnull<uint16_t*> value,
+bool safe_strtou16_base(absl::string_view text, uint16_t* absl_nonnull value,
                         int base);
-bool safe_strtou32_base(absl::string_view text, absl::Nonnull<uint32_t*> value,
+bool safe_strtou32_base(absl::string_view text, uint32_t* absl_nonnull value,
                         int base);
-bool safe_strtou64_base(absl::string_view text, absl::Nonnull<uint64_t*> value,
+bool safe_strtou64_base(absl::string_view text, uint64_t* absl_nonnull value,
                         int base);
 bool safe_strtou128_base(absl::string_view text,
-                         absl::Nonnull<absl::uint128*> value, int base);
+                         absl::uint128* absl_nonnull value, int base);
 
 static const int kFastToBufferSize = 32;
 static const int kSixDigitsToBufferSize = 16;
@@ -184,25 +183,25 @@
 // outside the range 0.0001-999999 are output using scientific notation
 // (1.23456e+06). This routine is heavily optimized.
 // Required buffer size is `kSixDigitsToBufferSize`.
-size_t SixDigitsToBuffer(double d, absl::Nonnull<char*> buffer);
+size_t SixDigitsToBuffer(double d, char* absl_nonnull buffer);
 
 // WARNING: These functions may write more characters than necessary, because
 // they are intended for speed. All functions take an output buffer
 // as an argument and return a pointer to the last byte they wrote, which is the
 // terminating '\0'. At most `kFastToBufferSize` bytes are written.
-absl::Nonnull<char*> FastIntToBuffer(int32_t i, absl::Nonnull<char*> buffer)
+char* absl_nonnull FastIntToBuffer(int32_t i, char* absl_nonnull buffer)
     ABSL_INTERNAL_NEED_MIN_SIZE(buffer, kFastToBufferSize);
-absl::Nonnull<char*> FastIntToBuffer(uint32_t n, absl::Nonnull<char*> out_str)
+char* absl_nonnull FastIntToBuffer(uint32_t n, char* absl_nonnull out_str)
     ABSL_INTERNAL_NEED_MIN_SIZE(out_str, kFastToBufferSize);
-absl::Nonnull<char*> FastIntToBuffer(int64_t i, absl::Nonnull<char*> buffer)
+char* absl_nonnull FastIntToBuffer(int64_t i, char* absl_nonnull buffer)
     ABSL_INTERNAL_NEED_MIN_SIZE(buffer, kFastToBufferSize);
-absl::Nonnull<char*> FastIntToBuffer(uint64_t i, absl::Nonnull<char*> buffer)
+char* absl_nonnull FastIntToBuffer(uint64_t i, char* absl_nonnull buffer)
     ABSL_INTERNAL_NEED_MIN_SIZE(buffer, kFastToBufferSize);
 
 // For enums and integer types that are not an exact match for the types above,
 // use templates to call the appropriate one of the four overloads above.
 template <typename int_type>
-absl::Nonnull<char*> FastIntToBuffer(int_type i, absl::Nonnull<char*> buffer)
+char* absl_nonnull FastIntToBuffer(int_type i, char* absl_nonnull buffer)
     ABSL_INTERNAL_NEED_MIN_SIZE(buffer, kFastToBufferSize) {
   static_assert(sizeof(i) <= 64 / 8,
                 "FastIntToBuffer works only with 64-bit-or-less integers.");
@@ -228,7 +227,7 @@
 // with base different from 10 elsewhere in Abseil implementation).
 template <typename int_type>
 [[nodiscard]] bool safe_strtoi_base(absl::string_view s,
-                                    absl::Nonnull<int_type*> out, int base) {
+                                    int_type* absl_nonnull out, int base) {
   static_assert(sizeof(*out) == 1 || sizeof(*out) == 2 || sizeof(*out) == 4 ||
                     sizeof(*out) == 8,
                 "SimpleAtoi works only with 8, 16, 32, or 64-bit integers.");
@@ -284,7 +283,7 @@
 // without the terminating null character. Thus `out` must be of length >= 16.
 // Returns the number of non-pad digits of the output (it can never be zero
 // since 0 has one digit).
-inline size_t FastHexToBufferZeroPad16(uint64_t val, absl::Nonnull<char*> out) {
+inline size_t FastHexToBufferZeroPad16(uint64_t val, char* absl_nonnull out) {
 #ifdef ABSL_INTERNAL_HAVE_SSSE3
   uint64_t be = absl::big_endian::FromHost64(val);
   const auto kNibbleMask = _mm_set1_epi8(0xf);
@@ -311,33 +310,33 @@
 
 template <typename int_type>
 [[nodiscard]] bool SimpleAtoi(absl::string_view str,
-                              absl::Nonnull<int_type*> out) {
+                              int_type* absl_nonnull out) {
   return numbers_internal::safe_strtoi_base(str, out, 10);
 }
 
 [[nodiscard]] inline bool SimpleAtoi(absl::string_view str,
-                                     absl::Nonnull<absl::int128*> out) {
+                                     absl::int128* absl_nonnull out) {
   return numbers_internal::safe_strto128_base(str, out, 10);
 }
 
 [[nodiscard]] inline bool SimpleAtoi(absl::string_view str,
-                                     absl::Nonnull<absl::uint128*> out) {
+                                     absl::uint128* absl_nonnull out) {
   return numbers_internal::safe_strtou128_base(str, out, 10);
 }
 
 template <typename int_type>
 [[nodiscard]] bool SimpleHexAtoi(absl::string_view str,
-                                 absl::Nonnull<int_type*> out) {
+                                 int_type* absl_nonnull out) {
   return numbers_internal::safe_strtoi_base(str, out, 16);
 }
 
 [[nodiscard]] inline bool SimpleHexAtoi(absl::string_view str,
-                                        absl::Nonnull<absl::int128*> out) {
+                                        absl::int128* absl_nonnull out) {
   return numbers_internal::safe_strto128_base(str, out, 16);
 }
 
 [[nodiscard]] inline bool SimpleHexAtoi(absl::string_view str,
-                                        absl::Nonnull<absl::uint128*> out) {
+                                        absl::uint128* absl_nonnull out) {
   return numbers_internal::safe_strtou128_base(str, out, 16);
 }
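
As with the other headers, only the annotation spelling changes here. A brief
reminder of how the parsers declared in numbers.h are used (illustrative only):

    #include <cstdint>
    #include <iostream>
    #include "absl/strings/numbers.h"

    int main() {
      int32_t dec = 0;
      uint64_t hex = 0;
      // Both functions are [[nodiscard]] and leave the output value in an
      // unspecified state on failure.
      if (absl::SimpleAtoi("12345", &dec) &&
          absl::SimpleHexAtoi("0xdeadbeef", &hex)) {
        std::cout << dec << " " << hex << "\n";  // 12345 3735928559
      }
      return 0;
    }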
 
diff --git a/absl/strings/str_cat.cc b/absl/strings/str_cat.cc
index c51c137..1f3cfbf 100644
--- a/absl/strings/str_cat.cc
+++ b/absl/strings/str_cat.cc
@@ -42,8 +42,7 @@
 namespace {
 // Append is merely a version of memcpy that returns the address of the byte
 // after the area just overwritten.
-inline absl::Nonnull<char*> Append(absl::Nonnull<char*> out,
-                                   const AlphaNum& x) {
+inline char* absl_nonnull Append(char* absl_nonnull out, const AlphaNum& x) {
   // memcpy is allowed to overwrite arbitrary memory, so doing this after the
   // call would force an extra fetch of x.size().
   char* after = out + x.size();
@@ -159,7 +158,7 @@
   assert(((src).size() == 0) ||      \
          (uintptr_t((src).data() - (dest).data()) > uintptr_t((dest).size())))
 
-void AppendPieces(absl::Nonnull<std::string*> dest,
+void AppendPieces(std::string* absl_nonnull dest,
                   std::initializer_list<absl::string_view> pieces) {
   size_t old_size = dest->size();
   size_t to_append = 0;
@@ -183,7 +182,7 @@
 
 }  // namespace strings_internal
 
-void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a) {
+void StrAppend(std::string* absl_nonnull dest, const AlphaNum& a) {
   ASSERT_NO_OVERLAP(*dest, a);
   std::string::size_type old_size = dest->size();
   STLStringAppendUninitializedAmortized(dest, a.size());
@@ -193,7 +192,7 @@
   assert(out == begin + dest->size());
 }
 
-void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a,
+void StrAppend(std::string* absl_nonnull dest, const AlphaNum& a,
                const AlphaNum& b) {
   ASSERT_NO_OVERLAP(*dest, a);
   ASSERT_NO_OVERLAP(*dest, b);
@@ -206,7 +205,7 @@
   assert(out == begin + dest->size());
 }
 
-void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a,
+void StrAppend(std::string* absl_nonnull dest, const AlphaNum& a,
                const AlphaNum& b, const AlphaNum& c) {
   ASSERT_NO_OVERLAP(*dest, a);
   ASSERT_NO_OVERLAP(*dest, b);
@@ -221,7 +220,7 @@
   assert(out == begin + dest->size());
 }
 
-void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a,
+void StrAppend(std::string* absl_nonnull dest, const AlphaNum& a,
                const AlphaNum& b, const AlphaNum& c, const AlphaNum& d) {
   ASSERT_NO_OVERLAP(*dest, a);
   ASSERT_NO_OVERLAP(*dest, b);
diff --git a/absl/strings/str_cat.h b/absl/strings/str_cat.h
index 6a07797..eafd8a3 100644
--- a/absl/strings/str_cat.h
+++ b/absl/strings/str_cat.h
@@ -213,7 +213,7 @@
                               !std::is_pointer<Int>::value>::type* = nullptr)
       : Hex(spec, static_cast<uint64_t>(v)) {}
   template <typename Pointee>
-  explicit Hex(absl::Nullable<Pointee*> v, PadSpec spec = absl::kNoPad)
+  explicit Hex(Pointee* absl_nullable v, PadSpec spec = absl::kNoPad)
       : Hex(spec, reinterpret_cast<uintptr_t>(v)) {}
 
   template <typename S>
@@ -359,7 +359,7 @@
           ABSL_ATTRIBUTE_LIFETIME_BOUND)
       : piece_(&buf.data[0], buf.size) {}
 
-  AlphaNum(absl::Nullable<const char*> c_str  // NOLINT(runtime/explicit)
+  AlphaNum(const char* absl_nullable c_str  // NOLINT(runtime/explicit)
                ABSL_ATTRIBUTE_LIFETIME_BOUND)
       : piece_(NullSafeStringView(c_str)) {}
   AlphaNum(absl::string_view pc  // NOLINT(runtime/explicit)
@@ -392,7 +392,7 @@
   AlphaNum& operator=(const AlphaNum&) = delete;
 
   absl::string_view::size_type size() const { return piece_.size(); }
-  absl::Nullable<const char*> data() const { return piece_.data(); }
+  const char* absl_nullable data() const { return piece_.data(); }
   absl::string_view Piece() const { return piece_; }
 
   // Match unscoped enums.  Use integral promotion so that a `char`-backed
@@ -462,7 +462,7 @@
 
 // Do not call directly - this is not part of the public API.
 std::string CatPieces(std::initializer_list<absl::string_view> pieces);
-void AppendPieces(absl::Nonnull<std::string*> dest,
+void AppendPieces(std::string* absl_nonnull dest,
                   std::initializer_list<absl::string_view> pieces);
 
 template <typename Integer>
@@ -592,18 +592,18 @@
 //   absl::string_view p = s;
 //   StrAppend(&s, p);
 
-inline void StrAppend(absl::Nonnull<std::string*>) {}
-void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a);
-void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a,
+inline void StrAppend(std::string* absl_nonnull) {}
+void StrAppend(std::string* absl_nonnull dest, const AlphaNum& a);
+void StrAppend(std::string* absl_nonnull dest, const AlphaNum& a,
                const AlphaNum& b);
-void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a,
+void StrAppend(std::string* absl_nonnull dest, const AlphaNum& a,
                const AlphaNum& b, const AlphaNum& c);
-void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a,
+void StrAppend(std::string* absl_nonnull dest, const AlphaNum& a,
                const AlphaNum& b, const AlphaNum& c, const AlphaNum& d);
 
 // Support 5 or more arguments
 template <typename... AV>
-inline void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a,
+inline void StrAppend(std::string* absl_nonnull dest, const AlphaNum& a,
                       const AlphaNum& b, const AlphaNum& c, const AlphaNum& d,
                       const AlphaNum& e, const AV&... args) {
   strings_internal::AppendPieces(
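
The StrAppend() family keeps its contract, including the no-overlap requirement
enforced by ASSERT_NO_OVERLAP in str_cat.cc. A small sketch, not taken from
this patch:

    #include <iostream>
    #include <string>
    #include "absl/strings/str_cat.h"

    int main() {
      std::string out = "pi";
      // All AlphaNum arguments are sized up front, so `out` grows only once.
      absl::StrAppend(&out, " is about ", 3.14159, " (", 6, " digits)");
      std::cout << out << "\n";  // pi is about 3.14159 (6 digits)

      // Appending a view of the destination to itself is not allowed; take a
      // copy first.
      const std::string copy = out;
      absl::StrAppend(&out, " | ", copy);
      return 0;
    }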
diff --git a/absl/strings/str_format.h b/absl/strings/str_format.h
index fc3d3e9..1d86305 100644
--- a/absl/strings/str_format.h
+++ b/absl/strings/str_format.h
@@ -112,7 +112,7 @@
 
  protected:
   explicit UntypedFormatSpec(
-      absl::Nonnull<const str_format_internal::ParsedFormatBase*> pc)
+      const str_format_internal::ParsedFormatBase* absl_nonnull pc)
       : spec_(pc) {}
 
  private:
@@ -152,7 +152,7 @@
 //   EXPECT_EQ(8, n);
 class FormatCountCapture {
  public:
-  explicit FormatCountCapture(absl::Nonnull<int*> p) : p_(p) {}
+  explicit FormatCountCapture(int* absl_nonnull p) : p_(p) {}
 
  private:
   // FormatCountCaptureHelper is used to define FormatConvertImpl() for this
@@ -161,8 +161,8 @@
   // Unused() is here because of the false positive from -Wunused-private-field
   // p_ is used in the templated function of the friend FormatCountCaptureHelper
   // class.
-  absl::Nonnull<int*> Unused() { return p_; }
-  absl::Nonnull<int*> p_;
+  int* absl_nonnull Unused() { return p_; }
+  int* absl_nonnull p_;
 };
 
 // FormatSpec
@@ -377,7 +377,7 @@
 //   std::string orig("For example PI is approximately ");
 //   std::cout << StrAppendFormat(&orig, "%12.6f", 3.14);
 template <typename... Args>
-std::string& StrAppendFormat(absl::Nonnull<std::string*> dst,
+std::string& StrAppendFormat(std::string* absl_nonnull dst,
                              const FormatSpec<Args...>& format,
                              const Args&... args) {
   return str_format_internal::AppendPack(
@@ -437,7 +437,7 @@
 //   Outputs: "The capital of Mongolia is Ulaanbaatar"
 //
 template <typename... Args>
-int FPrintF(absl::Nonnull<std::FILE*> output, const FormatSpec<Args...>& format,
+int FPrintF(std::FILE* absl_nonnull output, const FormatSpec<Args...>& format,
             const Args&... args) {
   return str_format_internal::FprintF(
       output, str_format_internal::UntypedFormatSpecImpl::Extract(format),
@@ -466,7 +466,7 @@
 //   Post-condition: output == "The capital of Mongolia is Ulaanbaatar"
 //
 template <typename... Args>
-int SNPrintF(absl::Nonnull<char*> output, std::size_t size,
+int SNPrintF(char* absl_nonnull output, std::size_t size,
              const FormatSpec<Args...>& format, const Args&... args) {
   return str_format_internal::SnprintF(
       output, size, str_format_internal::UntypedFormatSpecImpl::Extract(format),
@@ -500,7 +500,7 @@
   template <typename T,
             typename = typename std::enable_if<std::is_constructible<
                 str_format_internal::FormatRawSinkImpl, T*>::value>::type>
-  FormatRawSink(absl::Nonnull<T*> raw)  // NOLINT
+  FormatRawSink(T* absl_nonnull raw)  // NOLINT
       : sink_(raw) {}
 
  private:
@@ -857,16 +857,16 @@
   }
 
   // Support `absl::Format(&sink, format, args...)`.
-  friend void AbslFormatFlush(absl::Nonnull<FormatSink*> sink,
+  friend void AbslFormatFlush(FormatSink* absl_nonnull sink,
                               absl::string_view v) {
     sink->Append(v);
   }
 
  private:
   friend str_format_internal::FormatSinkImpl;
-  explicit FormatSink(absl::Nonnull<str_format_internal::FormatSinkImpl*> s)
+  explicit FormatSink(str_format_internal::FormatSinkImpl* absl_nonnull s)
       : sink_(s) {}
-  absl::Nonnull<str_format_internal::FormatSinkImpl*> sink_;
+  str_format_internal::FormatSinkImpl* absl_nonnull sink_;
 };
 
 // FormatConvertResult
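
These str_format.h hunks likewise only reword the pointer qualifiers on
SNPrintF(), FPrintF(), FormatCountCapture, and FormatSink. A short sketch of
the two pointer-taking entry points touched above (illustrative only):

    #include <iostream>
    #include <string>
    #include "absl/strings/str_format.h"

    int main() {
      char buf[64];
      // SNPrintF() truncates like snprintf() and returns the length the full
      // output would have had.
      int len = absl::SNPrintF(buf, sizeof(buf), "%s ~ %.2f", "pi", 3.14159);
      std::cout << buf << " (" << len << ")\n";  // pi ~ 3.14 (9)

      int n = 0;
      // FormatCountCapture records how many characters precede the %n.
      std::string s =
          absl::StrFormat("%d%n tail", 123, absl::FormatCountCapture(&n));
      std::cout << s << " n=" << n << "\n";  // 123 tail n=3
      return 0;
    }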
diff --git a/absl/strings/str_replace.cc b/absl/strings/str_replace.cc
index a7ab52f..377e30c 100644
--- a/absl/strings/str_replace.cc
+++ b/absl/strings/str_replace.cc
@@ -37,8 +37,8 @@
 // occurred.
 int ApplySubstitutions(
     absl::string_view s,
-    absl::Nonnull<std::vector<strings_internal::ViableSubstitution>*> subs_ptr,
-    absl::Nonnull<std::string*> result_ptr) {
+    std::vector<strings_internal::ViableSubstitution>* absl_nonnull subs_ptr,
+    std::string* absl_nonnull result_ptr) {
   auto& subs = *subs_ptr;
   int substitutions = 0;
   size_t pos = 0;
@@ -83,7 +83,7 @@
 }
 
 int StrReplaceAll(strings_internal::FixedMapping replacements,
-                  absl::Nonnull<std::string*> target) {
+                  std::string* absl_nonnull target) {
   return StrReplaceAll<strings_internal::FixedMapping>(replacements, target);
 }
 
diff --git a/absl/strings/str_replace.h b/absl/strings/str_replace.h
index d74feac..91b920b 100644
--- a/absl/strings/str_replace.h
+++ b/absl/strings/str_replace.h
@@ -114,7 +114,7 @@
 int StrReplaceAll(
     std::initializer_list<std::pair<absl::string_view, absl::string_view>>
         replacements,
-    absl::Nonnull<std::string*> target);
+    std::string* absl_nonnull target);
 
 // Overload of `StrReplaceAll()` to replace patterns within a given output
 // string *in place* with replacements provided within a container of key/value
@@ -130,7 +130,7 @@
 //  EXPECT_EQ("if (ptr &lt; &amp;foo)", s);
 template <typename StrToStrMapping>
 int StrReplaceAll(const StrToStrMapping& replacements,
-                  absl::Nonnull<std::string*> target);
+                  std::string* absl_nonnull target);
 
 // Implementation details only, past this point.
 namespace strings_internal {
@@ -187,8 +187,8 @@
 }
 
 int ApplySubstitutions(absl::string_view s,
-                       absl::Nonnull<std::vector<ViableSubstitution>*> subs_ptr,
-                       absl::Nonnull<std::string*> result_ptr);
+                       std::vector<ViableSubstitution>* absl_nonnull subs_ptr,
+                       std::string* absl_nonnull result_ptr);
 
 }  // namespace strings_internal
 
@@ -204,7 +204,7 @@
 
 template <typename StrToStrMapping>
 int StrReplaceAll(const StrToStrMapping& replacements,
-                  absl::Nonnull<std::string*> target) {
+                  std::string* absl_nonnull target) {
   auto subs = strings_internal::FindSubstitutions(*target, replacements);
   if (subs.empty()) return 0;
 
diff --git a/absl/strings/string_view.cc b/absl/strings/string_view.cc
index dc2951c..33bd1bb 100644
--- a/absl/strings/string_view.cc
+++ b/absl/strings/string_view.cc
@@ -30,10 +30,10 @@
 
 // This is significantly faster for case-sensitive matches with very
 // few possible matches.
-absl::Nullable<const char*> memmatch(absl::Nullable<const char*> phaystack,
-                                     size_t haylen,
-                                     absl::Nullable<const char*> pneedle,
-                                     size_t neelen) {
+const char* absl_nullable memmatch(const char* absl_nullable phaystack,
+                                   size_t haylen,
+                                   const char* absl_nullable pneedle,
+                                   size_t neelen) {
   if (0 == neelen) {
     return phaystack;  // even if haylen is 0
   }
diff --git a/absl/strings/string_view.h b/absl/strings/string_view.h
index b05c036..9a1933b 100644
--- a/absl/strings/string_view.h
+++ b/absl/strings/string_view.h
@@ -163,11 +163,11 @@
  public:
   using traits_type = std::char_traits<char>;
   using value_type = char;
-  using pointer = absl::Nullable<char*>;
-  using const_pointer = absl::Nullable<const char*>;
+  using pointer = char* absl_nullable;
+  using const_pointer = const char* absl_nullable;
   using reference = char&;
   using const_reference = const char&;
-  using const_iterator = absl::Nullable<const char*>;
+  using const_iterator = const char* absl_nullable;
   using iterator = const_iterator;
   using const_reverse_iterator = std::reverse_iterator<const_iterator>;
   using reverse_iterator = const_reverse_iterator;
@@ -197,11 +197,11 @@
   // instead (see below).
   // The length check is skipped since it is unnecessary and causes code bloat.
   constexpr string_view(  // NOLINT(runtime/explicit)
-      absl::Nonnull<const char*> str)
+      const char* absl_nonnull str)
       : ptr_(str), length_(str ? StrlenInternal(str) : 0) {}
 
   // Constructor of a `string_view` from a `const char*` and length.
-  constexpr string_view(absl::Nullable<const char*> data, size_type len)
+  constexpr string_view(const char* absl_nullable data, size_type len)
       : ptr_(data), length_(CheckLengthInternal(len)) {}
 
   constexpr string_view(const string_view&) noexcept = default;
@@ -430,21 +430,21 @@
 
   // Overload of `string_view::compare()` for comparing a `string_view` and a
   // a different C-style string `s`.
-  constexpr int compare(absl::Nonnull<const char*> s) const {
+  constexpr int compare(const char* absl_nonnull s) const {
     return compare(string_view(s));
   }
 
   // Overload of `string_view::compare()` for comparing a substring of the
   // `string_view` and a different C-style string `s`.
   constexpr int compare(size_type pos1, size_type count1,
-                        absl::Nonnull<const char*> s) const {
+                        const char* absl_nonnull s) const {
     return substr(pos1, count1).compare(string_view(s));
   }
 
   // Overload of `string_view::compare()` for comparing a substring of the
   // `string_view` and a substring of a different C-style string `s`.
   constexpr int compare(size_type pos1, size_type count1,
-                        absl::Nonnull<const char*> s, size_type count2) const {
+                        const char* absl_nonnull s, size_type count2) const {
     return substr(pos1, count1).compare(string_view(s, count2));
   }
 
@@ -463,14 +463,14 @@
 
   // Overload of `string_view::find()` for finding a substring of a different
   // C-style string `s` within the `string_view`.
-  size_type find(absl::Nonnull<const char*> s, size_type pos,
+  size_type find(const char* absl_nonnull s, size_type pos,
                  size_type count) const {
     return find(string_view(s, count), pos);
   }
 
   // Overload of `string_view::find()` for finding a different C-style string
   // `s` within the `string_view`.
-  size_type find(absl::Nonnull<const char *> s, size_type pos = 0) const {
+  size_type find(const char* absl_nonnull s, size_type pos = 0) const {
     return find(string_view(s), pos);
   }
 
@@ -487,14 +487,14 @@
 
   // Overload of `string_view::rfind()` for finding a substring of a different
   // C-style string `s` within the `string_view`.
-  size_type rfind(absl::Nonnull<const char*> s, size_type pos,
+  size_type rfind(const char* absl_nonnull s, size_type pos,
                   size_type count) const {
     return rfind(string_view(s, count), pos);
   }
 
   // Overload of `string_view::rfind()` for finding a different C-style string
   // `s` within the `string_view`.
-  size_type rfind(absl::Nonnull<const char*> s, size_type pos = npos) const {
+  size_type rfind(const char* absl_nonnull s, size_type pos = npos) const {
     return rfind(string_view(s), pos);
   }
 
@@ -513,15 +513,14 @@
 
   // Overload of `string_view::find_first_of()` for finding a substring of a
   // different C-style string `s` within the `string_view`.
-  size_type find_first_of(absl::Nonnull<const char*> s, size_type pos,
+  size_type find_first_of(const char* absl_nonnull s, size_type pos,
                           size_type count) const {
     return find_first_of(string_view(s, count), pos);
   }
 
   // Overload of `string_view::find_first_of()` for finding a different C-style
   // string `s` within the `string_view`.
-  size_type find_first_of(absl::Nonnull<const char*> s,
-                          size_type pos = 0) const {
+  size_type find_first_of(const char* absl_nonnull s, size_type pos = 0) const {
     return find_first_of(string_view(s), pos);
   }
 
@@ -540,14 +539,14 @@
 
   // Overload of `string_view::find_last_of()` for finding a substring of a
   // different C-style string `s` within the `string_view`.
-  size_type find_last_of(absl::Nonnull<const char*> s, size_type pos,
+  size_type find_last_of(const char* absl_nonnull s, size_type pos,
                          size_type count) const {
     return find_last_of(string_view(s, count), pos);
   }
 
   // Overload of `string_view::find_last_of()` for finding a different C-style
   // string `s` within the `string_view`.
-  size_type find_last_of(absl::Nonnull<const char*> s,
+  size_type find_last_of(const char* absl_nonnull s,
                          size_type pos = npos) const {
     return find_last_of(string_view(s), pos);
   }
@@ -565,14 +564,14 @@
 
   // Overload of `string_view::find_first_not_of()` for finding a substring of a
   // different C-style string `s` within the `string_view`.
-  size_type find_first_not_of(absl::Nonnull<const char*> s, size_type pos,
+  size_type find_first_not_of(const char* absl_nonnull s, size_type pos,
                               size_type count) const {
     return find_first_not_of(string_view(s, count), pos);
   }
 
   // Overload of `string_view::find_first_not_of()` for finding a different
   // C-style string `s` within the `string_view`.
-  size_type find_first_not_of(absl::Nonnull<const char*> s,
+  size_type find_first_not_of(const char* absl_nonnull s,
                               size_type pos = 0) const {
     return find_first_not_of(string_view(s), pos);
   }
@@ -591,14 +590,14 @@
 
   // Overload of `string_view::find_last_not_of()` for finding a substring of a
   // different C-style string `s` within the `string_view`.
-  size_type find_last_not_of(absl::Nonnull<const char*> s, size_type pos,
+  size_type find_last_not_of(const char* absl_nonnull s, size_type pos,
                              size_type count) const {
     return find_last_not_of(string_view(s, count), pos);
   }
 
   // Overload of `string_view::find_last_not_of()` for finding a different
   // C-style string `s` within the `string_view`.
-  size_type find_last_not_of(absl::Nonnull<const char*> s,
+  size_type find_last_not_of(const char* absl_nonnull s,
                              size_type pos = npos) const {
     return find_last_not_of(string_view(s), pos);
   }
@@ -659,7 +658,7 @@
   // The constructor from std::string delegates to this constructor.
   // See the comment on that constructor for the rationale.
   struct SkipCheckLengthTag {};
-  string_view(absl::Nullable<const char*> data, size_type len,
+  string_view(const char* absl_nullable data, size_type len,
               SkipCheckLengthTag) noexcept
       : ptr_(data), length_(len) {}
 
@@ -671,7 +670,7 @@
     return len;
   }
 
-  static constexpr size_type StrlenInternal(absl::Nonnull<const char*> str) {
+  static constexpr size_type StrlenInternal(const char* absl_nonnull str) {
 #if defined(_MSC_VER) && !defined(__clang__)
     // MSVC 2017+ can evaluate this at compile-time.
     const char* begin = str;
@@ -696,7 +695,7 @@
                                : (compare_result < 0 ? -1 : 1);
   }
 
-  absl::Nullable<const char*> ptr_;
+  const char* absl_nullable ptr_;
   size_type length_;
 };
 
@@ -757,7 +756,7 @@
 // Creates an `absl::string_view` from a pointer `p` even if it's null-valued.
 // This function should be used where an `absl::string_view` can be created from
 // a possibly-null pointer.
-constexpr string_view NullSafeStringView(absl::Nullable<const char*> p) {
+constexpr string_view NullSafeStringView(const char* absl_nullable p) {
   return p ? string_view(p) : string_view();
 }
 
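A usage sketch for `NullSafeStringView` (wrapper name assumed): it takes the now explicitly `absl_nullable` pointer and maps null to an empty view, unlike the implicit `string_view` constructor above, which requires a non-null argument.

#include "absl/strings/string_view.h"

absl::string_view ViewOrEmpty(const char* absl_nullable maybe_null) {
  // Safe to call with nullptr; returns an empty string_view in that case.
  return absl::NullSafeStringView(maybe_null);
}
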
diff --git a/absl/strings/strip.h b/absl/strings/strip.h
index 2d5faf6..55398ff 100644
--- a/absl/strings/strip.h
+++ b/absl/strings/strip.h
@@ -45,7 +45,7 @@
 //   absl::string_view input("abc");
 //   EXPECT_TRUE(absl::ConsumePrefix(&input, "a"));
 //   EXPECT_EQ(input, "bc");
-inline constexpr bool ConsumePrefix(absl::Nonnull<absl::string_view*> str,
+inline constexpr bool ConsumePrefix(absl::string_view* absl_nonnull str,
                                     absl::string_view expected) {
   if (!absl::StartsWith(*str, expected)) return false;
   str->remove_prefix(expected.size());
@@ -62,7 +62,7 @@
 //   absl::string_view input("abcdef");
 //   EXPECT_TRUE(absl::ConsumeSuffix(&input, "def"));
 //   EXPECT_EQ(input, "abc");
-inline constexpr bool ConsumeSuffix(absl::Nonnull<absl::string_view*> str,
+inline constexpr bool ConsumeSuffix(absl::string_view* absl_nonnull str,
                                     absl::string_view expected) {
   if (!absl::EndsWith(*str, expected)) return false;
   str->remove_suffix(expected.size());
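
Putting the two helpers together, a short sketch (input format assumed) of how `ConsumePrefix` and `ConsumeSuffix` mutate the view through their `absl::string_view* absl_nonnull` parameter:

#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"

// Strips a bracketed wrapper, e.g. "[abc]" -> "abc"; returns the input
// unchanged if either delimiter is missing.
absl::string_view StripBrackets(absl::string_view s) {
  absl::string_view trimmed = s;
  if (absl::ConsumePrefix(&trimmed, "[") &&
      absl::ConsumeSuffix(&trimmed, "]")) {
    return trimmed;
  }
  return s;
}
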
diff --git a/absl/strings/substitute.cc b/absl/strings/substitute.cc
index a71f565..3c2ca5d 100644
--- a/absl/strings/substitute.cc
+++ b/absl/strings/substitute.cc
@@ -35,9 +35,10 @@
 ABSL_NAMESPACE_BEGIN
 namespace substitute_internal {
 
-void SubstituteAndAppendArray(
-    absl::Nonnull<std::string*> output, absl::string_view format,
-    absl::Nullable<const absl::string_view*> args_array, size_t num_args) {
+void SubstituteAndAppendArray(std::string* absl_nonnull output,
+                              absl::string_view format,
+                              const absl::string_view* absl_nullable args_array,
+                              size_t num_args) {
   // Determine total size needed.
   size_t size = 0;
   for (size_t i = 0; i < format.size(); i++) {
@@ -109,7 +110,7 @@
   assert(target == output->data() + output->size());
 }
 
-Arg::Arg(absl::Nullable<const void*> value) {
+Arg::Arg(const void* absl_nullable value) {
   static_assert(sizeof(scratch_) >= sizeof(value) * 2 + 2,
                 "fix sizeof(scratch_)");
   if (value == nullptr) {
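
Per the header comment for this constructor, non-null `void*` values format as "0x<hex value>" and a null pointer formats as "NULL"; a small sketch (function name assumed):

#include <string>
#include "absl/strings/substitute.h"

std::string DescribePointer(const void* absl_nullable p) {
  // e.g. "ptr = 0x7f8e4c001230" for a live pointer, or "ptr = NULL".
  return absl::Substitute("ptr = $0", p);
}
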
diff --git a/absl/strings/substitute.h b/absl/strings/substitute.h
index e9c4b6e..08f64e9 100644
--- a/absl/strings/substitute.h
+++ b/absl/strings/substitute.h
@@ -106,7 +106,7 @@
   // Overloads for string-y things
   //
   // Explicitly overload `const char*` so the compiler doesn't cast to `bool`.
-  Arg(absl::Nullable<const char*> value)  // NOLINT(google-explicit-constructor)
+  Arg(const char* absl_nullable value)  // NOLINT(google-explicit-constructor)
       : piece_(absl::NullSafeStringView(value)) {}
   template <typename Allocator>
   Arg(  // NOLINT
@@ -199,7 +199,7 @@
   // `void*` values, with the exception of `char*`, are printed as
   // "0x<hex value>". However, in the case of `nullptr`, "NULL" is printed.
   Arg(  // NOLINT(google-explicit-constructor)
-      absl::Nullable<const void*> value);
+      const void* absl_nullable value);
 
   // Normal enums are already handled by the integer formatters.
   // This overload matches only scoped enums.
@@ -222,12 +222,13 @@
 
 // Internal helper function. Don't call this from outside this implementation.
 // This interface may change without notice.
-void SubstituteAndAppendArray(
-    absl::Nonnull<std::string*> output, absl::string_view format,
-    absl::Nullable<const absl::string_view*> args_array, size_t num_args);
+void SubstituteAndAppendArray(std::string* absl_nonnull output,
+                              absl::string_view format,
+                              const absl::string_view* absl_nullable args_array,
+                              size_t num_args);
 
 #if defined(ABSL_BAD_CALL_IF)
-constexpr int CalculateOneBit(absl::Nonnull<const char*> format) {
+constexpr int CalculateOneBit(const char* absl_nonnull format) {
   // Returns:
   // * 2^N for '$N' when N is in [0-9]
   // * 0 for correct '$' escaping: '$$'.
@@ -236,11 +237,11 @@
                                           : (1 << (*format - '0'));
 }
 
-constexpr const char* SkipNumber(absl::Nonnull<const char*> format) {
+constexpr const char* SkipNumber(const char* absl_nonnull format) {
   return !*format ? format : (format + 1);
 }
 
-constexpr int PlaceholderBitmask(absl::Nonnull<const char*> format) {
+constexpr int PlaceholderBitmask(const char* absl_nonnull format) {
   return !*format
              ? 0
              : *format != '$' ? PlaceholderBitmask(format + 1)
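
Reading the comments above, `CalculateOneBit` maps '$N' to 2^N and `PlaceholderBitmask` folds those bits over the format string, which is what the `ABSL_BAD_CALL_IF` overloads below compare against. Illustrative values only, inferred from those comments rather than re-derived from the full implementation:

// PlaceholderBitmask("$0 bought $1")  -> 0b011  ($0 and $1 present)
// PlaceholderBitmask("100$$ off")     -> 0b000  (escaped '$', no placeholders)
// PlaceholderBitmask("cost: $2")      -> 0b100  (!= 0b001, so a one-argument
//                                                call is rejected below)
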
@@ -273,12 +274,12 @@
 //    absl::SubstituteAndAppend(boilerplate, format, args...);
 //  }
 //
-inline void SubstituteAndAppend(absl::Nonnull<std::string*> output,
+inline void SubstituteAndAppend(std::string* absl_nonnull output,
                                 absl::string_view format) {
   substitute_internal::SubstituteAndAppendArray(output, format, nullptr, 0);
 }
 
-inline void SubstituteAndAppend(absl::Nonnull<std::string*> output,
+inline void SubstituteAndAppend(std::string* absl_nonnull output,
                                 absl::string_view format,
                                 const substitute_internal::Arg& a0) {
   const absl::string_view args[] = {a0.piece()};
@@ -286,7 +287,7 @@
                                                 ABSL_ARRAYSIZE(args));
 }
 
-inline void SubstituteAndAppend(absl::Nonnull<std::string*> output,
+inline void SubstituteAndAppend(std::string* absl_nonnull output,
                                 absl::string_view format,
                                 const substitute_internal::Arg& a0,
                                 const substitute_internal::Arg& a1) {
@@ -295,7 +296,7 @@
                                                 ABSL_ARRAYSIZE(args));
 }
 
-inline void SubstituteAndAppend(absl::Nonnull<std::string*> output,
+inline void SubstituteAndAppend(std::string* absl_nonnull output,
                                 absl::string_view format,
                                 const substitute_internal::Arg& a0,
                                 const substitute_internal::Arg& a1,
@@ -305,7 +306,7 @@
                                                 ABSL_ARRAYSIZE(args));
 }
 
-inline void SubstituteAndAppend(absl::Nonnull<std::string*> output,
+inline void SubstituteAndAppend(std::string* absl_nonnull output,
                                 absl::string_view format,
                                 const substitute_internal::Arg& a0,
                                 const substitute_internal::Arg& a1,
@@ -317,7 +318,7 @@
                                                 ABSL_ARRAYSIZE(args));
 }
 
-inline void SubstituteAndAppend(absl::Nonnull<std::string*> output,
+inline void SubstituteAndAppend(std::string* absl_nonnull output,
                                 absl::string_view format,
                                 const substitute_internal::Arg& a0,
                                 const substitute_internal::Arg& a1,
@@ -331,7 +332,7 @@
 }
 
 inline void SubstituteAndAppend(
-    absl::Nonnull<std::string*> output, absl::string_view format,
+    std::string* absl_nonnull output, absl::string_view format,
     const substitute_internal::Arg& a0, const substitute_internal::Arg& a1,
     const substitute_internal::Arg& a2, const substitute_internal::Arg& a3,
     const substitute_internal::Arg& a4, const substitute_internal::Arg& a5) {
@@ -342,7 +343,7 @@
 }
 
 inline void SubstituteAndAppend(
-    absl::Nonnull<std::string*> output, absl::string_view format,
+    std::string* absl_nonnull output, absl::string_view format,
     const substitute_internal::Arg& a0, const substitute_internal::Arg& a1,
     const substitute_internal::Arg& a2, const substitute_internal::Arg& a3,
     const substitute_internal::Arg& a4, const substitute_internal::Arg& a5,
@@ -355,7 +356,7 @@
 }
 
 inline void SubstituteAndAppend(
-    absl::Nonnull<std::string*> output, absl::string_view format,
+    std::string* absl_nonnull output, absl::string_view format,
     const substitute_internal::Arg& a0, const substitute_internal::Arg& a1,
     const substitute_internal::Arg& a2, const substitute_internal::Arg& a3,
     const substitute_internal::Arg& a4, const substitute_internal::Arg& a5,
@@ -368,7 +369,7 @@
 }
 
 inline void SubstituteAndAppend(
-    absl::Nonnull<std::string*> output, absl::string_view format,
+    std::string* absl_nonnull output, absl::string_view format,
     const substitute_internal::Arg& a0, const substitute_internal::Arg& a1,
     const substitute_internal::Arg& a2, const substitute_internal::Arg& a3,
     const substitute_internal::Arg& a4, const substitute_internal::Arg& a5,
@@ -382,7 +383,7 @@
 }
 
 inline void SubstituteAndAppend(
-    absl::Nonnull<std::string*> output, absl::string_view format,
+    std::string* absl_nonnull output, absl::string_view format,
     const substitute_internal::Arg& a0, const substitute_internal::Arg& a1,
     const substitute_internal::Arg& a2, const substitute_internal::Arg& a3,
     const substitute_internal::Arg& a4, const substitute_internal::Arg& a5,
@@ -398,16 +399,16 @@
 #if defined(ABSL_BAD_CALL_IF)
 // This body of functions catches cases where the number of placeholders
 // doesn't match the number of data arguments.
-void SubstituteAndAppend(absl::Nonnull<std::string*> output,
-                         absl::Nonnull<const char*> format)
+void SubstituteAndAppend(std::string* absl_nonnull output,
+                         const char* absl_nonnull format)
     ABSL_BAD_CALL_IF(
         substitute_internal::PlaceholderBitmask(format) != 0,
         "There were no substitution arguments "
         "but this format string either has a $[0-9] in it or contains "
         "an unescaped $ character (use $$ instead)");
 
-void SubstituteAndAppend(absl::Nonnull<std::string*> output,
-                         absl::Nonnull<const char*> format,
+void SubstituteAndAppend(std::string* absl_nonnull output,
+                         const char* absl_nonnull format,
                          const substitute_internal::Arg& a0)
     ABSL_BAD_CALL_IF(substitute_internal::PlaceholderBitmask(format) != 1,
                      "There was 1 substitution argument given, but "
@@ -415,8 +416,8 @@
                      "one of $1-$9, or contains an unescaped $ character (use "
                      "$$ instead)");
 
-void SubstituteAndAppend(absl::Nonnull<std::string*> output,
-                         absl::Nonnull<const char*> format,
+void SubstituteAndAppend(std::string* absl_nonnull output,
+                         const char* absl_nonnull format,
                          const substitute_internal::Arg& a0,
                          const substitute_internal::Arg& a1)
     ABSL_BAD_CALL_IF(
@@ -425,8 +426,8 @@
         "missing its $0/$1, contains one of $2-$9, or contains an "
         "unescaped $ character (use $$ instead)");
 
-void SubstituteAndAppend(absl::Nonnull<std::string*> output,
-                         absl::Nonnull<const char*> format,
+void SubstituteAndAppend(std::string* absl_nonnull output,
+                         const char* absl_nonnull format,
                          const substitute_internal::Arg& a0,
                          const substitute_internal::Arg& a1,
                          const substitute_internal::Arg& a2)
@@ -436,8 +437,8 @@
         "this format string is missing its $0/$1/$2, contains one of "
         "$3-$9, or contains an unescaped $ character (use $$ instead)");
 
-void SubstituteAndAppend(absl::Nonnull<std::string*> output,
-                         absl::Nonnull<const char*> format,
+void SubstituteAndAppend(std::string* absl_nonnull output,
+                         const char* absl_nonnull format,
                          const substitute_internal::Arg& a0,
                          const substitute_internal::Arg& a1,
                          const substitute_internal::Arg& a2,
@@ -448,8 +449,8 @@
         "this format string is missing its $0-$3, contains one of "
         "$4-$9, or contains an unescaped $ character (use $$ instead)");
 
-void SubstituteAndAppend(absl::Nonnull<std::string*> output,
-                         absl::Nonnull<const char*> format,
+void SubstituteAndAppend(std::string* absl_nonnull output,
+                         const char* absl_nonnull format,
                          const substitute_internal::Arg& a0,
                          const substitute_internal::Arg& a1,
                          const substitute_internal::Arg& a2,
@@ -462,7 +463,7 @@
         "$5-$9, or contains an unescaped $ character (use $$ instead)");
 
 void SubstituteAndAppend(
-    absl::Nonnull<std::string*> output, absl::Nonnull<const char*> format,
+    std::string* absl_nonnull output, const char* absl_nonnull format,
     const substitute_internal::Arg& a0, const substitute_internal::Arg& a1,
     const substitute_internal::Arg& a2, const substitute_internal::Arg& a3,
     const substitute_internal::Arg& a4, const substitute_internal::Arg& a5)
@@ -473,7 +474,7 @@
         "$6-$9, or contains an unescaped $ character (use $$ instead)");
 
 void SubstituteAndAppend(
-    absl::Nonnull<std::string*> output, absl::Nonnull<const char*> format,
+    std::string* absl_nonnull output, const char* absl_nonnull format,
     const substitute_internal::Arg& a0, const substitute_internal::Arg& a1,
     const substitute_internal::Arg& a2, const substitute_internal::Arg& a3,
     const substitute_internal::Arg& a4, const substitute_internal::Arg& a5,
@@ -485,7 +486,7 @@
         "$7-$9, or contains an unescaped $ character (use $$ instead)");
 
 void SubstituteAndAppend(
-    absl::Nonnull<std::string*> output, absl::Nonnull<const char*> format,
+    std::string* absl_nonnull output, const char* absl_nonnull format,
     const substitute_internal::Arg& a0, const substitute_internal::Arg& a1,
     const substitute_internal::Arg& a2, const substitute_internal::Arg& a3,
     const substitute_internal::Arg& a4, const substitute_internal::Arg& a5,
@@ -497,7 +498,7 @@
         "$8-$9, or contains an unescaped $ character (use $$ instead)");
 
 void SubstituteAndAppend(
-    absl::Nonnull<std::string*> output, absl::Nonnull<const char*> format,
+    std::string* absl_nonnull output, const char* absl_nonnull format,
     const substitute_internal::Arg& a0, const substitute_internal::Arg& a1,
     const substitute_internal::Arg& a2, const substitute_internal::Arg& a3,
     const substitute_internal::Arg& a4, const substitute_internal::Arg& a5,
@@ -510,7 +511,7 @@
         "contains an unescaped $ character (use $$ instead)");
 
 void SubstituteAndAppend(
-    absl::Nonnull<std::string*> output, absl::Nonnull<const char*> format,
+    std::string* absl_nonnull output, const char* absl_nonnull format,
     const substitute_internal::Arg& a0, const substitute_internal::Arg& a1,
     const substitute_internal::Arg& a2, const substitute_internal::Arg& a3,
     const substitute_internal::Arg& a4, const substitute_internal::Arg& a5,
@@ -643,13 +644,13 @@
 #if defined(ABSL_BAD_CALL_IF)
 // This body of functions catches cases where the number of placeholders
 // doesn't match the number of data arguments.
-std::string Substitute(absl::Nonnull<const char*> format)
+std::string Substitute(const char* absl_nonnull format)
     ABSL_BAD_CALL_IF(substitute_internal::PlaceholderBitmask(format) != 0,
                      "There were no substitution arguments "
                      "but this format string either has a $[0-9] in it or "
                      "contains an unescaped $ character (use $$ instead)");
 
-std::string Substitute(absl::Nonnull<const char*> format,
+std::string Substitute(const char* absl_nonnull format,
                        const substitute_internal::Arg& a0)
     ABSL_BAD_CALL_IF(
         substitute_internal::PlaceholderBitmask(format) != 1,
@@ -657,7 +658,7 @@
         "this format string is missing its $0, contains one of $1-$9, "
         "or contains an unescaped $ character (use $$ instead)");
 
-std::string Substitute(absl::Nonnull<const char*> format,
+std::string Substitute(const char* absl_nonnull format,
                        const substitute_internal::Arg& a0,
                        const substitute_internal::Arg& a1)
     ABSL_BAD_CALL_IF(
@@ -666,7 +667,7 @@
         "this format string is missing its $0/$1, contains one of "
         "$2-$9, or contains an unescaped $ character (use $$ instead)");
 
-std::string Substitute(absl::Nonnull<const char*> format,
+std::string Substitute(const char* absl_nonnull format,
                        const substitute_internal::Arg& a0,
                        const substitute_internal::Arg& a1,
                        const substitute_internal::Arg& a2)
@@ -676,7 +677,7 @@
         "this format string is missing its $0/$1/$2, contains one of "
         "$3-$9, or contains an unescaped $ character (use $$ instead)");
 
-std::string Substitute(absl::Nonnull<const char*> format,
+std::string Substitute(const char* absl_nonnull format,
                        const substitute_internal::Arg& a0,
                        const substitute_internal::Arg& a1,
                        const substitute_internal::Arg& a2,
@@ -687,7 +688,7 @@
         "this format string is missing its $0-$3, contains one of "
         "$4-$9, or contains an unescaped $ character (use $$ instead)");
 
-std::string Substitute(absl::Nonnull<const char*> format,
+std::string Substitute(const char* absl_nonnull format,
                        const substitute_internal::Arg& a0,
                        const substitute_internal::Arg& a1,
                        const substitute_internal::Arg& a2,
@@ -699,7 +700,7 @@
         "this format string is missing its $0-$4, contains one of "
         "$5-$9, or contains an unescaped $ character (use $$ instead)");
 
-std::string Substitute(absl::Nonnull<const char*> format,
+std::string Substitute(const char* absl_nonnull format,
                        const substitute_internal::Arg& a0,
                        const substitute_internal::Arg& a1,
                        const substitute_internal::Arg& a2,
@@ -713,7 +714,7 @@
         "$6-$9, or contains an unescaped $ character (use $$ instead)");
 
 std::string Substitute(
-    absl::Nonnull<const char*> format, const substitute_internal::Arg& a0,
+    const char* absl_nonnull format, const substitute_internal::Arg& a0,
     const substitute_internal::Arg& a1, const substitute_internal::Arg& a2,
     const substitute_internal::Arg& a3, const substitute_internal::Arg& a4,
     const substitute_internal::Arg& a5, const substitute_internal::Arg& a6)
@@ -724,7 +725,7 @@
         "$7-$9, or contains an unescaped $ character (use $$ instead)");
 
 std::string Substitute(
-    absl::Nonnull<const char*> format, const substitute_internal::Arg& a0,
+    const char* absl_nonnull format, const substitute_internal::Arg& a0,
     const substitute_internal::Arg& a1, const substitute_internal::Arg& a2,
     const substitute_internal::Arg& a3, const substitute_internal::Arg& a4,
     const substitute_internal::Arg& a5, const substitute_internal::Arg& a6,
@@ -736,7 +737,7 @@
         "$8-$9, or contains an unescaped $ character (use $$ instead)");
 
 std::string Substitute(
-    absl::Nonnull<const char*> format, const substitute_internal::Arg& a0,
+    const char* absl_nonnull format, const substitute_internal::Arg& a0,
     const substitute_internal::Arg& a1, const substitute_internal::Arg& a2,
     const substitute_internal::Arg& a3, const substitute_internal::Arg& a4,
     const substitute_internal::Arg& a5, const substitute_internal::Arg& a6,
@@ -748,7 +749,7 @@
         "contains an unescaped $ character (use $$ instead)");
 
 std::string Substitute(
-    absl::Nonnull<const char*> format, const substitute_internal::Arg& a0,
+    const char* absl_nonnull format, const substitute_internal::Arg& a0,
     const substitute_internal::Arg& a1, const substitute_internal::Arg& a2,
     const substitute_internal::Arg& a3, const substitute_internal::Arg& a4,
     const substitute_internal::Arg& a5, const substitute_internal::Arg& a6,
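
A usage sketch for the overload set above (strings and argument names assumed), showing the value-returning and appending forms together; with a literal format string and `ABSL_BAD_CALL_IF` available, a placeholder/argument mismatch is diagnosed at compile time by the overloads above.

#include <string>
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"

std::string BuildGreeting(absl::string_view who, int count) {
  std::string out = absl::Substitute("$0 bought $1 apples.", who, count);
  absl::SubstituteAndAppend(&out, " Thanks, $0!", who);
  // absl::Substitute("$0 and $1", who);  // mismatch: would be rejected when
  //                                      // ABSL_BAD_CALL_IF is defined.
  return out;
}
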
diff --git a/absl/types/span.h b/absl/types/span.h
index 33904a9..9d16496 100644
--- a/absl/types/span.h
+++ b/absl/types/span.h
@@ -724,12 +724,12 @@
 //   }
 //
 template <int&... ExplicitArgumentBarrier, typename T>
-constexpr Span<T> MakeSpan(absl::Nullable<T*> ptr, size_t size) noexcept {
+constexpr Span<T> MakeSpan(T* absl_nullable ptr, size_t size) noexcept {
   return Span<T>(ptr, size);
 }
 
 template <int&... ExplicitArgumentBarrier, typename T>
-Span<T> MakeSpan(absl::Nullable<T*> begin, absl::Nullable<T*> end) noexcept {
+Span<T> MakeSpan(T* absl_nullable begin, T* absl_nullable end) noexcept {
   ABSL_HARDENING_ASSERT(begin <= end);
   return Span<T>(begin, static_cast<size_t>(end - begin));
 }
@@ -770,14 +770,14 @@
 //   ProcessInts(absl::MakeConstSpan(std::vector<int>{ 0, 0, 0 }));
 //
 template <int&... ExplicitArgumentBarrier, typename T>
-constexpr Span<const T> MakeConstSpan(absl::Nullable<T*> ptr,
+constexpr Span<const T> MakeConstSpan(T* absl_nullable ptr,
                                       size_t size) noexcept {
   return Span<const T>(ptr, size);
 }
 
 template <int&... ExplicitArgumentBarrier, typename T>
-Span<const T> MakeConstSpan(absl::Nullable<T*> begin,
-                            absl::Nullable<T*> end) noexcept {
+Span<const T> MakeConstSpan(T* absl_nullable begin,
+                            T* absl_nullable end) noexcept {
   ABSL_HARDENING_ASSERT(begin <= end);
   return Span<const T>(begin, end - begin);
 }
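
Finally, a usage sketch for the `MakeSpan`/`MakeConstSpan` factories above (container contents assumed). Both pointer forms now take `T* absl_nullable`, so a (nullptr, 0) span stays well-formed.

#include <vector>
#include "absl/types/span.h"

inline void MakeSpanSketch() {
  std::vector<int> v = {1, 2, 3, 4};
  absl::Span<int> all = absl::MakeSpan(v.data(), v.size());          // pointer + size
  absl::Span<int> mid = absl::MakeSpan(v.data() + 1, v.data() + 3);  // [begin, end)
  absl::Span<const int> ro = absl::MakeConstSpan(v.data(), v.size());
  absl::Span<int> none = absl::MakeSpan(static_cast<int*>(nullptr), 0);
  (void)all;
  (void)mid;
  (void)ro;
  (void)none;
}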