diff --git a/CMake/AbseilDll.cmake b/CMake/AbseilDll.cmake
index b64f133..f8dea45 100644
--- a/CMake/AbseilDll.cmake
+++ b/CMake/AbseilDll.cmake
@@ -26,6 +26,7 @@
   "base/internal/low_level_alloc.cc"
   "base/internal/low_level_alloc.h"
   "base/internal/low_level_scheduling.h"
+  "base/internal/nullability_impl.h"
   "base/internal/per_thread_tls.h"
   "base/internal/prefetch.h"
   "base/prefetch.h"
@@ -56,6 +57,7 @@
   "base/log_severity.cc"
   "base/log_severity.h"
   "base/macros.h"
+  "base/nullability.h"
   "base/optimization.h"
   "base/options.h"
   "base/policy_checks.h"
diff --git a/absl/base/BUILD.bazel b/absl/base/BUILD.bazel
index 28cbf28..fb008db 100644
--- a/absl/base/BUILD.bazel
+++ b/absl/base/BUILD.bazel
@@ -63,6 +63,18 @@
 )
 
 cc_library(
+    name = "nullability",
+    srcs = ["internal/nullability_impl.h"],
+    hdrs = ["nullability.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":core_headers",
+        "//absl/meta:type_traits",
+    ],
+)
+
+cc_library(
     name = "raw_logging_internal",
     srcs = ["internal/raw_logging.cc"],
     hdrs = ["internal/raw_logging.h"],
@@ -553,6 +565,16 @@
 )
 
 cc_test(
+    name = "nullability_test",
+    srcs = ["nullability_test.cc"],
+    deps = [
+        ":core_headers",
+        ":nullability",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
     name = "raw_logging_test",
     srcs = ["raw_logging_test.cc"],
     copts = ABSL_TEST_COPTS,
diff --git a/absl/base/CMakeLists.txt b/absl/base/CMakeLists.txt
index 71b9379..76c4ff1 100644
--- a/absl/base/CMakeLists.txt
+++ b/absl/base/CMakeLists.txt
@@ -54,6 +54,33 @@
     ${ABSL_DEFAULT_COPTS}
 )
 
+absl_cc_library(
+  NAME
+    nullability
+  HDRS
+    "nullability.h"
+  SRCS
+    "internal/nullability_impl.h"
+  DEPS
+    absl::core_headers
+    absl::type_traits
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+)
+
+absl_cc_test(
+  NAME
+    nullability_test
+  SRCS
+    "nullability_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::core_headers
+    absl::nullability
+    GTest::gtest_main
+)
+
 # Internal-only target, do not depend on directly.
 absl_cc_library(
   NAME
diff --git a/absl/base/internal/nullability_impl.h b/absl/base/internal/nullability_impl.h
new file mode 100644
index 0000000..74f4a41
--- /dev/null
+++ b/absl/base/internal/nullability_impl.h
@@ -0,0 +1,106 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_NULLABILITY_IMPL_H_
+#define ABSL_BASE_INTERNAL_NULLABILITY_IMPL_H_
+
+#include <memory>
+#include <type_traits>
+
+#include "absl/base/attributes.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+
+namespace nullability_internal {
+
+// `IsNullabilityCompatible` checks whether its first argument is a class
+// explicitly tagged as supporting nullability annotations. The tag is the type
+// declaration `absl_nullability_compatible`.
+template <typename, typename = void>
+struct IsNullabilityCompatible : std::false_type {};
+
+template <typename T>
+struct IsNullabilityCompatible<
+    T, absl::void_t<typename T::absl_nullability_compatible>> : std::true_type {
+};
+
+template <typename T>
+constexpr bool IsSupportedType = IsNullabilityCompatible<T>::value;
+
+template <typename T>
+constexpr bool IsSupportedType<T*> = true;
+
+template <typename T, typename U>
+constexpr bool IsSupportedType<T U::*> = true;
+
+template <typename T, typename... Deleter>
+constexpr bool IsSupportedType<std::unique_ptr<T, Deleter...>> = true;
+
+template <typename T>
+constexpr bool IsSupportedType<std::shared_ptr<T>> = true;
+
+template <typename T>
+struct EnableNullable {
+  static_assert(nullability_internal::IsSupportedType<std::remove_cv_t<T>>,
+                "Template argument must be a raw or supported smart pointer "
+                "type. See absl/base/nullability.h.");
+  using type = T;
+};
+
+template <typename T>
+struct EnableNonNull {
+  static_assert(nullability_internal::IsSupportedType<std::remove_cv_t<T>>,
+                "Template argument must be a raw or supported smart pointer "
+                "type. See absl/base/nullability.h.");
+  using type = T;
+};
+
+template <typename T>
+struct EnableNullabilityUnknown {
+  static_assert(nullability_internal::IsSupportedType<std::remove_cv_t<T>>,
+                "Template argument must be a raw or supported smart pointer "
+                "type. See absl/base/nullability.h.");
+  using type = T;
+};
+
+// Note: we do not apply Clang nullability attributes (e.g. _Nullable).  These
+// only support raw pointers, and conditionally enabling them only for raw
+// pointers inhibits template arg deduction.  Ideally, they would support all
+// pointer-like types.
+template <typename T, typename = typename EnableNullable<T>::type>
+using NullableImpl
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::annotate)
+    [[clang::annotate("Nullable")]]
+#endif
+    = T;
+
+template <typename T, typename = typename EnableNonNull<T>::type>
+using NonNullImpl
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::annotate)
+    [[clang::annotate("Nonnull")]]
+#endif
+    = T;
+
+template <typename T, typename = typename EnableNullabilityUnknown<T>::type>
+using NullabilityUnknownImpl
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::annotate)
+    [[clang::annotate("Nullability_Unspecified")]]
+#endif
+    = T;
+
+}  // namespace nullability_internal
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_NULLABILITY_IMPL_H_
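For illustration, a minimal sketch of how the opt-in tag above is detected. It relies on the internal names from this header (not part of the public API), and `TaggedPtr`/`PlainStruct` are hypothetical types introduced only for the sketch:

    #include <memory>

    #include "absl/base/internal/nullability_impl.h"

    // A hypothetical pointer-like type that opts in via the nested tag.
    struct TaggedPtr {
      using absl_nullability_compatible = void;
      int* p = nullptr;
    };
    struct PlainStruct {};

    // Raw pointers and the listed smart pointers are supported out of the box;
    // other types are supported only if they carry the tag.
    static_assert(absl::nullability_internal::IsSupportedType<int*>, "");
    static_assert(
        absl::nullability_internal::IsSupportedType<std::unique_ptr<int>>, "");
    static_assert(absl::nullability_internal::IsSupportedType<TaggedPtr>, "");
    static_assert(!absl::nullability_internal::IsSupportedType<PlainStruct>, "");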
diff --git a/absl/base/internal/thread_identity.cc b/absl/base/internal/thread_identity.cc
index 79853f0..0eeb7d0 100644
--- a/absl/base/internal/thread_identity.cc
+++ b/absl/base/internal/thread_identity.cc
@@ -58,18 +58,19 @@
 // that protected visibility is unsupported.
 ABSL_CONST_INIT  // Must come before __attribute__((visibility("protected")))
 #if ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
-__attribute__((visibility("protected")))
+    __attribute__((visibility("protected")))
 #endif  // ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
 #if ABSL_PER_THREAD_TLS
-// Prefer __thread to thread_local as benchmarks indicate it is a bit faster.
-ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr = nullptr;
+    // Prefer __thread to thread_local as benchmarks indicate it is a bit
+    // faster.
+    ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr = nullptr;
 #elif defined(ABSL_HAVE_THREAD_LOCAL)
-thread_local ThreadIdentity* thread_identity_ptr = nullptr;
+    thread_local ThreadIdentity* thread_identity_ptr = nullptr;
 #endif  // ABSL_PER_THREAD_TLS
 #endif  // TLS or CPP11
 
-void SetCurrentThreadIdentity(
-    ThreadIdentity* identity, ThreadIdentityReclaimerFunction reclaimer) {
+void SetCurrentThreadIdentity(ThreadIdentity* identity,
+                              ThreadIdentityReclaimerFunction reclaimer) {
   assert(CurrentThreadIdentityIfPresent() == nullptr);
   // Associate our destructor.
   // NOTE: This call to pthread_setspecific is currently the only immovable
@@ -134,7 +135,7 @@
     ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
   thread_identity_ptr = nullptr;
 #elif ABSL_THREAD_IDENTITY_MODE == \
-      ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+    ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
   // pthread_setspecific expected to clear value on destruction
   assert(CurrentThreadIdentityIfPresent() == nullptr);
 #endif
diff --git a/absl/base/internal/thread_identity.h b/absl/base/internal/thread_identity.h
index 496ec21..b6e917c 100644
--- a/absl/base/internal/thread_identity.h
+++ b/absl/base/internal/thread_identity.h
@@ -62,8 +62,8 @@
     return reinterpret_cast<ThreadIdentity*>(this);
   }
 
-  PerThreadSynch *next;  // Circular waiter queue; initialized to 0.
-  PerThreadSynch *skip;  // If non-zero, all entries in Mutex queue
+  PerThreadSynch* next;  // Circular waiter queue; initialized to 0.
+  PerThreadSynch* skip;  // If non-zero, all entries in Mutex queue
                          // up to and including "skip" have same
                          // condition as this, and will be woken later
   bool may_skip;         // if false while on mutex queue, a mutex unlocker
@@ -104,10 +104,7 @@
   //
   // Transitions from kAvailable to kQueued require no barrier, they
   // are externally ordered by the Mutex.
-  enum State {
-    kAvailable,
-    kQueued
-  };
+  enum State { kAvailable, kQueued };
   std::atomic<State> state;
 
   // The wait parameters of the current wait.  waitp is null if the
@@ -122,14 +119,14 @@
   // pointer unchanged.
   SynchWaitParams* waitp;
 
-  intptr_t readers;     // Number of readers in mutex.
+  intptr_t readers;  // Number of readers in mutex.
 
   // When priority will next be read (cycles).
   int64_t next_priority_read_cycles;
 
   // Locks held; used during deadlock detection.
   // Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity().
-  SynchLocksHeld *all_locks;
+  SynchLocksHeld* all_locks;
 };
 
 // The instances of this class are allocated in NewThreadIdentity() with an
@@ -220,7 +217,7 @@
 #define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
 #elif defined(__APPLE__) && defined(ABSL_HAVE_THREAD_LOCAL)
 #define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
-#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) &&        \
+#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \
     (__GOOGLE_GRTE_VERSION__ >= 20140228L)
 // Support for async-safe TLS was specifically added in GRTEv4.  It's not
 // present in the upstream eglibc.
diff --git a/absl/base/nullability.h b/absl/base/nullability.h
new file mode 100644
index 0000000..42525dd
--- /dev/null
+++ b/absl/base/nullability.h
@@ -0,0 +1,224 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: nullability.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines a set of "templated annotations" for designating the
+// expected nullability of pointers. These annotations allow you to designate
+// pointers in one of three classification states:
+//
+//  * "Non-null" (for pointers annotated `NonNull<T>`), indicating that it is
+//    invalid for the given pointer to ever be null.
+//  * "Nullable" (for pointers annotated `Nullable<T>`), indicating that it is
+//    valid for the given pointer to be null.
+//  * "Unknown" (for pointers annotated `NullabilityUnknown<T>`), indicating
+//    that the given pointer has not been yet classified as either nullable or
+//    non-null. This is the default state of unannotated pointers.
+//
+// NOTE: unannotated pointers implicitly bear the annotation
+// `NullabilityUnknown<T>`; you should rarely, if ever, see this annotation used
+// in the codebase explicitly.
+//
+// -----------------------------------------------------------------------------
+// Nullability and Contracts
+// -----------------------------------------------------------------------------
+//
+// These nullability annotations allow you to more clearly specify contracts on
+// software components by narrowing the *preconditions*, *postconditions*, and
+// *invariants* of pointer state(s) in any given interface. It then depends on
+// context who is responsible for fulfilling the annotation's requirements.
+//
+// For example, a function may receive a pointer argument. Designating that
+// pointer argument as "non-null" tightens the precondition of the contract of
+// that function. It is then the responsibility of anyone calling such a
+// function to ensure that the passed pointer is not null.
+//
+// Similarly, a function may have a pointer as a return value. Designating that
+// return value as "non-null" tightens the postcondition of the contract of that
+// function. In this case, however, it is the responsibility of the function
+// itself to ensure that the returned pointer is not null.
+//
+// Clearly defining these contracts allows providers (and consumers) of such
+// pointers to have more confidence in their null state. If a function declares
+// a return value as "non-null", for example, the caller should not need to
+// check whether the returned value is `nullptr`; it can simply assume the
+// pointer is valid.
+//
+// Of course most interfaces already have expectations on the nullability state
+// of pointers, and these expectations are, in effect, a contract; often,
+// however, those contracts are either poorly or partially specified, assumed,
+// or misunderstood. These nullability annotations are designed to allow you to
+// formalize those contracts within the codebase.
+//
+// -----------------------------------------------------------------------------
+// Using Nullability Annotations
+// -----------------------------------------------------------------------------
+//
+// It is important to note that these annotations are not distinct strong
+// *types*. They are alias templates defined to be equal to the underlying
+// pointer type. A pointer annotated `NonNull<T*>`, for example, is simply a
+// pointer of type `T*`. Each annotation acts as a form of documentation about
+// the contract for the given pointer. Each annotation requires providers or
+// consumers of these pointers across API boundaries to take appropriate steps
+// when setting or using these pointers:
+//
+// * "Non-null" pointers should never be null. It is the responsibility of the
+//   provider of this pointer to ensure that the pointer may never be set to
+//   null. Consumers of such pointers can treat them as non-null.
+// * "Nullable" pointers may or may not be null. Consumers of such pointers
+//   should precede any usage of that pointer (e.g. a dereference operation)
+//   with a `nullptr` check.
+// * "Unknown" pointers may be either "non-null" or "nullable" but have not been
+//   definitively determined to be in either classification state. Providers of
+//   such pointers across API boundaries should determine --  over time -- to
+//   annotate the pointer in either of the above two states. Consumers of such
+//   pointers across an API boundary should continue to treat such pointers as
+//   they currently do.
+//
+// Example:
+//
+// // PaySalary() requires the passed pointer to an `Employee` to be non-null.
+// void PaySalary(absl::NonNull<Employee *> e) {
+//   pay(e->salary);  // OK to dereference
+// }
+//
+// // CompleteTransaction() guarantees that the returned pointer to an
+// // `Account` is non-null.
+// absl::NonNull<Account *> CompleteTransaction(double fee) {
+// ...
+// }
+//
+// // Note that specifying a nullability annotation does not prevent someone
+// // from violating the contract:
+//
+// absl::Nullable<Employee *> find(Map& employees, std::string_view name);
+//
+// void g(Map& employees) {
+//   Employee *e = find(employees, "Pat");
+//   // `e` can now be null.
+//   PaySalary(e); // Violates contract, but compiles!
+// }
+//
+// Nullability annotations, in other words, are useful for defining and
+// narrowing contracts; *enforcement* of those contracts depends on use and any
+// additional (static or dynamic analysis) tooling.
+//
+// NOTE: The "unknown" annotation state indicates that a pointer's contract has
+// not yet been positively identified. The unknown state therefore acts as a
+// form of documentation of your technical debt, and a codebase that adopts
+// nullability annotations should aspire to annotate every pointer as either
+// "non-null" or "nullable".
+//
+// -----------------------------------------------------------------------------
+// Applicability of Nullability Annotations
+// -----------------------------------------------------------------------------
+//
+// By default, nullability annotations are applicable to raw and smart
+// pointers. User-defined types can indicate compatibility with nullability
+// annotations by providing an `absl_nullability_compatible` nested type. The
+// actual definition of this inner type is not relevant as it is used merely as
+// a marker. It is common to use a using-declaration that aliases
+// `absl_nullability_compatible` to `void`.
+//
+// // Example:
+// struct MyPtr {
+//   using absl_nullability_compatible = void;
+//   ...
+// };
+//
+// DISCLAIMER:
+// ===========================================================================
+// These nullability annotations are primarily a human-readable signal about the
+// intended contract of the pointer. They are not *types* and do not currently
+// provide any correctness guarantees. For example, a pointer annotated as
+// `NonNull<T*>` is *not guaranteed* to be non-null, and the compiler won't
+// alert or prevent assignment of a `Nullable<T*>` to a `NonNull<T*>`.
+// ===========================================================================
+#ifndef ABSL_BASE_NULLABILITY_H_
+#define ABSL_BASE_NULLABILITY_H_
+
+#include "absl/base/internal/nullability_impl.h"
+
+namespace absl {
+
+// absl::NonNull
+//
+// The indicated pointer is never null. It is the responsibility of the provider
+// of this pointer across an API boundary to ensure that the pointer is never
+// set to null. Consumers of this pointer across an API boundary may safely
+// dereference the pointer.
+//
+// Example:
+//
+// // `employee` is designated as not null.
+// void PaySalary(absl::NonNull<Employee *> employee) {
+//   pay(*employee);  // OK to dereference
+// }
+template <typename T>
+using NonNull = nullability_internal::NonNullImpl<T>;
+
+// absl::Nullable
+//
+// The indicated pointer may, by design, be either null or non-null. Consumers
+// of this pointer across an API boundary should perform a `nullptr` check
+// before performing any operation using the pointer.
+//
+// Example:
+//
+// // `employee` may be null.
+// void PaySalary(absl::Nullable<Employee *> employee) {
+//   if (employee != nullptr) {
+//     Pay(*employee);  // OK to dereference
+//   }
+// }
+template <typename T>
+using Nullable = nullability_internal::NullableImpl<T>;
+
+// absl::NullabilityUnknown (default)
+//
+// The indicated pointer has not yet been determined to be definitively
+// "non-null" or "nullable." Providers of such pointers across API boundaries
+// should, over time, annotate such pointers as either "non-null" or "nullable."
+// Consumers of these pointers across an API boundary should treat such pointers
+// with the same caution they treat currently unannotated pointers. Most
+// existing code will have "unknown" pointers, which should eventually be
+// migrated into one of the above two nullability states: `NonNull<T>` or
+// `Nullable<T>`.
+//
+// NOTE: Because this annotation is the global default state, pointers without
+// any annotation are assumed to have "unknown" semantics. This assumption is
+// designed to minimize churn and reduce clutter within the codebase.
+//
+// Example:
+//
+// // `employee`'s nullability state is unknown.
+// void PaySalary(absl::NullabilityUnknown<Employee *> employee) {
+//   Pay(*employee); // Potentially dangerous. API provider should investigate.
+// }
+//
+// Note that a pointer without an annotation, by default, is assumed to have the
+// annotation `NullabilityUnknown`.
+//
+// // `employee`'s nullability state is unknown.
+// void PaySalary(Employee* employee) {
+//   Pay(*employee); // Potentially dangerous. API provider should investigate.
+// }
+template <typename T>
+using NullabilityUnknown = nullability_internal::NullabilityUnknownImpl<T>;
+
+}  // namespace absl
+
+#endif  // ABSL_BASE_NULLABILITY_H_
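A minimal usage sketch of the public aliases (`MyIntPtr`, `Deref`, and `DerefOr` are hypothetical names used only for illustration): the annotations compile away to the underlying pointer type, and a user-defined type participates once it carries the opt-in tag.

    #include <type_traits>

    #include "absl/base/nullability.h"

    // The annotations are pure aliases: absl::NonNull<int*> is exactly int*.
    static_assert(std::is_same<absl::NonNull<int*>, int*>::value, "");

    // A hypothetical user-defined pointer-like type that opts in via the tag.
    struct MyIntPtr {
      using absl_nullability_compatible = void;
      int* raw = nullptr;
    };

    // Annotated signatures document the contract without changing the type.
    int Deref(absl::NonNull<int*> p) { return *p; }  // caller must pass non-null
    int DerefOr(absl::Nullable<MyIntPtr> p, int fallback) {
      return p.raw != nullptr ? *p.raw : fallback;  // may be "null"; check first
    }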
diff --git a/absl/base/nullability_test.cc b/absl/base/nullability_test.cc
new file mode 100644
index 0000000..6edd7cd
--- /dev/null
+++ b/absl/base/nullability_test.cc
@@ -0,0 +1,129 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/nullability.h"
+
+#include <cassert>
+#include <memory>
+#include <utility>
+
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+
+namespace {
+using ::absl::NonNull;
+using ::absl::NullabilityUnknown;
+using ::absl::Nullable;
+
+void funcWithNonnullArg(NonNull<int*> /*arg*/) {}
+template <typename T>
+void funcWithDeducedNonnullArg(NonNull<T*> /*arg*/) {}
+
+TEST(NonNullTest, NonNullArgument) {
+  int var = 0;
+  funcWithNonnullArg(&var);
+  funcWithDeducedNonnullArg(&var);
+}
+
+NonNull<int*> funcWithNonnullReturn() {
+  static int var = 0;
+  return &var;
+}
+
+TEST(NonNullTest, NonNullReturn) {
+  auto var = funcWithNonnullReturn();
+  (void)var;
+}
+
+TEST(PassThroughTest, PassesThroughRawPointerToInt) {
+  EXPECT_TRUE((std::is_same<NonNull<int*>, int*>::value));
+  EXPECT_TRUE((std::is_same<Nullable<int*>, int*>::value));
+  EXPECT_TRUE((std::is_same<NullabilityUnknown<int*>, int*>::value));
+}
+
+TEST(PassThroughTest, PassesThroughRawPointerToVoid) {
+  EXPECT_TRUE((std::is_same<NonNull<void*>, void*>::value));
+  EXPECT_TRUE((std::is_same<Nullable<void*>, void*>::value));
+  EXPECT_TRUE((std::is_same<NullabilityUnknown<void*>, void*>::value));
+}
+
+TEST(PassThroughTest, PassesThroughUniquePointerToInt) {
+  using T = std::unique_ptr<int>;
+  EXPECT_TRUE((std::is_same<NonNull<T>, T>::value));
+  EXPECT_TRUE((std::is_same<Nullable<T>, T>::value));
+  EXPECT_TRUE((std::is_same<NullabilityUnknown<T>, T>::value));
+}
+
+TEST(PassThroughTest, PassesThroughSharedPointerToInt) {
+  using T = std::shared_ptr<int>;
+  EXPECT_TRUE((std::is_same<NonNull<T>, T>::value));
+  EXPECT_TRUE((std::is_same<Nullable<T>, T>::value));
+  EXPECT_TRUE((std::is_same<NullabilityUnknown<T>, T>::value));
+}
+
+TEST(PassThroughTest, PassesThroughSharedPointerToVoid) {
+  using T = std::shared_ptr<void>;
+  EXPECT_TRUE((std::is_same<NonNull<T>, T>::value));
+  EXPECT_TRUE((std::is_same<Nullable<T>, T>::value));
+  EXPECT_TRUE((std::is_same<NullabilityUnknown<T>, T>::value));
+}
+
+TEST(PassThroughTest, PassesThroughPointerToMemberObject) {
+  using T = decltype(&std::pair<int, int>::first);
+  EXPECT_TRUE((std::is_same<NonNull<T>, T>::value));
+  EXPECT_TRUE((std::is_same<Nullable<T>, T>::value));
+  EXPECT_TRUE((std::is_same<NullabilityUnknown<T>, T>::value));
+}
+
+TEST(PassThroughTest, PassesThroughPointerToMemberFunction) {
+  using T = decltype(&std::unique_ptr<int>::reset);
+  EXPECT_TRUE((std::is_same<NonNull<T>, T>::value));
+  EXPECT_TRUE((std::is_same<Nullable<T>, T>::value));
+  EXPECT_TRUE((std::is_same<NullabilityUnknown<T>, T>::value));
+}
+
+}  // namespace
+
+// Nullable ADL lookup test
+namespace util {
+// Helper for NullableAdlTest.  Returns true, denoting that argument-dependent
+// lookup found this implementation of DidAdlWin.  Must be in namespace
+// util itself, not a nested anonymous namespace.
+template <typename T>
+bool DidAdlWin(T*) {
+  return true;
+}
+
+// Because this type is defined in namespace util, an unqualified call to
+// DidAdlWin with a pointer to MakeAdlWin will find the above implementation.
+struct MakeAdlWin {};
+}  // namespace util
+
+namespace {
+// Returns false, denoting that ADL did not inspect namespace util.  If it
+// had, the better match (T*) above would have won out over the (...) here.
+bool DidAdlWin(...) { return false; }
+
+TEST(NullableAdlTest, NullableAddsNothingToArgumentDependentLookup) {
+  // Treatment: absl::Nullable<int*> contributes nothing to ADL because
+  // int* itself doesn't.
+  EXPECT_FALSE(DidAdlWin((int*)nullptr));
+  EXPECT_FALSE(DidAdlWin((Nullable<int*>)nullptr));
+
+  // Control: Argument-dependent lookup does find the implementation in
+  // namespace util when the underlying pointee type resides there.
+  EXPECT_TRUE(DidAdlWin((util::MakeAdlWin*)nullptr));
+  EXPECT_TRUE(DidAdlWin((Nullable<util::MakeAdlWin*>)nullptr));
+}
+}  // namespace
diff --git a/absl/debugging/internal/stacktrace_aarch64-inl.inc b/absl/debugging/internal/stacktrace_aarch64-inl.inc
index c8b8439..3f08716 100644
--- a/absl/debugging/internal/stacktrace_aarch64-inl.inc
+++ b/absl/debugging/internal/stacktrace_aarch64-inl.inc
@@ -13,6 +13,7 @@
 #include <cassert>
 #include <cstdint>
 #include <iostream>
+#include <limits>
 
 #include "absl/base/attributes.h"
 #include "absl/debugging/internal/address_is_readable.h"
diff --git a/absl/meta/type_traits.h b/absl/meta/type_traits.h
index 4f9ea01..cf71164 100644
--- a/absl/meta/type_traits.h
+++ b/absl/meta/type_traits.h
@@ -500,8 +500,12 @@
 // there.
 //
 // TODO(b/275003464): remove the opt-out once the bug is fixed.
-#if ABSL_HAVE_BUILTIN(__is_trivially_relocatable) && \
-    !(defined(__clang__) && (defined(_WIN32) || defined(_WIN64)))
+//
+// According to https://github.com/abseil/abseil-cpp/issues/1479, this does not
+// work with NVCC either.
+#if ABSL_HAVE_BUILTIN(__is_trivially_relocatable) &&                 \
+    !(defined(__clang__) && (defined(_WIN32) || defined(_WIN64))) && \
+    !defined(__NVCC__)
 template <class T>
 struct is_trivially_relocatable
     : std::integral_constant<bool, __is_trivially_relocatable(T)> {};
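For context, a small sketch of what the trait reports for an ordinary trivially copyable aggregate (`Blob` is a hypothetical type used only here); the assertion is expected to hold both when the builtin is used and when the guard above selects the conservative fallback, which is based on trivial copyability:

    #include "absl/meta/type_traits.h"

    struct Blob {
      int x;
      double y;
    };

    // A trivially copyable aggregate is reported as trivially relocatable on
    // either definition of the trait.
    static_assert(absl::is_trivially_relocatable<Blob>::value, "");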
diff --git a/absl/random/internal/randen_benchmarks.cc b/absl/random/internal/randen_benchmarks.cc
index f589172..ec086ce 100644
--- a/absl/random/internal/randen_benchmarks.cc
+++ b/absl/random/internal/randen_benchmarks.cc
@@ -47,8 +47,10 @@
 // Randen implementation benchmarks.
 template <typename T>
 struct AbsorbFn : public T {
-  mutable uint64_t state[kStateSizeT] = {};
-  mutable uint32_t seed[kSeedSizeT] = {};
+  // These are both cast to uint128* in the RandenHwAes implementation, so
+  // ensure they are 16-byte aligned.
+  alignas(16) mutable uint64_t state[kStateSizeT] = {};
+  alignas(16) mutable uint32_t seed[kSeedSizeT] = {};
 
   static constexpr size_t bytes() { return sizeof(seed); }
 
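A short sketch of the alignment point made in the comment above (`State` is a hypothetical type, not the benchmark's `AbsorbFn`): applying alignas to the member guarantees the storage is suitably aligned for a 16-byte view.

    #include <cassert>
    #include <cstdint>

    struct State {
      // Without alignas(16), uint64_t storage is only guaranteed 8-byte
      // alignment, which is insufficient for a 16-byte (uint128-style) view.
      alignas(16) uint64_t state[4] = {};
    };

    int main() {
      State s;
      assert(reinterpret_cast<std::uintptr_t>(s.state) % 16 == 0);
      return 0;
    }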
diff --git a/absl/synchronization/internal/create_thread_identity.cc b/absl/synchronization/internal/create_thread_identity.cc
index f1ddbbb..eacaa28 100644
--- a/absl/synchronization/internal/create_thread_identity.cc
+++ b/absl/synchronization/internal/create_thread_identity.cc
@@ -13,6 +13,7 @@
 // limitations under the License.
 
 #include <stdint.h>
+
 #include <new>
 
 // This file is a no-op if the required LowLevelAlloc support is missing.
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
index e2ee411..3aa5560 100644
--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -35,10 +35,9 @@
 
 #include <algorithm>
 #include <atomic>
-#include <cinttypes>
 #include <cstddef>
+#include <cstdlib>
 #include <cstring>
-#include <iterator>
 #include <thread>  // NOLINT(build/c++11)
 
 #include "absl/base/attributes.h"
@@ -55,7 +54,6 @@
 #include "absl/base/internal/thread_identity.h"
 #include "absl/base/internal/tsan_mutex_interface.h"
 #include "absl/base/optimization.h"
-#include "absl/base/port.h"
 #include "absl/debugging/stacktrace.h"
 #include "absl/debugging/symbolize.h"
 #include "absl/synchronization/internal/graphcycles.h"
@@ -63,6 +61,7 @@
 #include "absl/time/time.h"
 
 using absl::base_internal::CurrentThreadIdentityIfPresent;
+using absl::base_internal::CycleClock;
 using absl::base_internal::PerThreadSynch;
 using absl::base_internal::SchedulingGuard;
 using absl::base_internal::ThreadIdentity;
@@ -98,15 +97,15 @@
 absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
     submit_profile_data;
 ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<void (*)(
-    const char *msg, const void *obj, int64_t wait_cycles)>
+    const char* msg, const void* obj, int64_t wait_cycles)>
     mutex_tracer;
 ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
-    absl::base_internal::AtomicHook<void (*)(const char *msg, const void *cv)>
-        cond_var_tracer;
+absl::base_internal::AtomicHook<void (*)(const char* msg, const void* cv)>
+    cond_var_tracer;
 
 }  // namespace
 
-static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
+static inline bool EvalConditionAnnotated(const Condition* cond, Mutex* mu,
                                           bool locking, bool trylock,
                                           bool read_lock);
 
@@ -114,12 +113,12 @@
   submit_profile_data.Store(fn);
 }
 
-void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
+void RegisterMutexTracer(void (*fn)(const char* msg, const void* obj,
                                     int64_t wait_cycles)) {
   mutex_tracer.Store(fn);
 }
 
-void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv)) {
+void RegisterCondVarTracer(void (*fn)(const char* msg, const void* cv)) {
   cond_var_tracer.Store(fn);
 }
 
@@ -141,25 +140,24 @@
   return absl::Now() - before;
 }
 
-const MutexGlobals &GetMutexGlobals() {
+const MutexGlobals& GetMutexGlobals() {
   ABSL_CONST_INIT static MutexGlobals data;
   absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
-    const int num_cpus = absl::base_internal::NumCPUs();
-    data.spinloop_iterations = num_cpus > 1 ? 1500 : 0;
-    // If this a uniprocessor, only yield/sleep.
-    // Real-time threads are often unable to yield, so the sleep time needs
-    // to be long enough to keep the calling thread asleep until scheduling
-    // happens.
-    // If this is multiprocessor, allow spinning. If the mode is
-    // aggressive then spin many times before yielding.  If the mode is
-    // gentle then spin only a few times before yielding.  Aggressive spinning
-    // is used to ensure that an Unlock() call, which must get the spin lock
-    // for any thread to make progress gets it without undue delay.
-    if (num_cpus > 1) {
+    if (absl::base_internal::NumCPUs() > 1) {
+      // If this is multiprocessor, allow spinning. If the mode is
+      // aggressive then spin many times before yielding. If the mode is
+      // gentle then spin only a few times before yielding. Aggressive spinning
+      // is used to ensure that an Unlock() call, which must get the spin lock
+      // for any thread to make progress, gets it without undue delay.
+      data.spinloop_iterations = 1500;
       data.mutex_sleep_spins[AGGRESSIVE] = 5000;
       data.mutex_sleep_spins[GENTLE] = 250;
       data.mutex_sleep_time = absl::Microseconds(10);
     } else {
+      // If this is a uniprocessor, only yield/sleep. Real-time threads are often
+      // unable to yield, so the sleep time needs to be long enough to keep
+      // the calling thread asleep until scheduling happens.
+      data.spinloop_iterations = 0;
       data.mutex_sleep_spins[AGGRESSIVE] = 0;
       data.mutex_sleep_spins[GENTLE] = 0;
       data.mutex_sleep_time = MeasureTimeToYield() * 5;
@@ -212,8 +210,7 @@
     v = pv->load(std::memory_order_relaxed);
   } while ((v & bits) != bits &&
            ((v & wait_until_clear) != 0 ||
-            !pv->compare_exchange_weak(v, v | bits,
-                                       std::memory_order_release,
+            !pv->compare_exchange_weak(v, v | bits, std::memory_order_release,
                                        std::memory_order_relaxed)));
 }
 
@@ -228,8 +225,7 @@
     v = pv->load(std::memory_order_relaxed);
   } while ((v & bits) != 0 &&
            ((v & wait_until_clear) != 0 ||
-            !pv->compare_exchange_weak(v, v & ~bits,
-                                       std::memory_order_release,
+            !pv->compare_exchange_weak(v, v & ~bits, std::memory_order_release,
                                        std::memory_order_relaxed)));
 }
 
@@ -240,7 +236,7 @@
     absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
 
 // Graph used to detect deadlocks.
-ABSL_CONST_INIT static GraphCycles *deadlock_graph
+ABSL_CONST_INIT static GraphCycles* deadlock_graph
     ABSL_GUARDED_BY(deadlock_graph_mu) ABSL_PT_GUARDED_BY(deadlock_graph_mu);
 
 //------------------------------------------------------------------
@@ -284,7 +280,7 @@
 // Properties of the events.
 static const struct {
   int flags;
-  const char *msg;
+  const char* msg;
 } event_properties[] = {
     {SYNCH_F_LCK_W | SYNCH_F_TRY, "TryLock succeeded "},
     {0, "TryLock failed "},
@@ -309,12 +305,12 @@
 // Can't be too small, as it's used for deadlock detection information.
 static constexpr uint32_t kNSynchEvent = 1031;
 
-static struct SynchEvent {     // this is a trivial hash table for the events
+static struct SynchEvent {  // this is a trivial hash table for the events
   // struct is freed when refcount reaches 0
   int refcount ABSL_GUARDED_BY(synch_event_mu);
 
   // buckets have linear, 0-terminated  chains
-  SynchEvent *next ABSL_GUARDED_BY(synch_event_mu);
+  SynchEvent* next ABSL_GUARDED_BY(synch_event_mu);
 
   // Constant after initialization
   uintptr_t masked_addr;  // object at this address is called "name"
@@ -322,13 +318,13 @@
   // No explicit synchronization used.  Instead we assume that the
   // client who enables/disables invariants/logging on a Mutex does so
   // while the Mutex is not being concurrently accessed by others.
-  void (*invariant)(void *arg);  // called on each event
-  void *arg;            // first arg to (*invariant)()
-  bool log;             // logging turned on
+  void (*invariant)(void* arg);  // called on each event
+  void* arg;                     // first arg to (*invariant)()
+  bool log;                      // logging turned on
 
   // Constant after initialization
-  char name[1];         // actually longer---NUL-terminated string
-} * synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu);
+  char name[1];  // actually longer---NUL-terminated string
+}* synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu);
 
 // Ensure that the object at "addr" has a SynchEvent struct associated with it,
 // set "bits" in the word there (waiting until lockbit is clear before doing
@@ -337,11 +333,11 @@
 // the string name is copied into it.
 // When used with a mutex, the caller should also ensure that kMuEvent
 // is set in the mutex word, and similarly for condition variables and kCVEvent.
-static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
-                                    const char *name, intptr_t bits,
+static SynchEvent* EnsureSynchEvent(std::atomic<intptr_t>* addr,
+                                    const char* name, intptr_t bits,
                                     intptr_t lockbit) {
   uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
-  SynchEvent *e;
+  SynchEvent* e;
   // first look for existing SynchEvent struct..
   synch_event_mu.Lock();
   for (e = synch_event[h];
@@ -353,9 +349,9 @@
       name = "";
     }
     size_t l = strlen(name);
-    e = reinterpret_cast<SynchEvent *>(
+    e = reinterpret_cast<SynchEvent*>(
         base_internal::LowLevelAlloc::Alloc(sizeof(*e) + l));
-    e->refcount = 2;    // one for return value, one for linked list
+    e->refcount = 2;  // one for return value, one for linked list
     e->masked_addr = base_internal::HidePtr(addr);
     e->invariant = nullptr;
     e->arg = nullptr;
@@ -365,19 +361,19 @@
     AtomicSetBits(addr, bits, lockbit);
     synch_event[h] = e;
   } else {
-    e->refcount++;      // for return value
+    e->refcount++;  // for return value
   }
   synch_event_mu.Unlock();
   return e;
 }
 
 // Deallocate the SynchEvent *e, whose refcount has fallen to zero.
-static void DeleteSynchEvent(SynchEvent *e) {
+static void DeleteSynchEvent(SynchEvent* e) {
   base_internal::LowLevelAlloc::Free(e);
 }
 
 // Decrement the reference count of *e, or do nothing if e==null.
-static void UnrefSynchEvent(SynchEvent *e) {
+static void UnrefSynchEvent(SynchEvent* e) {
   if (e != nullptr) {
     synch_event_mu.Lock();
     bool del = (--(e->refcount) == 0);
@@ -391,11 +387,11 @@
 // Forget the mapping from the object (Mutex or CondVar) at address addr
 // to SynchEvent object, and clear "bits" in its word (waiting until lockbit
 // is clear before doing so).
-static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits,
+static void ForgetSynchEvent(std::atomic<intptr_t>* addr, intptr_t bits,
                              intptr_t lockbit) {
   uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
-  SynchEvent **pe;
-  SynchEvent *e;
+  SynchEvent** pe;
+  SynchEvent* e;
   synch_event_mu.Lock();
   for (pe = &synch_event[h];
        (e = *pe) != nullptr && e->masked_addr != base_internal::HidePtr(addr);
@@ -416,9 +412,9 @@
 // Return a refcounted reference to the SynchEvent of the object at address
 // "addr", if any.  The pointer returned is valid until the UnrefSynchEvent() is
 // called.
-static SynchEvent *GetSynchEvent(const void *addr) {
+static SynchEvent* GetSynchEvent(const void* addr) {
   uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
-  SynchEvent *e;
+  SynchEvent* e;
   synch_event_mu.Lock();
   for (e = synch_event[h];
        e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
@@ -433,17 +429,17 @@
 
 // Called when an event "ev" occurs on a Mutex of CondVar "obj"
 // if event recording is on
-static void PostSynchEvent(void *obj, int ev) {
-  SynchEvent *e = GetSynchEvent(obj);
+static void PostSynchEvent(void* obj, int ev) {
+  SynchEvent* e = GetSynchEvent(obj);
   // logging is on if event recording is on and either there's no event struct,
   // or it explicitly says to log
   if (e == nullptr || e->log) {
-    void *pcs[40];
+    void* pcs[40];
     int n = absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 1);
     // A buffer with enough space for the ASCII for all the PCs, even on a
     // 64-bit machine.
     char buffer[ABSL_ARRAYSIZE(pcs) * 24];
-    int pos = snprintf(buffer, sizeof (buffer), " @");
+    int pos = snprintf(buffer, sizeof(buffer), " @");
     for (int i = 0; i != n; i++) {
       int b = snprintf(&buffer[pos], sizeof(buffer) - static_cast<size_t>(pos),
                        " %p", pcs[i]);
@@ -465,13 +461,13 @@
     // get false positive race reports later.
     // Reuse EvalConditionAnnotated to properly call into user code.
     struct local {
-      static bool pred(SynchEvent *ev) {
+      static bool pred(SynchEvent* ev) {
         (*ev->invariant)(ev->arg);
         return false;
       }
     };
     Condition cond(&local::pred, e);
-    Mutex *mu = static_cast<Mutex *>(obj);
+    Mutex* mu = static_cast<Mutex*>(obj);
     const bool locking = (flags & SYNCH_F_UNLOCK) == 0;
     const bool trylock = (flags & SYNCH_F_TRY) != 0;
     const bool read_lock = (flags & SYNCH_F_R) != 0;
@@ -497,32 +493,32 @@
 // PerThreadSynch struct points at the most recent SynchWaitParams struct when
 // the thread is on a Mutex's waiter queue.
 struct SynchWaitParams {
-  SynchWaitParams(Mutex::MuHow how_arg, const Condition *cond_arg,
-                  KernelTimeout timeout_arg, Mutex *cvmu_arg,
-                  PerThreadSynch *thread_arg,
-                  std::atomic<intptr_t> *cv_word_arg)
+  SynchWaitParams(Mutex::MuHow how_arg, const Condition* cond_arg,
+                  KernelTimeout timeout_arg, Mutex* cvmu_arg,
+                  PerThreadSynch* thread_arg,
+                  std::atomic<intptr_t>* cv_word_arg)
       : how(how_arg),
         cond(cond_arg),
         timeout(timeout_arg),
         cvmu(cvmu_arg),
         thread(thread_arg),
         cv_word(cv_word_arg),
-        contention_start_cycles(base_internal::CycleClock::Now()),
+        contention_start_cycles(CycleClock::Now()),
         should_submit_contention_data(false) {}
 
   const Mutex::MuHow how;  // How this thread needs to wait.
-  const Condition *cond;  // The condition that this thread is waiting for.
-                          // In Mutex, this field is set to zero if a timeout
-                          // expires.
+  const Condition* cond;   // The condition that this thread is waiting for.
+                           // In Mutex, this field is set to zero if a timeout
+                           // expires.
   KernelTimeout timeout;  // timeout expiry---absolute time
                           // In Mutex, this field is set to zero if a timeout
                           // expires.
-  Mutex *const cvmu;      // used for transfer from cond var to mutex
-  PerThreadSynch *const thread;  // thread that is waiting
+  Mutex* const cvmu;      // used for transfer from cond var to mutex
+  PerThreadSynch* const thread;  // thread that is waiting
 
   // If not null, thread should be enqueued on the CondVar whose state
   // word is cv_word instead of queueing normally on the Mutex.
-  std::atomic<intptr_t> *cv_word;
+  std::atomic<intptr_t>* cv_word;
 
   int64_t contention_start_cycles;  // Time (in cycles) when this thread started
                                     // to contend for the mutex.
@@ -530,12 +526,12 @@
 };
 
 struct SynchLocksHeld {
-  int n;              // number of valid entries in locks[]
-  bool overflow;      // true iff we overflowed the array at some point
+  int n;          // number of valid entries in locks[]
+  bool overflow;  // true iff we overflowed the array at some point
   struct {
-    Mutex *mu;        // lock acquired
-    int32_t count;      // times acquired
-    GraphId id;       // deadlock_graph id of acquired lock
+    Mutex* mu;      // lock acquired
+    int32_t count;  // times acquired
+    GraphId id;     // deadlock_graph id of acquired lock
   } locks[40];
   // If a thread overfills the array during deadlock detection, we
   // continue, discarding information as needed.  If no overflow has
@@ -545,11 +541,11 @@
 
 // A sentinel value in lists that is not 0.
 // A 0 value is used to mean "not on a list".
-static PerThreadSynch *const kPerThreadSynchNull =
-  reinterpret_cast<PerThreadSynch *>(1);
+static PerThreadSynch* const kPerThreadSynchNull =
+    reinterpret_cast<PerThreadSynch*>(1);
 
-static SynchLocksHeld *LocksHeldAlloc() {
-  SynchLocksHeld *ret = reinterpret_cast<SynchLocksHeld *>(
+static SynchLocksHeld* LocksHeldAlloc() {
+  SynchLocksHeld* ret = reinterpret_cast<SynchLocksHeld*>(
       base_internal::LowLevelAlloc::Alloc(sizeof(SynchLocksHeld)));
   ret->n = 0;
   ret->overflow = false;
@@ -557,24 +553,24 @@
 }
 
 // Return the PerThreadSynch-struct for this thread.
-static PerThreadSynch *Synch_GetPerThread() {
-  ThreadIdentity *identity = GetOrCreateCurrentThreadIdentity();
+static PerThreadSynch* Synch_GetPerThread() {
+  ThreadIdentity* identity = GetOrCreateCurrentThreadIdentity();
   return &identity->per_thread_synch;
 }
 
-static PerThreadSynch *Synch_GetPerThreadAnnotated(Mutex *mu) {
+static PerThreadSynch* Synch_GetPerThreadAnnotated(Mutex* mu) {
   if (mu) {
     ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
   }
-  PerThreadSynch *w = Synch_GetPerThread();
+  PerThreadSynch* w = Synch_GetPerThread();
   if (mu) {
     ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
   }
   return w;
 }
 
-static SynchLocksHeld *Synch_GetAllLocks() {
-  PerThreadSynch *s = Synch_GetPerThread();
+static SynchLocksHeld* Synch_GetAllLocks() {
+  PerThreadSynch* s = Synch_GetPerThread();
   if (s->all_locks == nullptr) {
     s->all_locks = LocksHeldAlloc();  // Freed by ReclaimThreadIdentity.
   }
@@ -582,7 +578,7 @@
 }
 
 // Post on "w"'s associated PerThreadSem.
-void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
+void Mutex::IncrementSynchSem(Mutex* mu, PerThreadSynch* w) {
   if (mu) {
     ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
     // We miss synchronization around passing PerThreadSynch between threads
@@ -598,7 +594,7 @@
 }
 
 // Wait on "w"'s associated PerThreadSem; returns false if timeout expired.
-bool Mutex::DecrementSynchSem(Mutex *mu, PerThreadSynch *w, KernelTimeout t) {
+bool Mutex::DecrementSynchSem(Mutex* mu, PerThreadSynch* w, KernelTimeout t) {
   if (mu) {
     ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
   }
@@ -619,7 +615,7 @@
 // Mutex code checking that the "waitp" field has not been reused.
 void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() {
   // Fix the per-thread state only if it exists.
-  ThreadIdentity *identity = CurrentThreadIdentityIfPresent();
+  ThreadIdentity* identity = CurrentThreadIdentityIfPresent();
   if (identity != nullptr) {
     identity->per_thread_synch.suppress_fatal_errors = true;
   }
@@ -638,7 +634,7 @@
 //    bit-twiddling trick in Mutex::Unlock().
 //  o kMuWriter / kMuReader == kMuWrWait / kMuWait,
 //    to enable the bit-twiddling trick in CheckForMutexCorruption().
-static const intptr_t kMuReader      = 0x0001L;  // a reader holds the lock
+static const intptr_t kMuReader = 0x0001L;  // a reader holds the lock
 // There's a designated waker.
 // INVARIANT1:  there's a thread that was blocked on the mutex, is
 // no longer, yet has not yet acquired the mutex.  If there's a
@@ -658,9 +654,9 @@
 // the reader will first queue itself and block, but then the last unlocking
 // reader will wake it.
 static const intptr_t kMuWrWait = 0x0020L;
-static const intptr_t kMuSpin        = 0x0040L;  // spinlock protects wait list
-static const intptr_t kMuLow         = 0x00ffL;  // mask all mutex bits
-static const intptr_t kMuHigh        = ~kMuLow;  // mask pointer/reader count
+static const intptr_t kMuSpin = 0x0040L;  // spinlock protects wait list
+static const intptr_t kMuLow = 0x00ffL;   // mask all mutex bits
+static const intptr_t kMuHigh = ~kMuLow;  // mask pointer/reader count
 
 // Hack to make constant values available to gdb pretty printer
 enum {
@@ -756,8 +752,8 @@
   ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static);
 }
 
-void Mutex::EnableDebugLog(const char *name) {
-  SynchEvent *e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
+void Mutex::EnableDebugLog(const char* name) {
+  SynchEvent* e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
   e->log = true;
   UnrefSynchEvent(e);
 }
@@ -766,11 +762,10 @@
   synch_check_invariants.store(enabled, std::memory_order_release);
 }
 
-void Mutex::EnableInvariantDebugging(void (*invariant)(void *),
-                                     void *arg) {
+void Mutex::EnableInvariantDebugging(void (*invariant)(void*), void* arg) {
   if (synch_check_invariants.load(std::memory_order_acquire) &&
       invariant != nullptr) {
-    SynchEvent *e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin);
+    SynchEvent* e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin);
     e->invariant = invariant;
     e->arg = arg;
     UnrefSynchEvent(e);
@@ -786,15 +781,15 @@
 // waiters with the same condition, type of lock, and thread priority.
 //
 // Requires that x and y be waiting on the same Mutex queue.
-static bool MuEquivalentWaiter(PerThreadSynch *x, PerThreadSynch *y) {
+static bool MuEquivalentWaiter(PerThreadSynch* x, PerThreadSynch* y) {
   return x->waitp->how == y->waitp->how && x->priority == y->priority &&
          Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond);
 }
 
 // Given the contents of a mutex word containing a PerThreadSynch pointer,
 // return the pointer.
-static inline PerThreadSynch *GetPerThreadSynch(intptr_t v) {
-  return reinterpret_cast<PerThreadSynch *>(v & kMuHigh);
+static inline PerThreadSynch* GetPerThreadSynch(intptr_t v) {
+  return reinterpret_cast<PerThreadSynch*>(v & kMuHigh);
 }
 
 // The next several routines maintain the per-thread next and skip fields
@@ -852,17 +847,17 @@
 // except those in the added node and the former "head" node.  This implies
 // that the new node is added after head, and so must be the new head or the
 // new front of the queue.
-static PerThreadSynch *Skip(PerThreadSynch *x) {
-  PerThreadSynch *x0 = nullptr;
-  PerThreadSynch *x1 = x;
-  PerThreadSynch *x2 = x->skip;
+static PerThreadSynch* Skip(PerThreadSynch* x) {
+  PerThreadSynch* x0 = nullptr;
+  PerThreadSynch* x1 = x;
+  PerThreadSynch* x2 = x->skip;
   if (x2 != nullptr) {
     // Each iteration attempts to advance sequence (x0,x1,x2) to next sequence
     // such that   x1 == x0->skip && x2 == x1->skip
     while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) {
-      x0->skip = x2;      // short-circuit skip from x0 to x2
+      x0->skip = x2;  // short-circuit skip from x0 to x2
     }
-    x->skip = x1;         // short-circuit skip from x to result
+    x->skip = x1;  // short-circuit skip from x to result
   }
   return x1;
 }
@@ -871,7 +866,7 @@
 // The latter is going to be removed out of order, because of a timeout.
 // Check whether "ancestor" has a skip field pointing to "to_be_removed",
 // and fix it if it does.
-static void FixSkip(PerThreadSynch *ancestor, PerThreadSynch *to_be_removed) {
+static void FixSkip(PerThreadSynch* ancestor, PerThreadSynch* to_be_removed) {
   if (ancestor->skip == to_be_removed) {  // ancestor->skip left dangling
     if (to_be_removed->skip != nullptr) {
       ancestor->skip = to_be_removed->skip;  // can skip past to_be_removed
@@ -883,7 +878,7 @@
   }
 }
 
-static void CondVarEnqueue(SynchWaitParams *waitp);
+static void CondVarEnqueue(SynchWaitParams* waitp);
 
 // Enqueue thread "waitp->thread" on a waiter queue.
 // Called with mutex spinlock held if head != nullptr
@@ -904,8 +899,8 @@
 // returned. This mechanism is used by CondVar to queue a thread on the
 // condition variable queue instead of the mutex queue in implementing Wait().
 // In this case, Enqueue() can return nullptr (if head==nullptr).
-static PerThreadSynch *Enqueue(PerThreadSynch *head,
-                               SynchWaitParams *waitp, intptr_t mu, int flags) {
+static PerThreadSynch* Enqueue(PerThreadSynch* head, SynchWaitParams* waitp,
+                               intptr_t mu, int flags) {
   // If we have been given a cv_word, call CondVarEnqueue() and return
   // the previous head of the Mutex waiter queue.
   if (waitp->cv_word != nullptr) {
@@ -913,23 +908,23 @@
     return head;
   }
 
-  PerThreadSynch *s = waitp->thread;
+  PerThreadSynch* s = waitp->thread;
   ABSL_RAW_CHECK(
       s->waitp == nullptr ||    // normal case
           s->waitp == waitp ||  // Fer()---transfer from condition variable
           s->suppress_fatal_errors,
       "detected illegal recursion into Mutex code");
   s->waitp = waitp;
-  s->skip = nullptr;             // maintain skip invariant (see above)
-  s->may_skip = true;            // always true on entering queue
-  s->wake = false;               // not being woken
+  s->skip = nullptr;   // maintain skip invariant (see above)
+  s->may_skip = true;  // always true on entering queue
+  s->wake = false;     // not being woken
   s->cond_waiter = ((flags & kMuIsCond) != 0);
 #ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
-  int64_t now_cycles = base_internal::CycleClock::Now();
+  int64_t now_cycles = CycleClock::Now();
   if (s->next_priority_read_cycles < now_cycles) {
     // Every so often, update our idea of the thread's priority.
     // pthread_getschedparam() is 5% of the block/wakeup time;
-    // base_internal::CycleClock::Now() is 0.5%.
+    // CycleClock::Now() is 0.5%.
     int policy;
     struct sched_param param;
     const int err = pthread_getschedparam(pthread_self(), &policy, &param);
@@ -938,8 +933,7 @@
     } else {
       s->priority = param.sched_priority;
       s->next_priority_read_cycles =
-          now_cycles +
-          static_cast<int64_t>(base_internal::CycleClock::Frequency());
+          now_cycles + static_cast<int64_t>(CycleClock::Frequency());
     }
   }
 #endif
@@ -949,7 +943,7 @@
     s->maybe_unlocking = false;  // no one is searching an empty list
     head = s;                    // s is new head
   } else {
-    PerThreadSynch *enqueue_after = nullptr;  // we'll put s after this element
+    PerThreadSynch* enqueue_after = nullptr;  // we'll put s after this element
 #ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
     if (s->priority > head->priority) {  // s's priority is above head's
       // try to put s in priority-fifo order, or failing that at the front.
@@ -960,20 +954,20 @@
         // Within a skip chain, all waiters have the same priority, so we can
         // skip forward through the chains until we find one with a lower
         // priority than the waiter to be enqueued.
-        PerThreadSynch *advance_to = head;    // next value of enqueue_after
+        PerThreadSynch* advance_to = head;  // next value of enqueue_after
         do {
           enqueue_after = advance_to;
           // (side-effect: optimizes skip chain)
           advance_to = Skip(enqueue_after->next);
         } while (s->priority <= advance_to->priority);
-              // termination guaranteed because s->priority > head->priority
-              // and head is the end of a skip chain
+        // termination guaranteed because s->priority > head->priority
+        // and head is the end of a skip chain
       } else if (waitp->how == kExclusive &&
                  Condition::GuaranteedEqual(waitp->cond, nullptr)) {
         // An unlocker could be scanning the queue, but we know it will recheck
         // the queue front for writers that have no condition, which is what s
         // is, so an insert at front is safe.
-        enqueue_after = head;       // add after head, at front
+        enqueue_after = head;  // add after head, at front
       }
     }
 #endif
@@ -998,12 +992,12 @@
         enqueue_after->skip = enqueue_after->next;
       }
       if (MuEquivalentWaiter(s, s->next)) {  // s->may_skip is known to be true
-        s->skip = s->next;                // s may skip to its successor
+        s->skip = s->next;                   // s may skip to its successor
       }
-    } else {   // enqueue not done any other way, so
-               // we're inserting s at the back
+    } else {  // enqueue not done any other way, so
+              // we're inserting s at the back
       // s will become new head; copy data from head into it
-      s->next = head->next;        // add s after head
+      s->next = head->next;  // add s after head
       head->next = s;
       s->readers = head->readers;  // reader count is from previous head
       s->maybe_unlocking = head->maybe_unlocking;  // same for unlock hint
@@ -1022,17 +1016,17 @@
 // whose last element is head.  The new head element is returned, or null
 // if the list is made empty.
 // Dequeue is called with both spinlock and Mutex held.
-static PerThreadSynch *Dequeue(PerThreadSynch *head, PerThreadSynch *pw) {
-  PerThreadSynch *w = pw->next;
-  pw->next = w->next;         // snip w out of list
-  if (head == w) {            // we removed the head
+static PerThreadSynch* Dequeue(PerThreadSynch* head, PerThreadSynch* pw) {
+  PerThreadSynch* w = pw->next;
+  pw->next = w->next;                 // snip w out of list
+  if (head == w) {                    // we removed the head
     head = (pw == w) ? nullptr : pw;  // either emptied list, or pw is new head
   } else if (pw != head && MuEquivalentWaiter(pw, pw->next)) {
     // pw can skip to its new successor
     if (pw->next->skip !=
         nullptr) {  // either skip to its successors skip target
       pw->skip = pw->next->skip;
-    } else {                   // or to pw's successor
+    } else {  // or to pw's successor
       pw->skip = pw->next;
     }
   }
@@ -1045,27 +1039,27 @@
 // singly-linked list wake_list in the order found.   Assumes that
 // there is only one such element if the element has how == kExclusive.
 // Return the new head.
-static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head,
-                                          PerThreadSynch *pw,
-                                          PerThreadSynch **wake_tail) {
-  PerThreadSynch *orig_h = head;
-  PerThreadSynch *w = pw->next;
+static PerThreadSynch* DequeueAllWakeable(PerThreadSynch* head,
+                                          PerThreadSynch* pw,
+                                          PerThreadSynch** wake_tail) {
+  PerThreadSynch* orig_h = head;
+  PerThreadSynch* w = pw->next;
   bool skipped = false;
   do {
-    if (w->wake) {                    // remove this element
+    if (w->wake) {  // remove this element
       ABSL_RAW_CHECK(pw->skip == nullptr, "bad skip in DequeueAllWakeable");
       // we're removing pw's successor so either pw->skip is zero or we should
       // already have removed pw since if pw->skip!=null, pw has the same
       // condition as w.
       head = Dequeue(head, pw);
-      w->next = *wake_tail;           // keep list terminated
-      *wake_tail = w;                 // add w to wake_list;
-      wake_tail = &w->next;           // next addition to end
+      w->next = *wake_tail;               // keep list terminated
+      *wake_tail = w;                     // add w to wake_list;
+      wake_tail = &w->next;               // next addition to end
       if (w->waitp->how == kExclusive) {  // wake at most 1 writer
         break;
       }
-    } else {                // not waking this one; skip
-      pw = Skip(w);       // skip as much as possible
+    } else {         // not waking this one; skip
+      pw = Skip(w);  // skip as much as possible
       skipped = true;
     }
     w = pw->next;
@@ -1083,7 +1077,7 @@
 
 // Try to remove thread s from the list of waiters on this mutex.
 // Does nothing if s is not on the waiter list.
-void Mutex::TryRemove(PerThreadSynch *s) {
+void Mutex::TryRemove(PerThreadSynch* s) {
   SchedulingGuard::ScopedDisable disable_rescheduling;
   intptr_t v = mu_.load(std::memory_order_relaxed);
   // acquire spinlock & lock
@@ -1091,16 +1085,16 @@
       mu_.compare_exchange_strong(v, v | kMuSpin | kMuWriter,
                                   std::memory_order_acquire,
                                   std::memory_order_relaxed)) {
-    PerThreadSynch *h = GetPerThreadSynch(v);
+    PerThreadSynch* h = GetPerThreadSynch(v);
     if (h != nullptr) {
-      PerThreadSynch *pw = h;   // pw is w's predecessor
-      PerThreadSynch *w;
+      PerThreadSynch* pw = h;  // pw is w's predecessor
+      PerThreadSynch* w;
       if ((w = pw->next) != s) {  // search for thread,
         do {                      // processing at least one element
           // If the current element isn't equivalent to the waiter to be
           // removed, we can skip the entire chain.
           if (!MuEquivalentWaiter(s, w)) {
-            pw = Skip(w);                // so skip all that won't match
+            pw = Skip(w);  // so skip all that won't match
             // we don't have to worry about dangling skip fields
             // in the threads we skipped; none can point to s
             // because they are in a different equivalence class.
@@ -1112,7 +1106,7 @@
           // process the first thread again.
         } while ((w = pw->next) != s && pw != h);
       }
-      if (w == s) {                 // found thread; remove it
+      if (w == s) {  // found thread; remove it
         // pw->skip may be non-zero here; the loop above ensured that
         // no ancestor of s can skip to s, so removal is safe anyway.
         h = Dequeue(h, pw);
@@ -1121,16 +1115,15 @@
       }
     }
     intptr_t nv;
-    do {                        // release spinlock and lock
+    do {  // release spinlock and lock
       v = mu_.load(std::memory_order_relaxed);
       nv = v & (kMuDesig | kMuEvent);
       if (h != nullptr) {
         nv |= kMuWait | reinterpret_cast<intptr_t>(h);
-        h->readers = 0;            // we hold writer lock
+        h->readers = 0;              // we hold writer lock
         h->maybe_unlocking = false;  // finished unlocking
       }
-    } while (!mu_.compare_exchange_weak(v, nv,
-                                        std::memory_order_release,
+    } while (!mu_.compare_exchange_weak(v, nv, std::memory_order_release,
                                         std::memory_order_relaxed));
   }
 }
@@ -1140,7 +1133,7 @@
 // if the wait extends past the absolute time specified, even if "s" is still
 // on the mutex queue.  In this case, remove "s" from the queue and return
 // true, otherwise return false.
-void Mutex::Block(PerThreadSynch *s) {
+void Mutex::Block(PerThreadSynch* s) {
   while (s->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) {
     if (!DecrementSynchSem(this, s, s->waitp->timeout)) {
       // After a timeout, we go into a spin loop until we remove ourselves
@@ -1159,7 +1152,7 @@
         // is not on the queue.
         this->TryRemove(s);
       }
-      s->waitp->timeout = KernelTimeout::Never();      // timeout is satisfied
+      s->waitp->timeout = KernelTimeout::Never();  // timeout is satisfied
       s->waitp->cond = nullptr;  // condition no longer relevant for wakeups
     }
   }
@@ -1169,8 +1162,8 @@
 }
 
 // Wake thread w, and return the next thread in the list.
-PerThreadSynch *Mutex::Wakeup(PerThreadSynch *w) {
-  PerThreadSynch *next = w->next;
+PerThreadSynch* Mutex::Wakeup(PerThreadSynch* w) {
+  PerThreadSynch* next = w->next;
   w->next = nullptr;
   w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
   IncrementSynchSem(this, w);
@@ -1178,7 +1171,7 @@
   return next;
 }
 
-static GraphId GetGraphIdLocked(Mutex *mu)
+static GraphId GetGraphIdLocked(Mutex* mu)
     ABSL_EXCLUSIVE_LOCKS_REQUIRED(deadlock_graph_mu) {
   if (!deadlock_graph) {  // (re)create the deadlock graph.
     deadlock_graph =
@@ -1188,7 +1181,7 @@
   return deadlock_graph->GetId(mu);
 }
 
-static GraphId GetGraphId(Mutex *mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
+static GraphId GetGraphId(Mutex* mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
   deadlock_graph_mu.Lock();
   GraphId id = GetGraphIdLocked(mu);
   deadlock_graph_mu.Unlock();
@@ -1198,7 +1191,7 @@
 // Record a lock acquisition.  This is used in debug mode for deadlock
 // detection.  The held_locks pointer points to the relevant data
 // structure for each case.
-static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
+static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld* held_locks) {
   int n = held_locks->n;
   int i = 0;
   while (i != n && held_locks->locks[i].id != id) {
@@ -1222,7 +1215,7 @@
 // eventually followed by a call to LockLeave(mu, id, x) by the same thread.
 // It does not process the event if it is not needed when deadlock detection
 // is disabled.
-static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
+static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld* held_locks) {
   int n = held_locks->n;
   int i = 0;
   while (i != n && held_locks->locks[i].id != id) {
@@ -1237,11 +1230,11 @@
         i++;
       }
       if (i == n) {  // mu missing means releasing unheld lock
-        SynchEvent *mu_events = GetSynchEvent(mu);
+        SynchEvent* mu_events = GetSynchEvent(mu);
         ABSL_RAW_LOG(FATAL,
                      "thread releasing lock it does not hold: %p %s; "
                      ,
-                     static_cast<void *>(mu),
+                     static_cast<void*>(mu),
                      mu_events == nullptr ? "" : mu_events->name);
       }
     }
@@ -1258,7 +1251,7 @@
 }
 
 // Call LockEnter() if in debug mode and deadlock detection is enabled.
-static inline void DebugOnlyLockEnter(Mutex *mu) {
+static inline void DebugOnlyLockEnter(Mutex* mu) {
   if (kDebugMode) {
     if (synch_deadlock_detection.load(std::memory_order_acquire) !=
         OnDeadlockCycle::kIgnore) {
@@ -1268,7 +1261,7 @@
 }
 
 // Call LockEnter() if in debug mode and deadlock detection is enabled.
-static inline void DebugOnlyLockEnter(Mutex *mu, GraphId id) {
+static inline void DebugOnlyLockEnter(Mutex* mu, GraphId id) {
   if (kDebugMode) {
     if (synch_deadlock_detection.load(std::memory_order_acquire) !=
         OnDeadlockCycle::kIgnore) {
@@ -1278,7 +1271,7 @@
 }
 
 // Call LockLeave() if in debug mode and deadlock detection is enabled.
-static inline void DebugOnlyLockLeave(Mutex *mu) {
+static inline void DebugOnlyLockLeave(Mutex* mu) {
   if (kDebugMode) {
     if (synch_deadlock_detection.load(std::memory_order_acquire) !=
         OnDeadlockCycle::kIgnore) {
@@ -1287,7 +1280,7 @@
   }
 }
 
-static char *StackString(void **pcs, int n, char *buf, int maxlen,
+static char* StackString(void** pcs, int n, char* buf, int maxlen,
                          bool symbolize) {
   static constexpr int kSymLen = 200;
   char sym[kSymLen];
@@ -1310,15 +1303,17 @@
   return buf;
 }
 
-static char *CurrentStackString(char *buf, int maxlen, bool symbolize) {
-  void *pcs[40];
+static char* CurrentStackString(char* buf, int maxlen, bool symbolize) {
+  void* pcs[40];
   return StackString(pcs, absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 2), buf,
                      maxlen, symbolize);
 }
 
 namespace {
-enum { kMaxDeadlockPathLen = 10 };  // maximum length of a deadlock cycle;
-                                    // a path this long would be remarkable
+enum {
+  kMaxDeadlockPathLen = 10
+};  // maximum length of a deadlock cycle;
+    // a path this long would be remarkable
 // Buffers required to report a deadlock.
 // We do not allocate them on stack to avoid large stack frame.
 struct DeadlockReportBuffers {
@@ -1328,11 +1323,11 @@
 
 struct ScopedDeadlockReportBuffers {
   ScopedDeadlockReportBuffers() {
-    b = reinterpret_cast<DeadlockReportBuffers *>(
+    b = reinterpret_cast<DeadlockReportBuffers*>(
         base_internal::LowLevelAlloc::Alloc(sizeof(*b)));
   }
   ~ScopedDeadlockReportBuffers() { base_internal::LowLevelAlloc::Free(b); }
-  DeadlockReportBuffers *b;
+  DeadlockReportBuffers* b;
 };
 
 // Helper to pass to GraphCycles::UpdateStackTrace.
@@ -1343,13 +1338,13 @@
 
 // Called in debug mode when a thread is about to acquire a lock in a way that
 // may block.
-static GraphId DeadlockCheck(Mutex *mu) {
+static GraphId DeadlockCheck(Mutex* mu) {
   if (synch_deadlock_detection.load(std::memory_order_acquire) ==
       OnDeadlockCycle::kIgnore) {
     return InvalidGraphId();
   }
 
-  SynchLocksHeld *all_locks = Synch_GetAllLocks();
+  SynchLocksHeld* all_locks = Synch_GetAllLocks();
 
   absl::base_internal::SpinLockHolder lock(&deadlock_graph_mu);
   const GraphId mu_id = GetGraphIdLocked(mu);
@@ -1371,8 +1366,8 @@
   // For each other mutex already held by this thread:
   for (int i = 0; i != all_locks->n; i++) {
     const GraphId other_node_id = all_locks->locks[i].id;
-    const Mutex *other =
-        static_cast<const Mutex *>(deadlock_graph->Ptr(other_node_id));
+    const Mutex* other =
+        static_cast<const Mutex*>(deadlock_graph->Ptr(other_node_id));
     if (other == nullptr) {
       // Ignore stale lock
       continue;
@@ -1381,7 +1376,7 @@
     // Add the acquired-before edge to the graph.
     if (!deadlock_graph->InsertEdge(other_node_id, mu_id)) {
       ScopedDeadlockReportBuffers scoped_buffers;
-      DeadlockReportBuffers *b = scoped_buffers.b;
+      DeadlockReportBuffers* b = scoped_buffers.b;
       static int number_of_reported_deadlocks = 0;
       number_of_reported_deadlocks++;
       // Symbolize only the first 2 deadlock reports to avoid huge slowdowns.
@@ -1392,25 +1387,25 @@
       for (int j = 0; j != all_locks->n; j++) {
         void* pr = deadlock_graph->Ptr(all_locks->locks[j].id);
         if (pr != nullptr) {
-          snprintf(b->buf + len, sizeof (b->buf) - len, " %p", pr);
+          snprintf(b->buf + len, sizeof(b->buf) - len, " %p", pr);
           len += strlen(&b->buf[len]);
         }
       }
       ABSL_RAW_LOG(ERROR,
                    "Acquiring absl::Mutex %p while holding %s; a cycle in the "
                    "historical lock ordering graph has been observed",
-                   static_cast<void *>(mu), b->buf);
+                   static_cast<void*>(mu), b->buf);
       ABSL_RAW_LOG(ERROR, "Cycle: ");
-      int path_len = deadlock_graph->FindPath(
-          mu_id, other_node_id, ABSL_ARRAYSIZE(b->path), b->path);
+      int path_len = deadlock_graph->FindPath(mu_id, other_node_id,
+                                              ABSL_ARRAYSIZE(b->path), b->path);
       for (int j = 0; j != path_len && j != ABSL_ARRAYSIZE(b->path); j++) {
         GraphId id = b->path[j];
-        Mutex *path_mu = static_cast<Mutex *>(deadlock_graph->Ptr(id));
+        Mutex* path_mu = static_cast<Mutex*>(deadlock_graph->Ptr(id));
         if (path_mu == nullptr) continue;
         void** stack;
         int depth = deadlock_graph->GetStackTrace(id, &stack);
         snprintf(b->buf, sizeof(b->buf),
-                 "mutex@%p stack: ", static_cast<void *>(path_mu));
+                 "mutex@%p stack: ", static_cast<void*>(path_mu));
         StackString(stack, depth, b->buf + strlen(b->buf),
                     static_cast<int>(sizeof(b->buf) - strlen(b->buf)),
                     symbolize);
@@ -1425,7 +1420,7 @@
         ABSL_RAW_LOG(FATAL, "dying due to potential deadlock");
         return mu_id;
       }
-      break;   // report at most one potential deadlock per acquisition
+      break;  // report at most one potential deadlock per acquisition
     }
   }
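For orientation while reading the deadlock-report hunks above: a minimal sketch, assuming only the public knobs declared in mutex.h, of how this path gets exercised in a debug build. The mutexes, threads, and deliberately inconsistent lock order are invented for illustration.

```cpp
#include <thread>

#include "absl/synchronization/mutex.h"

// Illustrative only: with kReport in a debug build, taking `a` and `b` in
// opposite orders on two threads records a cycle in the lock-ordering graph,
// which is reported even though neither thread ever actually blocks.
int main() {
  absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);

  absl::Mutex a, b;
  std::thread t1([&] {
    absl::MutexLock la(&a);
    absl::MutexLock lb(&b);  // records the edge a -> b
  });
  t1.join();

  std::thread t2([&] {
    absl::MutexLock lb(&b);
    absl::MutexLock la(&a);  // edge b -> a closes the cycle; a report is logged
  });
  t2.join();
  return 0;
}
```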
 
@@ -1434,7 +1429,7 @@
 
 // Invoke DeadlockCheck() iff we're in debug mode and
 // deadlock checking has been enabled.
-static inline GraphId DebugOnlyDeadlockCheck(Mutex *mu) {
+static inline GraphId DebugOnlyDeadlockCheck(Mutex* mu) {
   if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
                         OnDeadlockCycle::kIgnore) {
     return DeadlockCheck(mu);
@@ -1461,13 +1456,13 @@
       (mu_.load(std::memory_order_relaxed) & (kMuWriter | kMuReader)) != 0 &&
       synch_deadlock_detection.load(std::memory_order_acquire) !=
           OnDeadlockCycle::kIgnore) {
-    GraphId id = GetGraphId(const_cast<Mutex *>(this));
-    SynchLocksHeld *locks = Synch_GetAllLocks();
+    GraphId id = GetGraphId(const_cast<Mutex*>(this));
+    SynchLocksHeld* locks = Synch_GetAllLocks();
     for (int i = 0; i != locks->n; i++) {
       if (locks->locks[i].id == id) {
-        SynchEvent *mu_events = GetSynchEvent(this);
+        SynchEvent* mu_events = GetSynchEvent(this);
         ABSL_RAW_LOG(FATAL, "thread should not hold mutex %p %s",
-                     static_cast<const void *>(this),
+                     static_cast<const void*>(this),
                      (mu_events == nullptr ? "" : mu_events->name));
       }
     }
@@ -1480,8 +1475,8 @@
   int c = GetMutexGlobals().spinloop_iterations;
   do {  // do/while somewhat faster on AMD
     intptr_t v = mu->load(std::memory_order_relaxed);
-    if ((v & (kMuReader|kMuEvent)) != 0) {
-      return false;  // a reader or tracing -> give up
+    if ((v & (kMuReader | kMuEvent)) != 0) {
+      return false;                       // a reader or tracing -> give up
     } else if (((v & kMuWriter) == 0) &&  // no holder -> try to acquire
                mu->compare_exchange_strong(v, kMuWriter | v,
                                            std::memory_order_acquire,
@@ -1498,8 +1493,7 @@
   intptr_t v = mu_.load(std::memory_order_relaxed);
   // try fast acquire, then spin loop
   if ((v & (kMuWriter | kMuReader | kMuEvent)) != 0 ||
-      !mu_.compare_exchange_strong(v, kMuWriter | v,
-                                   std::memory_order_acquire,
+      !mu_.compare_exchange_strong(v, kMuWriter | v, std::memory_order_acquire,
                                    std::memory_order_relaxed)) {
     // try spin acquire, then slow loop
     if (!TryAcquireWithSpinning(&this->mu_)) {
@@ -1525,7 +1519,7 @@
   ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
 }
 
-void Mutex::LockWhen(const Condition &cond) {
+void Mutex::LockWhen(const Condition& cond) {
   ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
   GraphId id = DebugOnlyDeadlockCheck(this);
   this->LockSlow(kExclusive, &cond, 0);
@@ -1533,27 +1527,26 @@
   ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
 }
 
-bool Mutex::LockWhenWithTimeout(const Condition &cond, absl::Duration timeout) {
+bool Mutex::LockWhenWithTimeout(const Condition& cond, absl::Duration timeout) {
   ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
   GraphId id = DebugOnlyDeadlockCheck(this);
-  bool res = LockSlowWithDeadline(kExclusive, &cond,
-                                  KernelTimeout(timeout), 0);
+  bool res = LockSlowWithDeadline(kExclusive, &cond, KernelTimeout(timeout), 0);
   DebugOnlyLockEnter(this, id);
   ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
   return res;
 }
 
-bool Mutex::LockWhenWithDeadline(const Condition &cond, absl::Time deadline) {
+bool Mutex::LockWhenWithDeadline(const Condition& cond, absl::Time deadline) {
   ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
   GraphId id = DebugOnlyDeadlockCheck(this);
-  bool res = LockSlowWithDeadline(kExclusive, &cond,
-                                  KernelTimeout(deadline), 0);
+  bool res =
+      LockSlowWithDeadline(kExclusive, &cond, KernelTimeout(deadline), 0);
   DebugOnlyLockEnter(this, id);
   ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
   return res;
 }
 
-void Mutex::ReaderLockWhen(const Condition &cond) {
+void Mutex::ReaderLockWhen(const Condition& cond) {
   ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
   GraphId id = DebugOnlyDeadlockCheck(this);
   this->LockSlow(kShared, &cond, 0);
@@ -1561,7 +1554,7 @@
   ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
 }
 
-bool Mutex::ReaderLockWhenWithTimeout(const Condition &cond,
+bool Mutex::ReaderLockWhenWithTimeout(const Condition& cond,
                                       absl::Duration timeout) {
   ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
   GraphId id = DebugOnlyDeadlockCheck(this);
@@ -1571,7 +1564,7 @@
   return res;
 }
 
-bool Mutex::ReaderLockWhenWithDeadline(const Condition &cond,
+bool Mutex::ReaderLockWhenWithDeadline(const Condition& cond,
                                        absl::Time deadline) {
   ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
   GraphId id = DebugOnlyDeadlockCheck(this);
@@ -1581,19 +1574,19 @@
   return res;
 }
 
-void Mutex::Await(const Condition &cond) {
-  if (cond.Eval()) {    // condition already true; nothing to do
+void Mutex::Await(const Condition& cond) {
+  if (cond.Eval()) {  // condition already true; nothing to do
     if (kDebugMode) {
       this->AssertReaderHeld();
     }
-  } else {              // normal case
+  } else {  // normal case
     ABSL_RAW_CHECK(this->AwaitCommon(cond, KernelTimeout::Never()),
                    "condition untrue on return from Await");
   }
 }
 
-bool Mutex::AwaitWithTimeout(const Condition &cond, absl::Duration timeout) {
-  if (cond.Eval()) {      // condition already true; nothing to do
+bool Mutex::AwaitWithTimeout(const Condition& cond, absl::Duration timeout) {
+  if (cond.Eval()) {  // condition already true; nothing to do
     if (kDebugMode) {
       this->AssertReaderHeld();
     }
@@ -1607,8 +1600,8 @@
   return res;
 }
 
-bool Mutex::AwaitWithDeadline(const Condition &cond, absl::Time deadline) {
-  if (cond.Eval()) {      // condition already true; nothing to do
+bool Mutex::AwaitWithDeadline(const Condition& cond, absl::Time deadline) {
+  if (cond.Eval()) {  // condition already true; nothing to do
     if (kDebugMode) {
       this->AssertReaderHeld();
     }
@@ -1622,14 +1615,14 @@
   return res;
 }
 
-bool Mutex::AwaitCommon(const Condition &cond, KernelTimeout t) {
+bool Mutex::AwaitCommon(const Condition& cond, KernelTimeout t) {
   this->AssertReaderHeld();
   MuHow how =
       (mu_.load(std::memory_order_relaxed) & kMuWriter) ? kExclusive : kShared;
   ABSL_TSAN_MUTEX_PRE_UNLOCK(this, TsanFlags(how));
-  SynchWaitParams waitp(
-      how, &cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
-      nullptr /*no cv_word*/);
+  SynchWaitParams waitp(how, &cond, t, nullptr /*no cvmu*/,
+                        Synch_GetPerThreadAnnotated(this),
+                        nullptr /*no cv_word*/);
   int flags = kMuHasBlocked;
   if (!Condition::GuaranteedEqual(&cond, nullptr)) {
     flags |= kMuIsCond;
@@ -1649,14 +1642,13 @@
   ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
   intptr_t v = mu_.load(std::memory_order_relaxed);
   if ((v & (kMuWriter | kMuReader | kMuEvent)) == 0 &&  // try fast acquire
-      mu_.compare_exchange_strong(v, kMuWriter | v,
-                                  std::memory_order_acquire,
+      mu_.compare_exchange_strong(v, kMuWriter | v, std::memory_order_acquire,
                                   std::memory_order_relaxed)) {
     DebugOnlyLockEnter(this);
     ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
     return true;
   }
-  if ((v & kMuEvent) != 0) {              // we're recording events
+  if ((v & kMuEvent) != 0) {                      // we're recording events
     if ((v & kExclusive->slow_need_zero) == 0 &&  // try fast acquire
         mu_.compare_exchange_strong(
             v, (kExclusive->fast_or | v) + kExclusive->fast_add,
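The hunk above is the fast path of `Mutex::TryLock()`. A small usage sketch of the non-blocking pattern it supports; `stats_mu`, `counter`, and `MaybeBump` are invented names, not part of this change.

```cpp
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

// TryLock() never blocks: it either acquires the lock (and the caller must
// Unlock()) or returns false immediately.
absl::Mutex stats_mu;
int counter ABSL_GUARDED_BY(stats_mu) = 0;

bool MaybeBump() {
  if (!stats_mu.TryLock()) return false;  // contended: give up, don't block
  ++counter;
  stats_mu.Unlock();
  return true;
}
```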
@@ -1682,7 +1674,7 @@
   // changing (typically because the reader count changes) under the CAS.  We
   // limit the number of attempts to avoid having to think about livelock.
   int loop_limit = 5;
-  while ((v & (kMuWriter|kMuWait|kMuEvent)) == 0 && loop_limit != 0) {
+  while ((v & (kMuWriter | kMuWait | kMuEvent)) == 0 && loop_limit != 0) {
     if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
                                     std::memory_order_acquire,
                                     std::memory_order_relaxed)) {
@@ -1694,7 +1686,7 @@
     loop_limit--;
     v = mu_.load(std::memory_order_relaxed);
   }
-  if ((v & kMuEvent) != 0) {   // we're recording events
+  if ((v & kMuEvent) != 0) {  // we're recording events
     loop_limit = 5;
     while ((v & kShared->slow_need_zero) == 0 && loop_limit != 0) {
       if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
@@ -1733,7 +1725,7 @@
   // should_try_cas is whether we'll try a compare-and-swap immediately.
   // NOTE: optimized out when kDebugMode is false.
   bool should_try_cas = ((v & (kMuEvent | kMuWriter)) == kMuWriter &&
-                          (v & (kMuWait | kMuDesig)) != kMuWait);
+                         (v & (kMuWait | kMuDesig)) != kMuWait);
   // But, we can use an alternate computation of it, that compilers
   // currently don't find on their own.  When that changes, this function
   // can be simplified.
@@ -1750,10 +1742,9 @@
                  static_cast<long long>(v), static_cast<long long>(x),
                  static_cast<long long>(y));
   }
-  if (x < y &&
-      mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
-                                  std::memory_order_release,
-                                  std::memory_order_relaxed)) {
+  if (x < y && mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
+                                           std::memory_order_release,
+                                           std::memory_order_relaxed)) {
     // fast writer release (writer with no waiters or with designated waker)
   } else {
     this->UnlockSlow(nullptr /*no waitp*/);  // take slow path
@@ -1763,7 +1754,7 @@
 
 // Requires v to represent a reader-locked state.
 static bool ExactlyOneReader(intptr_t v) {
-  assert((v & (kMuWriter|kMuReader)) == kMuReader);
+  assert((v & (kMuWriter | kMuReader)) == kMuReader);
   assert((v & kMuHigh) != 0);
   // The more straightforward "(v & kMuHigh) == kMuOne" also works, but
   // on some architectures the following generates slightly smaller code.
@@ -1776,12 +1767,11 @@
   ABSL_TSAN_MUTEX_PRE_UNLOCK(this, __tsan_mutex_read_lock);
   DebugOnlyLockLeave(this);
   intptr_t v = mu_.load(std::memory_order_relaxed);
-  assert((v & (kMuWriter|kMuReader)) == kMuReader);
-  if ((v & (kMuReader|kMuWait|kMuEvent)) == kMuReader) {
+  assert((v & (kMuWriter | kMuReader)) == kMuReader);
+  if ((v & (kMuReader | kMuWait | kMuEvent)) == kMuReader) {
     // fast reader release (reader with no waiters)
-    intptr_t clear = ExactlyOneReader(v) ? kMuReader|kMuOne : kMuOne;
-    if (mu_.compare_exchange_strong(v, v - clear,
-                                    std::memory_order_release,
+    intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
+    if (mu_.compare_exchange_strong(v, v - clear, std::memory_order_release,
                                     std::memory_order_relaxed)) {
       ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
       return;
@@ -1820,7 +1810,7 @@
 }
 
 // Internal version of LockWhen().  See LockSlowWithDeadline()
-ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond,
+ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition* cond,
                                              int flags) {
   ABSL_RAW_CHECK(
       this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags),
@@ -1828,7 +1818,7 @@
 }
 
 // Compute cond->Eval() and tell race detectors that we do it under mutex mu.
-static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
+static inline bool EvalConditionAnnotated(const Condition* cond, Mutex* mu,
                                           bool locking, bool trylock,
                                           bool read_lock) {
   // Delicate annotation dance.
@@ -1878,7 +1868,7 @@
 // tsan). As a result there is no tsan-visible synchronization between the
 // addition and this thread. So if we enabled race detection here,
 // it would race with the predicate initialization.
-static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) {
+static inline bool EvalConditionIgnored(Mutex* mu, const Condition* cond) {
   // Memory accesses are already ignored inside of lock/unlock operations,
   // but synchronization operations are also ignored. When we evaluate the
   // predicate we must ignore only memory accesses but not synchronization,
@@ -1903,7 +1893,7 @@
 //   obstruct this call
 // - kMuIsCond indicates that this is a conditional acquire (condition variable,
 //   Await,  LockWhen) so contention profiling should be suppressed.
-bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
+bool Mutex::LockSlowWithDeadline(MuHow how, const Condition* cond,
                                  KernelTimeout t, int flags) {
   intptr_t v = mu_.load(std::memory_order_relaxed);
   bool unlock = false;
@@ -1920,9 +1910,9 @@
     }
     unlock = true;
   }
-  SynchWaitParams waitp(
-      how, cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
-      nullptr /*no cv_word*/);
+  SynchWaitParams waitp(how, cond, t, nullptr /*no cvmu*/,
+                        Synch_GetPerThreadAnnotated(this),
+                        nullptr /*no cv_word*/);
   if (!Condition::GuaranteedEqual(cond, nullptr)) {
     flags |= kMuIsCond;
   }
@@ -1963,20 +1953,20 @@
   if (ABSL_PREDICT_TRUE((w & (w << 3) & (kMuWriter | kMuWrWait)) == 0)) return;
   RAW_CHECK_FMT((v & (kMuWriter | kMuReader)) != (kMuWriter | kMuReader),
                 "%s: Mutex corrupt: both reader and writer lock held: %p",
-                label, reinterpret_cast<void *>(v));
+                label, reinterpret_cast<void*>(v));
   RAW_CHECK_FMT((v & (kMuWait | kMuWrWait)) != kMuWrWait,
-                "%s: Mutex corrupt: waiting writer with no waiters: %p",
-                label, reinterpret_cast<void *>(v));
+                "%s: Mutex corrupt: waiting writer with no waiters: %p", label,
+                reinterpret_cast<void*>(v));
   assert(false);
 }
 
-void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
+void Mutex::LockSlowLoop(SynchWaitParams* waitp, int flags) {
   SchedulingGuard::ScopedDisable disable_rescheduling;
   int c = 0;
   intptr_t v = mu_.load(std::memory_order_relaxed);
   if ((v & kMuEvent) != 0) {
-    PostSynchEvent(this,
-         waitp->how == kExclusive?  SYNCH_EV_LOCK: SYNCH_EV_READERLOCK);
+    PostSynchEvent(
+        this, waitp->how == kExclusive ? SYNCH_EV_LOCK : SYNCH_EV_READERLOCK);
   }
   ABSL_RAW_CHECK(
       waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
@@ -2001,11 +1991,11 @@
         flags |= kMuHasBlocked;
         c = 0;
       }
-    } else {                      // need to access waiter list
+    } else {  // need to access waiter list
       bool dowait = false;
-      if ((v & (kMuSpin|kMuWait)) == 0) {   // no waiters
+      if ((v & (kMuSpin | kMuWait)) == 0) {  // no waiters
         // This thread tries to become the one and only waiter.
-        PerThreadSynch *new_h = Enqueue(nullptr, waitp, v, flags);
+        PerThreadSynch* new_h = Enqueue(nullptr, waitp, v, flags);
         intptr_t nv =
             (v & ClearDesignatedWakerMask(flags & kMuHasBlocked) & kMuLow) |
             kMuWait;
@@ -2017,7 +2007,7 @@
                 v, reinterpret_cast<intptr_t>(new_h) | nv,
                 std::memory_order_release, std::memory_order_relaxed)) {
           dowait = true;
-        } else {            // attempted Enqueue() failed
+        } else {  // attempted Enqueue() failed
           // zero out the waitp field set by Enqueue()
           waitp->thread->waitp = nullptr;
         }
@@ -2030,9 +2020,9 @@
                 (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
                     kMuSpin | kMuReader,
                 std::memory_order_acquire, std::memory_order_relaxed)) {
-          PerThreadSynch *h = GetPerThreadSynch(v);
-          h->readers += kMuOne;       // inc reader count in waiter
-          do {                        // release spinlock
+          PerThreadSynch* h = GetPerThreadSynch(v);
+          h->readers += kMuOne;  // inc reader count in waiter
+          do {                   // release spinlock
             v = mu_.load(std::memory_order_relaxed);
           } while (!mu_.compare_exchange_weak(v, (v & ~kMuSpin) | kMuReader,
                                               std::memory_order_release,
@@ -2042,7 +2032,7 @@
                                      waitp->how == kShared)) {
             break;  // we timed out, or condition true, so return
           }
-          this->UnlockSlow(waitp);           // got lock but condition false
+          this->UnlockSlow(waitp);  // got lock but condition false
           this->Block(waitp->thread);
           flags |= kMuHasBlocked;
           c = 0;
@@ -2053,18 +2043,19 @@
                      (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
                          kMuSpin | kMuWait,
                      std::memory_order_acquire, std::memory_order_relaxed)) {
-        PerThreadSynch *h = GetPerThreadSynch(v);
-        PerThreadSynch *new_h = Enqueue(h, waitp, v, flags);
+        PerThreadSynch* h = GetPerThreadSynch(v);
+        PerThreadSynch* new_h = Enqueue(h, waitp, v, flags);
         intptr_t wr_wait = 0;
         ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to list failed");
         if (waitp->how == kExclusive && (v & kMuReader) != 0) {
-          wr_wait = kMuWrWait;      // give priority to a waiting writer
+          wr_wait = kMuWrWait;  // give priority to a waiting writer
         }
-        do {                        // release spinlock
+        do {  // release spinlock
           v = mu_.load(std::memory_order_relaxed);
         } while (!mu_.compare_exchange_weak(
-            v, (v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait |
-            reinterpret_cast<intptr_t>(new_h),
+            v,
+            (v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait |
+                reinterpret_cast<intptr_t>(new_h),
             std::memory_order_release, std::memory_order_relaxed));
         dowait = true;
       }
@@ -2084,9 +2075,9 @@
       waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
       "detected illegal recursion into Mutex code");
   if ((v & kMuEvent) != 0) {
-    PostSynchEvent(this,
-                   waitp->how == kExclusive? SYNCH_EV_LOCK_RETURNING :
-                                      SYNCH_EV_READERLOCK_RETURNING);
+    PostSynchEvent(this, waitp->how == kExclusive
+                             ? SYNCH_EV_LOCK_RETURNING
+                             : SYNCH_EV_READERLOCK_RETURNING);
   }
 }
 
@@ -2095,28 +2086,28 @@
 // which holds the lock but is not runnable because its condition is false
 // or it is in the process of blocking on a condition variable; it must requeue
 // itself on the mutex/condvar to wait for its condition to become true.
-ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
+ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams* waitp) {
   SchedulingGuard::ScopedDisable disable_rescheduling;
   intptr_t v = mu_.load(std::memory_order_relaxed);
   this->AssertReaderHeld();
   CheckForMutexCorruption(v, "Unlock");
   if ((v & kMuEvent) != 0) {
-    PostSynchEvent(this,
-                (v & kMuWriter) != 0? SYNCH_EV_UNLOCK: SYNCH_EV_READERUNLOCK);
+    PostSynchEvent(
+        this, (v & kMuWriter) != 0 ? SYNCH_EV_UNLOCK : SYNCH_EV_READERUNLOCK);
   }
   int c = 0;
   // the waiter under consideration to wake, or zero
-  PerThreadSynch *w = nullptr;
+  PerThreadSynch* w = nullptr;
   // the predecessor to w or zero
-  PerThreadSynch *pw = nullptr;
+  PerThreadSynch* pw = nullptr;
   // head of the list searched previously, or zero
-  PerThreadSynch *old_h = nullptr;
+  PerThreadSynch* old_h = nullptr;
   // a condition that's known to be false.
-  const Condition *known_false = nullptr;
-  PerThreadSynch *wake_list = kPerThreadSynchNull;   // list of threads to wake
-  intptr_t wr_wait = 0;        // set to kMuWrWait if we wake a reader and a
-                               // later writer could have acquired the lock
-                               // (starvation avoidance)
+  const Condition* known_false = nullptr;
+  PerThreadSynch* wake_list = kPerThreadSynchNull;  // list of threads to wake
+  intptr_t wr_wait = 0;  // set to kMuWrWait if we wake a reader and a
+                         // later writer could have acquired the lock
+                         // (starvation avoidance)
   ABSL_RAW_CHECK(waitp == nullptr || waitp->thread->waitp == nullptr ||
                      waitp->thread->suppress_fatal_errors,
                  "detected illegal recursion into Mutex code");
@@ -2136,8 +2127,7 @@
     } else if ((v & (kMuReader | kMuWait)) == kMuReader && waitp == nullptr) {
       // fast reader release (reader with no waiters)
       intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
-      if (mu_.compare_exchange_strong(v, v - clear,
-                                      std::memory_order_release,
+      if (mu_.compare_exchange_strong(v, v - clear, std::memory_order_release,
                                       std::memory_order_relaxed)) {
         return;
       }
@@ -2145,16 +2135,16 @@
                mu_.compare_exchange_strong(v, v | kMuSpin,
                                            std::memory_order_acquire,
                                            std::memory_order_relaxed)) {
-      if ((v & kMuWait) == 0) {       // no one to wake
+      if ((v & kMuWait) == 0) {  // no one to wake
         intptr_t nv;
         bool do_enqueue = true;  // always Enqueue() the first time
         ABSL_RAW_CHECK(waitp != nullptr,
                        "UnlockSlow is confused");  // about to sleep
-        do {    // must loop to release spinlock as reader count may change
+        do {  // must loop to release spinlock as reader count may change
           v = mu_.load(std::memory_order_relaxed);
           // decrement reader count if there are readers
-          intptr_t new_readers = (v >= kMuOne)?  v - kMuOne : v;
-          PerThreadSynch *new_h = nullptr;
+          intptr_t new_readers = (v >= kMuOne) ? v - kMuOne : v;
+          PerThreadSynch* new_h = nullptr;
           if (do_enqueue) {
             // If we are enqueuing on a CondVar (waitp->cv_word != nullptr) then
             // we must not retry here.  The initial attempt will always have
@@ -2178,21 +2168,20 @@
           }
           // release spinlock & our lock; retry if reader-count changed
           // (writer count cannot change since we hold lock)
-        } while (!mu_.compare_exchange_weak(v, nv,
-                                            std::memory_order_release,
+        } while (!mu_.compare_exchange_weak(v, nv, std::memory_order_release,
                                             std::memory_order_relaxed));
         break;
       }
 
       // There are waiters.
       // Set h to the head of the circular waiter list.
-      PerThreadSynch *h = GetPerThreadSynch(v);
+      PerThreadSynch* h = GetPerThreadSynch(v);
       if ((v & kMuReader) != 0 && (h->readers & kMuHigh) > kMuOne) {
         // a reader but not the last
-        h->readers -= kMuOne;  // release our lock
-        intptr_t nv = v;       // normally just release spinlock
+        h->readers -= kMuOne;    // release our lock
+        intptr_t nv = v;         // normally just release spinlock
         if (waitp != nullptr) {  // but waitp!=nullptr => must queue ourselves
-          PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
+          PerThreadSynch* new_h = Enqueue(h, waitp, v, kMuIsCond);
           ABSL_RAW_CHECK(new_h != nullptr,
                          "waiters disappeared during Enqueue()!");
           nv &= kMuLow;
@@ -2210,8 +2199,8 @@
 
       // The lock is becoming free, and there's a waiter
       if (old_h != nullptr &&
-          !old_h->may_skip) {                  // we used old_h as a terminator
-        old_h->may_skip = true;                // allow old_h to skip once more
+          !old_h->may_skip) {    // we used old_h as a terminator
+        old_h->may_skip = true;  // allow old_h to skip once more
         ABSL_RAW_CHECK(old_h->skip == nullptr, "illegal skip from head");
         if (h != old_h && MuEquivalentWaiter(old_h, old_h->next)) {
           old_h->skip = old_h->next;  // old_h not head & can skip to successor
@@ -2220,7 +2209,7 @@
       if (h->next->waitp->how == kExclusive &&
           Condition::GuaranteedEqual(h->next->waitp->cond, nullptr)) {
         // easy case: writer with no condition; no need to search
-        pw = h;                       // wake w, the successor of h (=pw)
+        pw = h;  // wake w, the successor of h (=pw)
         w = h->next;
         w->wake = true;
         // We are waking up a writer.  This writer may be racing against
@@ -2243,13 +2232,13 @@
         // waiter has a condition or is a reader.  We avoid searching over
         // waiters we've searched on previous iterations by starting at
         // old_h if it's set.  If old_h==h, there's no one to wakeup at all.
-        if (old_h == h) {      // we've searched before, and nothing's new
-                               // so there's no one to wake.
-          intptr_t nv = (v & ~(kMuReader|kMuWriter|kMuWrWait));
+        if (old_h == h) {  // we've searched before, and nothing's new
+                           // so there's no one to wake.
+          intptr_t nv = (v & ~(kMuReader | kMuWriter | kMuWrWait));
           h->readers = 0;
-          h->maybe_unlocking = false;   // finished unlocking
-          if (waitp != nullptr) {       // we must queue ourselves and sleep
-            PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
+          h->maybe_unlocking = false;  // finished unlocking
+          if (waitp != nullptr) {      // we must queue ourselves and sleep
+            PerThreadSynch* new_h = Enqueue(h, waitp, v, kMuIsCond);
             nv &= kMuLow;
             if (new_h != nullptr) {
               nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
@@ -2263,12 +2252,12 @@
         }
 
         // set up to walk the list
-        PerThreadSynch *w_walk;   // current waiter during list walk
-        PerThreadSynch *pw_walk;  // previous waiter during list walk
+        PerThreadSynch* w_walk;   // current waiter during list walk
+        PerThreadSynch* pw_walk;  // previous waiter during list walk
         if (old_h != nullptr) {  // we've searched up to old_h before
           pw_walk = old_h;
           w_walk = old_h->next;
-        } else {            // no prior search, start at beginning
+        } else {  // no prior search, start at beginning
           pw_walk =
               nullptr;  // h->next's predecessor may change; don't record it
           w_walk = h->next;
@@ -2294,7 +2283,7 @@
         // to walk the path from w_walk to h inclusive. (TryRemove() can remove
         // a waiter anywhere, but it acquires both the spinlock and the Mutex)
 
-        old_h = h;        // remember we searched to here
+        old_h = h;  // remember we searched to here
 
         // Walk the path up to and including h looking for waiters we can wake.
         while (pw_walk != h) {
@@ -2306,24 +2295,24 @@
                //  is in fact true
                EvalConditionIgnored(this, w_walk->waitp->cond))) {
             if (w == nullptr) {
-              w_walk->wake = true;    // can wake this waiter
+              w_walk->wake = true;  // can wake this waiter
               w = w_walk;
               pw = pw_walk;
               if (w_walk->waitp->how == kExclusive) {
                 wr_wait = kMuWrWait;
-                break;                // bail if waking this writer
+                break;  // bail if waking this writer
               }
             } else if (w_walk->waitp->how == kShared) {  // wake if a reader
               w_walk->wake = true;
-            } else {   // writer with true condition
+            } else {  // writer with true condition
               wr_wait = kMuWrWait;
             }
-          } else {                  // can't wake; condition false
+          } else {                              // can't wake; condition false
             known_false = w_walk->waitp->cond;  // remember last false condition
           }
-          if (w_walk->wake) {   // we're waking reader w_walk
-            pw_walk = w_walk;   // don't skip similar waiters
-          } else {              // not waking; skip as much as possible
+          if (w_walk->wake) {  // we're waking reader w_walk
+            pw_walk = w_walk;  // don't skip similar waiters
+          } else {             // not waking; skip as much as possible
             pw_walk = Skip(w_walk);
           }
           // If pw_walk == h, then load of pw_walk->next can race with
@@ -2350,8 +2339,8 @@
       h = DequeueAllWakeable(h, pw, &wake_list);
 
       intptr_t nv = (v & kMuEvent) | kMuDesig;
-                                             // assume no waiters left,
-                                             // set kMuDesig for INV1a
+      // assume no waiters left,
+      // set kMuDesig for INV1a
 
       if (waitp != nullptr) {  // we must queue ourselves and sleep
         h = Enqueue(h, waitp, v, kMuIsCond);
@@ -2364,7 +2353,7 @@
 
       if (h != nullptr) {  // there are waiters left
         h->readers = 0;
-        h->maybe_unlocking = false;     // finished unlocking
+        h->maybe_unlocking = false;  // finished unlocking
         nv |= wr_wait | kMuWait | reinterpret_cast<intptr_t>(h);
       }
 
@@ -2375,12 +2364,12 @@
     }
     // aggressive here; no one can proceed till we do
     c = synchronization_internal::MutexDelay(c, AGGRESSIVE);
-  }                            // end of for(;;)-loop
+  }  // end of for(;;)-loop
 
   if (wake_list != kPerThreadSynchNull) {
     int64_t total_wait_cycles = 0;
     int64_t max_wait_cycles = 0;
-    int64_t now = base_internal::CycleClock::Now();
+    int64_t now = CycleClock::Now();
     do {
       // Profile lock contention events only if the waiter was trying to acquire
       // the lock, not waiting on a condition variable or Condition.
@@ -2392,7 +2381,7 @@
         wake_list->waitp->contention_start_cycles = now;
         wake_list->waitp->should_submit_contention_data = true;
       }
-      wake_list = Wakeup(wake_list);              // wake waiters
+      wake_list = Wakeup(wake_list);  // wake waiters
     } while (wake_list != kPerThreadSynchNull);
     if (total_wait_cycles > 0) {
       mutex_tracer("slow release", this, total_wait_cycles);
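The wake-up loop above aggregates per-waiter wait cycles and reports them through the registered tracer. A hedged sketch of installing one via the registration hook declared in mutex.h; the logging body and names are invented.

```cpp
#include <cstdint>
#include <cstdio>

#include "absl/synchronization/mutex.h"

// Receives the message, the mutex address, and the wait cycles passed to the
// tracer on the contended-release path above.
static void TraceContention(const char* msg, const void* obj,
                            int64_t wait_cycles) {
  std::fprintf(stderr, "mutex %p: %s (%lld cycles)\n", obj, msg,
               static_cast<long long>(wait_cycles));
}

void InstallMutexTracer() { absl::RegisterMutexTracer(&TraceContention); }
```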
@@ -2420,7 +2409,7 @@
 // condition variable.  If this mutex is free, we simply wake the thread.
 // It will later acquire the mutex with high probability.  Otherwise, we
 // enqueue thread w on this mutex.
-void Mutex::Fer(PerThreadSynch *w) {
+void Mutex::Fer(PerThreadSynch* w) {
   SchedulingGuard::ScopedDisable disable_rescheduling;
   int c = 0;
   ABSL_RAW_CHECK(w->waitp->cond == nullptr,
@@ -2445,9 +2434,9 @@
       IncrementSynchSem(this, w);
       return;
     } else {
-      if ((v & (kMuSpin|kMuWait)) == 0) {       // no waiters
+      if ((v & (kMuSpin | kMuWait)) == 0) {  // no waiters
         // This thread tries to become the one and only waiter.
-        PerThreadSynch *new_h = Enqueue(nullptr, w->waitp, v, kMuIsCond);
+        PerThreadSynch* new_h = Enqueue(nullptr, w->waitp, v, kMuIsCond);
         ABSL_RAW_CHECK(new_h != nullptr,
                        "Enqueue failed");  // we must queue ourselves
         if (mu_.compare_exchange_strong(
@@ -2457,8 +2446,8 @@
         }
       } else if ((v & kMuSpin) == 0 &&
                  mu_.compare_exchange_strong(v, v | kMuSpin | kMuWait)) {
-        PerThreadSynch *h = GetPerThreadSynch(v);
-        PerThreadSynch *new_h = Enqueue(h, w->waitp, v, kMuIsCond);
+        PerThreadSynch* h = GetPerThreadSynch(v);
+        PerThreadSynch* new_h = Enqueue(h, w->waitp, v, kMuIsCond);
         ABSL_RAW_CHECK(new_h != nullptr,
                        "Enqueue failed");  // we must queue ourselves
         do {
@@ -2477,19 +2466,18 @@
 
 void Mutex::AssertHeld() const {
   if ((mu_.load(std::memory_order_relaxed) & kMuWriter) == 0) {
-    SynchEvent *e = GetSynchEvent(this);
+    SynchEvent* e = GetSynchEvent(this);
     ABSL_RAW_LOG(FATAL, "thread should hold write lock on Mutex %p %s",
-                 static_cast<const void *>(this),
-                 (e == nullptr ? "" : e->name));
+                 static_cast<const void*>(this), (e == nullptr ? "" : e->name));
   }
 }
 
 void Mutex::AssertReaderHeld() const {
   if ((mu_.load(std::memory_order_relaxed) & (kMuReader | kMuWriter)) == 0) {
-    SynchEvent *e = GetSynchEvent(this);
-    ABSL_RAW_LOG(
-        FATAL, "thread should hold at least a read lock on Mutex %p %s",
-        static_cast<const void *>(this), (e == nullptr ? "" : e->name));
+    SynchEvent* e = GetSynchEvent(this);
+    ABSL_RAW_LOG(FATAL,
+                 "thread should hold at least a read lock on Mutex %p %s",
+                 static_cast<const void*>(this), (e == nullptr ? "" : e->name));
   }
 }
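The two assertion helpers above only change pointer spacing; as a reminder of the pattern they support, a brief sketch (class and member names are invented):

```cpp
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

class Counter {
 public:
  void Add(int n) ABSL_LOCKS_EXCLUDED(mu_) {
    absl::MutexLock l(&mu_);
    AddLocked(n);
  }

 private:
  // Callers must already hold mu_; AssertHeld() turns a violation into the
  // fatal log above and is annotated so the thread-safety analysis treats
  // mu_ as held afterwards.
  void AddLocked(int n) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
    mu_.AssertHeld();
    total_ += n;
  }

  absl::Mutex mu_;
  int total_ ABSL_GUARDED_BY(mu_) = 0;
};
```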
 
@@ -2500,13 +2488,17 @@
 static const intptr_t kCvLow = 0x0003L;  // low order bits of CV
 
 // Hack to make constant values available to gdb pretty printer
-enum { kGdbCvSpin = kCvSpin, kGdbCvEvent = kCvEvent, kGdbCvLow = kCvLow, };
+enum {
+  kGdbCvSpin = kCvSpin,
+  kGdbCvEvent = kCvEvent,
+  kGdbCvLow = kCvLow,
+};
 
 static_assert(PerThreadSynch::kAlignment > kCvLow,
               "PerThreadSynch::kAlignment must be greater than kCvLow");
 
-void CondVar::EnableDebugLog(const char *name) {
-  SynchEvent *e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin);
+void CondVar::EnableDebugLog(const char* name) {
+  SynchEvent* e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin);
   e->log = true;
   UnrefSynchEvent(e);
 }
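The event machinery reformatted here backs the optional debug log; a minimal sketch of naming a mutex and condition variable so the SYNCH_EV_* trace lines are attributable. The object names are invented, and this is a debugging aid rather than production configuration.

```cpp
#include "absl/synchronization/mutex.h"

absl::Mutex queue_mu;
absl::CondVar queue_nonempty;

// Named objects make the trace output readable when debug logging is on.
void EnableSynchTracing() {
  queue_mu.EnableDebugLog("queue_mu");
  queue_nonempty.EnableDebugLog("queue_nonempty");
}
```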
@@ -2517,25 +2509,23 @@
   }
 }
 
-
 // Remove thread s from the list of waiters on this condition variable.
-void CondVar::Remove(PerThreadSynch *s) {
+void CondVar::Remove(PerThreadSynch* s) {
   SchedulingGuard::ScopedDisable disable_rescheduling;
   intptr_t v;
   int c = 0;
   for (v = cv_.load(std::memory_order_relaxed);;
        v = cv_.load(std::memory_order_relaxed)) {
     if ((v & kCvSpin) == 0 &&  // attempt to acquire spinlock
-        cv_.compare_exchange_strong(v, v | kCvSpin,
-                                    std::memory_order_acquire,
+        cv_.compare_exchange_strong(v, v | kCvSpin, std::memory_order_acquire,
                                     std::memory_order_relaxed)) {
-      PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
+      PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
       if (h != nullptr) {
-        PerThreadSynch *w = h;
+        PerThreadSynch* w = h;
         while (w->next != s && w->next != h) {  // search for thread
           w = w->next;
         }
-        if (w->next == s) {           // found thread; remove it
+        if (w->next == s) {  // found thread; remove it
           w->next = s->next;
           if (h == s) {
             h = (w == s) ? nullptr : w;
@@ -2544,7 +2534,7 @@
           s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
         }
       }
-                                      // release spinlock
+      // release spinlock
       cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
                 std::memory_order_release);
       return;
@@ -2567,14 +2557,14 @@
 // variable queue just before the mutex is to be unlocked, and (most
 // importantly) after any call to an external routine that might re-enter the
 // mutex code.
-static void CondVarEnqueue(SynchWaitParams *waitp) {
+static void CondVarEnqueue(SynchWaitParams* waitp) {
   // This thread might be transferred to the Mutex queue by Fer() when
   // we are woken.  To make sure that is what happens, Enqueue() doesn't
   // call CondVarEnqueue() again but instead uses its normal code.  We
   // must do this before we queue ourselves so that cv_word will be null
   // when seen by the dequeuer, who may wish immediately to requeue
   // this thread on another queue.
-  std::atomic<intptr_t> *cv_word = waitp->cv_word;
+  std::atomic<intptr_t>* cv_word = waitp->cv_word;
   waitp->cv_word = nullptr;
 
   intptr_t v = cv_word->load(std::memory_order_relaxed);
@@ -2587,8 +2577,8 @@
     v = cv_word->load(std::memory_order_relaxed);
   }
   ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be");
-  waitp->thread->waitp = waitp;      // prepare ourselves for waiting
-  PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
+  waitp->thread->waitp = waitp;  // prepare ourselves for waiting
+  PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
   if (h == nullptr) {  // add this thread to waiter list
     waitp->thread->next = waitp->thread;
   } else {
@@ -2601,8 +2591,8 @@
                  std::memory_order_release);
 }
 
-bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) {
-  bool rc = false;          // return value; true iff we timed-out
+bool CondVar::WaitCommon(Mutex* mutex, KernelTimeout t) {
+  bool rc = false;  // return value; true iff we timed-out
 
   intptr_t mutex_v = mutex->mu_.load(std::memory_order_relaxed);
   Mutex::MuHow mutex_how = ((mutex_v & kMuWriter) != 0) ? kExclusive : kShared;
@@ -2669,27 +2659,25 @@
   return rc;
 }
 
-bool CondVar::WaitWithTimeout(Mutex *mu, absl::Duration timeout) {
+bool CondVar::WaitWithTimeout(Mutex* mu, absl::Duration timeout) {
   return WaitCommon(mu, KernelTimeout(timeout));
 }
 
-bool CondVar::WaitWithDeadline(Mutex *mu, absl::Time deadline) {
+bool CondVar::WaitWithDeadline(Mutex* mu, absl::Time deadline) {
   return WaitCommon(mu, KernelTimeout(deadline));
 }
 
-void CondVar::Wait(Mutex *mu) {
-  WaitCommon(mu, KernelTimeout::Never());
-}
+void CondVar::Wait(Mutex* mu) { WaitCommon(mu, KernelTimeout::Never()); }
 
 // Wake thread w
 // If it was a timed wait, w will be waiting on w->cv
 // Otherwise, if it was not a Mutex mutex, w will be waiting on w->sem
 // Otherwise, w is transferred to the Mutex mutex via Mutex::Fer().
-void CondVar::Wakeup(PerThreadSynch *w) {
+void CondVar::Wakeup(PerThreadSynch* w) {
   if (w->waitp->timeout.has_timeout() || w->waitp->cvmu == nullptr) {
     // The waiting thread only needs to observe "w->state == kAvailable" to be
     // released, we must cache "cvmu" before clearing "next".
-    Mutex *mu = w->waitp->cvmu;
+    Mutex* mu = w->waitp->cvmu;
     w->next = nullptr;
     w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
     Mutex::IncrementSynchSem(mu, w);
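Stepping back from the internals: the public contract reformatted above (`Wait()` releases the mutex, blocks, and re-acquires it before returning) is typically used as below. The queue type is invented for illustration.

```cpp
#include <deque>

#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

class IntQueue {
 public:
  void Push(int v) {
    absl::MutexLock l(&mu_);
    items_.push_back(v);
    nonempty_.Signal();  // wake one waiter
  }

  int Pop() {
    absl::MutexLock l(&mu_);
    while (items_.empty()) {
      nonempty_.Wait(&mu_);  // releases mu_ while blocked; re-tests on wakeup
    }
    int v = items_.front();
    items_.pop_front();
    return v;
  }

 private:
  absl::Mutex mu_;
  absl::CondVar nonempty_;
  std::deque<int> items_ ABSL_GUARDED_BY(mu_);
};
```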
@@ -2706,11 +2694,10 @@
   for (v = cv_.load(std::memory_order_relaxed); v != 0;
        v = cv_.load(std::memory_order_relaxed)) {
     if ((v & kCvSpin) == 0 &&  // attempt to acquire spinlock
-        cv_.compare_exchange_strong(v, v | kCvSpin,
-                                    std::memory_order_acquire,
+        cv_.compare_exchange_strong(v, v | kCvSpin, std::memory_order_acquire,
                                     std::memory_order_relaxed)) {
-      PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
-      PerThreadSynch *w = nullptr;
+      PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
+      PerThreadSynch* w = nullptr;
       if (h != nullptr) {  // remove first waiter
         w = h->next;
         if (w == h) {
@@ -2719,11 +2706,11 @@
           h->next = w->next;
         }
       }
-                                      // release spinlock
+      // release spinlock
       cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
                 std::memory_order_release);
       if (w != nullptr) {
-        CondVar::Wakeup(w);                // wake waiter, if there was one
+        CondVar::Wakeup(w);  // wake waiter, if there was one
         cond_var_tracer("Signal wakeup", this);
       }
       if ((v & kCvEvent) != 0) {
@@ -2738,7 +2725,7 @@
   ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
 }
 
-void CondVar::SignalAll () {
+void CondVar::SignalAll() {
   ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
   intptr_t v;
   int c = 0;
@@ -2752,11 +2739,11 @@
     if ((v & kCvSpin) == 0 &&
         cv_.compare_exchange_strong(v, v & kCvEvent, std::memory_order_acquire,
                                     std::memory_order_relaxed)) {
-      PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
+      PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
       if (h != nullptr) {
-        PerThreadSynch *w;
-        PerThreadSynch *n = h->next;
-        do {                          // for every thread, wake it up
+        PerThreadSynch* w;
+        PerThreadSynch* n = h->next;
+        do {  // for every thread, wake it up
           w = n;
           n = n->next;
           CondVar::Wakeup(w);
@@ -2784,42 +2771,41 @@
 }
 
 #ifdef ABSL_HAVE_THREAD_SANITIZER
-extern "C" void __tsan_read1(void *addr);
+extern "C" void __tsan_read1(void* addr);
 #else
 #define __tsan_read1(addr)  // do nothing if TSan not enabled
 #endif
 
 // A function that just returns its argument, dereferenced
-static bool Dereference(void *arg) {
+static bool Dereference(void* arg) {
   // ThreadSanitizer does not instrument this file for memory accesses.
   // This function dereferences a user variable that can participate
   // in a data race, so we need to manually tell TSan about this memory access.
   __tsan_read1(arg);
-  return *(static_cast<bool *>(arg));
+  return *(static_cast<bool*>(arg));
 }
 
 ABSL_CONST_INIT const Condition Condition::kTrue;
 
-Condition::Condition(bool (*func)(void *), void *arg)
-    : eval_(&CallVoidPtrFunction),
-      arg_(arg) {
+Condition::Condition(bool (*func)(void*), void* arg)
+    : eval_(&CallVoidPtrFunction), arg_(arg) {
   static_assert(sizeof(&func) <= sizeof(callback_),
                 "An overlarge function pointer passed to Condition.");
   StoreCallback(func);
 }
 
-bool Condition::CallVoidPtrFunction(const Condition *c) {
-  using FunctionPointer = bool (*)(void *);
+bool Condition::CallVoidPtrFunction(const Condition* c) {
+  using FunctionPointer = bool (*)(void*);
   FunctionPointer function_pointer;
   std::memcpy(&function_pointer, c->callback_, sizeof(function_pointer));
   return (*function_pointer)(c->arg_);
 }
 
-Condition::Condition(const bool *cond)
+Condition::Condition(const bool* cond)
     : eval_(CallVoidPtrFunction),
       // const_cast is safe since Dereference does not modify arg
-      arg_(const_cast<bool *>(cond)) {
-  using FunctionPointer = bool (*)(void *);
+      arg_(const_cast<bool*>(cond)) {
+  using FunctionPointer = bool (*)(void*);
   const FunctionPointer dereference = Dereference;
   StoreCallback(dereference);
 }
@@ -2829,7 +2815,7 @@
   return (this->eval_ == nullptr) || (*this->eval_)(this);
 }
 
-bool Condition::GuaranteedEqual(const Condition *a, const Condition *b) {
+bool Condition::GuaranteedEqual(const Condition* a, const Condition* b) {
   // kTrue logic.
   if (a == nullptr || a->eval_ == nullptr) {
     return b == nullptr || b->eval_ == nullptr;
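
The `Condition` code above stores the user's callback byte-for-byte in a raw `callback_` buffer and recovers it with `std::memcpy` before calling it (see `CallVoidPtrFunction`). A minimal standalone sketch of that type-erasure pattern, with hypothetical names:

#include <cstring>

// Sketch: copy a function pointer into a byte buffer and copy it back out
// before the call; memcpy sidesteps strict-aliasing concerns.
struct ErasedCallback {
  char buf[sizeof(bool (*)(void*))] = {};

  void Store(bool (*fn)(void*)) { std::memcpy(buf, &fn, sizeof(fn)); }

  bool Call(void* arg) const {
    bool (*fn)(void*) = nullptr;
    std::memcpy(&fn, buf, sizeof(fn));
    return fn(arg);
  }
};

static bool IsNonNull(void* p) { return p != nullptr; }
// Usage: ErasedCallback cb; cb.Store(&IsNonNull); cb.Call(&cb) yields true.
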
diff --git a/absl/synchronization/mutex.h b/absl/synchronization/mutex.h
index 0b6a9e1..2fd077c 100644
--- a/absl/synchronization/mutex.h
+++ b/absl/synchronization/mutex.h
@@ -141,8 +141,9 @@
 // issues that could potentially result in race conditions and deadlocks.
 //
 // For more information about the lock annotations, please see
-// [Thread Safety Analysis](http://clang.llvm.org/docs/ThreadSafetyAnalysis.html)
-// in the Clang documentation.
+// [Thread Safety
+// Analysis](http://clang.llvm.org/docs/ThreadSafetyAnalysis.html) in the Clang
+// documentation.
 //
 // See also `MutexLock`, below, for scoped `Mutex` acquisition.
 
@@ -323,7 +324,7 @@
   // `true`, `Await()` *may* skip the release/re-acquire step.
   //
   // `Await()` requires that this thread holds this `Mutex` in some mode.
-  void Await(const Condition &cond);
+  void Await(const Condition& cond);
 
   // Mutex::LockWhen()
   // Mutex::ReaderLockWhen()
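
A minimal usage sketch of `Mutex::Await()`, assuming a hypothetical `queue` guarded by `mu`; the call releases the lock while blocked and returns with it re-acquired and the condition true:

#include <deque>

#include "absl/synchronization/mutex.h"

absl::Mutex mu;
std::deque<int> queue;  // hypothetical shared state guarded by mu

int BlockingPop() {
  absl::MutexLock lock(&mu);
  // Releases mu while blocked; returns holding mu with the condition true.
  mu.Await(absl::Condition(
      +[](std::deque<int>* q) { return !q->empty(); }, &queue));
  int v = queue.front();
  queue.pop_front();
  return v;
}
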
@@ -333,11 +334,11 @@
   // be acquired, then atomically acquires this `Mutex`. `LockWhen()` is
   // logically equivalent to `*Lock(); Await();` though they may have different
   // performance characteristics.
-  void LockWhen(const Condition &cond) ABSL_EXCLUSIVE_LOCK_FUNCTION();
+  void LockWhen(const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION();
 
-  void ReaderLockWhen(const Condition &cond) ABSL_SHARED_LOCK_FUNCTION();
+  void ReaderLockWhen(const Condition& cond) ABSL_SHARED_LOCK_FUNCTION();
 
-  void WriterLockWhen(const Condition &cond) ABSL_EXCLUSIVE_LOCK_FUNCTION() {
+  void WriterLockWhen(const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION() {
     this->LockWhen(cond);
   }
 
@@ -362,9 +363,9 @@
   // Negative timeouts are equivalent to a zero timeout.
   //
   // This method requires that this thread holds this `Mutex` in some mode.
-  bool AwaitWithTimeout(const Condition &cond, absl::Duration timeout);
+  bool AwaitWithTimeout(const Condition& cond, absl::Duration timeout);
 
-  bool AwaitWithDeadline(const Condition &cond, absl::Time deadline);
+  bool AwaitWithDeadline(const Condition& cond, absl::Time deadline);
 
   // Mutex::LockWhenWithTimeout()
   // Mutex::ReaderLockWhenWithTimeout()
@@ -377,11 +378,11 @@
   // `true` on return.
   //
   // Negative timeouts are equivalent to a zero timeout.
-  bool LockWhenWithTimeout(const Condition &cond, absl::Duration timeout)
+  bool LockWhenWithTimeout(const Condition& cond, absl::Duration timeout)
       ABSL_EXCLUSIVE_LOCK_FUNCTION();
-  bool ReaderLockWhenWithTimeout(const Condition &cond, absl::Duration timeout)
+  bool ReaderLockWhenWithTimeout(const Condition& cond, absl::Duration timeout)
       ABSL_SHARED_LOCK_FUNCTION();
-  bool WriterLockWhenWithTimeout(const Condition &cond, absl::Duration timeout)
+  bool WriterLockWhenWithTimeout(const Condition& cond, absl::Duration timeout)
       ABSL_EXCLUSIVE_LOCK_FUNCTION() {
     return this->LockWhenWithTimeout(cond, timeout);
   }
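
A brief sketch of the timeout variant: the return value reports whether the condition held when the lock was acquired, and the mutex is held on return either way (hypothetical `done` flag):

#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"

absl::Mutex mu;
bool done = false;  // hypothetical flag guarded by mu

bool WaitBriefly() {
  // true iff `done` was true when mu was acquired; mu is held regardless.
  bool ok = mu.LockWhenWithTimeout(absl::Condition(&done), absl::Seconds(2));
  mu.Unlock();
  return ok;
}
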
@@ -397,11 +398,11 @@
   // on return.
   //
   // Deadlines in the past are equivalent to an immediate deadline.
-  bool LockWhenWithDeadline(const Condition &cond, absl::Time deadline)
+  bool LockWhenWithDeadline(const Condition& cond, absl::Time deadline)
       ABSL_EXCLUSIVE_LOCK_FUNCTION();
-  bool ReaderLockWhenWithDeadline(const Condition &cond, absl::Time deadline)
+  bool ReaderLockWhenWithDeadline(const Condition& cond, absl::Time deadline)
       ABSL_SHARED_LOCK_FUNCTION();
-  bool WriterLockWhenWithDeadline(const Condition &cond, absl::Time deadline)
+  bool WriterLockWhenWithDeadline(const Condition& cond, absl::Time deadline)
       ABSL_EXCLUSIVE_LOCK_FUNCTION() {
     return this->LockWhenWithDeadline(cond, deadline);
   }
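
The deadline variant has the same contract with an absolute time; a brief hypothetical sketch:

#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"

bool WaitWithShortDeadline(absl::Mutex& mu, const bool& done) {
  // Same as the timeout form, but with an absolute deadline.
  bool ok = mu.LockWhenWithDeadline(absl::Condition(&done),
                                    absl::Now() + absl::Seconds(5));
  mu.Unlock();
  return ok;
}
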
@@ -423,7 +424,7 @@
   // substantially reduce `Mutex` performance; it should be set only for
   // non-production runs.  Optimization options may also disable invariant
   // checks.
-  void EnableInvariantDebugging(void (*invariant)(void *), void *arg);
+  void EnableInvariantDebugging(void (*invariant)(void*), void* arg);
 
   // Mutex::EnableDebugLog()
   //
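
A hedged sketch of invariant debugging, assuming a hypothetical `Account` type whose balance must stay non-negative; the callback may run when the mutex is acquired or released once invariant checking is enabled for the build:

#include <cassert>

#include "absl/synchronization/mutex.h"

struct Account {
  Account() { mu.EnableInvariantDebugging(&CheckBalance, this); }

  // Invariant callback: invoked with the mutex held by the implementation.
  static void CheckBalance(void* arg) {
    auto* a = static_cast<Account*>(arg);
    assert(a->balance >= 0);
  }

  absl::Mutex mu;
  int balance = 0;  // guarded by mu
};
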
@@ -432,7 +433,7 @@
   // call to `EnableInvariantDebugging()` or `EnableDebugLog()` has been made.
   //
   // Note: This method substantially reduces `Mutex` performance.
-  void EnableDebugLog(const char *name);
+  void EnableDebugLog(const char* name);
 
   // Deadlock detection
 
@@ -460,7 +461,7 @@
 
   // A `MuHow` is a constant that indicates how a lock should be acquired.
   // Internal implementation detail.  Clients should ignore.
-  typedef const struct MuHowS *MuHow;
+  typedef const struct MuHowS* MuHow;
 
   // Mutex::InternalAttemptToUseMutexInFatalSignalHandler()
   //
@@ -482,37 +483,37 @@
 
   // Post()/Wait() versus associated PerThreadSem; in class for required
   // friendship with PerThreadSem.
-  static void IncrementSynchSem(Mutex *mu, base_internal::PerThreadSynch *w);
-  static bool DecrementSynchSem(Mutex *mu, base_internal::PerThreadSynch *w,
+  static void IncrementSynchSem(Mutex* mu, base_internal::PerThreadSynch* w);
+  static bool DecrementSynchSem(Mutex* mu, base_internal::PerThreadSynch* w,
                                 synchronization_internal::KernelTimeout t);
 
   // slow path acquire
-  void LockSlowLoop(SynchWaitParams *waitp, int flags);
+  void LockSlowLoop(SynchWaitParams* waitp, int flags);
   // wrappers around LockSlowLoop()
-  bool LockSlowWithDeadline(MuHow how, const Condition *cond,
+  bool LockSlowWithDeadline(MuHow how, const Condition* cond,
                             synchronization_internal::KernelTimeout t,
                             int flags);
-  void LockSlow(MuHow how, const Condition *cond,
+  void LockSlow(MuHow how, const Condition* cond,
                 int flags) ABSL_ATTRIBUTE_COLD;
   // slow path release
-  void UnlockSlow(SynchWaitParams *waitp) ABSL_ATTRIBUTE_COLD;
+  void UnlockSlow(SynchWaitParams* waitp) ABSL_ATTRIBUTE_COLD;
   // Common code between Await() and AwaitWithTimeout/Deadline()
-  bool AwaitCommon(const Condition &cond,
+  bool AwaitCommon(const Condition& cond,
                    synchronization_internal::KernelTimeout t);
   // Attempt to remove thread s from queue.
-  void TryRemove(base_internal::PerThreadSynch *s);
+  void TryRemove(base_internal::PerThreadSynch* s);
   // Block a thread on mutex.
-  void Block(base_internal::PerThreadSynch *s);
+  void Block(base_internal::PerThreadSynch* s);
   // Wake a thread; return successor.
-  base_internal::PerThreadSynch *Wakeup(base_internal::PerThreadSynch *w);
+  base_internal::PerThreadSynch* Wakeup(base_internal::PerThreadSynch* w);
 
   friend class CondVar;   // for access to Trans()/Fer().
   void Trans(MuHow how);  // used for CondVar->Mutex transfer
   void Fer(
-      base_internal::PerThreadSynch *w);  // used for CondVar->Mutex transfer
+      base_internal::PerThreadSynch* w);  // used for CondVar->Mutex transfer
 
   // Catch the error of writing Mutex when intending MutexLock.
-  Mutex(const volatile Mutex * /*ignored*/) {}  // NOLINT(runtime/explicit)
+  explicit Mutex(const volatile Mutex* /*ignored*/) {}
 
   Mutex(const Mutex&) = delete;
   Mutex& operator=(const Mutex&) = delete;
@@ -547,28 +548,28 @@
   // Calls `mu->Lock()` and returns when that call returns. That is, `*mu` is
   // guaranteed to be locked when this object is constructed. Requires that
   // `mu` be dereferenceable.
-  explicit MutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
+  explicit MutexLock(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
     this->mu_->Lock();
   }
 
   // Like above, but calls `mu->LockWhen(cond)` instead. That is, in addition to
   // the above, the condition given by `cond` is also guaranteed to hold when
   // this object is constructed.
-  explicit MutexLock(Mutex *mu, const Condition &cond)
+  explicit MutexLock(Mutex* mu, const Condition& cond)
       ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
       : mu_(mu) {
     this->mu_->LockWhen(cond);
   }
 
-  MutexLock(const MutexLock &) = delete;  // NOLINT(runtime/mutex)
-  MutexLock(MutexLock&&) = delete;  // NOLINT(runtime/mutex)
+  MutexLock(const MutexLock&) = delete;  // NOLINT(runtime/mutex)
+  MutexLock(MutexLock&&) = delete;       // NOLINT(runtime/mutex)
   MutexLock& operator=(const MutexLock&) = delete;
   MutexLock& operator=(MutexLock&&) = delete;
 
   ~MutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->Unlock(); }
 
  private:
-  Mutex *const mu_;
+  Mutex* const mu_;
 };
 
 // ReaderMutexLock
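
A minimal usage sketch of `MutexLock`, with a hypothetical `counter` guarded by `mu`:

#include "absl/synchronization/mutex.h"

absl::Mutex mu;
int counter = 0;  // hypothetical counter guarded by mu

void Increment() {
  absl::MutexLock lock(&mu);  // locks here, unlocks when `lock` leaves scope
  ++counter;
}

void IncrementWhenReady(const bool& go) {
  // The two-argument form also waits for `go` to be true before the body runs.
  absl::MutexLock lock(&mu, absl::Condition(&go));
  ++counter;
}
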
@@ -577,11 +578,11 @@
 // releases a shared lock on a `Mutex` via RAII.
 class ABSL_SCOPED_LOCKABLE ReaderMutexLock {
  public:
-  explicit ReaderMutexLock(Mutex *mu) ABSL_SHARED_LOCK_FUNCTION(mu) : mu_(mu) {
+  explicit ReaderMutexLock(Mutex* mu) ABSL_SHARED_LOCK_FUNCTION(mu) : mu_(mu) {
     mu->ReaderLock();
   }
 
-  explicit ReaderMutexLock(Mutex *mu, const Condition &cond)
+  explicit ReaderMutexLock(Mutex* mu, const Condition& cond)
       ABSL_SHARED_LOCK_FUNCTION(mu)
       : mu_(mu) {
     mu->ReaderLockWhen(cond);
@@ -595,7 +596,7 @@
   ~ReaderMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->ReaderUnlock(); }
 
  private:
-  Mutex *const mu_;
+  Mutex* const mu_;
 };
 
 // WriterMutexLock
@@ -604,12 +605,12 @@
 // releases a write (exclusive) lock on a `Mutex` via RAII.
 class ABSL_SCOPED_LOCKABLE WriterMutexLock {
  public:
-  explicit WriterMutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+  explicit WriterMutexLock(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
       : mu_(mu) {
     mu->WriterLock();
   }
 
-  explicit WriterMutexLock(Mutex *mu, const Condition &cond)
+  explicit WriterMutexLock(Mutex* mu, const Condition& cond)
       ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
       : mu_(mu) {
     mu->WriterLockWhen(cond);
@@ -623,7 +624,7 @@
   ~WriterMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->WriterUnlock(); }
 
  private:
-  Mutex *const mu_;
+  Mutex* const mu_;
 };
 
 // -----------------------------------------------------------------------------
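
A brief sketch contrasting the reader and writer RAII wrappers over a hypothetical `shared_value`:

#include "absl/synchronization/mutex.h"

absl::Mutex mu;
int shared_value = 0;  // hypothetical value guarded by mu

int Read() {
  absl::ReaderMutexLock lock(&mu);  // shared lock: many readers may hold it
  return shared_value;
}

void Write(int v) {
  absl::WriterMutexLock lock(&mu);  // exclusive lock, equivalent to MutexLock
  shared_value = v;
}
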
@@ -681,7 +682,7 @@
 class Condition {
  public:
   // A Condition that returns the result of "(*func)(arg)"
-  Condition(bool (*func)(void *), void *arg);
+  Condition(bool (*func)(void*), void* arg);
 
   // Templated version for people who are averse to casts.
   //
@@ -692,8 +693,8 @@
   // Note: lambdas in this case must contain no bound variables.
   //
   // See class comment for performance advice.
-  template<typename T>
-  Condition(bool (*func)(T *), T *arg);
+  template <typename T>
+  Condition(bool (*func)(T*), T* arg);
 
   // Same as above, but allows for cases where `arg` comes from a pointer that
   // is convertible to the function parameter type `T*` but not an exact match.
@@ -707,7 +708,7 @@
   // a function template is passed as `func`. Also, the dummy `typename = void`
   // template parameter exists just to work around a MSVC mangling bug.
   template <typename T, typename = void>
-  Condition(bool (*func)(T *), typename absl::internal::identity<T>::type *arg);
+  Condition(bool (*func)(T*), typename absl::internal::identity<T>::type* arg);
 
   // Templated version for invoking a method that returns a `bool`.
   //
@@ -717,16 +718,16 @@
   // Implementation Note: `absl::internal::identity` is used to allow methods to
   // come from base classes. A simpler signature like
   // `Condition(T*, bool (T::*)())` does not suffice.
-  template<typename T>
-  Condition(T *object, bool (absl::internal::identity<T>::type::* method)());
+  template <typename T>
+  Condition(T* object, bool (absl::internal::identity<T>::type::*method)());
 
   // Same as above, for const members
-  template<typename T>
-  Condition(const T *object,
-            bool (absl::internal::identity<T>::type::* method)() const);
+  template <typename T>
+  Condition(const T* object,
+            bool (absl::internal::identity<T>::type::*method)() const);
 
   // A Condition that returns the value of `*cond`
-  explicit Condition(const bool *cond);
+  explicit Condition(const bool* cond);
 
   // Templated version for invoking a functor that returns a `bool`.
   // This approach accepts pointers to non-mutable lambdas, `std::function`,
@@ -753,9 +754,9 @@
   // Implementation note: The second template parameter ensures that this
   // constructor doesn't participate in overload resolution if T doesn't have
   // `bool operator() const`.
-  template <typename T, typename E = decltype(
-      static_cast<bool (T::*)() const>(&T::operator()))>
-  explicit Condition(const T *obj)
+  template <typename T, typename E = decltype(static_cast<bool (T::*)() const>(
+                            &T::operator()))>
+  explicit Condition(const T* obj)
       : Condition(obj, static_cast<bool (T::*)() const>(&T::operator())) {}
 
   // A Condition that always returns `true`.
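
A hedged sketch of the functor and method-pointer forms, assuming hypothetical `items` and `Inbox` state:

#include <vector>

#include "absl/synchronization/mutex.h"

absl::Mutex mu;
std::vector<int> items;  // hypothetical state guarded by mu

struct Inbox {  // hypothetical type for the method-pointer form
  bool HasWork() const { return pending > 0; }
  int pending = 0;
};

void DrainWhenFull() {
  // Functor form: a non-mutable lambda with `bool operator() const`.
  // The lambda must outlive every evaluation of the Condition.
  auto full = [&] { return items.size() >= 10; };
  absl::MutexLock lock(&mu, absl::Condition(&full));
  items.clear();
}

void WaitForWork(absl::Mutex& inbox_mu, const Inbox& inbox) {
  // Method form: Condition(object, &Class::ConstBoolMethod).
  inbox_mu.LockWhen(absl::Condition(&inbox, &Inbox::HasWork));
  inbox_mu.Unlock();
}
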
@@ -771,7 +772,7 @@
   // Two `Condition` values are guaranteed equal if both their `func` and `arg`
   // components are the same. A null pointer is equivalent to a `true`
   // condition.
-  static bool GuaranteedEqual(const Condition *a, const Condition *b);
+  static bool GuaranteedEqual(const Condition* a, const Condition* b);
 
  private:
   // Sizing an allocation for a method pointer can be subtle. In the Itanium
@@ -799,12 +800,14 @@
   bool (*eval_)(const Condition*) = nullptr;
 
   // Either an argument for a function call or an object for a method call.
-  void *arg_ = nullptr;
+  void* arg_ = nullptr;
 
   // Various functions eval_ can point to:
   static bool CallVoidPtrFunction(const Condition*);
-  template <typename T> static bool CastAndCallFunction(const Condition* c);
-  template <typename T> static bool CastAndCallMethod(const Condition* c);
+  template <typename T>
+  static bool CastAndCallFunction(const Condition* c);
+  template <typename T>
+  static bool CastAndCallMethod(const Condition* c);
 
   // Helper methods for storing, validating, and reading callback arguments.
   template <typename T>
@@ -816,7 +819,7 @@
   }
 
   template <typename T>
-  inline void ReadCallback(T *callback) const {
+  inline void ReadCallback(T* callback) const {
     std::memcpy(callback, callback_, sizeof(*callback));
   }
 
@@ -873,7 +876,7 @@
   // spurious wakeup), then reacquires the `Mutex` and returns.
   //
   // Requires and ensures that the current thread holds the `Mutex`.
-  void Wait(Mutex *mu);
+  void Wait(Mutex* mu);
 
   // CondVar::WaitWithTimeout()
   //
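
A minimal producer/consumer sketch of `CondVar::Wait()` with a hypothetical `work` queue; the wait sits in a loop because wakeups may be spurious:

#include <deque>

#include "absl/synchronization/mutex.h"

absl::Mutex mu;
absl::CondVar cv;
std::deque<int> work;  // hypothetical queue guarded by mu

void Push(int v) {
  absl::MutexLock lock(&mu);
  work.push_back(v);
  cv.Signal();  // wake one waiter
}

int BlockingPop() {
  absl::MutexLock lock(&mu);
  while (work.empty()) {
    cv.Wait(&mu);  // atomically releases mu, blocks, re-acquires mu
  }
  int v = work.front();
  work.pop_front();
  return v;
}
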
@@ -888,7 +891,7 @@
   // to return `true` or `false`.
   //
   // Requires and ensures that the current thread holds the `Mutex`.
-  bool WaitWithTimeout(Mutex *mu, absl::Duration timeout);
+  bool WaitWithTimeout(Mutex* mu, absl::Duration timeout);
 
   // CondVar::WaitWithDeadline()
   //
@@ -905,7 +908,7 @@
   // to return `true` or `false`.
   //
   // Requires and ensures that the current thread holds the `Mutex`.
-  bool WaitWithDeadline(Mutex *mu, absl::Time deadline);
+  bool WaitWithDeadline(Mutex* mu, absl::Time deadline);
 
   // CondVar::Signal()
   //
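
Because the timed waits only report expiry, callers still re-check the predicate; a brief hypothetical sketch:

#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"

bool WaitUpToOneSecond(absl::Mutex& mu, absl::CondVar& cv, bool& done) {
  absl::MutexLock lock(&mu);
  while (!done) {
    if (cv.WaitWithTimeout(&mu, absl::Seconds(1))) {
      break;  // timed out; `done` may or may not have become true
    }
  }
  return done;
}
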
@@ -922,18 +925,17 @@
   // Causes all subsequent uses of this `CondVar` to be logged via
   // `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if `name != 0`.
   // Note: this method substantially reduces `CondVar` performance.
-  void EnableDebugLog(const char *name);
+  void EnableDebugLog(const char* name);
 
  private:
-  bool WaitCommon(Mutex *mutex, synchronization_internal::KernelTimeout t);
-  void Remove(base_internal::PerThreadSynch *s);
-  void Wakeup(base_internal::PerThreadSynch *w);
+  bool WaitCommon(Mutex* mutex, synchronization_internal::KernelTimeout t);
+  void Remove(base_internal::PerThreadSynch* s);
+  void Wakeup(base_internal::PerThreadSynch* w);
   std::atomic<intptr_t> cv_;  // Condition variable state.
   CondVar(const CondVar&) = delete;
   CondVar& operator=(const CondVar&) = delete;
 };
 
-
 // Variants of MutexLock.
 //
 // If you find yourself using one of these, consider instead using
@@ -944,14 +946,14 @@
 // MutexLockMaybe is like MutexLock, but is a no-op when mu is null.
 class ABSL_SCOPED_LOCKABLE MutexLockMaybe {
  public:
-  explicit MutexLockMaybe(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+  explicit MutexLockMaybe(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
       : mu_(mu) {
     if (this->mu_ != nullptr) {
       this->mu_->Lock();
     }
   }
 
-  explicit MutexLockMaybe(Mutex *mu, const Condition &cond)
+  explicit MutexLockMaybe(Mutex* mu, const Condition& cond)
       ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
       : mu_(mu) {
     if (this->mu_ != nullptr) {
@@ -960,11 +962,13 @@
   }
 
   ~MutexLockMaybe() ABSL_UNLOCK_FUNCTION() {
-    if (this->mu_ != nullptr) { this->mu_->Unlock(); }
+    if (this->mu_ != nullptr) {
+      this->mu_->Unlock();
+    }
   }
 
  private:
-  Mutex *const mu_;
+  Mutex* const mu_;
   MutexLockMaybe(const MutexLockMaybe&) = delete;
   MutexLockMaybe(MutexLockMaybe&&) = delete;
   MutexLockMaybe& operator=(const MutexLockMaybe&) = delete;
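
A short sketch of `MutexLockMaybe` with a possibly-null mutex:

#include "absl/synchronization/mutex.h"

// Hypothetical helper: lock only when a mutex was actually supplied.
void MaybeLockedWork(absl::Mutex* mu /* may be null */) {
  absl::MutexLockMaybe lock(mu);  // no-op when mu == nullptr
  // ... work that is synchronized only when a mutex was provided ...
}
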
@@ -977,25 +981,27 @@
 // mutex before destruction. `Release()` may be called at most once.
 class ABSL_SCOPED_LOCKABLE ReleasableMutexLock {
  public:
-  explicit ReleasableMutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+  explicit ReleasableMutexLock(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
       : mu_(mu) {
     this->mu_->Lock();
   }
 
-  explicit ReleasableMutexLock(Mutex *mu, const Condition &cond)
+  explicit ReleasableMutexLock(Mutex* mu, const Condition& cond)
       ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
       : mu_(mu) {
     this->mu_->LockWhen(cond);
   }
 
   ~ReleasableMutexLock() ABSL_UNLOCK_FUNCTION() {
-    if (this->mu_ != nullptr) { this->mu_->Unlock(); }
+    if (this->mu_ != nullptr) {
+      this->mu_->Unlock();
+    }
   }
 
   void Release() ABSL_UNLOCK_FUNCTION();
 
  private:
-  Mutex *mu_;
+  Mutex* mu_;
   ReleasableMutexLock(const ReleasableMutexLock&) = delete;
   ReleasableMutexLock(ReleasableMutexLock&&) = delete;
   ReleasableMutexLock& operator=(const ReleasableMutexLock&) = delete;
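
A short sketch of `ReleasableMutexLock` releasing early on a hypothetical fast path:

#include "absl/synchronization/mutex.h"

absl::Mutex mu;

void EarlyRelease(bool fast_path) {
  absl::ReleasableMutexLock lock(&mu);
  if (fast_path) {
    lock.Release();  // unlock now; the destructor will not unlock again
    // ... unlocked work ...
    return;
  }
  // ... locked work; the destructor unlocks at end of scope ...
}
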
@@ -1012,8 +1018,8 @@
 
 // static
 template <typename T>
-bool Condition::CastAndCallMethod(const Condition *c) {
-  T *object = static_cast<T *>(c->arg_);
+bool Condition::CastAndCallMethod(const Condition* c) {
+  T* object = static_cast<T*>(c->arg_);
   bool (T::*method_pointer)();
   c->ReadCallback(&method_pointer);
   return (object->*method_pointer)();
@@ -1021,44 +1027,43 @@
 
 // static
 template <typename T>
-bool Condition::CastAndCallFunction(const Condition *c) {
-  bool (*function)(T *);
+bool Condition::CastAndCallFunction(const Condition* c) {
+  bool (*function)(T*);
   c->ReadCallback(&function);
-  T *argument = static_cast<T *>(c->arg_);
+  T* argument = static_cast<T*>(c->arg_);
   return (*function)(argument);
 }
 
 template <typename T>
-inline Condition::Condition(bool (*func)(T *), T *arg)
+inline Condition::Condition(bool (*func)(T*), T* arg)
     : eval_(&CastAndCallFunction<T>),
-      arg_(const_cast<void *>(static_cast<const void *>(arg))) {
+      arg_(const_cast<void*>(static_cast<const void*>(arg))) {
   static_assert(sizeof(&func) <= sizeof(callback_),
                 "An overlarge function pointer was passed to Condition.");
   StoreCallback(func);
 }
 
 template <typename T, typename>
-inline Condition::Condition(bool (*func)(T *),
-                            typename absl::internal::identity<T>::type *arg)
+inline Condition::Condition(bool (*func)(T*),
+                            typename absl::internal::identity<T>::type* arg)
     // Just delegate to the overload above.
     : Condition(func, arg) {}
 
 template <typename T>
-inline Condition::Condition(T *object,
+inline Condition::Condition(T* object,
                             bool (absl::internal::identity<T>::type::*method)())
-    : eval_(&CastAndCallMethod<T>),
-      arg_(object) {
+    : eval_(&CastAndCallMethod<T>), arg_(object) {
   static_assert(sizeof(&method) <= sizeof(callback_),
                 "An overlarge method pointer was passed to Condition.");
   StoreCallback(method);
 }
 
 template <typename T>
-inline Condition::Condition(const T *object,
+inline Condition::Condition(const T* object,
                             bool (absl::internal::identity<T>::type::*method)()
                                 const)
     : eval_(&CastAndCallMethod<T>),
-      arg_(reinterpret_cast<void *>(const_cast<T *>(object))) {
+      arg_(reinterpret_cast<void*>(const_cast<T*>(object))) {
   StoreCallback(method);
 }
 
@@ -1088,7 +1093,7 @@
 //
 // This has the same ordering and single-use limitations as
 // RegisterMutexProfiler() above.
-void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
+void RegisterMutexTracer(void (*fn)(const char* msg, const void* obj,
                                     int64_t wait_cycles));
 
 // Register a hook for CondVar tracing.
@@ -1103,7 +1108,7 @@
 //
 // This has the same ordering and single-use limitations as
 // RegisterMutexProfiler() above.
-void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv));
+void RegisterCondVarTracer(void (*fn)(const char* msg, const void* cv));
 
 // EnableMutexInvariantDebugging()
 //
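
A hedged sketch of installing both tracing hooks with the signatures above; the counters are hypothetical, and each Register* function should be called at most once, early in the process:

#include <atomic>
#include <cstdint>

#include "absl/synchronization/mutex.h"

std::atomic<int64_t> total_mutex_wait_cycles{0};  // hypothetical counters
std::atomic<int64_t> condvar_events{0};

void MyMutexTracer(const char* /*msg*/, const void* /*obj*/,
                   int64_t wait_cycles) {
  total_mutex_wait_cycles.fetch_add(wait_cycles, std::memory_order_relaxed);
}

void MyCondVarTracer(const char* /*msg*/, const void* /*cv*/) {
  condvar_events.fetch_add(1, std::memory_order_relaxed);
}

void InstallTracers() {  // call once, before contention you want traced
  absl::RegisterMutexTracer(&MyMutexTracer);
  absl::RegisterCondVarTracer(&MyCondVarTracer);
}
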
@@ -1120,7 +1125,7 @@
 enum class OnDeadlockCycle {
   kIgnore,  // Neither report on nor attempt to track cycles in lock ordering
   kReport,  // Report lock cycles to stderr when detected
-  kAbort,  // Report lock cycles to stderr when detected, then abort
+  kAbort,   // Report lock cycles to stderr when detected, then abort
 };
 
 // SetMutexDeadlockDetectionMode()
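
A brief sketch of selecting a deadlock-detection mode at start-up, assuming deadlock detection is compiled into this build:

#include "absl/synchronization/mutex.h"

void EnableDeadlockReporting() {
  // Report lock-ordering cycles to stderr when detected.
  absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);
  // or: absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
}
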
diff --git a/absl/synchronization/mutex_test.cc b/absl/synchronization/mutex_test.cc
index 35802b2..b585c34 100644
--- a/absl/synchronization/mutex_test.cc
+++ b/absl/synchronization/mutex_test.cc
@@ -1868,4 +1868,29 @@
   EXPECT_TRUE(saw_wrote.load());
 }
 
+TEST(Mutex, LockWhenWithTimeoutResult) {
+  // Check various corner cases for Await/LockWhen return value
+  // with always true/always false conditions.
+  absl::Mutex mu;
+  const bool kAlwaysTrue = true, kAlwaysFalse = false;
+  const absl::Condition kTrueCond(&kAlwaysTrue), kFalseCond(&kAlwaysFalse);
+  EXPECT_TRUE(mu.LockWhenWithTimeout(kTrueCond, absl::Milliseconds(1)));
+  mu.Unlock();
+  EXPECT_FALSE(mu.LockWhenWithTimeout(kFalseCond, absl::Milliseconds(1)));
+  EXPECT_TRUE(mu.AwaitWithTimeout(kTrueCond, absl::Milliseconds(1)));
+  EXPECT_FALSE(mu.AwaitWithTimeout(kFalseCond, absl::Milliseconds(1)));
+  std::thread th1([&]() {
+    EXPECT_TRUE(mu.LockWhenWithTimeout(kTrueCond, absl::Milliseconds(1)));
+    mu.Unlock();
+  });
+  std::thread th2([&]() {
+    EXPECT_FALSE(mu.LockWhenWithTimeout(kFalseCond, absl::Milliseconds(1)));
+    mu.Unlock();
+  });
+  absl::SleepFor(absl::Milliseconds(100));
+  mu.Unlock();
+  th1.join();
+  th2.join();
+}
+
 }  // namespace