diff --git a/absl/base/attributes.h b/absl/base/attributes.h
index 2665d8f..e390782 100644
--- a/absl/base/attributes.h
+++ b/absl/base/attributes.h
@@ -318,8 +318,16 @@
 // `__start_ ## name` and `__stop_ ## name` symbols to bracket the section.
 // This functionality is supported by GNU linker.
 #ifndef ABSL_ATTRIBUTE_SECTION_VARIABLE
+#ifdef _AIX
+// __attribute__((section(#name))) on AIX is achieved by using the `.csect`
+// pseudo op, which includes an additional integer as part of its syntax
+// indicating alignment. If data falls under different alignments, you might
+// get a compilation error indicating a `Section type conflict`.
+#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name)
+#else
 #define ABSL_ATTRIBUTE_SECTION_VARIABLE(name) __attribute__((section(#name)))
 #endif
+#endif
 
 // ABSL_DECLARE_ATTRIBUTE_SECTION_VARS
 //
diff --git a/absl/base/config.h b/absl/base/config.h
index c7b2e64..5d3edcd 100644
--- a/absl/base/config.h
+++ b/absl/base/config.h
@@ -408,10 +408,10 @@
 // POSIX.1-2001.
 #ifdef ABSL_HAVE_MMAP
 #error ABSL_HAVE_MMAP cannot be directly set
-#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) ||   \
-    defined(__ros__) || defined(__native_client__) || defined(__asmjs__) || \
-    defined(__wasm__) || defined(__Fuchsia__) || defined(__sun) || \
-    defined(__ASYLO__) || defined(__myriad2__)
+#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
+    defined(_AIX) || defined(__ros__) || defined(__native_client__) ||    \
+    defined(__asmjs__) || defined(__wasm__) || defined(__Fuchsia__) ||    \
+    defined(__sun) || defined(__ASYLO__) || defined(__myriad2__)
 #define ABSL_HAVE_MMAP 1
 #endif
 
@@ -422,7 +422,7 @@
 #ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
 #error ABSL_HAVE_PTHREAD_GETSCHEDPARAM cannot be directly set
 #elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
-    defined(__ros__)
+    defined(_AIX) || defined(__ros__)
 #define ABSL_HAVE_PTHREAD_GETSCHEDPARAM 1
 #endif
 
diff --git a/absl/base/internal/sysinfo.cc b/absl/base/internal/sysinfo.cc
index 08a1e28..a7cfb46 100644
--- a/absl/base/internal/sysinfo.cc
+++ b/absl/base/internal/sysinfo.cc
@@ -131,6 +131,8 @@
 #elif defined(_WIN32)
   const unsigned hardware_concurrency = Win32NumCPUs();
   return hardware_concurrency ? hardware_concurrency : 1;
+#elif defined(_AIX)
+  return sysconf(_SC_NPROCESSORS_ONLN);
 #else
   // Other possibilities:
   //  - Read /sys/devices/system/cpu/online and use cpumask_parse()
diff --git a/absl/base/internal/unscaledcycleclock.cc b/absl/base/internal/unscaledcycleclock.cc
index fc07e30..4d352bd 100644
--- a/absl/base/internal/unscaledcycleclock.cc
+++ b/absl/base/internal/unscaledcycleclock.cc
@@ -87,6 +87,10 @@
 double UnscaledCycleClock::Frequency() {
 #ifdef __GLIBC__
   return __ppc_get_timebase_freq();
+#elif defined(_AIX)
+  // This is the same constant value as returned by
+  // __ppc_get_timebase_freq().
+  return static_cast<double>(512000000);
 #elif defined(__FreeBSD__)
   static once_flag init_timebase_frequency_once;
   static double timebase_frequency = 0.0;
diff --git a/absl/container/btree_map.h b/absl/container/btree_map.h
index 6bbf414..f0a8d4a 100644
--- a/absl/container/btree_map.h
+++ b/absl/container/btree_map.h
@@ -693,9 +693,8 @@
 
   // btree_multimap::merge()
   //
-  // Extracts elements from a given `source` btree_multimap into this
-  // `btree_multimap`. If the destination `btree_multimap` already contains an
-  // element with an equivalent key, that element is not extracted.
+  // Extracts all elements from a given `source` btree_multimap into this
+  // `btree_multimap`.
   using Base::merge;
 
   // btree_multimap::swap(btree_multimap& other)
diff --git a/absl/container/btree_set.h b/absl/container/btree_set.h
index c07ccd9..8973900 100644
--- a/absl/container/btree_set.h
+++ b/absl/container/btree_set.h
@@ -604,9 +604,8 @@
 
   // btree_multiset::merge()
   //
-  // Extracts elements from a given `source` btree_multiset into this
-  // `btree_multiset`. If the destination `btree_multiset` already contains an
-  // element with an equivalent key, that element is not extracted.
+  // Extracts all elements from a given `source` btree_multiset into this
+  // `btree_multiset`.
   using Base::merge;
 
   // btree_multiset::swap(btree_multiset& other)
diff --git a/absl/container/inlined_vector.h b/absl/container/inlined_vector.h
index 37e5fef..df9e099 100644
--- a/absl/container/inlined_vector.h
+++ b/absl/container/inlined_vector.h
@@ -207,8 +207,8 @@
 
       other.storage_.SetInlinedSize(0);
     } else if (other.storage_.GetIsAllocated()) {
-      storage_.SetAllocatedData(other.storage_.GetAllocatedData(),
-                                other.storage_.GetAllocatedCapacity());
+      storage_.SetAllocation({other.storage_.GetAllocatedData(),
+                              other.storage_.GetAllocatedCapacity()});
       storage_.SetAllocatedSize(other.storage_.GetSize());
 
       other.storage_.SetInlinedSize(0);
@@ -242,8 +242,8 @@
       other.storage_.SetInlinedSize(0);
     } else if ((storage_.GetAllocator() == other.storage_.GetAllocator()) &&
                other.storage_.GetIsAllocated()) {
-      storage_.SetAllocatedData(other.storage_.GetAllocatedData(),
-                                other.storage_.GetAllocatedCapacity());
+      storage_.SetAllocation({other.storage_.GetAllocatedData(),
+                              other.storage_.GetAllocatedCapacity()});
       storage_.SetAllocatedSize(other.storage_.GetSize());
 
       other.storage_.SetInlinedSize(0);
@@ -735,15 +735,12 @@
 
   // `InlinedVector::shrink_to_fit()`
   //
-  // Reduces memory usage by freeing unused memory. After being called, calls to
-  // `capacity()` will be equal to `max(N, size())`.
+  // Attempts to reduce memory usage by moving elements to (or keeping elements
+  // in) the smallest available buffer sufficient for containing `size()`
+  // elements.
   //
-  // If `size() <= N` and the inlined vector contains allocated memory, the
-  // elements will all be moved to the inlined space and the allocated memory
-  // will be deallocated.
-  //
-  // If `size() > N` and `size() < capacity()`, the elements will be moved to a
-  // smaller allocation.
+  // If `size()` is sufficiently small, the elements will be moved into (or kept
+  // in) the inlined space.
   void shrink_to_fit() {
     if (storage_.GetIsAllocated()) {
       storage_.ShrinkToFit();
diff --git a/absl/container/internal/inlined_vector.h b/absl/container/internal/inlined_vector.h
index 1cfba9b..e2ecf46 100644
--- a/absl/container/internal/inlined_vector.h
+++ b/absl/container/internal/inlined_vector.h
@@ -21,8 +21,11 @@
 #include <iterator>
 #include <limits>
 #include <memory>
+#include <new>
+#include <type_traits>
 #include <utility>
 
+#include "absl/base/attributes.h"
 #include "absl/base/macros.h"
 #include "absl/container/internal/compressed_tuple.h"
 #include "absl/memory/memory.h"
@@ -95,13 +98,34 @@
 void DestroyElements(NoTypeDeduction<A>& allocator, Pointer<A> destroy_first,
                      SizeType<A> destroy_size) {
   if (destroy_first != nullptr) {
-    for (auto i = destroy_size; i != 0;) {
+    for (SizeType<A> i = destroy_size; i != 0;) {
       --i;
       AllocatorTraits<A>::destroy(allocator, destroy_first + i);
     }
   }
 }
 
+template <typename A>
+struct Allocation {
+  Pointer<A> data;
+  SizeType<A> capacity;
+};
+
+template <typename A,
+          bool IsOverAligned =
+              (alignof(ValueType<A>) > ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT)>
+struct MallocAdapter {
+  static Allocation<A> Allocate(A& allocator, SizeType<A> requested_capacity) {
+    return {AllocatorTraits<A>::allocate(allocator, requested_capacity),
+            requested_capacity};
+  }
+
+  static void Deallocate(A& allocator, Pointer<A> pointer,
+                         SizeType<A> capacity) {
+    AllocatorTraits<A>::deallocate(allocator, pointer, capacity);
+  }
+};
+
 // If kUseMemcpy is true, memcpy(dst, src, n); else do nothing.
 // Useful to avoid compiler warnings when memcpy() is used for T values
 // that are not trivially copyable in non-reachable code.
@@ -201,7 +225,7 @@
 
   ~AllocationTransaction() {
     if (DidAllocate()) {
-      AllocatorTraits<A>::deallocate(GetAllocator(), GetData(), GetCapacity());
+      MallocAdapter<A>::Deallocate(GetAllocator(), GetData(), GetCapacity());
     }
   }
 
@@ -213,18 +237,27 @@
   SizeType<A>& GetCapacity() { return capacity_; }
 
   bool DidAllocate() { return GetData() != nullptr; }
-  Pointer<A> Allocate(SizeType<A> capacity) {
-    GetData() = AllocatorTraits<A>::allocate(GetAllocator(), capacity);
-    GetCapacity() = capacity;
-    return GetData();
+
+  Pointer<A> Allocate(SizeType<A> requested_capacity) {
+    Allocation<A> result =
+        MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
+    GetData() = result.data;
+    GetCapacity() = result.capacity;
+    return result.data;
   }
 
+  ABSL_MUST_USE_RESULT Allocation<A> Release() && {
+    Allocation<A> result = {GetData(), GetCapacity()};
+    Reset();
+    return result;
+  }
+
+ private:
   void Reset() {
     GetData() = nullptr;
     GetCapacity() = 0;
   }
 
- private:
   container_internal::CompressedTuple<A, Pointer<A>> allocator_data_;
   SizeType<A> capacity_;
 };
@@ -405,15 +438,9 @@
     GetSizeAndIsAllocated() -= count << static_cast<SizeType<A>>(1);
   }
 
-  void SetAllocatedData(Pointer<A> data, SizeType<A> capacity) {
-    data_.allocated.allocated_data = data;
-    data_.allocated.allocated_capacity = capacity;
-  }
-
-  void AcquireAllocatedData(AllocationTransaction<A>& allocation_tx) {
-    SetAllocatedData(allocation_tx.GetData(), allocation_tx.GetCapacity());
-
-    allocation_tx.Reset();
+  void SetAllocation(Allocation<A> allocation) {
+    data_.allocated.allocated_data = allocation.data;
+    data_.allocated.allocated_capacity = allocation.capacity;
   }
 
   void MemcpyFrom(const Storage& other_storage) {
@@ -425,8 +452,8 @@
 
   void DeallocateIfAllocated() {
     if (GetIsAllocated()) {
-      AllocatorTraits<A>::deallocate(GetAllocator(), GetAllocatedData(),
-                                     GetAllocatedCapacity());
+      MallocAdapter<A>::Deallocate(GetAllocator(), GetAllocatedData(),
+                                   GetAllocatedCapacity());
     }
   }
 
@@ -465,7 +492,7 @@
 
 template <typename T, size_t N, typename A>
 void Storage<T, N, A>::InitFrom(const Storage& other) {
-  const auto n = other.GetSize();
+  const SizeType<A> n = other.GetSize();
   assert(n > 0);  // Empty sources handled handled in caller.
   ConstPointer<A> src;
   Pointer<A> dst;
@@ -476,9 +503,11 @@
     // Because this is only called from the `InlinedVector` constructors, it's
     // safe to take on the allocation with size `0`. If `ConstructElements(...)`
     // throws, deallocation will be automatically handled by `~Storage()`.
-    SizeType<A> new_capacity = ComputeCapacity(GetInlinedCapacity(), n);
-    dst = AllocatorTraits<A>::allocate(GetAllocator(), new_capacity);
-    SetAllocatedData(dst, new_capacity);
+    SizeType<A> requested_capacity = ComputeCapacity(GetInlinedCapacity(), n);
+    Allocation<A> allocation =
+        MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
+    SetAllocation(allocation);
+    dst = allocation.data;
     src = other.GetAllocatedData();
   }
   if (IsMemcpyOk<A>::value) {
@@ -503,9 +532,12 @@
     // Because this is only called from the `InlinedVector` constructors, it's
     // safe to take on the allocation with size `0`. If `ConstructElements(...)`
     // throws, deallocation will be automatically handled by `~Storage()`.
-    SizeType<A> new_capacity = ComputeCapacity(GetInlinedCapacity(), new_size);
-    construct_data = AllocatorTraits<A>::allocate(GetAllocator(), new_capacity);
-    SetAllocatedData(construct_data, new_capacity);
+    SizeType<A> requested_capacity =
+        ComputeCapacity(GetInlinedCapacity(), new_size);
+    Allocation<A> allocation =
+        MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
+    construct_data = allocation.data;
+    SetAllocation(allocation);
     SetIsAllocated();
   } else {
     construct_data = GetInlinedData();
@@ -532,8 +564,9 @@
   absl::Span<ValueType<A>> destroy_loop;
 
   if (new_size > storage_view.capacity) {
-    SizeType<A> new_capacity = ComputeCapacity(storage_view.capacity, new_size);
-    construct_loop = {allocation_tx.Allocate(new_capacity), new_size};
+    SizeType<A> requested_capacity =
+        ComputeCapacity(storage_view.capacity, new_size);
+    construct_loop = {allocation_tx.Allocate(requested_capacity), new_size};
     destroy_loop = {storage_view.data, storage_view.size};
   } else if (new_size > storage_view.size) {
     assign_loop = {storage_view.data, storage_view.size};
@@ -553,7 +586,7 @@
 
   if (allocation_tx.DidAllocate()) {
     DeallocateIfAllocated();
-    AcquireAllocatedData(allocation_tx);
+    SetAllocation(std::move(allocation_tx).Release());
     SetIsAllocated();
   }
 
@@ -565,9 +598,9 @@
 auto Storage<T, N, A>::Resize(ValueAdapter values, SizeType<A> new_size)
     -> void {
   StorageView<A> storage_view = MakeStorageView();
-  auto* const base = storage_view.data;
+  Pointer<A> const base = storage_view.data;
   const SizeType<A> size = storage_view.size;
-  auto& alloc = GetAllocator();
+  A& alloc = GetAllocator();
   if (new_size <= size) {
     // Destroy extra old elements.
     DestroyElements<A>(alloc, base + new_size, size - new_size);
@@ -583,8 +616,9 @@
     // Use transactional wrappers for the first two steps so we can roll
     // back if necessary due to exceptions.
     AllocationTransaction<A> allocation_tx(alloc);
-    SizeType<A> new_capacity = ComputeCapacity(storage_view.capacity, new_size);
-    Pointer<A> new_data = allocation_tx.Allocate(new_capacity);
+    SizeType<A> requested_capacity =
+        ComputeCapacity(storage_view.capacity, new_size);
+    Pointer<A> new_data = allocation_tx.Allocate(requested_capacity);
 
     ConstructionTransaction<A> construction_tx(alloc);
     construction_tx.Construct(new_data + size, values, new_size - size);
@@ -596,7 +630,7 @@
     DestroyElements<A>(alloc, base, size);
     construction_tx.Commit();
     DeallocateIfAllocated();
-    AcquireAllocatedData(allocation_tx);
+    SetAllocation(std::move(allocation_tx).Release());
     SetIsAllocated();
   }
   SetSize(new_size);
@@ -621,8 +655,9 @@
     IteratorValueAdapter<A, MoveIterator<A>> move_values(
         MoveIterator<A>(storage_view.data));
 
-    SizeType<A> new_capacity = ComputeCapacity(storage_view.capacity, new_size);
-    Pointer<A> new_data = allocation_tx.Allocate(new_capacity);
+    SizeType<A> requested_capacity =
+        ComputeCapacity(storage_view.capacity, new_size);
+    Pointer<A> new_data = allocation_tx.Allocate(requested_capacity);
 
     construction_tx.Construct(new_data + insert_index, values, insert_count);
 
@@ -636,7 +671,7 @@
     construction_tx.Commit();
     move_construction_tx.Commit();
     DeallocateIfAllocated();
-    AcquireAllocatedData(allocation_tx);
+    SetAllocation(std::move(allocation_tx).Release());
 
     SetAllocatedSize(new_size);
     return Iterator<A>(new_data + insert_index);
@@ -697,7 +732,7 @@
 template <typename... Args>
 auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> Reference<A> {
   StorageView<A> storage_view = MakeStorageView();
-  const auto n = storage_view.size;
+  const SizeType<A> n = storage_view.size;
   if (ABSL_PREDICT_TRUE(n != storage_view.capacity)) {
     // Fast path; new element fits.
     Pointer<A> last_ptr = storage_view.data + n;
@@ -717,8 +752,8 @@
   AllocationTransaction<A> allocation_tx(GetAllocator());
   IteratorValueAdapter<A, MoveIterator<A>> move_values(
       MoveIterator<A>(storage_view.data));
-  SizeType<A> new_capacity = NextCapacity(storage_view.capacity);
-  Pointer<A> construct_data = allocation_tx.Allocate(new_capacity);
+  SizeType<A> requested_capacity = NextCapacity(storage_view.capacity);
+  Pointer<A> construct_data = allocation_tx.Allocate(requested_capacity);
   Pointer<A> last_ptr = construct_data + storage_view.size;
 
   // Construct new element.
@@ -737,7 +772,7 @@
   DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
 
   DeallocateIfAllocated();
-  AcquireAllocatedData(allocation_tx);
+  SetAllocation(std::move(allocation_tx).Release());
   SetIsAllocated();
   AddSize(1);
   return *last_ptr;
@@ -778,9 +813,9 @@
   IteratorValueAdapter<A, MoveIterator<A>> move_values(
       MoveIterator<A>(storage_view.data));
 
-  SizeType<A> new_capacity =
+  SizeType<A> new_requested_capacity =
       ComputeCapacity(storage_view.capacity, requested_capacity);
-  Pointer<A> new_data = allocation_tx.Allocate(new_capacity);
+  Pointer<A> new_data = allocation_tx.Allocate(new_requested_capacity);
 
   ConstructElements<A>(GetAllocator(), new_data, move_values,
                        storage_view.size);
@@ -788,7 +823,7 @@
   DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
 
   DeallocateIfAllocated();
-  AcquireAllocatedData(allocation_tx);
+  SetAllocation(std::move(allocation_tx).Release());
   SetIsAllocated();
 }
 
@@ -809,8 +844,12 @@
 
   Pointer<A> construct_data;
   if (storage_view.size > GetInlinedCapacity()) {
-    SizeType<A> new_capacity = storage_view.size;
-    construct_data = allocation_tx.Allocate(new_capacity);
+    SizeType<A> requested_capacity = storage_view.size;
+    construct_data = allocation_tx.Allocate(requested_capacity);
+    if (allocation_tx.GetCapacity() >= storage_view.capacity) {
+      // Already using the smallest available heap allocation.
+      return;
+    }
   } else {
     construct_data = GetInlinedData();
   }
@@ -820,17 +859,17 @@
                          storage_view.size);
   }
   ABSL_INTERNAL_CATCH_ANY {
-    SetAllocatedData(storage_view.data, storage_view.capacity);
+    SetAllocation({storage_view.data, storage_view.capacity});
     ABSL_INTERNAL_RETHROW;
   }
 
   DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
 
-  AllocatorTraits<A>::deallocate(GetAllocator(), storage_view.data,
-                                 storage_view.capacity);
+  MallocAdapter<A>::Deallocate(GetAllocator(), storage_view.data,
+                               storage_view.capacity);
 
   if (allocation_tx.DidAllocate()) {
-    AcquireAllocatedData(allocation_tx);
+    SetAllocation(std::move(allocation_tx).Release());
   } else {
     UnsetIsAllocated();
   }
@@ -881,16 +920,16 @@
                            inlined_ptr->GetSize());
     }
     ABSL_INTERNAL_CATCH_ANY {
-      allocated_ptr->SetAllocatedData(allocated_storage_view.data,
-                                      allocated_storage_view.capacity);
+      allocated_ptr->SetAllocation(
+          {allocated_storage_view.data, allocated_storage_view.capacity});
       ABSL_INTERNAL_RETHROW;
     }
 
     DestroyElements<A>(inlined_ptr->GetAllocator(),
                        inlined_ptr->GetInlinedData(), inlined_ptr->GetSize());
 
-    inlined_ptr->SetAllocatedData(allocated_storage_view.data,
-                                  allocated_storage_view.capacity);
+    inlined_ptr->SetAllocation(
+        {allocated_storage_view.data, allocated_storage_view.capacity});
   }
 
   swap(GetSizeAndIsAllocated(), other_storage_ptr->GetSizeAndIsAllocated());
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index 212052e..5c5db12 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -87,6 +87,17 @@
 //
 // This probing function guarantees that after N probes, all the groups of the
 // table will be probed exactly once.
+//
+// The control state and slot array are stored contiguously in a shared heap
+// allocation. The layout of this allocation is: `capacity()` control bytes,
+// one sentinel control byte, `Group::kWidth - 1` cloned control bytes,
+// <possible padding>, `capacity()` slots. The sentinel control byte is used in
+// iteration so we know when we reach the end of the table. The cloned control
+// bytes at the end of the table are cloned from the beginning of the table so
+// groups that begin near the end of the table can see a full group. In cases in
+// which there are more than `capacity()` cloned control bytes, the extra bytes
+// are `kEmpty`, and these ensure that we always see at least one empty slot and
+// can stop an unsuccessful search.
 
 #ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
 #define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
diff --git a/absl/debugging/internal/elf_mem_image.cc b/absl/debugging/internal/elf_mem_image.cc
index 24cc013..d6832ea 100644
--- a/absl/debugging/internal/elf_mem_image.cc
+++ b/absl/debugging/internal/elf_mem_image.cc
@@ -22,6 +22,7 @@
 #include <string.h>
 #include <cassert>
 #include <cstddef>
+#include "absl/base/config.h"
 #include "absl/base/internal/raw_logging.h"
 
 // From binutils/include/elf/common.h (this doesn't appear to be documented
@@ -43,11 +44,11 @@
 
 namespace {
 
-#if __WORDSIZE == 32
+#if __SIZEOF_POINTER__ == 4
 const int kElfClass = ELFCLASS32;
 int ElfBind(const ElfW(Sym) *symbol) { return ELF32_ST_BIND(symbol->st_info); }
 int ElfType(const ElfW(Sym) *symbol) { return ELF32_ST_TYPE(symbol->st_info); }
-#elif __WORDSIZE == 64
+#elif __SIZEOF_POINTER__ == 8
 const int kElfClass = ELFCLASS64;
 int ElfBind(const ElfW(Sym) *symbol) { return ELF64_ST_BIND(symbol->st_info); }
 int ElfType(const ElfW(Sym) *symbol) { return ELF64_ST_TYPE(symbol->st_info); }
@@ -175,17 +176,17 @@
   }
   switch (base_as_char[EI_DATA]) {
     case ELFDATA2LSB: {
-      if (__LITTLE_ENDIAN != __BYTE_ORDER) {
-        assert(false);
-        return;
-      }
+#ifndef ABSL_IS_LITTLE_ENDIAN
+      assert(false);
+      return;
+#endif
       break;
     }
     case ELFDATA2MSB: {
-      if (__BIG_ENDIAN != __BYTE_ORDER) {
-        assert(false);
-        return;
-      }
+#ifndef ABSL_IS_BIG_ENDIAN
+      assert(false);
+      return;
+#endif
       break;
     }
     default: {
diff --git a/absl/debugging/internal/elf_mem_image.h b/absl/debugging/internal/elf_mem_image.h
index 46bfade..8647481 100644
--- a/absl/debugging/internal/elf_mem_image.h
+++ b/absl/debugging/internal/elf_mem_image.h
@@ -31,8 +31,8 @@
 #error ABSL_HAVE_ELF_MEM_IMAGE cannot be directly set
 #endif
 
-#if defined(__ELF__) && defined(__GLIBC__) && !defined(__native_client__) && \
-    !defined(__asmjs__) && !defined(__wasm__)
+#if defined(__ELF__) && !defined(__native_client__) && !defined(__asmjs__) && \
+    !defined(__wasm__)
 #define ABSL_HAVE_ELF_MEM_IMAGE 1
 #endif
 
diff --git a/absl/debugging/internal/stacktrace_config.h b/absl/debugging/internal/stacktrace_config.h
index 29b26bd..ff21b71 100644
--- a/absl/debugging/internal/stacktrace_config.h
+++ b/absl/debugging/internal/stacktrace_config.h
@@ -35,7 +35,7 @@
 // Thread local support required for UnwindImpl.
 #define ABSL_STACKTRACE_INL_HEADER \
   "absl/debugging/internal/stacktrace_generic-inl.inc"
-#endif
+#endif  // defined(ABSL_HAVE_THREAD_LOCAL)
 
 #elif defined(__EMSCRIPTEN__)
 #define ABSL_STACKTRACE_INL_HEADER \
@@ -55,7 +55,7 @@
 // Note: When using glibc this may require -funwind-tables to function properly.
 #define ABSL_STACKTRACE_INL_HEADER \
   "absl/debugging/internal/stacktrace_generic-inl.inc"
-#endif
+#endif  // __has_include(<execinfo.h>)
 #elif defined(__i386__) || defined(__x86_64__)
 #define ABSL_STACKTRACE_INL_HEADER \
   "absl/debugging/internal/stacktrace_x86-inl.inc"
@@ -73,9 +73,10 @@
 // Note: When using glibc this may require -funwind-tables to function properly.
 #define ABSL_STACKTRACE_INL_HEADER \
   "absl/debugging/internal/stacktrace_generic-inl.inc"
-#endif
-#endif
-#endif
+#endif  // __has_include(<execinfo.h>)
+#endif  // defined(__has_include)
+
+#endif  // defined(__linux__) && !defined(__ANDROID__)
 
 // Fallback to the empty implementation.
 #if !defined(ABSL_STACKTRACE_INL_HEADER)
diff --git a/absl/debugging/internal/vdso_support.cc b/absl/debugging/internal/vdso_support.cc
index 6be16d9..0dfe9ca 100644
--- a/absl/debugging/internal/vdso_support.cc
+++ b/absl/debugging/internal/vdso_support.cc
@@ -20,12 +20,25 @@
 
 #ifdef ABSL_HAVE_VDSO_SUPPORT     // defined in vdso_support.h
 
+#if !defined(__has_include)
+#define __has_include(header) 0
+#endif
+
 #include <errno.h>
 #include <fcntl.h>
+#if __has_include(<syscall.h>)
+#include <syscall.h>
+#elif __has_include(<sys/syscall.h>)
 #include <sys/syscall.h>
+#endif
 #include <unistd.h>
 
-#if __GLIBC_PREREQ(2, 16)  // GLIBC-2.16 implements getauxval.
+#if defined(__GLIBC__) && \
+    (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 16))
+#define ABSL_HAVE_GETAUXVAL
+#endif
+
+#ifdef ABSL_HAVE_GETAUXVAL
 #include <sys/auxv.h>
 #endif
 
@@ -65,7 +78,7 @@
 // the operation should be idempotent.
 const void *VDSOSupport::Init() {
   const auto kInvalidBase = debugging_internal::ElfMemImage::kInvalidBase;
-#if __GLIBC_PREREQ(2, 16)
+#ifdef ABSL_HAVE_GETAUXVAL
   if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) {
     errno = 0;
     const void *const sysinfo_ehdr =
@@ -74,7 +87,7 @@
       vdso_base_.store(sysinfo_ehdr, std::memory_order_relaxed);
     }
   }
-#endif  // __GLIBC_PREREQ(2, 16)
+#endif  // ABSL_HAVE_GETAUXVAL
   if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) {
     int fd = open("/proc/self/auxv", O_RDONLY);
     if (fd == -1) {
diff --git a/absl/flags/internal/flag.h b/absl/flags/internal/flag.h
index 8636fad..124a2f1 100644
--- a/absl/flags/internal/flag.h
+++ b/absl/flags/internal/flag.h
@@ -290,7 +290,7 @@
 
 template <typename ValueT, typename GenT,
           typename std::enable_if<std::is_integral<ValueT>::value, int>::type =
-              (GenT{}, 0)>
+              ((void)GenT{}, 0)>
 constexpr FlagDefaultArg DefaultArg(int) {
   return {FlagDefaultSrc(GenT{}.value), FlagDefaultKind::kOneWord};
 }
diff --git a/absl/meta/type_traits.h b/absl/meta/type_traits.h
index e7c1239..8358e79 100644
--- a/absl/meta/type_traits.h
+++ b/absl/meta/type_traits.h
@@ -35,7 +35,7 @@
 #ifndef ABSL_META_TYPE_TRAITS_H_
 #define ABSL_META_TYPE_TRAITS_H_
 
-#include <stddef.h>
+#include <cstddef>
 #include <functional>
 #include <type_traits>
 
@@ -47,6 +47,14 @@
 #define ABSL_META_INTERNAL_STD_CONSTRUCTION_TRAITS_DONT_CHECK_DESTRUCTION 1
 #endif
 
+// Defines the default alignment. `__STDCPP_DEFAULT_NEW_ALIGNMENT__` is a C++17
+// feature.
+#if defined(__STDCPP_DEFAULT_NEW_ALIGNMENT__)
+#define ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT __STDCPP_DEFAULT_NEW_ALIGNMENT__
+#else  // defined(__STDCPP_DEFAULT_NEW_ALIGNMENT__)
+#define ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT alignof(std::max_align_t)
+#endif  // defined(__STDCPP_DEFAULT_NEW_ALIGNMENT__)
+
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 
diff --git a/absl/profiling/BUILD.bazel b/absl/profiling/BUILD.bazel
index 5f3a103..ba4811b 100644
--- a/absl/profiling/BUILD.bazel
+++ b/absl/profiling/BUILD.bazel
@@ -27,7 +27,9 @@
     hdrs = ["internal/sample_recorder.h"],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    visibility = ["//absl:__subpackages__"],
+    visibility = [
+        "//absl:__subpackages__",
+    ],
     deps = [
         "//absl/base:config",
         "//absl/base:core_headers",
diff --git a/absl/strings/charconv.h b/absl/strings/charconv.h
index e04be32..7c50981 100644
--- a/absl/strings/charconv.h
+++ b/absl/strings/charconv.h
@@ -64,8 +64,9 @@
 // the result in `value`.
 //
 // The matching pattern format is almost the same as that of strtod(), except
-// that C locale is not respected, and an initial '+' character in the input
-// range will never be matched.
+// that (1) C locale is not respected, (2) an initial '+' character in the
+// input range will never be matched, and (3) leading whitespace is not
+// ignored.
 //
 // If `fmt` is set, it must be one of the enumerator values of the chars_format.
 // (This is despite the fact that chars_format is a bitmask type.)  If set to
diff --git a/absl/strings/numbers.cc b/absl/strings/numbers.cc
index 966d94b..cbd84c9 100644
--- a/absl/strings/numbers.cc
+++ b/absl/strings/numbers.cc
@@ -505,7 +505,7 @@
     *out++ = '-';
     d = -d;
   }
-  if (std::isinf(d)) {
+  if (d > std::numeric_limits<double>::max()) {
     strcpy(out, "inf");  // NOLINT(runtime/printf)
     return out + 3 - buffer;
   }
diff --git a/absl/time/clock_test.cc b/absl/time/clock_test.cc
index 4bcfc6b..e6f627b 100644
--- a/absl/time/clock_test.cc
+++ b/absl/time/clock_test.cc
@@ -18,6 +18,8 @@
 #if defined(ABSL_HAVE_ALARM)
 #include <signal.h>
 #include <unistd.h>
+#elif defined(_AIX)
+typedef void (*sig_t)(int);
 #elif defined(__linux__) || defined(__APPLE__)
 #error all known Linux and Apple targets have alarm
 #endif