diff --git a/absl/cleanup/BUILD.bazel b/absl/cleanup/BUILD.bazel index 5475439..461ab37 100644 --- a/absl/cleanup/BUILD.bazel +++ b/absl/cleanup/BUILD.bazel
@@ -39,6 +39,7 @@ linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ "//absl/base:core_headers", + "//absl/base:hardening", "//absl/utility", ], ) @@ -54,6 +55,7 @@ ":cleanup_internal", "//absl/base:config", "//absl/base:core_headers", + "//absl/base:hardening", ], )
diff --git a/absl/cleanup/CMakeLists.txt b/absl/cleanup/CMakeLists.txt index eedf449..47460c9 100644 --- a/absl/cleanup/CMakeLists.txt +++ b/absl/cleanup/CMakeLists.txt
@@ -22,6 +22,7 @@ ${ABSL_DEFAULT_COPTS} DEPS absl::core_headers + absl::hardening absl::utility PUBLIC ) @@ -37,6 +38,7 @@ absl::cleanup_internal absl::config absl::core_headers + absl::hardening PUBLIC )
diff --git a/absl/cleanup/cleanup.h b/absl/cleanup/cleanup.h index 632ec6e..078674a 100644 --- a/absl/cleanup/cleanup.h +++ b/absl/cleanup/cleanup.h
@@ -75,6 +75,7 @@ #include <utility> #include "absl/base/config.h" +#include "absl/base/internal/hardening.h" #include "absl/base/macros.h" #include "absl/cleanup/internal/cleanup.h" @@ -95,12 +96,12 @@ Cleanup(Cleanup&& other) = default; void Cancel() && { - ABSL_HARDENING_ASSERT(storage_.IsCallbackEngaged()); + absl::base_internal::HardeningAssert(storage_.IsCallbackEngaged()); storage_.DestroyCallback(); } void Invoke() && { - ABSL_HARDENING_ASSERT(storage_.IsCallbackEngaged()); + absl::base_internal::HardeningAssert(storage_.IsCallbackEngaged()); storage_.InvokeCallback(); storage_.DestroyCallback(); }
diff --git a/absl/cleanup/internal/cleanup.h b/absl/cleanup/internal/cleanup.h index 2022fa7..0a6c3ee 100644 --- a/absl/cleanup/internal/cleanup.h +++ b/absl/cleanup/internal/cleanup.h
@@ -19,6 +19,7 @@ #include <type_traits> #include <utility> +#include "absl/base/internal/hardening.h" #include "absl/base/macros.h" #include "absl/base/thread_annotations.h" #include "absl/utility/utility.h" @@ -55,7 +56,7 @@ } Storage(Storage&& other) { - ABSL_HARDENING_ASSERT(other.IsCallbackEngaged()); + absl::base_internal::HardeningAssert(other.IsCallbackEngaged()); ::new (GetCallbackBuffer()) Callback(std::move(other.GetCallback())); is_callback_engaged_ = true;
diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel index 65081ee..e748f04 100644 --- a/absl/container/BUILD.bazel +++ b/absl/container/BUILD.bazel
@@ -71,6 +71,7 @@ "//absl/base:config", "//absl/base:core_headers", "//absl/base:dynamic_annotations", + "//absl/base:hardening", "//absl/base:iterator_traits_internal", "//absl/base:throw_delegate", "//absl/hash:weakly_mixed_integer", @@ -1146,6 +1147,7 @@ ":layout", "//absl/base:config", "//absl/base:core_headers", + "//absl/base:hardening", "//absl/base:raw_logging_internal", "//absl/base:throw_delegate", "//absl/hash:weakly_mixed_integer", @@ -1367,6 +1369,7 @@ ":layout", "//absl/base:config", "//absl/base:core_headers", + "//absl/base:hardening", "//absl/base:iterator_traits_internal", ], )
diff --git a/absl/container/CMakeLists.txt b/absl/container/CMakeLists.txt index 983d187..396b944 100644 --- a/absl/container/CMakeLists.txt +++ b/absl/container/CMakeLists.txt
@@ -35,6 +35,7 @@ absl::container_memory absl::cord absl::core_headers + absl::hardening absl::layout absl::memory absl::raw_logging_internal @@ -129,6 +130,7 @@ absl::config absl::core_headers absl::dynamic_annotations + absl::hardening absl::iterator_traits_internal absl::throw_delegate absl::memory @@ -1214,6 +1216,7 @@ DEPS absl::config absl::core_headers + absl::hardening absl::iterator_traits_internal absl::layout )
diff --git a/absl/container/btree_test.cc b/absl/container/btree_test.cc index f3dca8a..1bae02e 100644 --- a/absl/container/btree_test.cc +++ b/absl/container/btree_test.cc
@@ -3411,8 +3411,8 @@ EXPECT_EQ(backward, set.begin()); if (IsAssertEnabled()) { - EXPECT_DEATH(forward += 1, "n == 0"); - EXPECT_DEATH(backward += -1, "position >= node->start"); + EXPECT_DEATH(forward += 1, ""); + EXPECT_DEATH(backward += -1, ""); } } @@ -3455,8 +3455,8 @@ EXPECT_EQ(forward, set.end()); if (IsAssertEnabled()) { - EXPECT_DEATH(backward -= 1, "position >= node->start"); - EXPECT_DEATH(forward -= -1, "n == 0"); + EXPECT_DEATH(backward -= 1, ""); + EXPECT_DEATH(forward -= -1, ""); } } @@ -3465,7 +3465,7 @@ absl::btree_set<int> set; for (int i = 0; i < 1000; ++i) set.insert(i); - EXPECT_DEATH(*set.end(), R"regex(Dereferencing end\(\) iterator)regex"); + EXPECT_DEATH(*set.end(), ""); } TEST(Btree, InvalidIteratorComparison) { @@ -3477,13 +3477,10 @@ set2.insert(i); } - constexpr const char *kValueInitDeathMessage = - "Comparing default-constructed iterator with .*non-default-constructed " - "iterator"; typename absl::btree_set<int>::iterator iter1, iter2; EXPECT_EQ(iter1, iter2); - EXPECT_DEATH(void(set1.begin() == iter1), kValueInitDeathMessage); - EXPECT_DEATH(void(iter1 == set1.begin()), kValueInitDeathMessage); + EXPECT_DEATH(void(set1.begin() == iter1), ""); + EXPECT_DEATH(void(iter1 == set1.begin()), ""); constexpr const char *kDifferentContainerDeathMessage = "Comparing iterators from different containers";
diff --git a/absl/container/chunked_queue.h b/absl/container/chunked_queue.h index d5b1184..ff81447 100644 --- a/absl/container/chunked_queue.h +++ b/absl/container/chunked_queue.h
@@ -107,6 +107,7 @@ #include "absl/base/attributes.h" #include "absl/base/config.h" +#include "absl/base/internal/hardening.h" #include "absl/base/internal/iterator_traits.h" #include "absl/base/macros.h" #include "absl/container/internal/chunked_queue.h" @@ -428,22 +429,22 @@ // Returns a reference to the first element in the container. // REQUIRES: !empty() T& front() { - ABSL_HARDENING_ASSERT(!empty()); + absl::base_internal::HardeningAssertNonEmpty(*this); return *head_; } const T& front() const { - ABSL_HARDENING_ASSERT(!empty()); + absl::base_internal::HardeningAssertNonEmpty(*this); return *head_; } // Returns a reference to the last element in the container. // REQUIRES: !empty() T& back() { - ABSL_HARDENING_ASSERT(!empty()); + absl::base_internal::HardeningAssertNonEmpty(*this); return *(&*tail_ - 1); } const T& back() const { - ABSL_HARDENING_ASSERT(!empty()); + absl::base_internal::HardeningAssertNonEmpty(*this); return *(&*tail_ - 1); } @@ -460,7 +461,8 @@ // (It is undefined behavior to swap between two containers with unequal // allocators if propagate_on_container_swap is false, so we don't have to // handle that here like we do in the move-assignment operator.) - ABSL_HARDENING_ASSERT(get_allocator() == other.get_allocator()); + absl::base_internal::HardeningAssert(get_allocator() == + other.get_allocator()); swap(alloc_and_size_.size, other.alloc_and_size_.size); } } @@ -709,7 +711,7 @@ template <typename T, size_t BLo, size_t BHi, typename Allocator> inline void chunked_queue<T, BLo, BHi, Allocator>::pop_front() { - ABSL_HARDENING_ASSERT(!empty()); + absl::base_internal::HardeningAssertNonEmpty(*this); ABSL_ASSERT(head_.block); AllocatorTraits::destroy(alloc_and_size_.allocator(), head_.ptr); ++head_.ptr;
diff --git a/absl/container/fixed_array.h b/absl/container/fixed_array.h index e6f1528..77949bf 100644 --- a/absl/container/fixed_array.h +++ b/absl/container/fixed_array.h
@@ -44,6 +44,7 @@ #include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/base/dynamic_annotations.h" +#include "absl/base/internal/hardening.h" #include "absl/base/internal/iterator_traits.h" #include "absl/base/macros.h" #include "absl/base/optimization.h" @@ -222,7 +223,7 @@ // Returns a reference the ith element of the fixed array. // REQUIRES: 0 <= i < size() reference operator[](size_type i) ABSL_ATTRIBUTE_LIFETIME_BOUND { - ABSL_HARDENING_ASSERT(i < size()); + absl::base_internal::HardeningAssertLT(i, size()); return data()[i]; } @@ -230,7 +231,7 @@ // ith element of the fixed array. // REQUIRES: 0 <= i < size() const_reference operator[](size_type i) const ABSL_ATTRIBUTE_LIFETIME_BOUND { - ABSL_HARDENING_ASSERT(i < size()); + absl::base_internal::HardeningAssertLT(i, size()); return data()[i]; } @@ -258,14 +259,14 @@ // // Returns a reference to the first element of the fixed array. reference front() ABSL_ATTRIBUTE_LIFETIME_BOUND { - ABSL_HARDENING_ASSERT(!empty()); + absl::base_internal::HardeningAssertNonEmpty(*this); return data()[0]; } // Overload of FixedArray::front() to return a reference to the first element // of a fixed array of const values. const_reference front() const ABSL_ATTRIBUTE_LIFETIME_BOUND { - ABSL_HARDENING_ASSERT(!empty()); + absl::base_internal::HardeningAssertNonEmpty(*this); return data()[0]; } @@ -273,14 +274,14 @@ // // Returns a reference to the last element of the fixed array. reference back() ABSL_ATTRIBUTE_LIFETIME_BOUND { - ABSL_HARDENING_ASSERT(!empty()); + absl::base_internal::HardeningAssertNonEmpty(*this); return data()[size() - 1]; } // Overload of FixedArray::back() to return a reference to the last element // of a fixed array of const values. const_reference back() const ABSL_ATTRIBUTE_LIFETIME_BOUND { - ABSL_HARDENING_ASSERT(!empty()); + absl::base_internal::HardeningAssertNonEmpty(*this); return data()[size() - 1]; }
diff --git a/absl/container/internal/btree.h b/absl/container/internal/btree.h index 85e13c9..2e53db6 100644 --- a/absl/container/internal/btree.h +++ b/absl/container/internal/btree.h
@@ -59,6 +59,7 @@ #include <utility> #include "absl/base/config.h" +#include "absl/base/internal/hardening.h" #include "absl/base/internal/raw_logging.h" #include "absl/base/macros.h" #include "absl/base/optimization.h" @@ -1201,12 +1202,16 @@ // Accessors for the key/value the iterator is pointing at. reference operator*() const { - ABSL_HARDENING_ASSERT(node_ != nullptr); + absl::base_internal::HardeningAssertNonNull(node_); assert_valid_generation(node_); - ABSL_HARDENING_ASSERT(position_ >= node_->start()); + absl::base_internal::HardeningAssertGE(position_, + static_cast<int>(node_->start())); if (position_ >= node_->finish()) { - ABSL_HARDENING_ASSERT(!IsEndIterator() && "Dereferencing end() iterator"); - ABSL_HARDENING_ASSERT(position_ < node_->finish()); + // If this assertion fails, we have tried to dereference an end() + // iterator. + absl::base_internal::HardeningAssert(!IsEndIterator()); + absl::base_internal::HardeningAssertLT(position_, + static_cast<int>(node_->finish())); } return node_->value(static_cast<field_type>(position_)); } @@ -1263,10 +1268,11 @@ position_(other.position_) {} bool Equals(const const_iterator other) const { - ABSL_HARDENING_ASSERT(((node_ == nullptr && other.node_ == nullptr) || - (node_ != nullptr && other.node_ != nullptr)) && - "Comparing default-constructed iterator with " - "non-default-constructed iterator."); + absl::base_internal::HardeningAssert( + ((node_ == nullptr && other.node_ == nullptr) || + (node_ != nullptr && other.node_ != nullptr)) && + "Comparing default-constructed iterator with " + "non-default-constructed iterator."); // Note: we use assert instead of ABSL_HARDENING_ASSERT here because this // changes the complexity of Equals from O(1) to O(log(N) + log(M)) where // N/M are sizes of the containers containing node_/other.node_. 
@@ -2222,7 +2228,7 @@ node = node->parent(); } if (position == node->finish()) { - ABSL_HARDENING_ASSERT(n == 0); + absl::base_internal::HardeningAssert(n == 0); return *this = save; } } @@ -2259,7 +2265,8 @@ position = node->position() - 1; node = node->parent(); } - ABSL_HARDENING_ASSERT(position >= node->start()); + absl::base_internal::HardeningAssertGE(position, + static_cast<int>(node->start())); } } else { --n;
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h index 7cf92a4..4aed15a 100644 --- a/absl/container/internal/raw_hash_set.h +++ b/absl/container/internal/raw_hash_set.h
@@ -409,6 +409,8 @@ // - In order to prevent user code from depending on iteration order for small // tables, we would need to randomize the iteration order somehow. constexpr size_t SooCapacity() { return 1; } +// Maximum capacity of a table where we don't need to hash any keys. +constexpr size_t MaxSmallCapacity() { return 1; } // Sentinel type to indicate SOO CommonFields construction. struct soo_tag_t {}; // Sentinel type to indicate SOO CommonFields construction with full size. @@ -426,7 +428,9 @@ constexpr bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; } // Whether a table is small enough that we don't need to hash any keys. -constexpr bool IsSmallCapacity(size_t capacity) { return capacity <= 1; } +constexpr bool IsSmallCapacity(size_t capacity) { + return capacity <= MaxSmallCapacity(); +} // Converts `n` into the next valid capacity, per `IsValidCapacity`. constexpr size_t NormalizeCapacity(size_t n) { @@ -566,6 +570,16 @@ : (size_t{1} << capacity_data_) - 1; } + constexpr bool is_small() const { + // Small tables have capacity 0 or 1. This expression is valid for both + // capacity storage modes. + // Comparing capacity_data_ directly leads to a better generated code. + // One byte comparison is used before computing the capacity in order to + // detect small tables faster for critical path. + static_assert(MaxSmallCapacity() == 1); + return capacity_data_ <= 1; + } + private: // We use these sentinel capacity values in debug mode to indicate different // classes of bugs. 
@@ -634,6 +648,7 @@ template <HashtableCapacityStorageMode StorageMode> class HashtableInlineDataImpl { public: + static constexpr HashtableCapacityStorageMode kStorageMode = StorageMode; using PerTableSeed = PerTableSeedImpl< std::conditional_t<StorageMode == kCapacityByValue, uint16_t, uint8_t>>; using HashtableCapacity = HashtableCapacityImpl<StorageMode>; @@ -656,6 +671,7 @@ return HashtableCapacity::FromRawData(capacity_internal_); } size_t capacity() const { return maybe_invalid_capacity().capacity(); } + bool is_small() const { return maybe_invalid_capacity().is_small(); } void set_capacity(HashtableCapacity c) { capacity_internal_ = c.ToRawData(); } void set_capacity(size_t c) { set_capacity(HashtableCapacity(c)); } @@ -747,7 +763,11 @@ sizeof(HashtableInlineDataImpl<kCapacityByLog>::HashtableCapacity) == 1); static_assert(sizeof(HashtableInlineDataImpl<kCapacityByLog>) == 8); +#ifndef ABSL_SWISSTABLE_INTERNAL_ENABLE_CAPACITY_BY_LOG using HashtableInlineData = HashtableInlineDataImpl<kCapacityByValue>; +#else +using HashtableInlineData = HashtableInlineDataImpl<kCapacityByLog>; +#endif // ABSL_SWISSTABLE_INTERNAL_ENABLE_CAPACITY_BY_LOG using PerTableSeed = HashtableInlineData::PerTableSeed; using HashtableCapacity = HashtableInlineData::HashtableCapacity; @@ -1240,7 +1260,7 @@ void set_capacity(size_t c) { set_capacity(HashtableCapacity(c)); } - bool is_small() const { return IsSmallCapacity(capacity()); } + bool is_small() const { return inline_data_.is_small(); } // The number of slots we can still fill without needing to rehash. // This is stored in the heap allocation before the control bytes. 
@@ -1434,13 +1454,25 @@ GenerationType generation, const GenerationType* generation_ptr) { if (!SwisstableDebugEnabled()) return; - const bool ctrl_is_valid_for_comparison = - ctrl == nullptr || ctrl == DefaultIterControl() || IsFull(*ctrl); + const bool ctrl_is_valid_for_comparison = [ctrl]() { + if (ctrl == nullptr) return true; + if (ctrl == DefaultIterControl()) return true; + // Note: if the following line crashes, then it's likely that `ctrl` is from + // a backing array that has been deallocated. If you see a crash here, it + // likely means that you are comparing an invalid iterator from a table that + // has rehashed, moved, or been destroyed. + return IsFull(*ctrl); + }(); if (SwisstableGenerationsEnabled()) { if (ABSL_PREDICT_FALSE(generation != *generation_ptr)) { - ABSL_RAW_LOG(FATAL, - "Invalid iterator comparison. The table could have rehashed " - "or moved since this iterator was initialized."); + // Note: in the case of a rehash, we would expect to see a sanitizer crash + // above when `ctrl` is dereferenced so this assertion will only catch + // moved table cases, unless we're using a custom allocator that does not + // deallocate the old backing array (e.g. an arena allocator). + ABSL_RAW_LOG( + FATAL, + "Invalid iterator comparison. The table was likely moved (or " + "possibly rehashed) since this iterator was initialized."); } if (ABSL_PREDICT_FALSE(!ctrl_is_valid_for_comparison)) { ABSL_RAW_LOG(
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc index 8c7199e..0edf6bf 100644 --- a/absl/container/internal/raw_hash_set_test.cc +++ b/absl/container/internal/raw_hash_set_test.cc
@@ -926,12 +926,20 @@ static_assert(std::is_empty<std::equal_to<absl::string_view>>::value, ""); static_assert(std::is_empty<std::allocator<int>>::value, ""); - struct MockTable { + struct MockTableByValue { size_t capacity; uint64_t size; void* ctrl; void* slots; }; + struct MockTableByLog { + uint64_t size; + void* ctrl; + void* slots; + }; + using MockTable = + std::conditional_t<HashtableInlineData::kStorageMode == kCapacityByValue, + MockTableByValue, MockTableByLog>; struct StatelessHash { size_t operator()(absl::string_view) const { return 0; } }; @@ -2976,6 +2984,19 @@ EXPECT_DEATH_IF_SUPPORTED(void(iter == t.begin()), InvalidIteratorMatcher()); } +TYPED_TEST(SooTest, IteratorInvalidAssertsEqualityOperatorMovedFrom) { + if (!SwisstableGenerationsEnabled()) + GTEST_SKIP() << "Generations not enabled."; + + TypeParam t; + for (int i = 0; i < 10; ++i) t.insert(i); + auto iter = t.begin(); + + TypeParam t2 = std::move(t); + + EXPECT_DEATH_IF_SUPPORTED(void(iter == t2.begin()), InvalidIteratorMatcher()); +} + #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) template <typename T> class RawHashSamplerTest : public testing::Test {};
diff --git a/absl/debugging/internal/stacktrace_emscripten-inl.inc b/absl/debugging/internal/stacktrace_emscripten-inl.inc index 9f87006..3c125e2 100644 --- a/absl/debugging/internal/stacktrace_emscripten-inl.inc +++ b/absl/debugging/internal/stacktrace_emscripten-inl.inc
@@ -74,10 +74,19 @@ size = static_cast<int>(emscripten_stack_unwind_buffer(pc, stack, kStackLength)); - int result_count = size - skip_count; - if (result_count < 0) result_count = 0; - if (result_count > max_depth) result_count = max_depth; - for (int i = 0; i < result_count; i++) result[i] = stack[i + skip_count]; + int num_frames = size - skip_count; + if (num_frames < 0) num_frames = 0; + if (num_frames > max_depth) num_frames = max_depth; + + int result_count = 0; + for (int i = 0; i < num_frames; i++) { + int stack_index = i + skip_count; + // Follow x86 and stop if the return address is null (end of stack). + if (stack[stack_index] == nullptr) { + break; + } + result[result_count++] = stack[stack_index]; + } if (IS_STACK_FRAMES) { // No implementation for finding out the stack frames yet.
diff --git a/absl/debugging/internal/stacktrace_riscv-inl.inc b/absl/debugging/internal/stacktrace_riscv-inl.inc index 7ae7fef..a4fb383 100644 --- a/absl/debugging/internal/stacktrace_riscv-inl.inc +++ b/absl/debugging/internal/stacktrace_riscv-inl.inc
@@ -142,6 +142,10 @@ void *return_address = nullptr; while (frame_pointer && n < max_depth) { return_address = frame_pointer[-1]; + // Follow x86 and stop if the return address is null (end of stack). + if (return_address == nullptr) { + break; + } // The absl::GetStackFrames routine is called when we are in some // informational context (the failure signal handler for example). Use the
diff --git a/absl/debugging/stacktrace_test.cc b/absl/debugging/stacktrace_test.cc index 5b54fe0..a1108b3 100644 --- a/absl/debugging/stacktrace_test.cc +++ b/absl/debugging/stacktrace_test.cc
@@ -378,4 +378,14 @@ } #endif +TEST(StackTrace, NoNullptrInPopulatedRange) { + constexpr int kMaxDepth = 1024; + void* results[kMaxDepth]; + int depth = absl::GetStackTrace(results, kMaxDepth, 0); + for (int i = 0; i < depth; ++i) { + EXPECT_NE(results[i], nullptr) << "Unexpected nullptr found at index " << i; + } +} + + } // namespace
diff --git a/absl/memory/memory_test.cc b/absl/memory/memory_test.cc index 935b424..34bd212 100644 --- a/absl/memory/memory_test.cc +++ b/absl/memory/memory_test.cc
@@ -69,6 +69,18 @@ p = absl::make_unique_for_overwrite<int>(); } +// The initialization tests need to suppress dead-store elimination, otherwise +// memset is optimized away, and lifetime is assumed to begin after new, +// triggering uninitialized variable warnings. Various tricks to prevent memset +// from being optimized away still result in uninitialized variable warnings. +// Once we move to a C++20 floor we can delegate to +// std::make_unique_for_overwrite and avoid testing +// absl::make_unique_for_overwrite. +// https://github.com/gcc-mirror/gcc/blob/be1da01067c898a3e3979bfb1edd05f115ab2e3e/libstdc%2B%2B-v3/testsuite/20_util/unique_ptr/creation/for_overwrite.cc#L1 +#if defined(__GNUC__) && !defined(__clang__) +#pragma GCC push_options +#pragma GCC optimize("O0") +#endif + // InitializationVerifier fills in a pattern when allocated so we can // distinguish between its default and value initialized states (without // accessing truly uninitialized memory). @@ -114,6 +126,10 @@ EXPECT_EQ(pattern, p[1].b); } +#if defined(__GNUC__) && !defined(__clang__) +#pragma GCC pop_options +#endif + struct ArrayWatch { void* operator new[](size_t n) { allocs().push_back(n);
diff --git a/absl/strings/BUILD.bazel b/absl/strings/BUILD.bazel index 97187a5..25af3f2 100644 --- a/absl/strings/BUILD.bazel +++ b/absl/strings/BUILD.bazel
@@ -1014,6 +1014,7 @@ "//absl/log:check", "//absl/random", "//absl/types:compare", + "//absl/types:span", "@googletest//:gtest", "@googletest//:gtest_main", ],
diff --git a/absl/strings/cord.cc b/absl/strings/cord.cc index 1d5e255..d15b5c3 100644 --- a/absl/strings/cord.cc +++ b/absl/strings/cord.cc
@@ -1086,6 +1086,24 @@ } } +size_t CopyCordToSpan(const Cord& src, absl::Span<char> dst) { + if (src.size() <= dst.size()) { + src.CopyToArrayImpl(dst.data()); + return src.size(); + } + + const size_t result = dst.size(); + for (absl::string_view chunk : src.Chunks()) { + size_t n = std::min(chunk.size(), dst.size()); + if (n == 0) { + break; + } + memcpy(dst.data(), chunk.data(), n); + dst.remove_prefix(n); + } + return result; +} + Cord Cord::ChunkIterator::AdvanceAndReadBytes(size_t n) { // Failure of this assertion indicates an attempt to iterate past `end()`. absl::base_internal::HardeningAssertGE(bytes_remaining_, n);
diff --git a/absl/strings/cord.h b/absl/strings/cord.h index 4f27fca..410c957 100644 --- a/absl/strings/cord.h +++ b/absl/strings/cord.h
@@ -98,6 +98,7 @@ #include "absl/strings/string_view.h" #include "absl/types/compare.h" #include "absl/types/optional.h" +#include "absl/types/span.h" namespace absl { ABSL_NAMESPACE_BEGIN @@ -107,6 +108,7 @@ Cord MakeCordFromExternal(absl::string_view, Releaser&&); void CopyCordToString(const Cord& src, std::string* absl_nonnull dst); void AppendCordToString(const Cord& src, std::string* absl_nonnull dst); +[[nodiscard]] size_t CopyCordToSpan(const Cord& src, absl::Span<char> dst); // Cord memory accounting modes enum class CordMemoryAccounting { @@ -434,6 +436,12 @@ friend void AppendCordToString(const Cord& src, std::string* absl_nonnull dst); + // CopyCordToSpan() + // + // Copies up to `dst.size()` bytes starting from the beginning of `src` to + // `dst`. Returns the number of bytes copied. + friend size_t CopyCordToSpan(const Cord& src, absl::Span<char> dst); + class CharIterator; //----------------------------------------------------------------------------
diff --git a/absl/strings/cord_test.cc b/absl/strings/cord_test.cc index 0be2aa4..7591758 100644 --- a/absl/strings/cord_test.cc +++ b/absl/strings/cord_test.cc
@@ -62,6 +62,7 @@ #include "absl/strings/str_format.h" #include "absl/strings/string_view.h" #include "absl/types/compare.h" +#include "absl/types/span.h" // convenience local constants static constexpr auto FLAT = absl::cord_internal::FLAT; @@ -735,6 +736,53 @@ "appending ", "to ", "a ", "string."}))); } +static void VerifyCopyToSpan(const absl::Cord& cord) { + // Test with span exactly the same size as the cord. + { + std::string dst(cord.size(), '\0'); + size_t copied = absl::CopyCordToSpan(cord, absl::MakeSpan(dst)); + EXPECT_EQ(copied, cord.size()); + EXPECT_EQ(dst, cord); + } + + // Test with span larger than the cord. + { + std::string dst(cord.size() + 10, 'x'); + size_t copied = absl::CopyCordToSpan(cord, absl::MakeSpan(dst)); + EXPECT_EQ(copied, cord.size()); + EXPECT_EQ(absl::string_view(dst).substr(0, copied), cord); + if (cord.size() < dst.size()) { + absl::string_view tail = absl::string_view(dst).substr(copied); + EXPECT_EQ(tail, std::string(tail.size(), 'x')); + } + } + + // Test with span smaller than the cord. + { + size_t target_size = cord.size() / 2; + std::string dst(target_size, '\0'); + size_t copied = absl::CopyCordToSpan(cord, absl::MakeSpan(dst)); + EXPECT_EQ(copied, target_size); + EXPECT_EQ(dst, std::string(cord).substr(0, target_size)); + } + + // Test with empty span. + { + char c = 'x'; + size_t copied = absl::CopyCordToSpan(cord, absl::MakeSpan(&c, 0)); + EXPECT_EQ(copied, 0); + EXPECT_EQ(c, 'x'); + } +} + +TEST_P(CordTest, CopyToSpan) { + VerifyCopyToSpan(absl::Cord()); // Empty cords cannot be hardened. + VerifyCopyToSpan(MaybeHardened(absl::Cord("small cord"))); + VerifyCopyToSpan(MaybeHardened( + absl::MakeFragmentedCord({"fragmented ", "cord ", "to ", "test ", + "copying ", "to ", "a ", "span."}))); +} + TEST_P(CordTest, AppendEmptyBuffer) { absl::Cord cord; cord.Append(absl::CordBuffer());
diff --git a/absl/strings/substitute.h b/absl/strings/substitute.h index c93b1cc..3e6a8c7 100644 --- a/absl/strings/substitute.h +++ b/absl/strings/substitute.h
@@ -123,9 +123,9 @@ // probably using them as 8-bit integers and would probably prefer an integer // representation. However, we can't really know, so we make the caller decide // what to do. - Arg(char value) // NOLINT(google-explicit-constructor) - : piece_(scratch_, 1) { + Arg(char value) { // NOLINT(google-explicit-constructor) scratch_[0] = value; + piece_ = absl::string_view(scratch_, 1); } Arg(short value) // NOLINT(*) : piece_(scratch_,
diff --git a/absl/types/variant.h b/absl/types/variant.h index bb73cce..fd90c63 100644 --- a/absl/types/variant.h +++ b/absl/types/variant.h
@@ -56,13 +56,17 @@ using variant_alternative_t ABSL_REFACTOR_INLINE = std::variant_alternative_t<I, T>; -using std::variant_npos; +inline constexpr size_t variant_npos ABSL_REFACTOR_INLINE + = std::variant_npos; template <typename T> using variant_size ABSL_REFACTOR_INLINE = std::variant_size<T>; -using std::variant_size_v; +template <typename T> +inline constexpr size_t variant_size_v ABSL_REFACTOR_INLINE + = std::variant_size_v<T>; + using std::visit; namespace variant_internal {
diff --git a/absl/utility/utility.h b/absl/utility/utility.h index 06e5378..4dbc01c 100644 --- a/absl/utility/utility.h +++ b/absl/utility/utility.h
@@ -38,15 +38,22 @@ using std::apply; using std::exchange; using std::forward; -using std::in_place; -using std::in_place_index; + +inline constexpr const std::in_place_t& in_place ABSL_DEPRECATE_AND_INLINE() = + std::in_place; + +template <size_t I> +inline constexpr const std::in_place_index_t<I>& in_place_index +ABSL_DEPRECATE_AND_INLINE() = std::in_place_index<I>; template <size_t I> using in_place_index_t ABSL_DEPRECATE_AND_INLINE() = std::in_place_index_t<I>; using in_place_t ABSL_DEPRECATE_AND_INLINE() = std::in_place_t; -using std::in_place_type; +template <class T> +inline constexpr const std::in_place_type_t<T>& in_place_type +ABSL_DEPRECATE_AND_INLINE() = std::in_place_type<T>; template <class T> using in_place_type_t ABSL_DEPRECATE_AND_INLINE() = std::in_place_type_t<T>;