diff --git a/absl/base/macros.h b/absl/base/macros.h index ff89944..446a445 100644 --- a/absl/base/macros.h +++ b/absl/base/macros.h
@@ -169,42 +169,65 @@ #define ABSL_INTERNAL_RETHROW do {} while (false) #endif // ABSL_HAVE_EXCEPTIONS -// ABSL_DEPRECATE_AND_INLINE() +// ABSL_REFACTOR_INLINE // -// Marks a function or type alias as deprecated and tags it to be picked up for -// automated refactoring by go/cpp-inliner. It can added to inline function -// definitions or type aliases. It should only be used within a header file. It -// differs from `ABSL_DEPRECATED` in the following ways: +// Marks a function or type for automated refactoring by go/cpp-inliner. It can +// be used on inline function definitions or type aliases in header files and +// should be combined with the `[[deprecated]]` attribute. +// +// Using `ABSL_REFACTOR_INLINE` differs from using the `[[deprecated]]` alone in +// the following ways: // // 1. New uses of the function or type will be discouraged via Tricorder // warnings. // 2. If enabled via `METADATA`, automated changes will be sent out inlining the // functions's body or replacing the type where it is used. // -// For example: +// Examples: // -// ABSL_DEPRECATE_AND_INLINE() inline int OldFunc(int x) { +// [[deprecated("Use NewFunc() instead")]] ABSL_REFACTOR_INLINE +// inline int OldFunc(int x) { // return NewFunc(x, 0); // } // -// will mark `OldFunc` as deprecated, and the go/cpp-inliner service will -// replace calls to `OldFunc(x)` with calls to `NewFunc(x, 0)`. Once all calls -// to `OldFunc` have been replaced, `OldFunc` can be deleted. +// using OldType [[deprecated("Use NewType instead")]] ABSL_REFACTOR_INLINE = +// NewType; +// +// will mark `OldFunc` and `OldType` as deprecated, and the go/cpp-inliner +// service will replace calls to `OldFunc(x)` with calls to `NewFunc(x, 0)` and +// `OldType` with `NewType`. Once all replacements have been completed, the old +// function or type can be deleted. // // See go/cpp-inliner for more information. // // Note: go/cpp-inliner is Google-internal service for automated refactoring. 
// While open-source users do not have access to this service, the macro is -// provided for compatibility, and so that users receive deprecation warnings. -#if ABSL_HAVE_CPP_ATTRIBUTE(deprecated) && \ - ABSL_HAVE_CPP_ATTRIBUTE(clang::annotate) -#define ABSL_DEPRECATE_AND_INLINE() [[deprecated, clang::annotate("inline-me")]] -#elif ABSL_HAVE_CPP_ATTRIBUTE(deprecated) -#define ABSL_DEPRECATE_AND_INLINE() [[deprecated]] +// provided for compatibility. +#if ABSL_HAVE_CPP_ATTRIBUTE(clang::annotate) +#define ABSL_REFACTOR_INLINE [[clang::annotate("inline-me")]] #else -#define ABSL_DEPRECATE_AND_INLINE() +#define ABSL_REFACTOR_INLINE #endif +// ABSL_DEPRECATE_AND_INLINE() +// +// This is the original macro used by go/cpp-inliner that combines +// [[deprecated]] and ABSL_REFACTOR_INLINE. +// +// Examples: +// +// ABSL_DEPRECATE_AND_INLINE() inline int OldFunc(int x) { +// return NewFunc(x, 0); +// } +// +// using OldType ABSL_DEPRECATE_AND_INLINE() = NewType; +// +// The combination of `[[deprecated("Use X instead")]]` and +// `ABSL_REFACTOR_INLINE` is preferred because it provides a more informative +// deprecation message to developers, especially those that do not have access +// to the automated refactoring capabilities of go/cpp-inliner. +#define ABSL_DEPRECATE_AND_INLINE() [[deprecated]] ABSL_REFACTOR_INLINE + // Requires the compiler to prove that the size of the given object is at least // the expected amount. #if ABSL_HAVE_ATTRIBUTE(diagnose_if) && ABSL_HAVE_BUILTIN(__builtin_object_size)
diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc index 965476a..1b8204b 100644 --- a/absl/container/internal/hashtablez_sampler.cc +++ b/absl/container/internal/hashtablez_sampler.cc
@@ -230,8 +230,8 @@ } } -void RecordInsertSlow(HashtablezInfo* info, size_t hash, - size_t distance_from_desired) { +void RecordInsertMissSlow(HashtablezInfo* info, size_t hash, + size_t distance_from_desired) { // SwissTables probe in groups of 16, so scale this to count items probes and // not offset from desired. size_t probe_length = distance_from_desired;
diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h index 55ce7ed..5c59a9e 100644 --- a/absl/container/internal/hashtablez_sampler.h +++ b/absl/container/internal/hashtablez_sampler.h
@@ -118,8 +118,8 @@ void RecordStorageChangedSlow(HashtablezInfo* info, size_t size, size_t capacity); -void RecordInsertSlow(HashtablezInfo* info, size_t hash, - size_t distance_from_desired); +void RecordInsertMissSlow(HashtablezInfo* info, size_t hash, + size_t distance_from_desired); void RecordEraseSlow(HashtablezInfo* info); @@ -174,9 +174,9 @@ RecordClearedReservationSlow(info_); } - inline void RecordInsert(size_t hash, size_t distance_from_desired) { + inline void RecordInsertMiss(size_t hash, size_t distance_from_desired) { if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; - RecordInsertSlow(info_, hash, distance_from_desired); + RecordInsertMissSlow(info_, hash, distance_from_desired); } inline void RecordErase() { @@ -207,7 +207,8 @@ inline void RecordRehash(size_t /*total_probe_length*/) {} inline void RecordReservation(size_t /*target_capacity*/) {} inline void RecordClearedReservation() {} - inline void RecordInsert(size_t /*hash*/, size_t /*distance_from_desired*/) {} + inline void RecordInsertMiss(size_t /*hash*/, + size_t /*distance_from_desired*/) {} inline void RecordErase() {} friend inline void swap(HashtablezInfoHandle& /*lhs*/,
diff --git a/absl/container/internal/hashtablez_sampler_test.cc b/absl/container/internal/hashtablez_sampler_test.cc index ef80cb0..80fe3cf 100644 --- a/absl/container/internal/hashtablez_sampler_test.cc +++ b/absl/container/internal/hashtablez_sampler_test.cc
@@ -166,7 +166,7 @@ EXPECT_EQ(info.capacity.load(), 20); } -TEST(HashtablezInfoTest, RecordInsert) { +TEST(HashtablezInfoTest, RecordInsertMiss) { HashtablezInfo info; absl::MutexLock l(info.init_mu); const int64_t test_stride = 25; @@ -179,17 +179,17 @@ /*value_size=*/test_value_size, /*soo_capacity_value=*/0); EXPECT_EQ(info.max_probe_length.load(), 0); - RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength); + RecordInsertMissSlow(&info, 0x0000FF00, 6 * kProbeLength); EXPECT_EQ(info.max_probe_length.load(), 6); EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000FF00); EXPECT_EQ(info.hashes_bitwise_or.load(), 0x0000FF00); EXPECT_EQ(info.hashes_bitwise_xor.load(), 0x0000FF00); - RecordInsertSlow(&info, 0x000FF000, 4 * kProbeLength); + RecordInsertMissSlow(&info, 0x000FF000, 4 * kProbeLength); EXPECT_EQ(info.max_probe_length.load(), 6); EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000F000); EXPECT_EQ(info.hashes_bitwise_or.load(), 0x000FFF00); EXPECT_EQ(info.hashes_bitwise_xor.load(), 0x000F0F00); - RecordInsertSlow(&info, 0x00FF0000, 12 * kProbeLength); + RecordInsertMissSlow(&info, 0x00FF0000, 12 * kProbeLength); EXPECT_EQ(info.max_probe_length.load(), 12); EXPECT_EQ(info.hashes_bitwise_and.load(), 0x00000000); EXPECT_EQ(info.hashes_bitwise_or.load(), 0x00FFFF00); @@ -210,7 +210,7 @@ /*soo_capacity_value=*/1); EXPECT_EQ(info.num_erases.load(), 0); EXPECT_EQ(info.size.load(), 0); - RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength); + RecordInsertMissSlow(&info, 0x0000FF00, 6 * kProbeLength); EXPECT_EQ(info.size.load(), 1); RecordEraseSlow(&info); EXPECT_EQ(info.size.load(), 0); @@ -233,10 +233,10 @@ /*value_size=*/test_value_size, /*soo_capacity_value=*/0); - RecordInsertSlow(&info, 0x1, 0); - RecordInsertSlow(&info, 0x2, kProbeLength); - RecordInsertSlow(&info, 0x4, kProbeLength); - RecordInsertSlow(&info, 0x8, 2 * kProbeLength); + RecordInsertMissSlow(&info, 0x1, 0); + RecordInsertMissSlow(&info, 0x2, kProbeLength); + RecordInsertMissSlow(&info, 0x4, 
kProbeLength); + RecordInsertMissSlow(&info, 0x8, 2 * kProbeLength); EXPECT_EQ(info.size.load(), 4); EXPECT_EQ(info.total_probe_length.load(), 4);
diff --git a/absl/container/internal/inlined_vector.h b/absl/container/internal/inlined_vector.h index 85e8960..c7b709f 100644 --- a/absl/container/internal/inlined_vector.h +++ b/absl/container/internal/inlined_vector.h
@@ -795,16 +795,9 @@ move_construction_values, move_construction.size()); - for (Pointer<A> - destination = move_assignment.data() + move_assignment.size(), - last_destination = move_assignment.data(), - source = move_assignment_values + move_assignment.size(); - ;) { - --destination; - --source; - if (destination < last_destination) break; - *destination = std::move(*source); - } + std::move_backward(move_assignment_values, + move_assignment_values + move_assignment.size(), + move_assignment.data() + move_assignment.size()); AssignElements<A>(insert_assignment.data(), values, insert_assignment.size());
diff --git a/absl/container/internal/raw_hash_set.cc b/absl/container/internal/raw_hash_set.cc index 09076b4..0ef10ef 100644 --- a/absl/container/internal/raw_hash_set.cc +++ b/absl/container/internal/raw_hash_set.cc
@@ -503,7 +503,7 @@ ResetGrowthLeft(common); FindInfo find_info = find_first_non_full(common, new_hash); SetCtrlInLargeTable(common, find_info.offset, H2(new_hash), slot_size); - common.infoz().RecordInsert(new_hash, find_info.probe_length); + common.infoz().RecordInsertMiss(new_hash, find_info.probe_length); common.infoz().RecordRehash(total_probe_length); return find_info.offset; } @@ -719,7 +719,7 @@ ABSL_SWISSTABLE_ASSERT(infoz.IsSampled()); infoz.RecordStorageChanged(common.size() - 1, common.capacity()); infoz.RecordRehash(total_probe_length); - infoz.RecordInsert(hash, distance_from_desired); + infoz.RecordInsertMiss(hash, distance_from_desired); common.set_has_infoz(); // TODO(b/413062340): we could potentially store infoz in place of the // control pointer for the capacity 1 case. @@ -1637,7 +1637,7 @@ PrepareInsertCommon(common); common.growth_info().OverwriteControlAsFull(common.control()[target.offset]); SetCtrlInLargeTable(common, target.offset, H2(hash), policy.slot_size); - common.infoz().RecordInsert(hash, target.probe_length); + common.infoz().RecordInsertMiss(hash, target.probe_length); return target.offset; } @@ -1658,7 +1658,7 @@ const size_t new_hash = get_hash(common.seed().seed()); SetCtrlInSingleGroupTable(common, SooSlotIndex(), H2(new_hash), policy.slot_size); - common.infoz().RecordInsert(new_hash, /*distance_from_desired=*/0); + common.infoz().RecordInsertMiss(new_hash, /*distance_from_desired=*/0); return SooSlotIndex(); } @@ -1921,7 +1921,7 @@ // a full `insert`. 
const size_t hash = (*hasher)(hash_fn, that_slot, seed); FindInfo target = find_first_non_full(common, hash); - infoz.RecordInsert(hash, target.probe_length); + infoz.RecordInsertMiss(hash, target.probe_length); offset = target.offset; SetCtrl(common, offset, H2(hash), slot_size); copy_fn(SlotAddress(common.slot_array(), offset, slot_size), that_slot); @@ -1971,7 +1971,7 @@ target_group.offset += mask_empty.LowestBitSet(); target_group.offset &= common.capacity(); SetCtrl(common, target_group.offset, H2(hash), policy.slot_size); - common.infoz().RecordInsert(hash, target_group.probe_length); + common.infoz().RecordInsertMiss(hash, target_group.probe_length); return target_group.offset; } } // namespace @@ -2067,6 +2067,14 @@ static_assert(MaxSooSlotSize() == 16); #endif +template void* AllocateBackingArray<BackingArrayAlignment(alignof(size_t)), + std::allocator<char>>(void* alloc, + size_t n); +template void DeallocateBackingArray<BackingArrayAlignment(alignof(size_t)), + std::allocator<char>>( + void* alloc, size_t capacity, ctrl_t* ctrl, size_t slot_size, + size_t slot_align, bool had_infoz); + } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h index ab6657c..faa7880 100644 --- a/absl/container/internal/raw_hash_set.h +++ b/absl/container/internal/raw_hash_set.h
@@ -1169,32 +1169,30 @@ } // General notes on capacity/growth methods below: -// - We use 27/32 as maximum load factor. For 16-wide groups, that gives an -// average of 2.5 empty slots per group. +// - We use 7/8th as maximum load factor. For 16-wide groups, that gives an +// average of two empty slots per group. +// - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity. // - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we // never need to probe (the whole table fits in one group) so we don't need a // load factor less than 1. -// - For (capacity+1) == Group::kWidth, growth is capacity - 1 since we need -// at least one empty slot for probing algorithm. -// - For (capacity+1) > Group::kWidth, growth is 27/32*capacity. // Given `capacity`, applies the load factor; i.e., it returns the maximum // number of values we should put into the table before a resizing rehash. constexpr size_t CapacityToGrowth(size_t capacity) { ABSL_SWISSTABLE_ASSERT(IsValidCapacity(capacity)); - // `capacity*27/32` + // `capacity*7/8` if (Group::kWidth == 8 && capacity == 7) { - // formula does not work when x==7. + // x-x/8 does not work when x==7. return 6; } - return capacity - capacity / 8 - capacity / 32; + return capacity - capacity / 8; } // Given `size`, "unapplies" the load factor to find how large the capacity // should be to stay within the load factor. // // For size == 0, returns 0. -// For other values, returns the same as `NormalizeCapacity(size*32/27)`. +// For other values, returns the same as `NormalizeCapacity(size*8/7)`. constexpr size_t SizeToCapacity(size_t size) { if (size == 0) { return 0; @@ -1203,10 +1201,18 @@ // Shifting right `~size_t{}` by `leading_zeros` yields // NormalizeCapacity(size). 
int leading_zeros = absl::countl_zero(size); - size_t next_capacity = ~size_t{} >> leading_zeros; - size_t max_size_for_next_capacity = CapacityToGrowth(next_capacity); + constexpr size_t kLast3Bits = size_t{7} << (sizeof(size_t) * 8 - 3); + // max_size_for_next_capacity = max_load_factor * next_capacity + // = (7/8) * (~size_t{} >> leading_zeros) + // = (7/8*~size_t{}) >> leading_zeros + // = kLast3Bits >> leading_zeros + size_t max_size_for_next_capacity = kLast3Bits >> leading_zeros; // Decrease shift if size is too big for the minimum capacity. leading_zeros -= static_cast<int>(size > max_size_for_next_capacity); + if constexpr (Group::kWidth == 8) { + // Formula doesn't work when size==7 for 8-wide groups. + leading_zeros -= (size == 7); + } return (~size_t{}) >> leading_zeros; } @@ -3697,6 +3703,14 @@ bool); #endif +extern template void* AllocateBackingArray< + BackingArrayAlignment(alignof(size_t)), std::allocator<char>>(void* alloc, + size_t n); +extern template void DeallocateBackingArray< + BackingArrayAlignment(alignof(size_t)), std::allocator<char>>( + void* alloc, size_t capacity, ctrl_t* ctrl, size_t slot_size, + size_t slot_align, bool had_infoz); + } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc index e3b5b72..499e966 100644 --- a/absl/container/internal/raw_hash_set_test.cc +++ b/absl/container/internal/raw_hash_set_test.cc
@@ -302,7 +302,7 @@ } EXPECT_EQ(CapacityToGrowth(15), 14); EXPECT_EQ(CapacityToGrowth(31), 28); - EXPECT_EQ(CapacityToGrowth(63), 55); + EXPECT_EQ(CapacityToGrowth(63), 56); } TEST(Util, GrowthAndCapacity) {
diff --git a/absl/debugging/stacktrace.cc b/absl/debugging/stacktrace.cc index 420005c..acc8b66 100644 --- a/absl/debugging/stacktrace.cc +++ b/absl/debugging/stacktrace.cc
@@ -43,6 +43,7 @@ #include <algorithm> #include <atomic> #include <iterator> +#include <type_traits> #include "absl/base/attributes.h" #include "absl/base/config.h" @@ -74,18 +75,23 @@ typedef int (*Unwinder)(void**, int*, int, int, const void*, int*); std::atomic<Unwinder> custom; +constexpr size_t kMinPageSize = 4096; + +struct FixupBuffer { + static constexpr size_t kMaxStackElements = 128; // Can be reduced if needed + uintptr_t frames[kMaxStackElements]; + int sizes[kMaxStackElements]; +}; +static_assert(std::is_trivially_default_constructible_v<FixupBuffer>); +static_assert(sizeof(FixupBuffer) < kMinPageSize / 2, + "buffer size should be no larger than a small fraction of a page, " + "to avoid stack overflows"); + template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT> -ABSL_ATTRIBUTE_ALWAYS_INLINE inline int Unwind(void** result, uintptr_t* frames, - int* sizes, size_t max_depth, - int skip_count, const void* uc, - int* min_dropped_frames, - bool unwind_with_fixup = true) { - static constexpr size_t kMinPageSize = 4096; - - // Allow up to ~half a page, leaving some slack space for local variables etc. - static constexpr size_t kMaxStackElements = - (kMinPageSize / 2) / (sizeof(*frames) + sizeof(*sizes)); - +ABSL_ATTRIBUTE_ALWAYS_INLINE inline int Unwind( + void** result, uintptr_t* frames, int* sizes, size_t max_depth, + int skip_count, const void* uc, int* min_dropped_frames, + FixupBuffer* fixup_buffer /* if NULL, fixups are skipped */) { // Allocate a buffer dynamically, using the signal-safe allocator. static constexpr auto allocate = [](size_t num_bytes) -> void* { base_internal::InitSigSafeArena(); @@ -93,16 +99,13 @@ num_bytes, base_internal::SigSafeArena()); }; - uintptr_t frames_stackbuf[kMaxStackElements]; - int sizes_stackbuf[kMaxStackElements]; - // We only need to free the buffers if we allocated them with the signal-safe // allocator. 
bool must_free_frames = false; bool must_free_sizes = false; - unwind_with_fixup = - unwind_with_fixup && internal_stacktrace::ShouldFixUpStack(); + bool unwind_with_fixup = + fixup_buffer != nullptr && internal_stacktrace::ShouldFixUpStack(); #ifdef _WIN32 if (unwind_with_fixup) { @@ -121,8 +124,8 @@ // here. if (frames == nullptr) { - if (max_depth <= std::size(frames_stackbuf)) { - frames = frames_stackbuf; + if (max_depth <= std::size(fixup_buffer->frames)) { + frames = fixup_buffer->frames; } else { frames = static_cast<uintptr_t*>(allocate(max_depth * sizeof(*frames))); must_free_frames = true; @@ -130,8 +133,8 @@ } if (sizes == nullptr) { - if (max_depth <= std::size(sizes_stackbuf)) { - sizes = sizes_stackbuf; + if (max_depth <= std::size(fixup_buffer->sizes)) { + sizes = fixup_buffer->sizes; } else { sizes = static_cast<int*>(allocate(max_depth * sizeof(*sizes))); must_free_sizes = true; @@ -181,9 +184,10 @@ ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int internal_stacktrace::GetStackFrames(void** result, uintptr_t* frames, int* sizes, int max_depth, int skip_count) { + FixupBuffer fixup_stack_buf; return Unwind<true, false>(result, frames, sizes, static_cast<size_t>(max_depth), skip_count, - nullptr, nullptr); + nullptr, nullptr, &fixup_stack_buf); } ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int @@ -191,9 +195,10 @@ int* sizes, int max_depth, int skip_count, const void* uc, int* min_dropped_frames) { + FixupBuffer fixup_stack_buf; return Unwind<true, true>(result, frames, sizes, static_cast<size_t>(max_depth), skip_count, uc, - min_dropped_frames); + min_dropped_frames, &fixup_stack_buf); } ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int @@ -201,22 +206,24 @@ int skip_count) { return Unwind<false, false>(result, nullptr, nullptr, static_cast<size_t>(max_depth), skip_count, - nullptr, nullptr, /*unwind_with_fixup=*/false); + nullptr, nullptr, nullptr); } ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int GetStackTrace( void** 
result, int max_depth, int skip_count) { + FixupBuffer fixup_stack_buf; return Unwind<false, false>(result, nullptr, nullptr, static_cast<size_t>(max_depth), skip_count, - nullptr, nullptr); + nullptr, nullptr, &fixup_stack_buf); } ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int GetStackTraceWithContext(void** result, int max_depth, int skip_count, const void* uc, int* min_dropped_frames) { + FixupBuffer fixup_stack_buf; return Unwind<false, true>(result, nullptr, nullptr, static_cast<size_t>(max_depth), skip_count, uc, - min_dropped_frames); + min_dropped_frames, &fixup_stack_buf); } void SetStackUnwinder(Unwinder w) {
diff --git a/absl/hash/hash_test.cc b/absl/hash/hash_test.cc index d0a0e55..500d1e0 100644 --- a/absl/hash/hash_test.cc +++ b/absl/hash/hash_test.cc
@@ -1311,7 +1311,7 @@ for (size_t i = 0; i < 128 * 1024; ++i) { size_t v = absl::rotl(i, bit); set.insert(v); - ASSERT_LT(HashtableDebugAccess<decltype(set)>::GetNumProbes(set, v), 32) + ASSERT_LT(HashtableDebugAccess<decltype(set)>::GetNumProbes(set, v), 48) << bit << " " << i; } }
diff --git a/absl/status/internal/status_matchers.h b/absl/status/internal/status_matchers.h index 0750622..d11742b 100644 --- a/absl/status/internal/status_matchers.h +++ b/absl/status/internal/status_matchers.h
@@ -69,8 +69,8 @@ bool MatchAndExplain( StatusOrType actual_value, ::testing::MatchResultListener* result_listener) const override { - if (!GetStatus(actual_value).ok()) { - *result_listener << "which has status " << GetStatus(actual_value); + if (!actual_value.ok()) { + *result_listener << "which has status " << actual_value.status(); return false; }
diff --git a/absl/strings/escaping.cc b/absl/strings/escaping.cc index 49ce583..89aa603 100644 --- a/absl/strings/escaping.cc +++ b/absl/strings/escaping.cc
@@ -78,30 +78,31 @@ // // Unescapes C escape sequences and is the reverse of CEscape(). // -// If `src` is valid, stores the unescaped string `dst`, and returns -// true. Otherwise returns false and optionally stores the error -// description in `error`. Set `error` to nullptr to disable error -// reporting. +// If `src` is valid, stores the unescaped string in `dst` and the length of +// unescaped string in `dst_size`, and returns true. Otherwise returns false +// and optionally stores the error description in `error`. Set `error` to +// nullptr to disable error reporting. // -// `src` and `dst` may use the same underlying buffer. +// `src` and `dst` may use the same underlying buffer (but keep in mind +// that if this returns an error, it will leave both `src` and `dst` in +// an unspecified state because they are using the same underlying buffer.) +// `dst` must have at least as much space as `src`. // ---------------------------------------------------------------------- bool CUnescapeInternal(absl::string_view src, bool leave_nulls_escaped, - std::string* absl_nonnull dst, + char* absl_nonnull dst, size_t* absl_nonnull dst_size, std::string* absl_nullable error) { - strings_internal::STLStringResizeUninitialized(dst, src.size()); - absl::string_view::size_type p = 0; // Current src position. - std::string::size_type d = 0; // Current dst position. + size_t d = 0; // Current dst position. // When unescaping in-place, skip any prefix that does not have escaping. 
- if (src.data() == dst->data()) { + if (src.data() == dst) { while (p < src.size() && src[p] != '\\') p++, d++; } while (p < src.size()) { if (src[p] != '\\') { - (*dst)[d++] = src[p++]; + dst[d++] = src[p++]; } else { if (++p >= src.size()) { // skip past the '\\' if (error != nullptr) { @@ -110,17 +111,19 @@ return false; } switch (src[p]) { - case 'a': (*dst)[d++] = '\a'; break; - case 'b': (*dst)[d++] = '\b'; break; - case 'f': (*dst)[d++] = '\f'; break; - case 'n': (*dst)[d++] = '\n'; break; - case 'r': (*dst)[d++] = '\r'; break; - case 't': (*dst)[d++] = '\t'; break; - case 'v': (*dst)[d++] = '\v'; break; - case '\\': (*dst)[d++] = '\\'; break; - case '?': (*dst)[d++] = '\?'; break; - case '\'': (*dst)[d++] = '\''; break; - case '"': (*dst)[d++] = '\"'; break; + // clang-format off + case 'a': dst[d++] = '\a'; break; + case 'b': dst[d++] = '\b'; break; + case 'f': dst[d++] = '\f'; break; + case 'n': dst[d++] = '\n'; break; + case 'r': dst[d++] = '\r'; break; + case 't': dst[d++] = '\t'; break; + case 'v': dst[d++] = '\v'; break; + case '\\': dst[d++] = '\\'; break; + case '?': dst[d++] = '\?'; break; + case '\'': dst[d++] = '\''; break; + case '"': dst[d++] = '\"'; break; + // clang-format on case '0': case '1': case '2': @@ -147,13 +150,13 @@ } if ((ch == 0) && leave_nulls_escaped) { // Copy the escape sequence for the null character - (*dst)[d++] = '\\'; + dst[d++] = '\\'; while (octal_start <= p) { - (*dst)[d++] = src[octal_start++]; + dst[d++] = src[octal_start++]; } break; } - (*dst)[d++] = static_cast<char>(ch); + dst[d++] = static_cast<char>(ch); break; } case 'x': @@ -187,13 +190,13 @@ } if ((ch == 0) && leave_nulls_escaped) { // Copy the escape sequence for the null character - (*dst)[d++] = '\\'; + dst[d++] = '\\'; while (hex_start <= p) { - (*dst)[d++] = src[hex_start++]; + dst[d++] = src[hex_start++]; } break; } - (*dst)[d++] = static_cast<char>(ch); + dst[d++] = static_cast<char>(ch); break; } case 'u': { @@ -220,16 +223,16 @@ } if ((rune == 0) 
&& leave_nulls_escaped) { // Copy the escape sequence for the null character - (*dst)[d++] = '\\'; + dst[d++] = '\\'; while (hex_start <= p) { - (*dst)[d++] = src[hex_start++]; + dst[d++] = src[hex_start++]; } break; } if (IsSurrogate(rune, src.substr(hex_start, 5), error)) { return false; } - d += strings_internal::EncodeUTF8Char(dst->data() + d, rune); + d += strings_internal::EncodeUTF8Char(dst + d, rune); break; } case 'U': { @@ -269,17 +272,17 @@ } if ((rune == 0) && leave_nulls_escaped) { // Copy the escape sequence for the null character - (*dst)[d++] = '\\'; + dst[d++] = '\\'; // U00000000 while (hex_start <= p) { - (*dst)[d++] = src[hex_start++]; + dst[d++] = src[hex_start++]; } break; } if (IsSurrogate(rune, src.substr(hex_start, 9), error)) { return false; } - d += strings_internal::EncodeUTF8Char(dst->data() + d, rune); + d += strings_internal::EncodeUTF8Char(dst + d, rune); break; } default: { @@ -293,7 +296,7 @@ } } - dst->erase(d); + *dst_size = d; return true; } @@ -890,9 +893,35 @@ // // See CUnescapeInternal() for implementation details. // ---------------------------------------------------------------------- + bool CUnescape(absl::string_view source, std::string* absl_nonnull dest, std::string* absl_nullable error) { - return CUnescapeInternal(source, kUnescapeNulls, dest, error); + bool success; + + // `CUnescape()` allows for in-place unescaping, which means `source` may + // alias `*dest`. However, absl::StringResizeAndOverwrite() invalidates all + // iterators, pointers, and references into the string, regardless whether + // reallocation occurs. Therefore we need to avoid calling + // absl::StringResizeAndOverwrite() when `source.data() == + // dest->data()`. Comparing the sizes is sufficient to cover this case. 
+ if (dest->size() >= source.size()) { + size_t dest_size = 0; + success = CUnescapeInternal(source, kUnescapeNulls, dest->data(), + &dest_size, error); + ABSL_ASSERT(dest_size <= dest->size()); + dest->erase(dest_size); + } else { + StringResizeAndOverwrite( + *dest, source.size(), + [source, error, &success](char* buf, size_t buf_size) { + size_t dest_size = 0; + success = + CUnescapeInternal(source, kUnescapeNulls, buf, &dest_size, error); + ABSL_ASSERT(dest_size <= buf_size); + return dest_size; + }); + } + return success; } std::string CEscape(absl::string_view src) {
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc index 1d58582..9b80f1f 100644 --- a/absl/synchronization/mutex.cc +++ b/absl/synchronization/mutex.cc
@@ -745,7 +745,8 @@ Mutex::~Mutex() { Dtor(); } #endif -#if !defined(NDEBUG) || defined(ABSL_HAVE_THREAD_SANITIZER) +#if !defined(NDEBUG) || defined(ABSL_HAVE_THREAD_SANITIZER) || \ + defined(ABSL_BUILD_DLL) void Mutex::Dtor() { if (kDebugMode) { this->ForgetDeadlockInfo();
diff --git a/absl/synchronization/mutex.h b/absl/synchronization/mutex.h index 10fafcb..3af74c2 100644 --- a/absl/synchronization/mutex.h +++ b/absl/synchronization/mutex.h
@@ -1139,12 +1139,13 @@ inline Mutex::~Mutex() { Dtor(); } #endif -#if defined(NDEBUG) && !defined(ABSL_HAVE_THREAD_SANITIZER) -// Use default (empty) destructor in release build for performance reasons. -// We need to mark both Dtor and ~Mutex as always inline for inconsistent -// builds that use both NDEBUG and !NDEBUG with dynamic libraries. In these -// cases we want the empty functions to dissolve entirely rather than being -// exported from dynamic libraries and potentially override the non-empty ones. +#if defined(NDEBUG) && !defined(ABSL_HAVE_THREAD_SANITIZER) && \ + !defined(ABSL_BUILD_DLL) +// Under NDEBUG and without TSAN, Dtor is normally fully inlined for +// performance. However, when building Abseil as a shared library +// (ABSL_BUILD_DLL), we must provide an out-of-line definition. This ensures the +// Mutex::Dtor symbol is exported from the DLL, maintaining ABI compatibility +// with clients that might be built in debug mode and thus expect the symbol. ABSL_ATTRIBUTE_ALWAYS_INLINE inline void Mutex::Dtor() {} #endif