diff --git a/CMake/AbseilDll.cmake b/CMake/AbseilDll.cmake index d490e65..c49d621 100644 --- a/CMake/AbseilDll.cmake +++ b/CMake/AbseilDll.cmake
@@ -128,6 +128,8 @@ "debugging/internal/address_is_readable.h" "debugging/internal/addresses.h" "debugging/internal/bounded_utf8_length_sequence.h" + "debugging/internal/borrowed_fixup_buffer.h" + "debugging/internal/borrowed_fixup_buffer.cc" "debugging/internal/decode_rust_punycode.cc" "debugging/internal/decode_rust_punycode.h" "debugging/internal/demangle.cc"
diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc index 1b8204b..4adf591 100644 --- a/absl/container/internal/hashtablez_sampler.cc +++ b/absl/container/internal/hashtablez_sampler.cc
@@ -81,6 +81,7 @@ capacity.store(0, std::memory_order_relaxed); size.store(0, std::memory_order_relaxed); num_erases.store(0, std::memory_order_relaxed); + num_insert_hits.store(0, std::memory_order_relaxed); num_rehashes.store(0, std::memory_order_relaxed); max_probe_length.store(0, std::memory_order_relaxed); total_probe_length.store(0, std::memory_order_relaxed);
diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h index 5c59a9e..163b18a 100644 --- a/absl/container/internal/hashtablez_sampler.h +++ b/absl/container/internal/hashtablez_sampler.h
@@ -82,6 +82,7 @@ std::atomic<size_t> capacity; std::atomic<size_t> size; std::atomic<size_t> num_erases; + std::atomic<size_t> num_insert_hits; std::atomic<size_t> num_rehashes; std::atomic<size_t> max_probe_length; std::atomic<size_t> total_probe_length; @@ -111,6 +112,16 @@ void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length); +// This is inline to avoid calling convention overhead for an otherwise +// lightweight operation. +inline void RecordInsertHitSlow(HashtablezInfo* info) { + // We avoid fetch_add since no other thread should be mutating the table + // simultaneously without synchronization. + info->num_insert_hits.store( + info->num_insert_hits.load(std::memory_order_relaxed) + 1, + std::memory_order_relaxed); +} + void RecordReservationSlow(HashtablezInfo* info, size_t target_capacity); void RecordClearedReservationSlow(HashtablezInfo* info); @@ -184,6 +195,11 @@ RecordEraseSlow(info_); } + inline void RecordInsertHit() { + if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; + RecordInsertHitSlow(info_); + } + friend inline void swap(HashtablezInfoHandle& lhs, HashtablezInfoHandle& rhs) { std::swap(lhs.info_, rhs.info_); @@ -210,6 +226,7 @@ inline void RecordInsertMiss(size_t /*hash*/, size_t /*distance_from_desired*/) {} inline void RecordErase() {} + inline void RecordInsertHit() {} friend inline void swap(HashtablezInfoHandle& /*lhs*/, HashtablezInfoHandle& /*rhs*/) {}
diff --git a/absl/container/internal/hashtablez_sampler_test.cc b/absl/container/internal/hashtablez_sampler_test.cc index 80fe3cf..cbd8edc 100644 --- a/absl/container/internal/hashtablez_sampler_test.cc +++ b/absl/container/internal/hashtablez_sampler_test.cc
@@ -99,6 +99,7 @@ EXPECT_EQ(info.capacity.load(), 0); EXPECT_EQ(info.size.load(), 0); EXPECT_EQ(info.num_erases.load(), 0); + EXPECT_EQ(info.num_insert_hits.load(), 0); EXPECT_EQ(info.num_rehashes.load(), 0); EXPECT_EQ(info.max_probe_length.load(), 0); EXPECT_EQ(info.total_probe_length.load(), 0); @@ -116,6 +117,7 @@ info.capacity.store(1, std::memory_order_relaxed); info.size.store(1, std::memory_order_relaxed); info.num_erases.store(1, std::memory_order_relaxed); + info.num_insert_hits.store(1, std::memory_order_relaxed); info.max_probe_length.store(1, std::memory_order_relaxed); info.total_probe_length.store(1, std::memory_order_relaxed); info.hashes_bitwise_or.store(1, std::memory_order_relaxed); @@ -131,6 +133,7 @@ EXPECT_EQ(info.capacity.load(), 0); EXPECT_EQ(info.size.load(), 0); EXPECT_EQ(info.num_erases.load(), 0); + EXPECT_EQ(info.num_insert_hits.load(), 0); EXPECT_EQ(info.num_rehashes.load(), 0); EXPECT_EQ(info.max_probe_length.load(), 0); EXPECT_EQ(info.total_probe_length.load(), 0); @@ -221,6 +224,25 @@ EXPECT_EQ(info.soo_capacity, 1); } +TEST(HashtablezInfoTest, RecordInsertHit) { + const int64_t test_stride = 31; + const size_t test_element_size = 29; + const size_t test_key_size = 27; + const size_t test_value_size = 25; + + HashtablezInfo info; + absl::MutexLock l(info.init_mu); + info.PrepareForSampling(test_stride, test_element_size, + /*key_size=*/test_key_size, + /*value_size=*/test_value_size, + /*soo_capacity_value=*/1); + EXPECT_EQ(info.num_insert_hits.load(), 0); + RecordInsertHitSlow(&info); + EXPECT_EQ(info.num_insert_hits.load(), 1); + RecordInsertHitSlow(&info); + EXPECT_EQ(info.num_insert_hits.load(), 2); +} + TEST(HashtablezInfoTest, RecordRehash) { const int64_t test_stride = 33; const size_t test_element_size = 31;
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h index faa7880..31b117e 100644 --- a/absl/container/internal/raw_hash_set.h +++ b/absl/container/internal/raw_hash_set.h
@@ -3173,6 +3173,7 @@ } if (!empty()) { if (equal_to(key, single_slot())) { + common().infoz().RecordInsertHit(); return {single_iterator(), false}; } } @@ -3204,6 +3205,7 @@ if (ABSL_PREDICT_TRUE(equal_to(key, slot_array() + seq.offset(i)))) { index = seq.offset(i); inserted = false; + common().infoz().RecordInsertHit(); return; } }
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc index 499e966..8c80463 100644 --- a/absl/container/internal/raw_hash_set_test.cc +++ b/absl/container/internal/raw_hash_set_test.cc
@@ -2790,6 +2790,7 @@ absl::flat_hash_set<const HashtablezInfo*> preexisting_info(10); absl::flat_hash_map<size_t, int> observed_checksums(10); absl::flat_hash_map<ssize_t, int> reservations(10); + absl::flat_hash_map<std::pair<size_t, size_t>, int> hit_misses(10); start_size += sampler.Iterate([&](const HashtablezInfo& info) { preexisting_info.insert(&info); @@ -2802,6 +2803,8 @@ const bool do_reserve = (i % 10 > 5); const bool do_rehash = !do_reserve && (i % 10 > 0); + const bool do_first_insert_hit = i % 2 == 0; + const bool do_second_insert_hit = i % 4 == 0; if (do_reserve) { // Don't reserve on all tables. @@ -2809,7 +2812,14 @@ } tables.back().insert(1); + if (do_first_insert_hit) { + tables.back().insert(1); + tables.back().insert(1); + } tables.back().insert(i % 5); + if (do_second_insert_hit) { + tables.back().insert(i % 5); + } if (do_rehash) { // Rehash some other tables. @@ -2823,6 +2833,10 @@ observed_checksums[info.hashes_bitwise_xor.load( std::memory_order_relaxed)]++; reservations[info.max_reserve.load(std::memory_order_relaxed)]++; + hit_misses[std::make_pair( + info.num_insert_hits.load(std::memory_order_relaxed), + info.size.load(std::memory_order_relaxed))]++; + EXPECT_EQ(info.inline_element_size, sizeof(typename TypeParam::value_type)); EXPECT_EQ(info.key_size, sizeof(typename TypeParam::key_type)); EXPECT_EQ(info.value_size, sizeof(typename TypeParam::value_type)); @@ -2850,6 +2864,21 @@ EXPECT_NEAR((100 * count) / static_cast<double>(tables.size()), 0.1, 0.05) << reservation; } + + EXPECT_THAT(hit_misses, testing::SizeIs(6)); + const double sampled_tables = end_size - start_size; + // i % 20: { 1, 11 } + EXPECT_NEAR((hit_misses[{1, 1}] / sampled_tables), 0.10, 0.02); + // i % 20: { 6 } + EXPECT_NEAR((hit_misses[{3, 1}] / sampled_tables), 0.05, 0.02); + // i % 20: { 0, 4, 8, 12 } + EXPECT_NEAR((hit_misses[{3, 2}] / sampled_tables), 0.20, 0.02); + // i % 20: { 2, 10, 14, 18 } + EXPECT_NEAR((hit_misses[{2, 2}] / sampled_tables), 0.20, 0.02); + 
// i % 20: { 16 } + EXPECT_NEAR((hit_misses[{4, 1}] / sampled_tables), 0.05, 0.02); + // i % 20: { 3, 5, 7, 9, 13, 15, 17, 19 } + EXPECT_NEAR((hit_misses[{0, 2}] / sampled_tables), 0.40, 0.02); } std::vector<const HashtablezInfo*> SampleSooMutation(
diff --git a/absl/debugging/BUILD.bazel b/absl/debugging/BUILD.bazel index 7cc053e..aad5e28 100644 --- a/absl/debugging/BUILD.bazel +++ b/absl/debugging/BUILD.bazel
@@ -36,6 +36,33 @@ licenses(["notice"]) cc_library( + name = "borrowed_fixup_buffer", + srcs = ["internal/borrowed_fixup_buffer.cc"], + hdrs = ["internal/borrowed_fixup_buffer.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + "//absl/base:config", + "//absl/base:core_headers", + "//absl/base:malloc_internal", + "//absl/hash", + ], +) + +cc_test( + name = "borrowed_fixup_buffer_test", + srcs = ["internal/borrowed_fixup_buffer_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":borrowed_fixup_buffer", + "//absl/base:config", + "@googletest//:gtest", + "@googletest//:gtest_main", + ], +) + +cc_library( name = "stacktrace", srcs = [ "internal/stacktrace_aarch64-inl.inc", @@ -54,6 +81,7 @@ copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ + ":borrowed_fixup_buffer", ":debugging_internal", "//absl/base:config", "//absl/base:core_headers", @@ -69,6 +97,7 @@ copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ + ":borrowed_fixup_buffer", ":stacktrace", "//absl/base:config", "//absl/base:core_headers", @@ -448,6 +477,7 @@ ":stacktrace", "//absl/base:config", "//absl/base:core_headers", + "//absl/cleanup", "@google_benchmark//:benchmark_main", ], )
diff --git a/absl/debugging/CMakeLists.txt b/absl/debugging/CMakeLists.txt index d8249fe..ab3a795 100644 --- a/absl/debugging/CMakeLists.txt +++ b/absl/debugging/CMakeLists.txt
@@ -18,6 +18,38 @@ absl_cc_library( NAME + borrowed_fixup_buffer + SRCS + "internal/borrowed_fixup_buffer.cc" + HDRS + "internal/borrowed_fixup_buffer.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::hash + absl::malloc_internal + PUBLIC +) + +absl_cc_test( + NAME + borrowed_fixup_buffer_test + SRCS + "internal/borrowed_fixup_buffer_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::borrowed_fixup_buffer + absl::config + GTest::gmock_main +) + +absl_cc_library( + NAME stacktrace HDRS "stacktrace.h" @@ -38,6 +70,7 @@ LINKOPTS $<$<BOOL:${EXECINFO_LIBRARY}>:${EXECINFO_LIBRARY}> DEPS + absl::borrowed_fixup_buffer absl::debugging_internal absl::config absl::core_headers
diff --git a/absl/debugging/internal/borrowed_fixup_buffer.cc b/absl/debugging/internal/borrowed_fixup_buffer.cc new file mode 100644 index 0000000..dae78a7 --- /dev/null +++ b/absl/debugging/internal/borrowed_fixup_buffer.cc
@@ -0,0 +1,118 @@ +// Copyright 2025 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/debugging/internal/borrowed_fixup_buffer.h" + +#include <assert.h> +#include <limits.h> +#include <stddef.h> +#include <stdint.h> + +#include <atomic> +#include <iterator> + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/internal/low_level_alloc.h" +#include "absl/hash/hash.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace internal_stacktrace { + +// A buffer for holding fix-up information for stack traces of common sizes. +struct BorrowedFixupBuffer::FixupStackBuffer { + static constexpr size_t kMaxStackElements = 128; // Can be reduced if needed + std::atomic_flag in_use{}; + uintptr_t frames[kMaxStackElements]; + int sizes[kMaxStackElements]; + + ABSL_CONST_INIT static FixupStackBuffer g_instances[kNumStaticBuffers]; +}; + +ABSL_CONST_INIT BorrowedFixupBuffer::FixupStackBuffer + BorrowedFixupBuffer::FixupStackBuffer::g_instances[kNumStaticBuffers] = {}; + +BorrowedFixupBuffer::~BorrowedFixupBuffer() { + if (borrowed_) { + Unlock(); + } else { + base_internal::LowLevelAlloc::Free(frames_); + } +} + +BorrowedFixupBuffer::BorrowedFixupBuffer(size_t length) { + FixupStackBuffer* fixup_buffer = + 0 < length && length <= FixupStackBuffer::kMaxStackElements ? 
TryLock() + : nullptr; + borrowed_ = fixup_buffer != nullptr; + if (borrowed_) { + InitViaBorrow(fixup_buffer); + } else { + InitViaAllocation(length); + } +} + +void BorrowedFixupBuffer::InitViaBorrow(FixupStackBuffer* borrowed_buffer) { + assert(borrowed_); + frames_ = borrowed_buffer->frames; + sizes_ = borrowed_buffer->sizes; +} + +void BorrowedFixupBuffer::InitViaAllocation(size_t length) { + static_assert(alignof(decltype(*frames_)) >= alignof(decltype(*sizes_)), + "contiguous layout assumes decreasing alignment, otherwise " + "padding may be needed in the middle"); + assert(!borrowed_); + + base_internal::InitSigSafeArena(); + void* buf = base_internal::LowLevelAlloc::AllocWithArena( + length * (sizeof(*frames_) + sizeof(*sizes_)), + base_internal::SigSafeArena()); + + if (buf == nullptr) { + frames_ = nullptr; + sizes_ = nullptr; + return; + } + + frames_ = new (buf) uintptr_t[length]; + sizes_ = new (static_cast<void*>(static_cast<unsigned char*>(buf) + + length * sizeof(*frames_))) int[length]; +} + +BorrowedFixupBuffer::FixupStackBuffer* BorrowedFixupBuffer::Find() { + size_t i = absl::Hash<const void*>()(this) % + std::size(FixupStackBuffer::g_instances); + return &FixupStackBuffer::g_instances[i]; +} + +[[nodiscard]] BorrowedFixupBuffer::FixupStackBuffer* +BorrowedFixupBuffer::TryLock() { + FixupStackBuffer* instance = Find(); + // Use memory_order_acquire to ensure that no reads and writes on the borrowed + // buffer are reordered before the borrowing. + return !instance->in_use.test_and_set(std::memory_order_acquire) ? instance + : nullptr; +} + +void BorrowedFixupBuffer::Unlock() { + // Use memory_order_release to ensure that no reads and writes on the borrowed + // buffer are reordered after the borrowing. + Find()->in_use.clear(std::memory_order_release); +} + +} // namespace internal_stacktrace +ABSL_NAMESPACE_END +} // namespace absl
diff --git a/absl/debugging/internal/borrowed_fixup_buffer.h b/absl/debugging/internal/borrowed_fixup_buffer.h new file mode 100644 index 0000000..c5ea7a3 --- /dev/null +++ b/absl/debugging/internal/borrowed_fixup_buffer.h
@@ -0,0 +1,74 @@ +// Copyright 2025 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_DEBUGGING_INTERNAL_BORROWED_FIXUP_BUFFER_H_ +#define ABSL_DEBUGGING_INTERNAL_BORROWED_FIXUP_BUFFER_H_ + +#include <stddef.h> +#include <stdint.h> +#include <stdlib.h> + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace internal_stacktrace { + +// An RAII type that temporarily acquires a buffer for stack trace fix-ups from +// a pool of preallocated buffers, or attempts to allocate a new buffer if no +// such buffer is available. +// When destroyed, returns the buffer to the pool if it borrowed successfully, +// otherwise deallocates any previously allocated buffer. +class BorrowedFixupBuffer { + public: + static constexpr size_t kNumStaticBuffers = 64; + ~BorrowedFixupBuffer(); + + // The number of frames to allocate space for. Note that allocations can fail. + explicit BorrowedFixupBuffer(size_t length); + + uintptr_t* frames() const { return frames_; } + int* sizes() const { return sizes_; } + + private: + uintptr_t* frames_; + int* sizes_; + + // Have we borrowed a pre-existing buffer (vs. allocated our own)? + bool borrowed_; + + struct FixupStackBuffer; + + void InitViaBorrow(FixupStackBuffer* borrowed_buffer); + void InitViaAllocation(size_t length); + + // Returns a non-null pointer to a buffer that could be potentially borrowed. 
+ FixupStackBuffer* Find(); + + // Attempts to opportunistically borrow a small buffer in a thread- and + // signal-safe manner. Returns nullptr on failure. + [[nodiscard]] FixupStackBuffer* TryLock(); + + // Releases the borrowed buffer back to the pool. + void Unlock(); + + BorrowedFixupBuffer(const BorrowedFixupBuffer&) = delete; + BorrowedFixupBuffer& operator=(const BorrowedFixupBuffer&) = delete; +}; + +} // namespace internal_stacktrace +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_DEBUGGING_INTERNAL_BORROWED_FIXUP_BUFFER_H_
diff --git a/absl/debugging/internal/borrowed_fixup_buffer_test.cc b/absl/debugging/internal/borrowed_fixup_buffer_test.cc new file mode 100644 index 0000000..a856c5d --- /dev/null +++ b/absl/debugging/internal/borrowed_fixup_buffer_test.cc
@@ -0,0 +1,97 @@ +// Copyright 2025 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/debugging/internal/borrowed_fixup_buffer.h" + +#include <stddef.h> +#include <stdint.h> + +#include <algorithm> +#include <functional> +#include <memory> + +#include "gtest/gtest.h" +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace internal_stacktrace { +namespace { + +TEST(BorrowedFixupBuffer, ProperReuse) { + uintptr_t first_borrowed_frame = 0; + uintptr_t first_borrowed_size = 0; + + // Ensure that we borrow the same buffer each time, indicating proper reuse. + // Disable loop unrolling. We need all iterations to match exactly, to coax + // reuse of the same underlying buffer. 
+#if defined(__GNUC__) || defined(__clang__) +#pragma GCC unroll 1 // <= 1 disables unrolling +#endif + for (int i = 0; i < 100; ++i) { + BorrowedFixupBuffer buf0(0); + EXPECT_EQ(buf0.frames(), nullptr); + EXPECT_EQ(buf0.sizes(), nullptr); + + BorrowedFixupBuffer buf1(1); + EXPECT_NE(buf1.frames(), nullptr); + EXPECT_NE(buf1.sizes(), nullptr); + if (first_borrowed_frame == 0) { + first_borrowed_frame = reinterpret_cast<uintptr_t>(buf1.frames()); + } else { + EXPECT_EQ(reinterpret_cast<uintptr_t>(buf1.frames()), + first_borrowed_frame); + } + if (first_borrowed_size == 0) { + first_borrowed_size = reinterpret_cast<uintptr_t>(buf1.sizes()); + } else { + EXPECT_EQ(reinterpret_cast<uintptr_t>(buf1.sizes()), first_borrowed_size); + } + + BorrowedFixupBuffer buf2(2); + EXPECT_NE(buf2.frames(), buf1.frames()); + EXPECT_NE(buf2.sizes(), buf1.sizes()); + EXPECT_NE(buf2.frames(), nullptr); + EXPECT_NE(buf2.sizes(), nullptr); + } +} + +TEST(BorrowedFixupBuffer, NoOverlap) { + using BufferPtr = std::unique_ptr<BorrowedFixupBuffer>; + static constexpr std::less<const void*> less; + static constexpr size_t kBufLen = 5; + static constexpr size_t kNumBuffers = + BorrowedFixupBuffer::kNumStaticBuffers * 37 + 1; + + auto bufs = std::make_unique<BufferPtr[]>(kNumBuffers); + for (size_t i = 0; i < kNumBuffers; ++i) { + bufs[i] = std::make_unique<BorrowedFixupBuffer>(kBufLen); + } + + std::sort(bufs.get(), bufs.get() + kNumBuffers, + [](const BufferPtr& a, const BufferPtr& b) { + return less(a->frames(), b->frames()); + }); + + // Verify there are no overlaps + for (size_t i = 1; i < kNumBuffers; ++i) { + EXPECT_FALSE(less(bufs[i]->frames(), bufs[i - 1]->frames() + kBufLen)); + EXPECT_FALSE(less(bufs[i]->sizes(), bufs[i - 1]->sizes() + kBufLen)); + } +} + +} // namespace +} // namespace internal_stacktrace +ABSL_NAMESPACE_END +} // namespace absl
diff --git a/absl/debugging/stacktrace.cc b/absl/debugging/stacktrace.cc index acc8b66..aee065d 100644 --- a/absl/debugging/stacktrace.cc +++ b/absl/debugging/stacktrace.cc
@@ -42,14 +42,12 @@ #include <algorithm> #include <atomic> -#include <iterator> -#include <type_traits> #include "absl/base/attributes.h" #include "absl/base/config.h" -#include "absl/base/internal/low_level_alloc.h" #include "absl/base/optimization.h" #include "absl/base/port.h" +#include "absl/debugging/internal/borrowed_fixup_buffer.h" #include "absl/debugging/internal/stacktrace_config.h" #if defined(ABSL_STACKTRACE_INL_HEADER) @@ -75,37 +73,14 @@ typedef int (*Unwinder)(void**, int*, int, int, const void*, int*); std::atomic<Unwinder> custom; -constexpr size_t kMinPageSize = 4096; - -struct FixupBuffer { - static constexpr size_t kMaxStackElements = 128; // Can be reduced if needed - uintptr_t frames[kMaxStackElements]; - int sizes[kMaxStackElements]; -}; -static_assert(std::is_trivially_default_constructible_v<FixupBuffer>); -static_assert(sizeof(FixupBuffer) < kMinPageSize / 2, - "buffer size should no larger than a small fraction of a page, " - "to avoid stack overflows"); - template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT> -ABSL_ATTRIBUTE_ALWAYS_INLINE inline int Unwind( - void** result, uintptr_t* frames, int* sizes, size_t max_depth, - int skip_count, const void* uc, int* min_dropped_frames, - FixupBuffer* fixup_buffer /* if NULL, fixups are skipped */) { - // Allocate a buffer dynamically, using the signal-safe allocator. - static constexpr auto allocate = [](size_t num_bytes) -> void* { - base_internal::InitSigSafeArena(); - return base_internal::LowLevelAlloc::AllocWithArena( - num_bytes, base_internal::SigSafeArena()); - }; - - // We only need to free the buffers if we allocated them with the signal-safe - // allocator. 
- bool must_free_frames = false; - bool must_free_sizes = false; - - bool unwind_with_fixup = - fixup_buffer != nullptr && internal_stacktrace::ShouldFixUpStack(); +ABSL_ATTRIBUTE_ALWAYS_INLINE inline int Unwind(void** result, uintptr_t* frames, + int* sizes, size_t max_depth, + int skip_count, const void* uc, + int* min_dropped_frames, + bool unwind_with_fixup = true) { + unwind_with_fixup = + unwind_with_fixup && internal_stacktrace::ShouldFixUpStack(); #ifdef _WIN32 if (unwind_with_fixup) { @@ -117,29 +92,17 @@ } #endif - if (unwind_with_fixup) { - // Some implementations of FixUpStack may need to be passed frame - // information from Unwind, even if the caller doesn't need that - // information. We allocate the necessary buffers for such implementations - // here. - - if (frames == nullptr) { - if (max_depth <= std::size(fixup_buffer->frames)) { - frames = fixup_buffer->frames; - } else { - frames = static_cast<uintptr_t*>(allocate(max_depth * sizeof(*frames))); - must_free_frames = true; - } - } - - if (sizes == nullptr) { - if (max_depth <= std::size(fixup_buffer->sizes)) { - sizes = fixup_buffer->sizes; - } else { - sizes = static_cast<int*>(allocate(max_depth * sizeof(*sizes))); - must_free_sizes = true; - } - } + // Some implementations of FixUpStack may need to be passed frame + // information from Unwind, even if the caller doesn't need that + // information. We allocate the necessary buffers for such implementations + // here. + const internal_stacktrace::BorrowedFixupBuffer fixup_buffer( + unwind_with_fixup ? 
max_depth : 0); + if (frames == nullptr) { + frames = fixup_buffer.frames(); + } + if (sizes == nullptr) { + sizes = fixup_buffer.sizes(); } Unwinder g = custom.load(std::memory_order_acquire); @@ -167,14 +130,6 @@ internal_stacktrace::FixUpStack(result, frames, sizes, max_depth, size); } - if (must_free_sizes) { - base_internal::LowLevelAlloc::Free(sizes); - } - - if (must_free_frames) { - base_internal::LowLevelAlloc::Free(frames); - } - ABSL_BLOCK_TAIL_CALL_OPTIMIZATION(); return static_cast<int>(size); } @@ -184,10 +139,9 @@ ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int internal_stacktrace::GetStackFrames(void** result, uintptr_t* frames, int* sizes, int max_depth, int skip_count) { - FixupBuffer fixup_stack_buf; return Unwind<true, false>(result, frames, sizes, static_cast<size_t>(max_depth), skip_count, - nullptr, nullptr, &fixup_stack_buf); + nullptr, nullptr); } ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int @@ -195,10 +149,9 @@ int* sizes, int max_depth, int skip_count, const void* uc, int* min_dropped_frames) { - FixupBuffer fixup_stack_buf; return Unwind<true, true>(result, frames, sizes, static_cast<size_t>(max_depth), skip_count, uc, - min_dropped_frames, &fixup_stack_buf); + min_dropped_frames); } ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int @@ -206,24 +159,22 @@ int skip_count) { return Unwind<false, false>(result, nullptr, nullptr, static_cast<size_t>(max_depth), skip_count, - nullptr, nullptr, nullptr); + nullptr, nullptr, /*unwind_with_fixup=*/false); } ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int GetStackTrace( void** result, int max_depth, int skip_count) { - FixupBuffer fixup_stack_buf; return Unwind<false, false>(result, nullptr, nullptr, static_cast<size_t>(max_depth), skip_count, - nullptr, nullptr, &fixup_stack_buf); + nullptr, nullptr); } ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int GetStackTraceWithContext(void** result, int max_depth, int skip_count, const void* uc, int* 
min_dropped_frames) { - FixupBuffer fixup_stack_buf; return Unwind<false, true>(result, nullptr, nullptr, static_cast<size_t>(max_depth), skip_count, uc, - min_dropped_frames, &fixup_stack_buf); + min_dropped_frames); } void SetStackUnwinder(Unwinder w) {
diff --git a/absl/debugging/stacktrace_benchmark.cc b/absl/debugging/stacktrace_benchmark.cc index 9360baf..eef9850 100644 --- a/absl/debugging/stacktrace_benchmark.cc +++ b/absl/debugging/stacktrace_benchmark.cc
@@ -12,12 +12,25 @@ // See the License for the specific language governing permissions and // limitations under the License. +#include <stddef.h> +#include <stdint.h> + #include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/base/optimization.h" +#include "absl/cleanup/cleanup.h" #include "absl/debugging/stacktrace.h" #include "benchmark/benchmark.h" +static bool g_enable_fixup = false; + +#if ABSL_HAVE_ATTRIBUTE_WEAK +// Override these weak symbols if possible. +bool absl::internal_stacktrace::ShouldFixUpStack() { return g_enable_fixup; } +void absl::internal_stacktrace::FixUpStack(void**, uintptr_t*, int*, size_t, + size_t&) {} +#endif + namespace absl { ABSL_NAMESPACE_BEGIN namespace { @@ -42,14 +55,24 @@ func(state, --x, depth); } +template <bool EnableFixup> void BM_GetStackTrace(benchmark::State& state) { + const Cleanup restore_state( + [prev = g_enable_fixup]() { g_enable_fixup = prev; }); + g_enable_fixup = EnableFixup; int depth = state.range(0); for (auto s : state) { func(state, depth, depth); } } -BENCHMARK(BM_GetStackTrace)->DenseRange(10, kMaxStackDepth, 10); +#if ABSL_HAVE_ATTRIBUTE_WEAK +auto& BM_GetStackTraceWithFixup = BM_GetStackTrace<true>; +BENCHMARK(BM_GetStackTraceWithFixup)->DenseRange(10, kMaxStackDepth, 10); +#endif + +auto& BM_GetStackTraceWithoutFixup = BM_GetStackTrace<false>; +BENCHMARK(BM_GetStackTraceWithoutFixup)->DenseRange(10, kMaxStackDepth, 10); } // namespace ABSL_NAMESPACE_END } // namespace absl
diff --git a/absl/profiling/BUILD.bazel b/absl/profiling/BUILD.bazel index 5afdb96..00571b2 100644 --- a/absl/profiling/BUILD.bazel +++ b/absl/profiling/BUILD.bazel
@@ -161,6 +161,7 @@ "//absl/container:btree", "//absl/container:flat_hash_map", "//absl/strings", + "//absl/strings:str_format", "//absl/types:span", ], )
diff --git a/absl/profiling/CMakeLists.txt b/absl/profiling/CMakeLists.txt index 4807f0d..6441dae 100644 --- a/absl/profiling/CMakeLists.txt +++ b/absl/profiling/CMakeLists.txt
@@ -109,6 +109,7 @@ absl::flat_hash_map absl::btree absl::strings + absl::str_format absl::span )
diff --git a/absl/profiling/hashtable.cc b/absl/profiling/hashtable.cc index 407c6b4..17148d1 100644 --- a/absl/profiling/hashtable.cc +++ b/absl/profiling/hashtable.cc
@@ -60,6 +60,7 @@ const auto capacity_id = builder.InternString("capacity"); const auto size_id = builder.InternString("size"); const auto num_erases_id = builder.InternString("num_erases"); + const auto num_insert_hits_id = builder.InternString("num_insert_hits"); const auto num_rehashes_id = builder.InternString("num_rehashes"); const auto max_probe_length_id = builder.InternString("max_probe_length"); const auto total_probe_length_id = builder.InternString("total_probe_length"); @@ -89,6 +90,9 @@ add_label(size_id, info.size.load(std::memory_order_relaxed)); add_label(num_erases_id, info.num_erases.load(std::memory_order_relaxed)); + // TODO(b/436909492): Revisit whether this value is useful. + add_label(num_insert_hits_id, + info.num_insert_hits.load(std::memory_order_relaxed)); add_label(num_rehashes_id, info.num_rehashes.load(std::memory_order_relaxed)); add_label(max_probe_length_id,
diff --git a/absl/profiling/internal/profile_builder.cc b/absl/profiling/internal/profile_builder.cc index f0bb40b..1ca0d3b 100644 --- a/absl/profiling/internal/profile_builder.cc +++ b/absl/profiling/internal/profile_builder.cc
@@ -32,6 +32,7 @@ #include "absl/base/internal/raw_logging.h" #include "absl/strings/escaping.h" #include "absl/strings/str_cat.h" +#include "absl/strings/str_format.h" #include "absl/types/span.h" namespace absl {
diff --git a/absl/status/status.cc b/absl/status/status.cc index 963dab6..f219933 100644 --- a/absl/status/status.cc +++ b/absl/status/status.cc
@@ -47,6 +47,10 @@ "absl::Status assumes it can use the bottom 2 bits of a StatusRep*."); std::string StatusCodeToString(StatusCode code) { + return std::string(absl::StatusCodeToStringView(code)); +} + +absl::string_view StatusCodeToStringView(StatusCode code) { switch (code) { case StatusCode::kOk: return "OK";
diff --git a/absl/status/status.h b/absl/status/status.h index 4516822..b26d072 100644 --- a/absl/status/status.h +++ b/absl/status/status.h
@@ -284,6 +284,11 @@ // Returns the name for the status code, or "" if it is an unknown value. std::string StatusCodeToString(StatusCode code); +// StatusCodeToStringView() +// +// Same as StatusCodeToString(), but returns a string_view. +absl::string_view StatusCodeToStringView(StatusCode code); + // operator<< // // Streams StatusCodeToString(code) to `os`.
diff --git a/absl/status/status_test.cc b/absl/status/status_test.cc index c3327ad..f6ac0c0 100644 --- a/absl/status/status_test.cc +++ b/absl/status/status_test.cc
@@ -39,6 +39,7 @@ std::ostringstream oss; oss << code; EXPECT_EQ(oss.str(), absl::StatusCodeToString(code)); + EXPECT_EQ(oss.str(), absl::StatusCodeToStringView(code)); } // This structure holds the details for testing a single error code,
diff --git a/absl/strings/str_split.h b/absl/strings/str_split.h index cf53ccf..29fa4f7 100644 --- a/absl/strings/str_split.h +++ b/absl/strings/str_split.h
@@ -382,7 +382,7 @@ // // v[0] == " a ", v[1] == " ", v[2] == "b" struct SkipWhitespace { bool operator()(absl::string_view sp) const { - sp = absl::StripAsciiWhitespace(sp); + sp = absl::StripLeadingAsciiWhitespace(sp); return !sp.empty(); } };
diff --git a/absl/strings/string_view.h b/absl/strings/string_view.h index 2f0a0fd..49df37c 100644 --- a/absl/strings/string_view.h +++ b/absl/strings/string_view.h
@@ -227,7 +227,7 @@ constexpr string_view( // NOLINT(runtime/explicit) const char* absl_nonnull str) : ptr_(str), length_(str ? StrlenInternal(str) : 0) { - assert(str != nullptr); + ABSL_HARDENING_ASSERT(str != nullptr); } // Constructor of a `string_view` from a `const char*` and length.
diff --git a/absl/synchronization/notification.h b/absl/synchronization/notification.h index 1ceffdb..12df31b 100644 --- a/absl/synchronization/notification.h +++ b/absl/synchronization/notification.h
@@ -52,7 +52,7 @@ #include <atomic> -#include "absl/base/attributes.h" +#include "absl/base/config.h" #include "absl/base/internal/tracing.h" #include "absl/synchronization/mutex.h" #include "absl/time/time.h"