diff --git a/absl/base/BUILD.bazel b/absl/base/BUILD.bazel
index 1eb8f09..f0e3e63 100644
--- a/absl/base/BUILD.bazel
+++ b/absl/base/BUILD.bazel
@@ -74,7 +74,10 @@
     hdrs = ["no_destructor.h"],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
-    deps = [":config"],
+    deps = [
+        ":config",
+        ":nullability",
+    ],
 )

 cc_library(
diff --git a/absl/base/CMakeLists.txt b/absl/base/CMakeLists.txt
index 4cfc228..09c622a 100644
--- a/absl/base/CMakeLists.txt
+++ b/absl/base/CMakeLists.txt
@@ -62,6 +62,7 @@
     "no_destructor.h"
   DEPS
     absl::config
+    absl::nullability
   COPTS
     ${ABSL_DEFAULT_COPTS}
 )
diff --git a/absl/base/no_destructor.h b/absl/base/no_destructor.h
index ab68913..7b46456 100644
--- a/absl/base/no_destructor.h
+++ b/absl/base/no_destructor.h
@@ -41,6 +41,7 @@ #include <utility> #include "absl/base/config.h" +#include "absl/base/nullability.h" namespace absl { ABSL_NAMESPACE_BEGIN @@ -140,11 +141,11 @@ // Pretend to be a smart pointer to T with deep constness. // Never returns a null pointer. T& operator*() { return *get(); } - T* operator->() { return get(); } - T* get() { return impl_.get(); } + absl::Nonnull<T*> operator->() { return get(); } + absl::Nonnull<T*> get() { return impl_.get(); } const T& operator*() const { return *get(); } - const T* operator->() const { return get(); } - const T* get() const { return impl_.get(); } + absl::Nonnull<const T*> operator->() const { return get(); } + absl::Nonnull<const T*> get() const { return impl_.get(); } private: class DirectImpl { @@ -152,8 +153,8 @@ template <typename... Args> explicit constexpr DirectImpl(Args&&... args) : value_(std::forward<Args>(args)...) {} - const T* get() const { return &value_; } - T* get() { return &value_; } + absl::Nonnull<const T*> get() const { return &value_; } + absl::Nonnull<T*> get() { return &value_; } private: T value_; @@ -165,14 +166,14 @@ explicit PlacementImpl(Args&&... args) { new (&space_) T(std::forward<Args>(args)...); } - const T* get() const { + absl::Nonnull<const T*> get() const { return Launder(reinterpret_cast<const T*>(&space_)); } - T* get() { return Launder(reinterpret_cast<T*>(&space_)); } + absl::Nonnull<T*> get() { return Launder(reinterpret_cast<T*>(&space_)); } private: template <typename P> - static P* Launder(P* p) { + static absl::Nonnull<P*> Launder(absl::Nonnull<P*> p) { #if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606L return std::launder(p); #elif ABSL_HAVE_BUILTIN(__builtin_launder)
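A minimal sketch of how the annotated accessors read at a call site, for context; `HelloSingleton` is an illustrative function, not part of this patch, and `absl::Nonnull<T*>` is annotation-only (it compiles to a plain `T*`):

```cpp
#include <string>

#include "absl/base/no_destructor.h"
#include "absl/base/nullability.h"

// NoDestructor always holds a live value, so the Nonnull annotation on
// get() can propagate directly into the caller's return type.
absl::Nonnull<const std::string*> HelloSingleton() {
  static const absl::NoDestructor<std::string> kHello("hello");
  return kHello.get();  // Never null.
}
```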
diff --git a/absl/cleanup/cleanup_test.cc b/absl/cleanup/cleanup_test.cc
index 46b8858..72d7ff2 100644
--- a/absl/cleanup/cleanup_test.cc
+++ b/absl/cleanup/cleanup_test.cc
@@ -48,7 +48,7 @@ explicit FunctorClass(Callback callback) : callback_(std::move(callback)) {} FunctorClass(FunctorClass&& other) - : callback_(absl::exchange(other.callback_, Callback())) {} + : callback_(std::exchange(other.callback_, Callback())) {} FunctorClass(const FunctorClass&) = delete;
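The `absl::exchange` → `std::exchange` swap here (and in the btree, compressed_tuple, and layout changes below) is mechanical since C++14. A standalone sketch of the move-constructor pattern being updated; `FunctorLike` is an illustrative name, not from the patch:

```cpp
#include <functional>
#include <utility>

struct FunctorLike {
  std::function<void()> callback_;

  explicit FunctorLike(std::function<void()> cb) : callback_(std::move(cb)) {}

  // std::exchange stores the replacement in `other.callback_` and returns
  // the old value, leaving the moved-from object empty but valid.
  FunctorLike(FunctorLike&& other) noexcept
      : callback_(std::exchange(other.callback_, std::function<void()>())) {}
};
```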
diff --git a/absl/container/flat_hash_set_test.cc b/absl/container/flat_hash_set_test.cc
index 9ce9267..b425bc5 100644
--- a/absl/container/flat_hash_set_test.cc
+++ b/absl/container/flat_hash_set_test.cc
@@ -181,15 +181,13 @@ } } -class PoisonInline { +class PoisonSoo { int64_t data_; public: - explicit PoisonInline(int64_t d) : data_(d) { - SanitizerPoisonObject(&data_); - } - PoisonInline(const PoisonInline& that) : PoisonInline(*that) {} - ~PoisonInline() { SanitizerUnpoisonObject(&data_); } + explicit PoisonSoo(int64_t d) : data_(d) { SanitizerPoisonObject(&data_); } + PoisonSoo(const PoisonSoo& that) : PoisonSoo(*that) {} + ~PoisonSoo() { SanitizerUnpoisonObject(&data_); } int64_t operator*() const { SanitizerUnpoisonObject(&data_); @@ -198,45 +196,56 @@ return ret; } template <typename H> - friend H AbslHashValue(H h, const PoisonInline& pi) { + friend H AbslHashValue(H h, const PoisonSoo& pi) { return H::combine(std::move(h), *pi); } - bool operator==(const PoisonInline& rhs) const { return **this == *rhs; } + bool operator==(const PoisonSoo& rhs) const { return **this == *rhs; } }; -// Tests that we don't touch the poison_ member of PoisonInline. -TEST(FlatHashSet, PoisonInline) { - PoisonInline a(0), b(1); - { // basic usage - flat_hash_set<PoisonInline> set; - set.insert(a); - EXPECT_THAT(set, UnorderedElementsAre(a)); - set.insert(b); - EXPECT_THAT(set, UnorderedElementsAre(a, b)); - set.erase(a); - EXPECT_THAT(set, UnorderedElementsAre(b)); - set.rehash(0); // shrink to inline - EXPECT_THAT(set, UnorderedElementsAre(b)); - } - { // test move constructor from inline to inline - flat_hash_set<PoisonInline> set; - set.insert(a); - flat_hash_set<PoisonInline> set2(std::move(set)); - EXPECT_THAT(set2, UnorderedElementsAre(a)); - } - { // test move assignment from inline to inline - flat_hash_set<PoisonInline> set, set2; - set.insert(a); - set2 = std::move(set); - EXPECT_THAT(set2, UnorderedElementsAre(a)); - } - { // test alloc move constructor from inline to inline - flat_hash_set<PoisonInline> set; - set.insert(a); - flat_hash_set<PoisonInline> set2(std::move(set), - std::allocator<PoisonInline>()); - EXPECT_THAT(set2, UnorderedElementsAre(a)); - } +TEST(FlatHashSet, PoisonSooBasic) { + PoisonSoo a(0), b(1); + flat_hash_set<PoisonSoo> set; + set.insert(a); + EXPECT_THAT(set, UnorderedElementsAre(a)); + set.insert(b); + EXPECT_THAT(set, UnorderedElementsAre(a, b)); + set.erase(a); + EXPECT_THAT(set, UnorderedElementsAre(b)); + set.rehash(0); // Shrink to SOO. + EXPECT_THAT(set, UnorderedElementsAre(b)); +} + +TEST(FlatHashSet, PoisonSooMoveConstructSooToSoo) { + PoisonSoo a(0); + flat_hash_set<PoisonSoo> set; + set.insert(a); + flat_hash_set<PoisonSoo> set2(std::move(set)); + EXPECT_THAT(set2, UnorderedElementsAre(a)); +} + +TEST(FlatHashSet, PoisonSooAllocMoveConstructSooToSoo) { + PoisonSoo a(0); + flat_hash_set<PoisonSoo> set; + set.insert(a); + flat_hash_set<PoisonSoo> set2(std::move(set), std::allocator<PoisonSoo>()); + EXPECT_THAT(set2, UnorderedElementsAre(a)); +} + +TEST(FlatHashSet, PoisonSooMoveAssignFullSooToEmptySoo) { + PoisonSoo a(0); + flat_hash_set<PoisonSoo> set, set2; + set.insert(a); + set2 = std::move(set); + EXPECT_THAT(set2, UnorderedElementsAre(a)); +} + +TEST(FlatHashSet, PoisonSooMoveAssignFullSooToFullSoo) { + PoisonSoo a(0), b(1); + flat_hash_set<PoisonSoo> set, set2; + set.insert(a); + set2.insert(b); + set2 = std::move(set); + EXPECT_THAT(set2, UnorderedElementsAre(a)); } TEST(FlatHashSet, FlatHashSetPolicyDestroyReturnsTrue) {
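The `PoisonSoo` tests rely on ASan manual poisoning to flag any byte-wise access to the stored value. A minimal sketch of that mechanism, assuming an ASan-instrumented build; the macros come from `<sanitizer/asan_interface.h>` and are no-ops without ASan:

```cpp
#include <sanitizer/asan_interface.h>

#include <cstdint>

int main() {
  int64_t data = 42;
  ASAN_POISON_MEMORY_REGION(&data, sizeof(data));
  // Any direct load or store of `data` here would produce an ASan report;
  // the tests above use the same trick to prove the hash table never touches
  // the slot bytes except through PoisonSoo's own member functions.
  ASAN_UNPOISON_MEMORY_REGION(&data, sizeof(data));
  return 0;
}
```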
diff --git a/absl/container/inlined_vector.h b/absl/container/inlined_vector.h
index 04e2c38..974b652 100644
--- a/absl/container/inlined_vector.h
+++ b/absl/container/inlined_vector.h
@@ -775,7 +775,20 @@ ABSL_HARDENING_ASSERT(pos >= begin()); ABSL_HARDENING_ASSERT(pos < end()); + // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102329#c2 + // It appears that GCC thinks that since `pos` is a const pointer and may + // point to uninitialized memory at this point, a warning should be + // issued. But `pos` is actually only used to compute an array index to + // write to. +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#pragma GCC diagnostic ignored "-Wuninitialized" +#endif return storage_.Erase(pos, pos + 1); +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif } // Overload of `InlinedVector::erase(...)` that erases every element in the
diff --git a/absl/container/inlined_vector_test.cc b/absl/container/inlined_vector_test.cc
index 5ecf88a..6954262 100644
--- a/absl/container/inlined_vector_test.cc
+++ b/absl/container/inlined_vector_test.cc
@@ -333,6 +333,57 @@ } } +// Erasing from a container of unique pointers should work fine, with no +// leaks, despite the fact that unique pointers are trivially relocatable but +// not trivially destructible. +// TODO(absl-team): Using unique_ptr here is technically correct, but +// a trivially relocatable struct would be less semantically confusing. +TEST(UniquePtr, EraseSingle) { + for (size_t size = 4; size < 16; ++size) { + absl::InlinedVector<std::unique_ptr<size_t>, 8> a; + for (size_t i = 0; i < size; ++i) { + a.push_back(std::make_unique<size_t>(i)); + } + a.erase(a.begin()); + ASSERT_THAT(a, SizeIs(size - 1)); + for (size_t i = 0; i < size - 1; ++i) { + ASSERT_THAT(a[i], Pointee(i + 1)); + } + a.erase(a.begin() + 2); + ASSERT_THAT(a, SizeIs(size - 2)); + ASSERT_THAT(a[0], Pointee(1)); + ASSERT_THAT(a[1], Pointee(2)); + for (size_t i = 2; i < size - 2; ++i) { + ASSERT_THAT(a[i], Pointee(i + 2)); + } + } +} + +// Erasing from a container of unique pointers should work fine, with no +// leaks, despite the fact that unique pointers are trivially relocatable but +// not trivially destructible. +// TODO(absl-team): Using unique_ptr here is technically correct, but +// a trivially relocatable struct would be less semantically confusing. +TEST(UniquePtr, EraseMulti) { + for (size_t size = 5; size < 16; ++size) { + absl::InlinedVector<std::unique_ptr<size_t>, 8> a; + for (size_t i = 0; i < size; ++i) { + a.push_back(std::make_unique<size_t>(i)); + } + a.erase(a.begin(), a.begin() + 2); + ASSERT_THAT(a, SizeIs(size - 2)); + for (size_t i = 0; i < size - 2; ++i) { + ASSERT_THAT(a[i], Pointee(i + 2)); + } + a.erase(a.begin() + 1, a.begin() + 3); + ASSERT_THAT(a, SizeIs(size - 4)); + ASSERT_THAT(a[0], Pointee(2)); + for (size_t i = 1; i < size - 4; ++i) { + ASSERT_THAT(a[i], Pointee(i + 4)); + } + } +} + // At the end of this test loop, the elements between [erase_begin, erase_end) // should have reference counts == 0, and all others elements should have // reference counts == 1.
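These tests target types that are trivially relocatable yet not trivially destructible, the combination exercised by the new erase fast path in `absl/container/internal/inlined_vector.h` below. A sketch of checking that combination; whether the trait reports true depends on compiler support for the relocatability builtin:

```cpp
#include <memory>
#include <type_traits>

#include "absl/meta/type_traits.h"

// unique_ptr is never trivially destructible (its destructor may delete)...
static_assert(!std::is_trivially_destructible<std::unique_ptr<int>>::value,
              "");

// ...but compilers with __is_trivially_relocatable (e.g. recent Clang)
// report it as trivially relocatable. Elsewhere the trait is conservatively
// false and erase falls back to the move-and-destroy path.
constexpr bool kUniquePtrRelocatable =
    absl::is_trivially_relocatable<std::unique_ptr<int>>::value;
```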
diff --git a/absl/container/internal/btree.h b/absl/container/internal/btree.h
index 91df57a..fd7860d 100644
--- a/absl/container/internal/btree.h
+++ b/absl/container/internal/btree.h
@@ -1407,9 +1407,9 @@ copy_or_move_values_in_order(other); } btree(btree &&other) noexcept - : root_(absl::exchange(other.root_, EmptyNode())), + : root_(std::exchange(other.root_, EmptyNode())), rightmost_(std::move(other.rightmost_)), - size_(absl::exchange(other.size_, 0u)) { + size_(std::exchange(other.size_, 0u)) { other.mutable_rightmost() = EmptyNode(); } btree(btree &&other, const allocator_type &alloc)
diff --git a/absl/container/internal/compressed_tuple.h b/absl/container/internal/compressed_tuple.h
index 59e70eb..f05a1fd 100644
--- a/absl/container/internal/compressed_tuple.h
+++ b/absl/container/internal/compressed_tuple.h
@@ -87,10 +87,10 @@ constexpr Storage() = default; template <typename V> explicit constexpr Storage(absl::in_place_t, V&& v) - : value(absl::forward<V>(v)) {} + : value(std::forward<V>(v)) {} constexpr const T& get() const& { return value; } T& get() & { return value; } - constexpr const T&& get() const&& { return absl::move(*this).value; } + constexpr const T&& get() const&& { return std::move(*this).value; } T&& get() && { return std::move(*this).value; } }; @@ -99,12 +99,11 @@ constexpr Storage() = default; template <typename V> - explicit constexpr Storage(absl::in_place_t, V&& v) - : T(absl::forward<V>(v)) {} + explicit constexpr Storage(absl::in_place_t, V&& v) : T(std::forward<V>(v)) {} constexpr const T& get() const& { return *this; } T& get() & { return *this; } - constexpr const T&& get() const&& { return absl::move(*this); } + constexpr const T&& get() const&& { return std::move(*this); } T&& get() && { return std::move(*this); } }; @@ -123,7 +122,7 @@ constexpr CompressedTupleImpl() = default; template <typename... Vs> explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args) - : Storage<Ts, I>(absl::in_place, absl::forward<Vs>(args))... {} + : Storage<Ts, I>(absl::in_place, std::forward<Vs>(args))... {} friend CompressedTuple<Ts...>; }; @@ -135,7 +134,7 @@ constexpr CompressedTupleImpl() = default; template <typename... Vs> explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args) - : Storage<Ts, I, false>(absl::in_place, absl::forward<Vs>(args))... {} + : Storage<Ts, I, false>(absl::in_place, std::forward<Vs>(args))... {} friend CompressedTuple<Ts...>; }; @@ -234,8 +233,8 @@ bool> = true> explicit constexpr CompressedTuple(First&& first, Vs&&... base) : CompressedTuple::CompressedTupleImpl(absl::in_place, - absl::forward<First>(first), - absl::forward<Vs>(base)...) {} + std::forward<First>(first), + std::forward<Vs>(base)...) {} template <int I> ElemT<I>& get() & { @@ -254,7 +253,7 @@ template <int I> constexpr const ElemT<I>&& get() const&& { - return absl::move(*this).StorageT<I>::get(); + return std::move(*this).StorageT<I>::get(); } };
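For context on the `Storage` specialization touched above: inheriting from `T` is what makes the tuple "compressed", since an empty `T` stored as a base contributes zero bytes via the empty-base optimization, whereas as a member it would occupy at least one. A standalone sketch with illustrative types:

```cpp
struct Empty {};  // e.g. a stateless hasher or comparator.

struct AsMember {
  Empty e;  // Occupies at least one byte, usually padded further.
  void* p;
};

struct AsBase : Empty {  // Empty-base optimization: the base takes no space.
  void* p;
};

static_assert(sizeof(AsBase) == sizeof(void*), "holds on major ABIs");
static_assert(sizeof(AsMember) > sizeof(AsBase), "the member costs bytes");
```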
diff --git a/absl/container/internal/compressed_tuple_test.cc b/absl/container/internal/compressed_tuple_test.cc
index da07baa..49818fb 100644
--- a/absl/container/internal/compressed_tuple_test.cc
+++ b/absl/container/internal/compressed_tuple_test.cc
@@ -16,6 +16,7 @@ #include <memory> #include <string> +#include <utility> #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -384,8 +385,8 @@ #if defined(__clang__) // An apparent bug in earlier versions of gcc claims these are ambiguous. - constexpr int x2m = absl::move(x.get<2>()).get<0>(); - constexpr CallType x3m = absl::move(x).get<3>().value(); + constexpr int x2m = std::move(x.get<2>()).get<0>(); + constexpr CallType x3m = std::move(x).get<3>().value(); EXPECT_EQ(x2m, 5); EXPECT_EQ(x3m, CallType::kConstMove); #endif
diff --git a/absl/container/internal/hash_policy_traits.h b/absl/container/internal/hash_policy_traits.h
index 86ffd1b..ec08794 100644
--- a/absl/container/internal/hash_policy_traits.h
+++ b/absl/container/internal/hash_policy_traits.h
@@ -168,6 +168,9 @@ #endif } + // Whether small object optimization is enabled. False by default. + static constexpr bool soo_enabled() { return soo_enabled_impl(Rank1{}); } + private: template <class Hash> struct HashElement { @@ -183,6 +186,18 @@ return Policy::apply(HashElement<Hash>{*static_cast<const Hash*>(hash_fn)}, Policy::element(static_cast<slot_type*>(slot))); } + + // Use go/ranked-overloads for dispatching. Rank1 is preferred. + struct Rank0 {}; + struct Rank1 : Rank0 {}; + + // Use auto -> decltype as an enabler. + template <class P = Policy> + static constexpr auto soo_enabled_impl(Rank1) -> decltype(P::soo_enabled()) { + return P::soo_enabled(); + } + + static constexpr bool soo_enabled_impl(Rank0) { return false; } }; } // namespace container_internal
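A self-contained sketch of the ranked-overload idiom used above: `Rank1` derives from `Rank0`, so overload resolution prefers the `Rank1` overload whenever SFINAE admits it and falls back to `Rank0` otherwise. `HasSoo`/`NoSoo` are hypothetical policies, not from the patch:

```cpp
struct Rank0 {};
struct Rank1 : Rank0 {};  // Preferred: exact match beats base conversion.

struct HasSoo {
  static constexpr bool soo_enabled() { return true; }
};
struct NoSoo {};  // No soo_enabled(), so the Rank1 overload drops out.

template <class P>
constexpr auto SooEnabledImpl(Rank1) -> decltype(P::soo_enabled()) {
  return P::soo_enabled();
}
template <class P>
constexpr bool SooEnabledImpl(Rank0) {
  return false;
}

static_assert(SooEnabledImpl<HasSoo>(Rank1{}), "picks the Rank1 overload");
static_assert(!SooEnabledImpl<NoSoo>(Rank1{}), "falls back to Rank0");
```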
diff --git a/absl/container/internal/inlined_vector.h b/absl/container/internal/inlined_vector.h
index 90a74dc..a157532 100644
--- a/absl/container/internal/inlined_vector.h
+++ b/absl/container/internal/inlined_vector.h
@@ -893,16 +893,30 @@ std::distance(ConstIterator<A>(storage_view.data), from)); SizeType<A> erase_end_index = erase_index + erase_size; - IteratorValueAdapter<A, MoveIterator<A>> move_values( - MoveIterator<A>(storage_view.data + erase_end_index)); + // Fast path: if the value type is trivially relocatable and we know + // the allocator doesn't do anything fancy, then we know it is legal for us to + // simply destroy the elements in the "erasure window" (which cannot throw) + // and then memcpy downward to close the window. + if (absl::is_trivially_relocatable<ValueType<A>>::value && + std::is_nothrow_destructible<ValueType<A>>::value && + std::is_same<A, std::allocator<ValueType<A>>>::value) { + DestroyAdapter<A>::DestroyElements( + GetAllocator(), storage_view.data + erase_index, erase_size); + std::memmove( + reinterpret_cast<char*>(storage_view.data + erase_index), + reinterpret_cast<const char*>(storage_view.data + erase_end_index), + (storage_view.size - erase_end_index) * sizeof(ValueType<A>)); + } else { + IteratorValueAdapter<A, MoveIterator<A>> move_values( + MoveIterator<A>(storage_view.data + erase_end_index)); - AssignElements<A>(storage_view.data + erase_index, move_values, - storage_view.size - erase_end_index); + AssignElements<A>(storage_view.data + erase_index, move_values, + storage_view.size - erase_end_index); - DestroyAdapter<A>::DestroyElements( - GetAllocator(), storage_view.data + (storage_view.size - erase_size), - erase_size); - + DestroyAdapter<A>::DestroyElements( + GetAllocator(), storage_view.data + (storage_view.size - erase_size), + erase_size); + } SubtractSize(erase_size); return Iterator<A>(storage_view.data + erase_index); }
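A minimal sketch of the relocate-by-memmove idea in isolation, assuming a trivially relocatable and trivially destructible element type; `Slot` and `EraseAt` are illustrative names:

```cpp
#include <cstddef>
#include <cstring>

struct Slot {
  long long v;  // Trivially copyable, hence trivially relocatable.
};

// Erase one element without the per-element move-assign/destroy loop: end
// the erased object's lifetime, then close the gap with one byte-wise copy.
void EraseAt(Slot* data, std::size_t size, std::size_t index) {
  data[index].~Slot();  // Trivial destructor; cannot throw.
  std::memmove(data + index, data + index + 1,
               (size - index - 1) * sizeof(Slot));
}
```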
diff --git a/absl/container/internal/layout.h b/absl/container/internal/layout.h
index a4ba610..1bf739c 100644
--- a/absl/container/internal/layout.h
+++ b/absl/container/internal/layout.h
@@ -706,7 +706,7 @@ template <class... Sizes> static constexpr PartialType<sizeof...(Sizes)> Partial(Sizes&&... sizes) { static_assert(sizeof...(Sizes) <= sizeof...(Ts), ""); - return PartialType<sizeof...(Sizes)>(absl::forward<Sizes>(sizes)...); + return PartialType<sizeof...(Sizes)>(std::forward<Sizes>(sizes)...); } // Creates a layout with the sizes of all arrays specified. If you know
diff --git a/absl/container/internal/raw_hash_set.cc b/absl/container/internal/raw_hash_set.cc
index 02301e1..f0f840d 100644
--- a/absl/container/internal/raw_hash_set.cc
+++ b/absl/container/internal/raw_hash_set.cc
@@ -30,12 +30,14 @@ ABSL_NAMESPACE_BEGIN namespace container_internal { +// Represents a control byte corresponding to a full slot with arbitrary hash. +constexpr ctrl_t ZeroCtrlT() { return static_cast<ctrl_t>(0); } + // We have space for `growth_left` before a single block of control bytes. A // single block of empty control bytes for tables without any slots allocated. // This enables removing a branch in the hot path of find(). In order to ensure // that the control bytes are aligned to 16, we have 16 bytes before the control // bytes even though growth_left only needs 8. -constexpr ctrl_t ZeroCtrlT() { return static_cast<ctrl_t>(0); } alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[32] = { ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), @@ -46,6 +48,18 @@ ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty}; +// We need one full byte followed by a sentinel byte for iterator::operator++ to +// work. We have a full group after kSentinel to be safe (in case operator++ is +// changed to read a full group). +ABSL_CONST_INIT ABSL_DLL const ctrl_t kSooControl[17] = { + ZeroCtrlT(), ctrl_t::kSentinel, ZeroCtrlT(), ctrl_t::kEmpty, + ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, + ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, + ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, + ctrl_t::kEmpty}; +static_assert(NumControlBytes(SooCapacity()) <= 17, + "kSooControl capacity too small"); + #ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL constexpr size_t Group::kWidth; #endif @@ -104,10 +118,25 @@ return ShouldRehashForBugDetection(ctrl, capacity); } -bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl) { +bool ShouldInsertBackwardsForDebug(size_t capacity, size_t hash, + const ctrl_t* ctrl) { // To avoid problems with weak hashes and single bit tests, we use % 13. // TODO(kfm,sbenza): revisit after we do unconditional mixing - return (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6; + return !is_small(capacity) && (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6; +} + +size_t PrepareInsertAfterSoo(size_t hash, size_t slot_size, + CommonFields& common) { + assert(common.capacity() == NextCapacity(SooCapacity())); + // After resize from capacity 1 to 3, we always have exactly the slot with + // index 1 occupied, so we need to insert either at index 0 or index 2. + assert(HashSetResizeHelper::SooSlotIndex() == 1); + PrepareInsertCommon(common); + const size_t offset = H1(hash, common.control()) & 2; + common.set_growth_left(common.growth_left() - 1); + SetCtrlInSingleGroupTable(common, offset, H2(hash), slot_size); + common.infoz().RecordInsert(hash, /*distance_from_desired=*/0); + return offset; } void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) { @@ -253,9 +282,10 @@ } void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy, - bool reuse) { + bool reuse, bool soo_enabled) { c.set_size(0); if (reuse) { + assert(!soo_enabled || c.capacity() > SooCapacity()); ResetCtrl(c, policy.slot_size); ResetGrowthLeft(c); c.infoz().RecordStorageChanged(0, c.capacity()); @@ -263,12 +293,9 @@ // We need to record infoz before calling dealloc, which will unregister // infoz. c.infoz().RecordClearedReservation(); - c.infoz().RecordStorageChanged(0, 0); + c.infoz().RecordStorageChanged(0, soo_enabled ? 
SooCapacity() : 0); (*policy.dealloc)(c, policy); - c.set_control(EmptyGroup()); - c.set_generation_ptr(EmptyGeneration()); - c.set_slots(nullptr); - c.set_capacity(0); + c = soo_enabled ? CommonFields{soo_tag_t{}} : CommonFields{}; } } @@ -285,7 +312,7 @@ // Copy second half of bytes to the beginning. // We potentially copy more bytes in order to have compile time known size. - // Mirrored bytes from the old_ctrl_ will also be copied. + // Mirrored bytes from the old_ctrl() will also be copied. // In case of old_capacity_ == 3, we will copy 1st element twice. // Examples: // old_ctrl = 0S0EEEEEEE... @@ -296,7 +323,7 @@ // // old_ctrl = 0123456S0123456EE... // new_ctrl = 456S0123?????????... - std::memcpy(new_ctrl, old_ctrl_ + half_old_capacity + 1, kHalfWidth); + std::memcpy(new_ctrl, old_ctrl() + half_old_capacity + 1, kHalfWidth); // Clean up copied kSentinel from old_ctrl. new_ctrl[half_old_capacity] = ctrl_t::kEmpty; @@ -347,34 +374,55 @@ new_ctrl[new_capacity] = ctrl_t::kSentinel; } +void HashSetResizeHelper::InitControlBytesAfterSoo(ctrl_t* new_ctrl, ctrl_t h2, + size_t new_capacity) { + assert(is_single_group(new_capacity)); + std::memset(new_ctrl, static_cast<int8_t>(ctrl_t::kEmpty), + NumControlBytes(new_capacity)); + assert(HashSetResizeHelper::SooSlotIndex() == 1); + // This allows us to avoid branching on had_soo_slot_. + assert(had_soo_slot_ || h2 == ctrl_t::kEmpty); + new_ctrl[1] = new_ctrl[new_capacity + 2] = h2; + new_ctrl[new_capacity] = ctrl_t::kSentinel; +} + void HashSetResizeHelper::GrowIntoSingleGroupShuffleTransferableSlots( - void* old_slots, void* new_slots, size_t slot_size) const { + void* new_slots, size_t slot_size) const { assert(old_capacity_ > 0); const size_t half_old_capacity = old_capacity_ / 2; - SanitizerUnpoisonMemoryRegion(old_slots, slot_size * old_capacity_); + SanitizerUnpoisonMemoryRegion(old_slots(), slot_size * old_capacity_); std::memcpy(new_slots, - SlotAddress(old_slots, half_old_capacity + 1, slot_size), + SlotAddress(old_slots(), half_old_capacity + 1, slot_size), slot_size * half_old_capacity); std::memcpy(SlotAddress(new_slots, half_old_capacity + 1, slot_size), - old_slots, slot_size * (half_old_capacity + 1)); + old_slots(), slot_size * (half_old_capacity + 1)); } void HashSetResizeHelper::GrowSizeIntoSingleGroupTransferable( - CommonFields& c, void* old_slots, size_t slot_size) { + CommonFields& c, size_t slot_size) { assert(old_capacity_ < Group::kWidth / 2); assert(is_single_group(c.capacity())); assert(IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity())); GrowIntoSingleGroupShuffleControlBytes(c.control(), c.capacity()); - GrowIntoSingleGroupShuffleTransferableSlots(old_slots, c.slot_array(), - slot_size); + GrowIntoSingleGroupShuffleTransferableSlots(c.slot_array(), slot_size); // We poison since GrowIntoSingleGroupShuffleTransferableSlots // may leave empty slots unpoisoned. PoisonSingleGroupEmptySlots(c, slot_size); } +void HashSetResizeHelper::TransferSlotAfterSoo(CommonFields& c, + size_t slot_size) { + assert(was_soo_); + assert(had_soo_slot_); + assert(is_single_group(c.capacity())); + std::memcpy(SlotAddress(c.slot_array(), SooSlotIndex(), slot_size), + old_soo_data(), slot_size); + PoisonSingleGroupEmptySlots(c, slot_size); +} + } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl
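A worked sketch of the offset choice in `PrepareInsertAfterSoo`: after growing from SOO capacity 1 to capacity 3, the relocated element occupies index 1, so the incoming element must land at index 0 or 2, and `H1 & 2` selects between them in a hash-dependent way. `InsertOffsetAfterSoo` is a hypothetical helper, not part of the patch:

```cpp
#include <cstddef>

// Given the H1 portion of a hash, choose the insertion index in the freshly
// grown capacity-3 table: bit 1 yields 0 or 2, and index 1 is taken.
std::size_t InsertOffsetAfterSoo(std::size_t h1) {
  return h1 & 2;
}
```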
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index 7b33de6..81f9936 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -100,6 +100,13 @@ // Storing control bytes in a separate array also has beneficial cache effects, // since more logical slots will fit into a cache line. // +// # Small Object Optimization (SOO) +// +// When the size/alignment of the value_type and the capacity of the table are +// small, we enable small object optimization and store the values inline in +// the raw_hash_set object. This optimization allows us to avoid +// allocation/deallocation as well as cache/dTLB misses. +// // # Hashing // // We compute two separate hashes, `H1` and `H2`, from the hash of an object. @@ -531,10 +538,24 @@ // Returns a pointer to a control byte group that can be used by empty tables. inline ctrl_t* EmptyGroup() { // Const must be cast away here; no uses of this function will actually write - // to it, because it is only used for empty tables. + // to it because it is only used for empty tables. return const_cast<ctrl_t*>(kEmptyGroup + 16); } +// For use in SOO iterators. +// TODO(b/289225379): we could potentially get rid of this by adding an is_soo +// bit in iterators. This would add branches but reduce cache misses. +ABSL_DLL extern const ctrl_t kSooControl[17]; + +// Returns a pointer to a full byte followed by a sentinel byte. +inline ctrl_t* SooControl() { + // Const must be cast away here; no uses of this function will actually write + // to it because it is only used for SOO iterators. + return const_cast<ctrl_t*>(kSooControl); +} +// Whether ctrl is from the SooControl array. +inline bool IsSooControl(const ctrl_t* ctrl) { return ctrl == SooControl(); } + // Returns a pointer to a generation to use for an empty hashtable. GenerationType* EmptyGeneration(); @@ -546,7 +567,27 @@ // Mixes a randomly generated per-process seed with `hash` and `ctrl` to // randomize insertion order within groups. -bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl); +bool ShouldInsertBackwardsForDebug(size_t capacity, size_t hash, + const ctrl_t* ctrl); + +// Returns insert position for the given mask. +// We want to add entropy even when ASLR is not enabled. +// In debug build we will randomly insert in either the front or back of +// the group. +// TODO(kfm,sbenza): revisit after we do unconditional mixing +template <class Mask> +ABSL_ATTRIBUTE_ALWAYS_INLINE inline auto GetInsertionOffset( + Mask mask, ABSL_ATTRIBUTE_UNUSED size_t capacity, + ABSL_ATTRIBUTE_UNUSED size_t hash, + ABSL_ATTRIBUTE_UNUSED const ctrl_t* ctrl) { +#if defined(NDEBUG) + return mask.LowestBitSet(); +#else + return ShouldInsertBackwardsForDebug(capacity, hash, ctrl) + ? mask.HighestBitSet() + : mask.LowestBitSet(); +#endif +} // Returns a per-table, hash salt, which changes on resize. This gets mixed into // H1 to randomize iteration order per-table. @@ -1025,7 +1066,7 @@ constexpr size_t NumClonedBytes() { return Group::kWidth - 1; } // Returns the number of control bytes including cloned. -inline size_t NumControlBytes(size_t capacity) { +constexpr size_t NumControlBytes(size_t capacity) { return capacity + 1 + NumClonedBytes(); } @@ -1076,12 +1117,64 @@ size_t slot_offset_; }; +// We only allow a maximum of 1 SOO element, which makes the implementation +// much simpler. Complications with multiple SOO elements include: +// - Satisfying the guarantee that erasing one element doesn't invalidate +// iterators to other elements means we would probably need actual SOO +// control bytes. +// - In order to prevent user code from depending on iteration order for small +// tables, we would need to randomize the iteration order somehow. 
+constexpr size_t SooCapacity() { return 1; } +// Sentinel type to indicate SOO CommonFields construction. +struct soo_tag_t {}; +// Sentinel type to indicate SOO CommonFields construction with full size. +struct full_soo_tag_t {}; + +// This allows us to work around an uninitialized memory warning when +// constructing begin() iterators in empty hashtables. +union MaybeInitializedPtr { + void* p; +}; + +struct HeapPtrs { + HeapPtrs() = default; + explicit HeapPtrs(ctrl_t* c) : control(c) {} + + // The control bytes (and, also, a pointer near to the base of the backing + // array). + // + // This contains `capacity + 1 + NumClonedBytes()` entries, even + // when the table is empty (hence EmptyGroup). + // + // Note that growth_left is stored immediately before this pointer. + // May be uninitialized for SOO tables. + ctrl_t* control; + + // The beginning of the slots, located at `SlotOffset()` bytes after + // `control`. May be uninitialized for empty tables. + // Note: we can't use `slots` because Qt defines "slots" as a macro. + MaybeInitializedPtr slot_array; +}; + +// Manages the backing array pointers or the SOO slot. When raw_hash_set::is_soo +// is true, the SOO slot is stored in `soo_data`. Otherwise, we use `heap`. +union HeapOrSoo { + HeapOrSoo() = default; + explicit HeapOrSoo(ctrl_t* c) : heap(c) {} + + HeapPtrs heap; + unsigned char soo_data[sizeof(HeapPtrs)]; +}; + // CommonFields hold the fields in raw_hash_set that do not depend // on template parameters. This allows us to conveniently pass all // of this state to helper functions as a single argument. class CommonFields : public CommonFieldsGenerationInfo { public: - CommonFields() = default; + CommonFields() : capacity_(0), size_(0), heap_or_soo_(EmptyGroup()) {} + explicit CommonFields(soo_tag_t) : capacity_(SooCapacity()), size_(0) {} + explicit CommonFields(full_soo_tag_t) + : capacity_(SooCapacity()), size_(size_t{1} << HasInfozShift()) {} // Not copyable CommonFields(const CommonFields&) = delete; @@ -1091,8 +1184,20 @@ CommonFields(CommonFields&& that) = default; CommonFields& operator=(CommonFields&&) = default; - ctrl_t* control() const { return control_; } - void set_control(ctrl_t* c) { control_ = c; } + template <bool kSooEnabled> + static CommonFields CreateDefault() { + return kSooEnabled ? CommonFields{soo_tag_t{}} : CommonFields{}; + } + + // The inline data for SOO is written on top of control_/slots_. + const void* soo_data() const { return heap_or_soo_.soo_data; } + void* soo_data() { return heap_or_soo_.soo_data; } + + HeapOrSoo heap_or_soo() const { return heap_or_soo_; } + const HeapOrSoo& heap_or_soo_ref() const { return heap_or_soo_; } + + ctrl_t* control() const { return heap_or_soo_.heap.control; } + void set_control(ctrl_t* c) { heap_or_soo_.heap.control = c; } void* backing_array_start() const { // growth_left (and maybe infoz) is stored before control bytes. assert(reinterpret_cast<uintptr_t>(control()) % alignof(size_t) == 0); @@ -1100,14 +1205,33 @@ } // Note: we can't use slots() because Qt defines "slots" as a macro. - void* slot_array() const { return slots_; } - void set_slots(void* s) { slots_ = s; } + void* slot_array() const { return heap_or_soo_.heap.slot_array.p; } + MaybeInitializedPtr slots_union() const { + // Suppress erroneous uninitialized memory errors on GCC. 
+#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif + return heap_or_soo_.heap.slot_array; +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + } + void set_slots(void* s) { heap_or_soo_.heap.slot_array.p = s; } // The number of filled slots. size_t size() const { return size_ >> HasInfozShift(); } void set_size(size_t s) { size_ = (s << HasInfozShift()) | (size_ & HasInfozMask()); } + void set_empty_soo() { + AssertInSooMode(); + size_ = 0; + } + void set_full_soo() { + AssertInSooMode(); + size_ = size_t{1} << HasInfozShift(); + } void increment_size() { assert(size() < capacity()); size_ += size_t{1} << HasInfozShift(); @@ -1126,6 +1250,8 @@ // The number of slots we can still fill without needing to rehash. // This is stored in the heap allocation before the control bytes. + // TODO(b/289225379): experiment with moving growth_left back inline to + // increase room for SOO. size_t growth_left() const { const size_t* gl_ptr = reinterpret_cast<size_t*>(control()) - 1; assert(reinterpret_cast<uintptr_t>(gl_ptr) % alignof(size_t) == 0); @@ -1162,10 +1288,6 @@ return CommonFieldsGenerationInfo:: should_rehash_for_bug_detection_on_move(control(), capacity()); } - void maybe_increment_generation_on_move() { - if (capacity() == 0) return; - increment_generation(); - } void reset_reserved_growth(size_t reservation) { CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size()); } @@ -1176,6 +1298,14 @@ .alloc_size(slot_size); } + // Move fields other than heap_or_soo_. + void move_non_heap_or_soo_fields(CommonFields& that) { + static_cast<CommonFieldsGenerationInfo&>(*this) = + std::move(static_cast<CommonFieldsGenerationInfo&>(that)); + capacity_ = that.capacity_; + size_ = that.size_; + } + // Returns the number of control bytes set to kDeleted. For testing only. size_t TombstonesCount() const { return static_cast<size_t>( @@ -1189,21 +1319,12 @@ return (size_t{1} << HasInfozShift()) - 1; } - // TODO(b/182800944): Investigate removing some of these fields: - // - control/slots can be derived from each other - - // The control bytes (and, also, a pointer near to the base of the backing - // array). - // - // This contains `capacity + 1 + NumClonedBytes()` entries, even - // when the table is empty (hence EmptyGroup). - // - // Note that growth_left is stored immediately before this pointer. - ctrl_t* control_ = EmptyGroup(); - - // The beginning of the slots, located at `SlotOffset()` bytes after - // `control`. May be null for empty tables. - void* slots_ = nullptr; + // We can't assert that SOO is enabled because we don't have SooEnabled(), but + // we assert what we can. + void AssertInSooMode() const { + assert(capacity() == SooCapacity()); + assert(!has_infoz()); + } // The number of slots in the backing array. This is always 2^N-1 for an // integer N. NOTE: we tried experimenting with compressing the capacity and @@ -1211,10 +1332,16 @@ // power (N in 2^N-1), and (b) storing 2^N as the most significant bit of // size_ and storing size in the low bits. Both of these experiments were // regressions, presumably because we need capacity to do find operations. - size_t capacity_ = 0; + size_t capacity_; // The size and also has one bit that stores whether we have infoz. - size_t size_ = 0; + // TODO(b/289225379): we could put size_ into HeapOrSoo and make capacity_ + // encode the size in SOO case. 
We would be making size()/capacity() more + // expensive in order to have more SOO space. + size_t size_; + + // Either the control/slots pointers or the SOO slot. + HeapOrSoo heap_or_soo_; }; template <class Policy, class Hash, class Eq, class Alloc> @@ -1377,6 +1504,10 @@ const void* const& slot_b) { // If either control byte is null, then we can't tell. if (ctrl_a == nullptr || ctrl_b == nullptr) return true; + const bool a_is_soo = IsSooControl(ctrl_a); + if (a_is_soo != IsSooControl(ctrl_b)) return false; + if (a_is_soo) return slot_a == slot_b; + const void* low_slot = slot_a; const void* hi_slot = slot_b; if (ctrl_a > ctrl_b) { @@ -1400,41 +1531,45 @@ // - use `ABSL_PREDICT_FALSE()` to provide a compiler hint for code layout // - use `ABSL_RAW_LOG()` with a format string to reduce code size and improve // the chances that the hot paths will be inlined. + + // fail_if(is_invalid, message) crashes when is_invalid is true and provides + // an error message based on `message`. + const auto fail_if = [](bool is_invalid, const char* message) { + if (ABSL_PREDICT_FALSE(is_invalid)) { + ABSL_RAW_LOG(FATAL, "Invalid iterator comparison. %s", message); + } + }; + const bool a_is_default = ctrl_a == EmptyGroup(); const bool b_is_default = ctrl_b == EmptyGroup(); - if (ABSL_PREDICT_FALSE(a_is_default != b_is_default)) { - ABSL_RAW_LOG( - FATAL, - "Invalid iterator comparison. Comparing default-constructed iterator " - "with non-default-constructed iterator."); - } if (a_is_default && b_is_default) return; + fail_if(a_is_default != b_is_default, + "Comparing default-constructed hashtable iterator with a " + "non-default-constructed hashtable iterator."); if (SwisstableGenerationsEnabled()) { if (ABSL_PREDICT_TRUE(generation_ptr_a == generation_ptr_b)) return; + // Users don't need to know whether the tables are SOO so don't mention SOO + // in the debug message. + const bool a_is_soo = IsSooControl(ctrl_a); + const bool b_is_soo = IsSooControl(ctrl_b); + fail_if(a_is_soo != b_is_soo || (a_is_soo && b_is_soo), + "Comparing iterators from different hashtables."); + const bool a_is_empty = IsEmptyGeneration(generation_ptr_a); const bool b_is_empty = IsEmptyGeneration(generation_ptr_b); - if (a_is_empty != b_is_empty) { - ABSL_RAW_LOG(FATAL, - "Invalid iterator comparison. Comparing iterator from a " - "non-empty hashtable with an iterator from an empty " - "hashtable."); - } - if (a_is_empty && b_is_empty) { - ABSL_RAW_LOG(FATAL, - "Invalid iterator comparison. Comparing iterators from " - "different empty hashtables."); - } + fail_if(a_is_empty != b_is_empty, + "Comparing an iterator from an empty hashtable with an iterator " + "from a non-empty hashtable."); + fail_if(a_is_empty && b_is_empty, + "Comparing iterators from different empty hashtables."); + const bool a_is_end = ctrl_a == nullptr; const bool b_is_end = ctrl_b == nullptr; - if (a_is_end || b_is_end) { - ABSL_RAW_LOG(FATAL, - "Invalid iterator comparison. Comparing iterator with an " - "end() iterator from a different hashtable."); - } - ABSL_RAW_LOG(FATAL, - "Invalid iterator comparison. 
Comparing non-end() iterators " - "from different hashtables."); + fail_if(a_is_end || b_is_end, + "Comparing iterator with an end() iterator from a different " + "hashtable."); + fail_if(true, "Comparing non-end() iterators from different hashtables."); } else { ABSL_HARDENING_ASSERT( AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) && @@ -1493,16 +1628,9 @@ GroupFullEmptyOrDeleted g{ctrl + seq.offset()}; auto mask = g.MaskEmptyOrDeleted(); if (mask) { -#if !defined(NDEBUG) - // We want to add entropy even when ASLR is not enabled. - // In debug build we will randomly insert in either the front or back of - // the group. - // TODO(kfm,sbenza): revisit after we do unconditional mixing - if (!is_small(common.capacity()) && ShouldInsertBackwards(hash, ctrl)) { - return {seq.offset(mask.HighestBitSet()), seq.index()}; - } -#endif - return {seq.offset(mask.LowestBitSet()), seq.index()}; + return { + seq.offset(GetInsertionOffset(mask, common.capacity(), hash, ctrl)), + seq.index()}; } seq.next(); assert(seq.index() <= common.capacity() && "full table!"); @@ -1533,31 +1661,49 @@ SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity); } -// Sets `ctrl[i]` to `h`. -// -// Unlike setting it directly, this function will perform bounds checks and -// mirror the value to the cloned tail if necessary. -inline void SetCtrl(const CommonFields& common, size_t i, ctrl_t h, - size_t slot_size) { - const size_t capacity = common.capacity(); - assert(i < capacity); - - auto* slot_i = static_cast<const char*>(common.slot_array()) + i * slot_size; +// Sets sanitizer poisoning for slot corresponding to control byte being set. +inline void DoSanitizeOnSetCtrl(const CommonFields& c, size_t i, ctrl_t h, + size_t slot_size) { + assert(i < c.capacity()); + auto* slot_i = static_cast<const char*>(c.slot_array()) + i * slot_size; if (IsFull(h)) { SanitizerUnpoisonMemoryRegion(slot_i, slot_size); } else { SanitizerPoisonMemoryRegion(slot_i, slot_size); } - - ctrl_t* ctrl = common.control(); - ctrl[i] = h; - ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h; } -// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`. -inline void SetCtrl(const CommonFields& common, size_t i, h2_t h, +// Sets `ctrl[i]` to `h`. +// +// Unlike setting it directly, this function will perform bounds checks and +// mirror the value to the cloned tail if necessary. +inline void SetCtrl(const CommonFields& c, size_t i, ctrl_t h, size_t slot_size) { - SetCtrl(common, i, static_cast<ctrl_t>(h), slot_size); + DoSanitizeOnSetCtrl(c, i, h, slot_size); + ctrl_t* ctrl = c.control(); + ctrl[i] = h; + ctrl[((i - NumClonedBytes()) & c.capacity()) + + (NumClonedBytes() & c.capacity())] = h; +} +// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`. +inline void SetCtrl(const CommonFields& c, size_t i, h2_t h, size_t slot_size) { + SetCtrl(c, i, static_cast<ctrl_t>(h), slot_size); +} + +// Like SetCtrl, but in a single group table, we can save some operations when +// setting the cloned control byte. +inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, ctrl_t h, + size_t slot_size) { + assert(is_single_group(c.capacity())); + DoSanitizeOnSetCtrl(c, i, h, slot_size); + ctrl_t* ctrl = c.control(); + ctrl[i] = h; + ctrl[i + c.capacity() + 1] = h; +} +// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`. 
+inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, h2_t h, + size_t slot_size) { + SetCtrlInSingleGroupTable(c, i, static_cast<ctrl_t>(h), slot_size); } // growth_left (which is a size_t) is stored with the backing array. @@ -1618,10 +1764,11 @@ // See GrowIntoSingleGroupShuffleControlBytes for details. class HashSetResizeHelper { public: - explicit HashSetResizeHelper(CommonFields& c) - : old_ctrl_(c.control()), - old_capacity_(c.capacity()), - had_infoz_(c.has_infoz()) {} + explicit HashSetResizeHelper(CommonFields& c, bool was_soo, bool had_soo_slot) + : old_capacity_(c.capacity()), + had_infoz_(c.has_infoz()), + was_soo_(was_soo), + had_soo_slot_(had_soo_slot) {} // Optimized for small groups version of `find_first_non_full`. // Beneficial only right after calling `raw_hash_set::resize`. @@ -1652,9 +1799,25 @@ return FindInfo{offset, 0}; } - ctrl_t* old_ctrl() const { return old_ctrl_; } + HeapOrSoo& old_heap_or_soo() { return old_heap_or_soo_; } + void* old_soo_data() { return old_heap_or_soo_.soo_data; } + ctrl_t* old_ctrl() const { + assert(!was_soo_); + return old_heap_or_soo_.heap.control; + } + void* old_slots() const { + assert(!was_soo_); + return old_heap_or_soo_.heap.slot_array.p; + } size_t old_capacity() const { return old_capacity_; } + // Returns the index of the SOO slot when growing from SOO to non-SOO in a + // single group. See also InitControlBytesAfterSoo(). It's important to use + // index 1 so that when resizing from capacity 1 to 3, we can still have + // random iteration order between the first two inserted elements. + // I.e. it allows inserting the second element at either index 0 or 2. + static size_t SooSlotIndex() { return 1; } + // Allocates a backing array for the hashtable. // Reads `capacity` and updates all other fields based on the result of // the allocation. @@ -1690,8 +1853,8 @@ // Returns IsGrowingIntoSingleGroupApplicable result to avoid recomputation. template <typename Alloc, size_t SizeOfSlot, bool TransferUsesMemcpy, size_t AlignOfSlot> - ABSL_ATTRIBUTE_NOINLINE bool InitializeSlots(CommonFields& c, void* old_slots, - Alloc alloc) { + ABSL_ATTRIBUTE_NOINLINE bool InitializeSlots(CommonFields& c, Alloc alloc, + ctrl_t soo_slot_h2) { assert(c.capacity()); // Folks with custom allocators often make unwarranted assumptions about the // behavior of their classes vis-a-vis trivial destructability and what @@ -1700,9 +1863,13 @@ // a workaround while we plan the exact guarantee we want to provide. const size_t sample_size = (std::is_same<Alloc, std::allocator<char>>::value && - c.slot_array() == nullptr) + (was_soo_ || old_capacity_ == 0)) ? SizeOfSlot : 0; + // TODO(b/289225379): when SOO is enabled, we should still sample on first + // insertion and if Sample is non-null, then we should force a heap + // allocation. Note that we'll also have to force capacity of 3 so that + // is_soo() still works. HashtablezInfoHandle infoz = sample_size > 0 ? 
Sample(sample_size) : c.infoz(); @@ -1720,10 +1887,15 @@ const bool grow_single_group = IsGrowingIntoSingleGroupApplicable(old_capacity_, layout.capacity()); - if (old_capacity_ != 0 && grow_single_group) { + if (was_soo_ && grow_single_group) { + InitControlBytesAfterSoo(c.control(), soo_slot_h2, layout.capacity()); + if (TransferUsesMemcpy && had_soo_slot_) { + TransferSlotAfterSoo(c, SizeOfSlot); + } + } else if (old_capacity_ != 0 && grow_single_group) { if (TransferUsesMemcpy) { - GrowSizeIntoSingleGroupTransferable(c, old_slots, SizeOfSlot); - DeallocateOld<AlignOfSlot>(alloc, SizeOfSlot, old_slots); + GrowSizeIntoSingleGroupTransferable(c, SizeOfSlot); + DeallocateOld<AlignOfSlot>(alloc, SizeOfSlot); } else { GrowIntoSingleGroupShuffleControlBytes(c.control(), layout.capacity()); } @@ -1734,7 +1906,7 @@ c.set_has_infoz(has_infoz); if (has_infoz) { infoz.RecordStorageChanged(c.size(), layout.capacity()); - if (grow_single_group || old_capacity_ == 0) { + if (was_soo_ || grow_single_group || old_capacity_ == 0) { infoz.RecordRehash(0); } c.set_infoz(infoz); @@ -1748,21 +1920,22 @@ // PRECONDITIONS: // 1. GrowIntoSingleGroupShuffleControlBytes was already called. template <class PolicyTraits, class Alloc> - void GrowSizeIntoSingleGroup(CommonFields& c, Alloc& alloc_ref, - typename PolicyTraits::slot_type* old_slots) { + void GrowSizeIntoSingleGroup(CommonFields& c, Alloc& alloc_ref) { assert(old_capacity_ < Group::kWidth / 2); assert(IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity())); using slot_type = typename PolicyTraits::slot_type; assert(is_single_group(c.capacity())); auto* new_slots = reinterpret_cast<slot_type*>(c.slot_array()); + auto* old_slots_ptr = reinterpret_cast<slot_type*>(old_slots()); size_t shuffle_bit = old_capacity_ / 2 + 1; for (size_t i = 0; i < old_capacity_; ++i) { - if (IsFull(old_ctrl_[i])) { + if (IsFull(old_ctrl()[i])) { size_t new_i = i ^ shuffle_bit; SanitizerUnpoisonMemoryRegion(new_slots + new_i, sizeof(slot_type)); - PolicyTraits::transfer(&alloc_ref, new_slots + new_i, old_slots + i); + PolicyTraits::transfer(&alloc_ref, new_slots + new_i, + old_slots_ptr + i); } } PoisonSingleGroupEmptySlots(c, sizeof(slot_type)); @@ -1770,11 +1943,11 @@ // Deallocates old backing array. template <size_t AlignOfSlot, class CharAlloc> - void DeallocateOld(CharAlloc alloc_ref, size_t slot_size, void* old_slots) { - SanitizerUnpoisonMemoryRegion(old_slots, slot_size * old_capacity_); + void DeallocateOld(CharAlloc alloc_ref, size_t slot_size) { + SanitizerUnpoisonMemoryRegion(old_slots(), slot_size * old_capacity_); auto layout = RawHashSetLayout(old_capacity_, AlignOfSlot, had_infoz_); Deallocate<BackingArrayAlignment(AlignOfSlot)>( - &alloc_ref, old_ctrl_ - layout.control_offset(), + &alloc_ref, old_ctrl() - layout.control_offset(), layout.alloc_size(slot_size)); } @@ -1790,8 +1963,12 @@ // Relocates control bytes and slots into new single group for // transferable objects. // Must be called only if IsGrowingIntoSingleGroupApplicable returned true. - void GrowSizeIntoSingleGroupTransferable(CommonFields& c, void* old_slots, - size_t slot_size); + void GrowSizeIntoSingleGroupTransferable(CommonFields& c, size_t slot_size); + + // If there was an SOO slot and slots are transferable, transfers the SOO slot + // into the new heap allocation. Must be called only if + // IsGrowingIntoSingleGroupApplicable returned true. + void TransferSlotAfterSoo(CommonFields& c, size_t slot_size); // Shuffle control bits deterministically to the next capacity. 
// Returns offset for newly added element with given hash. @@ -1824,6 +2001,13 @@ void GrowIntoSingleGroupShuffleControlBytes(ctrl_t* new_ctrl, size_t new_capacity) const; + // If the table was SOO, initializes new control bytes. `h2` is the control + // byte corresponding to the full slot. Must be called only if + // IsGrowingIntoSingleGroupApplicable returned true. + // Requires: `had_soo_slot_ || h2 == ctrl_t::kEmpty`. + void InitControlBytesAfterSoo(ctrl_t* new_ctrl, ctrl_t h2, + size_t new_capacity); + // Shuffle trivially transferable slots in the way consistent with // GrowIntoSingleGroupShuffleControlBytes. // @@ -1837,8 +2021,7 @@ // 1. new_slots are transferred from old_slots_ consistent with // GrowIntoSingleGroupShuffleControlBytes. // 2. Empty new_slots are *not* poisoned. - void GrowIntoSingleGroupShuffleTransferableSlots(void* old_slots, - void* new_slots, + void GrowIntoSingleGroupShuffleTransferableSlots(void* new_slots, size_t slot_size) const; // Poison empty slots that were transferred using the deterministic algorithm @@ -1858,11 +2041,22 @@ } } - ctrl_t* old_ctrl_; + HeapOrSoo old_heap_or_soo_; size_t old_capacity_; bool had_infoz_; + bool was_soo_; + bool had_soo_slot_; }; +inline void PrepareInsertCommon(CommonFields& common) { + common.increment_size(); + common.maybe_increment_generation_on_insert(); +} + +// Like prepare_insert, but for the case of inserting into a full SOO table. +size_t PrepareInsertAfterSoo(size_t hash, size_t slot_size, + CommonFields& common); + // PolicyFunctions bundles together some information for a particular // raw_hash_set<T, ...> instantiation. This information is passed to // type-erased functions that want to do small amounts of type-specific @@ -1884,7 +2078,7 @@ // or creating a new one based on the value of "reuse". // REQUIRES: c.capacity > 0 void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy, - bool reuse); + bool reuse, bool soo_enabled); // Type-erased version of raw_hash_set::erase_meta_only. void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size); @@ -1973,6 +2167,31 @@ using key_arg = typename KeyArgImpl::template type<K, key_type>; private: + // TODO(b/289225379): we could add extra SOO space inside raw_hash_set + // after CommonFields to allow inlining larger slot_types (e.g. std::string), + // but it's a bit complicated if we want to support incomplete mapped_type in + // flat_hash_map. We could potentially do this for flat_hash_set and for an + // allowlist of `mapped_type`s of flat_hash_map that includes e.g. arithmetic + // types, strings, cords, and pairs/tuples of allowlisted types. + constexpr static bool SooEnabled() { + return PolicyTraits::soo_enabled() && + sizeof(slot_type) <= sizeof(HeapOrSoo) && + alignof(slot_type) <= alignof(HeapOrSoo); + } + // TODO(b/289225379): this is used for pretty printing in GDB/LLDB, but if we + // use this instead of SooEnabled(), then we get compile errors in some OSS + // compilers due to incomplete mapped_type in flat_hash_map. We need to + // resolve this before launching SOO. + // constexpr static bool kSooEnabled = SooEnabled(); + + // Whether `size` fits in the SOO capacity of this table. + bool fits_in_soo(size_t size) const { + return SooEnabled() && size <= SooCapacity(); + } + // Whether this table is in SOO mode or non-SOO mode. + bool is_soo() const { return fits_in_soo(capacity()); } + bool is_full_soo() const { return is_soo() && !empty(); } + // Give an early error when key_type is not hashable/eq. 
auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k)); auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k)); @@ -2087,6 +2306,18 @@ // not equal to any end iterator. ABSL_ASSUME(ctrl != nullptr); } + // This constructor is used in begin() to avoid an MSan + // use-of-uninitialized-value error. Delegating from this constructor to + // the previous one doesn't avoid the error. + iterator(ctrl_t* ctrl, MaybeInitializedPtr slot, + const GenerationType* generation_ptr) + : HashSetIteratorGenerationInfo(generation_ptr), + ctrl_(ctrl), + slot_(to_slot(slot.p)) { + // This assumption helps the compiler know that any non-end iterator is + // not equal to any end iterator. + ABSL_ASSUME(ctrl != nullptr); + } // For end() iterators. explicit iterator(const GenerationType* generation_ptr) : HashSetIteratorGenerationInfo(generation_ptr), ctrl_(nullptr) {} @@ -2187,8 +2418,9 @@ size_t bucket_count, const hasher& hash = hasher(), const key_equal& eq = key_equal(), const allocator_type& alloc = allocator_type()) - : settings_(CommonFields{}, hash, eq, alloc) { - if (bucket_count) { + : settings_(CommonFields::CreateDefault<SooEnabled()>(), hash, eq, + alloc) { + if (bucket_count > (SooEnabled() ? SooCapacity() : 0)) { resize(NormalizeCapacity(bucket_count)); } } @@ -2295,6 +2527,15 @@ if (size == 0) { return; } + // We don't use `that.is_soo()` here because `that` can have non-SOO + // capacity but have a size that fits into SOO capacity. + if (fits_in_soo(size)) { + assert(size == 1); + common().set_full_soo(); + emplace_at(soo_iterator(), *that.begin()); + return; + } + assert(!that.is_soo()); const size_t cap = capacity(); // Note about single group tables: // 1. It is correct to have any order of elements. @@ -2352,16 +2593,22 @@ // would create a nullptr functor that cannot be called. // TODO(b/296061262): move instead of copying hash/eq/alloc. // Note: we avoid using exchange for better generated code. - settings_(std::move(that.common()), that.hash_ref(), that.eq_ref(), - that.alloc_ref()) { - that.common() = CommonFields{}; + settings_(PolicyTraits::transfer_uses_memcpy() || !that.is_full_soo() + ? std::move(that.common()) + : CommonFields{full_soo_tag_t{}}, + that.hash_ref(), that.eq_ref(), that.alloc_ref()) { + if (!PolicyTraits::transfer_uses_memcpy() && that.is_full_soo()) { + transfer(soo_slot(), that.soo_slot()); + } + that.common() = CommonFields::CreateDefault<SooEnabled()>(); maybe_increment_generation_or_rehash_on_move(); } raw_hash_set(raw_hash_set&& that, const allocator_type& a) - : settings_(CommonFields{}, that.hash_ref(), that.eq_ref(), a) { + : settings_(CommonFields::CreateDefault<SooEnabled()>(), that.hash_ref(), + that.eq_ref(), a) { if (a == that.alloc_ref()) { - std::swap(common(), that.common()); + swap_common(that); maybe_increment_generation_or_rehash_on_move(); } else { move_elements_allocs_unequal(std::move(that)); @@ -2396,9 +2643,10 @@ ~raw_hash_set() { destructor_impl(); } iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND { - // TODO(b/324478958): Consider reverting if no impact. 
if (ABSL_PREDICT_FALSE(empty())) return end(); - auto it = iterator_at(0); + if (is_soo()) return soo_iterator(); + iterator it = {control(), common().slots_union(), + common().generation_ptr()}; it.skip_empty_or_deleted(); assert(IsFull(*it.control())); return it; @@ -2420,7 +2668,15 @@ bool empty() const { return !size(); } size_t size() const { return common().size(); } - size_t capacity() const { return common().capacity(); } + size_t capacity() const { + const size_t cap = common().capacity(); + // Use local variables because compiler complains about using functions in + // assume. + ABSL_ATTRIBUTE_UNUSED static constexpr bool kSooEnabled = SooEnabled(); + ABSL_ATTRIBUTE_UNUSED static constexpr size_t kSooCapacity = SooCapacity(); + ABSL_ASSUME(!kSooEnabled || cap >= kSooCapacity); + return cap; + } size_t max_size() const { return (std::numeric_limits<size_t>::max)(); } ABSL_ATTRIBUTE_REINITIALIZES void clear() { @@ -2434,9 +2690,13 @@ const size_t cap = capacity(); if (cap == 0) { // Already guaranteed to be empty; so nothing to do. + } else if (is_soo()) { + if (!empty()) destroy(soo_slot()); + common().set_empty_soo(); } else { destroy_slots(); - ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/cap < 128); + ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/cap < 128, + SooEnabled()); } common().set_reserved_growth(0); common().set_reservation_size(0); @@ -2567,7 +2827,7 @@ std::pair<iterator, bool> emplace(Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND { alignas(slot_type) unsigned char raw[sizeof(slot_type)]; - slot_type* slot = reinterpret_cast<slot_type*>(&raw); + slot_type* slot = to_slot(&raw); construct(slot, std::forward<Args>(args)...); const auto& elem = PolicyTraits::element(slot); @@ -2675,7 +2935,11 @@ void erase(iterator it) { AssertIsFull(it.control(), it.generation(), it.generation_ptr(), "erase()"); destroy(it.slot()); - erase_meta_only(it); + if (is_soo()) { + common().set_empty_soo(); + } else { + erase_meta_only(it); + } } iterator erase(const_iterator first, @@ -2683,12 +2947,19 @@ // We check for empty first because ClearBackingArray requires that // capacity() > 0 as a precondition. if (empty()) return end(); + if (first == last) return last.inner_; + if (is_soo()) { + destroy(soo_slot()); + common().set_empty_soo(); + return end(); + } if (first == begin() && last == end()) { // TODO(ezb): we access control bytes in destroy_slots so it could make // sense to combine destroy_slots and ClearBackingArray to avoid cache // misses when the table is large. Note that we also do this in clear(). destroy_slots(); - ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/true); + ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/true, + SooEnabled()); common().set_reserved_growth(common().reservation_size()); return end(); } @@ -2703,13 +2974,21 @@ template <typename H, typename E> void merge(raw_hash_set<Policy, H, E, Alloc>& src) { // NOLINT assert(this != &src); + // Returns whether insertion took place. 
+ const auto insert_slot = [this](slot_type* src_slot) { + return PolicyTraits::apply(InsertSlot<false>{*this, std::move(*src_slot)}, + PolicyTraits::element(src_slot)) + .second; + }; + + if (src.is_soo()) { + if (src.empty()) return; + if (insert_slot(src.soo_slot())) src.common().set_empty_soo(); + return; + } for (auto it = src.begin(), e = src.end(); it != e;) { auto next = std::next(it); - if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot())}, - PolicyTraits::element(it.slot())) - .second) { - src.erase_meta_only(it); - } + if (insert_slot(it.slot())) src.erase_meta_only(it); it = next; } } @@ -2723,7 +3002,11 @@ AssertIsFull(position.control(), position.inner_.generation(), position.inner_.generation_ptr(), "extract()"); auto node = CommonAccess::Transfer<node_type>(alloc_ref(), position.slot()); - erase_meta_only(position); + if (is_soo()) { + common().set_empty_soo(); + } else { + erase_meta_only(position); + } return node; } @@ -2740,7 +3023,7 @@ IsNoThrowSwappable<allocator_type>( typename AllocTraits::propagate_on_container_swap{})) { using std::swap; - swap(common(), that.common()); + swap_common(that); swap(hash_ref(), that.hash_ref()); swap(eq_ref(), that.eq_ref()); SwapAlloc(alloc_ref(), that.alloc_ref(), @@ -2748,17 +3031,31 @@ } void rehash(size_t n) { - if (n == 0 && capacity() == 0) return; - if (n == 0 && size() == 0) { - ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false); - return; + const size_t cap = capacity(); + if (n == 0) { + if (cap == 0 || is_soo()) return; + if (empty()) { + ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false, + SooEnabled()); + return; + } + if (SooEnabled() && size() <= SooCapacity()) { + alignas(slot_type) unsigned char slot_space[sizeof(slot_type)]; + slot_type* tmp_slot = to_slot(slot_space); + transfer(tmp_slot, begin().slot()); + ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false, + SooEnabled()); + transfer(soo_slot(), tmp_slot); + common().set_full_soo(); + return; + } } // bitor is a faster way of doing `max` here. We will round up to the next // power-of-2-minus-1, so bitor is good enough. auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size())); // n == 0 unconditionally rehashes as per the standard. - if (n == 0 || m > capacity()) { + if (n == 0 || m > cap) { resize(m); // This is after resize, to ensure that we have completed the allocation @@ -2768,7 +3065,9 @@ } void reserve(size_t n) { - if (n > size() + growth_left()) { + const size_t max_size_before_growth = + is_soo() ? SooCapacity() : size() + growth_left(); + if (n > max_size_before_growth) { size_t m = GrowthToLowerboundCapacity(n); resize(NormalizeCapacity(m)); @@ -2801,6 +3100,7 @@ // specific benchmarks indicating its importance. template <class K = key_type> void prefetch(const key_arg<K>& key) const { + if (SooEnabled() ? is_soo() : capacity() == 0) return; (void)key; // Avoid probing if we won't be able to prefetch the addresses received. 
#ifdef ABSL_HAVE_PREFETCH @@ -2821,26 +3121,14 @@ template <class K = key_type> iterator find(const key_arg<K>& key, size_t hash) ABSL_ATTRIBUTE_LIFETIME_BOUND { - auto seq = probe(common(), hash); - slot_type* slot_ptr = slot_array(); - const ctrl_t* ctrl = control(); - while (true) { - Group g{ctrl + seq.offset()}; - for (uint32_t i : g.Match(H2(hash))) { - if (ABSL_PREDICT_TRUE(PolicyTraits::apply( - EqualElement<K>{key, eq_ref()}, - PolicyTraits::element(slot_ptr + seq.offset(i))))) - return iterator_at(seq.offset(i)); - } - if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end(); - seq.next(); - assert(seq.index() <= capacity() && "full table!"); - } + if (is_soo()) return find_soo(key); + return find_non_soo(key, hash); } template <class K = key_type> iterator find(const key_arg<K>& key) ABSL_ATTRIBUTE_LIFETIME_BOUND { + if (is_soo()) return find_soo(key); prefetch_heap_block(); - return find(key, hash_ref()(key)); + return find_non_soo(key, hash_ref()(key)); } template <class K = key_type> @@ -2992,7 +3280,38 @@ PolicyTraits::transfer(&alloc_ref(), to, from); } + // TODO(b/289225379): consider having a helper class that has the impls for + // SOO functionality. + template <class K = key_type> + iterator find_soo(const key_arg<K>& key) { + assert(is_soo()); + return empty() || !PolicyTraits::apply(EqualElement<K>{key, eq_ref()}, + PolicyTraits::element(soo_slot())) + ? end() + : soo_iterator(); + } + + template <class K = key_type> + iterator find_non_soo(const key_arg<K>& key, size_t hash) { + assert(!is_soo()); + auto seq = probe(common(), hash); + const ctrl_t* ctrl = control(); + while (true) { + Group g{ctrl + seq.offset()}; + for (uint32_t i : g.Match(H2(hash))) { + if (ABSL_PREDICT_TRUE(PolicyTraits::apply( + EqualElement<K>{key, eq_ref()}, + PolicyTraits::element(slot_array() + seq.offset(i))))) + return iterator_at(seq.offset(i)); + } + if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end(); + seq.next(); + assert(seq.index() <= capacity() && "full table!"); + } + } + inline void destroy_slots() { + assert(!is_soo()); if (PolicyTraits::template destroy_is_trivial<Alloc>()) return; IterateOverFullSlots( common(), slot_array(), @@ -3012,6 +3331,10 @@ inline void destructor_impl() { if (capacity() == 0) return; + if (is_soo()) { + if (!empty()) destroy(soo_slot()); + return; + } destroy_slots(); dealloc(); } @@ -3021,10 +3344,16 @@ // This merely updates the pertinent control byte. This can be used in // conjunction with Policy::transfer to move the object to another place. void erase_meta_only(const_iterator it) { + assert(!is_soo()); EraseMetaOnly(common(), static_cast<size_t>(it.control() - control()), sizeof(slot_type)); } + size_t hash_of(slot_type* slot) const { + return PolicyTraits::apply(HashElement{hash_ref()}, + PolicyTraits::element(slot)); + } + // Resizes table to the new capacity and move all elements to the new // positions accordingly. // @@ -3035,8 +3364,23 @@ // can be called right after `resize`. ABSL_ATTRIBUTE_NOINLINE void resize(size_t new_capacity) { assert(IsValidCapacity(new_capacity)); - HashSetResizeHelper resize_helper(common()); - auto* old_slots = slot_array(); + assert(!fits_in_soo(new_capacity)); + const bool was_soo = is_soo(); + const bool had_soo_slot = was_soo && !empty(); + const ctrl_t soo_slot_h2 = + had_soo_slot ? static_cast<ctrl_t>(H2(hash_of(soo_slot()))) + : ctrl_t::kEmpty; + HashSetResizeHelper resize_helper(common(), was_soo, had_soo_slot); + // Initialize HashSetResizeHelper::old_heap_or_soo_. 
We can't do this in + // HashSetResizeHelper constructor because it can't transfer slots when + // transfer_uses_memcpy is false. + // TODO(b/289225379): try to handle more of the SOO cases inside + // InitializeSlots. See comment on cl/555990034 snapshot #63. + if (PolicyTraits::transfer_uses_memcpy() || !had_soo_slot) { + resize_helper.old_heap_or_soo() = common().heap_or_soo(); + } else { + transfer(to_slot(resize_helper.old_soo_data()), soo_slot()); + } common().set_capacity(new_capacity); // Note that `InitializeSlots` does different number initialization steps // depending on the values of `transfer_uses_memcpy` and capacities. @@ -3045,43 +3389,60 @@ resize_helper.InitializeSlots<CharAlloc, sizeof(slot_type), PolicyTraits::transfer_uses_memcpy(), alignof(slot_type)>( - common(), const_cast<std::remove_const_t<slot_type>*>(old_slots), - CharAlloc(alloc_ref())); + common(), CharAlloc(alloc_ref()), soo_slot_h2); - if (resize_helper.old_capacity() == 0) { + // In the SooEnabled() case, capacity is never 0 so we don't check. + if (!SooEnabled() && resize_helper.old_capacity() == 0) { // InitializeSlots did all the work including infoz().RecordRehash(). return; } + assert(resize_helper.old_capacity() > 0); + // Nothing more to do in this case. + if (was_soo && !had_soo_slot) return; + slot_type* new_slots = slot_array(); if (grow_single_group) { if (PolicyTraits::transfer_uses_memcpy()) { // InitializeSlots did all the work. return; } - // We want GrowSizeIntoSingleGroup to be called here in order to make - // InitializeSlots not depend on PolicyTraits. - resize_helper.GrowSizeIntoSingleGroup<PolicyTraits>(common(), alloc_ref(), - old_slots); + if (was_soo) { + transfer(new_slots + resize_helper.SooSlotIndex(), + to_slot(resize_helper.old_soo_data())); + return; + } else { + // We want GrowSizeIntoSingleGroup to be called here in order to make + // InitializeSlots not depend on PolicyTraits. + resize_helper.GrowSizeIntoSingleGroup<PolicyTraits>(common(), + alloc_ref()); + } } else { // InitializeSlots prepares control bytes to correspond to empty table. 
- auto* new_slots = slot_array(); - size_t total_probe_length = 0; - for (size_t i = 0; i != resize_helper.old_capacity(); ++i) { - if (IsFull(resize_helper.old_ctrl()[i])) { - size_t hash = PolicyTraits::apply( - HashElement{hash_ref()}, PolicyTraits::element(old_slots + i)); - auto target = find_first_non_full(common(), hash); - size_t new_i = target.offset; - total_probe_length += target.probe_length; - SetCtrl(common(), new_i, H2(hash), sizeof(slot_type)); - transfer(new_slots + new_i, old_slots + i); + const auto insert_slot = [&](slot_type* slot) { + size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, + PolicyTraits::element(slot)); + auto target = find_first_non_full(common(), hash); + SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type)); + transfer(new_slots + target.offset, slot); + return target.probe_length; + }; + if (was_soo) { + insert_slot(to_slot(resize_helper.old_soo_data())); + return; + } else { + auto* old_slots = + reinterpret_cast<slot_type*>(resize_helper.old_slots()); + size_t total_probe_length = 0; + for (size_t i = 0; i != resize_helper.old_capacity(); ++i) { + if (IsFull(resize_helper.old_ctrl()[i])) { + total_probe_length += insert_slot(old_slots + i); + } } + infoz().RecordRehash(total_probe_length); } - infoz().RecordRehash(total_probe_length); } - resize_helper.DeallocateOld<alignof(slot_type)>( - CharAlloc(alloc_ref()), sizeof(slot_type), - const_cast<std::remove_const_t<slot_type>*>(old_slots)); + resize_helper.DeallocateOld<alignof(slot_type)>(CharAlloc(alloc_ref()), + sizeof(slot_type)); } // Prunes control bytes to remove as many tombstones as possible. @@ -3151,8 +3512,46 @@ } } + // Casting directly from e.g. char* to slot_type* can cause compilation errors + // on objective-C. This function converts to void* first, avoiding the issue. + static slot_type* to_slot(void* buf) { + return reinterpret_cast<slot_type*>(buf); + } + + // Requires that lhs does not have a full SOO slot. + static void move_common(bool that_is_full_soo, allocator_type& rhs_alloc, + CommonFields& lhs, CommonFields&& rhs) { + if (PolicyTraits::transfer_uses_memcpy() || !that_is_full_soo) { + lhs = std::move(rhs); + } else { + lhs.move_non_heap_or_soo_fields(rhs); + // TODO(b/303305702): add reentrancy guard. + PolicyTraits::transfer(&rhs_alloc, to_slot(lhs.soo_data()), + to_slot(rhs.soo_data())); + } + } + + // Swaps common fields making sure to avoid memcpy'ing a full SOO slot if we + // aren't allowed to do so. + void swap_common(raw_hash_set& that) { + using std::swap; + if (PolicyTraits::transfer_uses_memcpy()) { + swap(common(), that.common()); + return; + } + CommonFields tmp = CommonFields::CreateDefault<SooEnabled()>(); + const bool that_is_full_soo = that.is_full_soo(); + move_common(that_is_full_soo, that.alloc_ref(), tmp, + std::move(that.common())); + move_common(is_full_soo(), alloc_ref(), that.common(), std::move(common())); + move_common(that_is_full_soo, that.alloc_ref(), common(), std::move(tmp)); + } + void maybe_increment_generation_or_rehash_on_move() { - common().maybe_increment_generation_on_move(); + if (!SwisstableGenerationsEnabled() || capacity() == 0 || is_soo()) { + return; + } + common().increment_generation(); if (!empty() && common().should_rehash_for_bug_detection_on_move()) { resize(capacity()); } @@ -3163,13 +3562,14 @@ // We don't bother checking for this/that aliasing. We just need to avoid // breaking the invariants in that case. 
destructor_impl(); - common() = std::move(that.common()); + move_common(that.is_full_soo(), that.alloc_ref(), common(), + std::move(that.common())); // TODO(b/296061262): move instead of copying hash/eq/alloc. hash_ref() = that.hash_ref(); eq_ref() = that.eq_ref(); CopyAlloc(alloc_ref(), that.alloc_ref(), std::integral_constant<bool, propagate_alloc>()); - that.common() = CommonFields{}; + that.common() = CommonFields::CreateDefault<SooEnabled()>(); maybe_increment_generation_or_rehash_on_move(); return *this; } @@ -3182,8 +3582,8 @@ insert(std::move(PolicyTraits::element(it.slot()))); that.destroy(it.slot()); } - that.dealloc(); - that.common() = CommonFields{}; + if (!that.is_soo()) that.dealloc(); + that.common() = CommonFields::CreateDefault<SooEnabled()>(); maybe_increment_generation_or_rehash_on_move(); return *this; } @@ -3215,6 +3615,20 @@ // `key`'s H2. Returns a bool indicating whether an insertion can take place. template <class K> std::pair<iterator, bool> find_or_prepare_insert(const K& key) { + if (is_soo()) { + if (empty()) { + common().set_full_soo(); + return {soo_iterator(), true}; + } + if (PolicyTraits::apply(EqualElement<K>{key, eq_ref()}, + PolicyTraits::element(soo_slot()))) { + return {soo_iterator(), false}; + } + resize(NextCapacity(SooCapacity())); + const size_t index = + PrepareInsertAfterSoo(hash_ref()(key), sizeof(slot_type), common()); + return {iterator_at(index), true}; + } prefetch_heap_block(); auto hash = hash_ref()(key); auto seq = probe(common(), hash); @@ -3239,6 +3653,7 @@ // // REQUIRES: At least one non-full slot available. size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE { + assert(!is_soo()); const bool rehash_for_bug_detection = common().should_rehash_for_bug_detection_on_insert(); if (rehash_for_bug_detection) { @@ -3262,10 +3677,9 @@ } else { target = find_first_non_full(common(), hash); } - common().increment_size(); + PrepareInsertCommon(common()); set_growth_left(growth_left() - IsEmpty(control()[target.offset])); SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type)); - common().maybe_increment_generation_on_insert(); infoz().RecordInsert(hash, target.probe_length); return target.offset; } @@ -3290,7 +3704,7 @@ return {control() + i, slot_array() + i, common().generation_ptr()}; } const_iterator iterator_at(size_t i) const ABSL_ATTRIBUTE_LIFETIME_BOUND { - return {control() + i, slot_array() + i, common().generation_ptr()}; + return const_cast<raw_hash_set*>(this)->iterator_at(i); } reference unchecked_deref(iterator it) { return it.unchecked_deref(); } @@ -3308,13 +3722,20 @@ // side-effect. // // See `CapacityToGrowth()`. - size_t growth_left() const { return common().growth_left(); } - void set_growth_left(size_t gl) { return common().set_growth_left(gl); } + size_t growth_left() const { + assert(!is_soo()); + return common().growth_left(); + } + void set_growth_left(size_t gl) { + assert(!is_soo()); + return common().set_growth_left(gl); + } // Prefetch the heap-allocated memory region to resolve potential TLB and // cache misses. This is intended to overlap with execution of calculating the // hash for a key. 
void prefetch_heap_block() const { + if (is_soo()) return; #if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__) __builtin_prefetch(control(), 0, 1); #endif @@ -3323,11 +3744,43 @@ CommonFields& common() { return settings_.template get<0>(); } const CommonFields& common() const { return settings_.template get<0>(); } - ctrl_t* control() const { return common().control(); } - slot_type* slot_array() const { - return static_cast<slot_type*>(common().slot_array()); + ctrl_t* control() const { + assert(!is_soo()); + return common().control(); } - HashtablezInfoHandle infoz() { return common().infoz(); } + slot_type* slot_array() const { + assert(!is_soo()); + // Suppress erroneous uninitialized memory errors on GCC. GCC thinks that + // the call to slot_array() in find_or_prepare_insert() is reading + // uninitialized memory, but slot_array is only called there when the table + // is non-empty and this memory is initialized when the table is non-empty. +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#pragma GCC diagnostic ignored "-Wuninitialized" +#endif + return static_cast<slot_type*>(common().slot_array()); +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + } + slot_type* soo_slot() { + assert(is_soo()); + return static_cast<slot_type*>(common().soo_data()); + } + const slot_type* soo_slot() const { + return reinterpret_cast<raw_hash_set*>(this)->soo_slot(); + } + iterator soo_iterator() { + return {SooControl(), soo_slot(), common().generation_ptr()}; + } + const_iterator soo_iterator() const { + return reinterpret_cast<raw_hash_set*>(this)->soo_iterator(); + } + HashtablezInfoHandle infoz() { + assert(!is_soo()); + return common().infoz(); + } hasher& hash_ref() { return settings_.template get<1>(); } const hasher& hash_ref() const { return settings_.template get<1>(); } @@ -3375,7 +3828,8 @@ // fields that occur after CommonFields. absl::container_internal::CompressedTuple<CommonFields, hasher, key_equal, allocator_type> - settings_{CommonFields{}, hasher{}, key_equal{}, allocator_type{}}; + settings_{CommonFields::CreateDefault<SooEnabled()>(), hasher{}, + key_equal{}, allocator_type{}}; }; // Erases all elements that satisfy the predicate `pred` from the container `c`. @@ -3401,6 +3855,7 @@ static size_t GetNumProbes(const Set& set, const typename Set::key_type& key) { + if (set.is_soo()) return 0; size_t num_probes = 0; size_t hash = set.hash_ref()(key); auto seq = probe(set.common(), hash); @@ -3424,7 +3879,8 @@ static size_t AllocatedByteSize(const Set& c) { size_t capacity = c.capacity(); if (capacity == 0) return 0; - size_t m = c.common().alloc_size(sizeof(Slot), alignof(Slot)); + size_t m = + c.is_soo() ? 0 : c.common().alloc_size(sizeof(Slot), alignof(Slot)); size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr)); if (per_slot != ~size_t{}) {
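The raw_hash_set.h changes above add small-object optimization (SOO): while capacity() is at most SooCapacity(), the single element lives inline in the bytes that otherwise hold the heap control/slot pointers, so no backing array is allocated. A minimal sketch of the storage idea, with hypothetical names (SooStorage, soo_data_) rather than Abseil's actual CommonFields/HeapOrSoo layout:

#include <cassert>
#include <cstddef>
#include <new>

template <class T>
class SooStorage {
 public:
  // SooCapacity() is 1 for the tables in this patch (the copy path above
  // asserts size == 1 whenever the contents fit in SOO).
  static constexpr size_t kSooCapacity = 1;

  bool is_soo() const { return capacity_ <= kSooCapacity; }

  // In SOO mode the element lives in the inline buffer; on the heap path the
  // same bytes would instead be interpreted as the control/slot pointers.
  T* soo_slot() {
    assert(is_soo());
    return std::launder(reinterpret_cast<T*>(soo_data_));
  }

 private:
  size_t capacity_ = kSooCapacity;
  alignas(T) unsigned char soo_data_[sizeof(T)];
};

This is also why capacity() above can ABSL_ASSUME(!kSooEnabled || cap >= kSooCapacity): with SOO enabled the capacity never drops to zero, so zero-capacity branches fold away.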
diff --git a/absl/container/internal/raw_hash_set_benchmark.cc b/absl/container/internal/raw_hash_set_benchmark.cc index 8cd43fb..0fa9c71 100644 --- a/absl/container/internal/raw_hash_set_benchmark.cc +++ b/absl/container/internal/raw_hash_set_benchmark.cc
@@ -457,6 +457,19 @@ } BENCHMARK(BM_Group_Match); +void BM_GroupPortable_Match(benchmark::State& state) { + std::array<ctrl_t, GroupPortableImpl::kWidth> group; + Iota(group.begin(), group.end(), -4); + GroupPortableImpl g{group.data()}; + h2_t h = 1; + for (auto _ : state) { + ::benchmark::DoNotOptimize(h); + ::benchmark::DoNotOptimize(g); + ::benchmark::DoNotOptimize(g.Match(h)); + } +} +BENCHMARK(BM_GroupPortable_Match); + void BM_Group_MaskEmpty(benchmark::State& state) { std::array<ctrl_t, Group::kWidth> group; Iota(group.begin(), group.end(), -4);
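BM_GroupPortable_Match exercises the non-SIMD probe path. For context, GroupPortableImpl::Match is based on the classic "find a zero byte in a word" SWAR trick; a self-contained sketch of that trick follows (simplified return type; the real implementation wraps the result in a BitMask and tolerates rare false positives, which the subsequent key comparison filters out):

#include <cstdint>

// Returns a word with the high bit set in each byte of `ctrl` that matches
// `h2`. XOR turns matching bytes into zero; the subtract/AND combination then
// flags the zero bytes.
inline uint64_t MatchH2(uint64_t ctrl, uint8_t h2) {
  constexpr uint64_t kLsbs = 0x0101010101010101ULL;
  constexpr uint64_t kMsbs = 0x8080808080808080ULL;
  const uint64_t x = ctrl ^ (kLsbs * static_cast<uint64_t>(h2));
  return (x - kLsbs) & ~x & kMsbs;
}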
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc index 109340c..9180db5 100644 --- a/absl/container/internal/raw_hash_set_test.cc +++ b/absl/container/internal/raw_hash_set_test.cc
@@ -394,7 +394,7 @@ } } -template <class T, bool kTransferable = false> +template <class T, bool kTransferable = false, bool kSoo = false> struct ValuePolicy { using slot_type = T; using key_type = T; @@ -433,6 +433,8 @@ static constexpr HashSlotFn get_hash_slot_fn() { return nullptr; } + + static constexpr bool soo_enabled() { return kSoo; } }; using IntPolicy = ValuePolicy<int64_t>; @@ -440,6 +442,44 @@ using TranferableIntPolicy = ValuePolicy<int64_t, /*kTransferable=*/true>; +// For testing SOO. +template <int N> +class SizedValue { + public: + SizedValue(int64_t v) { // NOLINT + vals_[0] = v; + } + SizedValue() : SizedValue(0) {} + SizedValue(const SizedValue&) = default; + SizedValue& operator=(const SizedValue&) = default; + + int64_t operator*() const { + // Suppress erroneous uninitialized memory errors on GCC. +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif + return vals_[0]; +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + } + explicit operator int() const { return **this; } + explicit operator int64_t() const { return **this; } + + template <typename H> + friend H AbslHashValue(H h, SizedValue sv) { + return H::combine(std::move(h), *sv); + } + bool operator==(const SizedValue& rhs) const { return **this == *rhs; } + + private: + int64_t vals_[N / sizeof(int64_t)]; +}; +template <int N, bool kSoo> +using SizedValuePolicy = + ValuePolicy<SizedValue<N>, /*kTransferable=*/true, kSoo>; + class StringPolicy { template <class F, class K, class V, class = typename std::enable_if< @@ -517,9 +557,9 @@ using Base::Base; }; -template <typename T, bool kTransferable = false> +template <typename T, bool kTransferable = false, bool kSoo = false> struct ValueTable - : raw_hash_set<ValuePolicy<T, kTransferable>, hash_default_hash<T>, + : raw_hash_set<ValuePolicy<T, kTransferable, kSoo>, hash_default_hash<T>, std::equal_to<T>, std::allocator<T>> { using Base = typename ValueTable::raw_hash_set; using Base::Base; @@ -530,6 +570,11 @@ using TransferableIntTable = ValueTable<int64_t, /*kTransferable=*/true>; +constexpr size_t kNonSooSize = sizeof(HeapOrSoo) + 8; +static_assert(sizeof(SizedValue<kNonSooSize>) >= kNonSooSize, "too small"); +using NonSooIntTable = ValueTable<SizedValue<kNonSooSize>>; +using SooIntTable = ValueTable<int64_t, /*kTransferable=*/true, /*kSoo=*/true>; + template <typename T> struct CustomAlloc : std::allocator<T> { CustomAlloc() = default; @@ -579,6 +624,16 @@ bool* frozen; }; +template <int N> +struct FreezableSizedValueSooTable + : raw_hash_set<SizedValuePolicy<N, /*kSoo=*/true>, + container_internal::hash_default_hash<SizedValue<N>>, + std::equal_to<SizedValue<N>>, + FreezableAlloc<SizedValue<N>>> { + using Base = typename FreezableSizedValueSooTable::raw_hash_set; + using Base::Base; +}; + struct BadFastHash { template <class T> size_t operator()(const T&) const { @@ -649,20 +704,25 @@ std::equal_to<absl::string_view>, std::allocator<int>>)); } -TEST(Table, Empty) { - IntTable t; +template <class TableType> +class SooTest : public testing::Test {}; + +TYPED_TEST_SUITE_P(SooTest); + +TYPED_TEST_P(SooTest, Empty) { + TypeParam t; EXPECT_EQ(0, t.size()); EXPECT_TRUE(t.empty()); } -TEST(Table, LookupEmpty) { - IntTable t; +TYPED_TEST_P(SooTest, LookupEmpty) { + TypeParam t; auto it = t.find(0); EXPECT_TRUE(it == t.end()); } -TEST(Table, Insert1) { - IntTable t; +TYPED_TEST_P(SooTest, Insert1) { + TypeParam t; EXPECT_TRUE(t.find(0) == t.end()); auto 
res = t.emplace(0); EXPECT_TRUE(res.second); @@ -671,8 +731,8 @@ EXPECT_THAT(*t.find(0), 0); } -TEST(Table, Insert2) { - IntTable t; +TYPED_TEST_P(SooTest, Insert2) { + TypeParam t; EXPECT_TRUE(t.find(0) == t.end()); auto res = t.emplace(0); EXPECT_TRUE(res.second); @@ -734,9 +794,9 @@ EXPECT_TRUE(t.empty()); } -TEST(Table, EraseInSmallTables) { +TYPED_TEST_P(SooTest, EraseInSmallTables) { for (int64_t size = 0; size < 64; ++size) { - IntTable t; + TypeParam t; for (int64_t i = 0; i < size; ++i) { t.insert(i); } @@ -751,8 +811,8 @@ } } -TEST(Table, InsertWithinCapacity) { - IntTable t; +TYPED_TEST_P(SooTest, InsertWithinCapacity) { + TypeParam t; t.reserve(10); const size_t original_capacity = t.capacity(); const auto addr = [&](int i) { @@ -841,7 +901,8 @@ REGISTER_TYPED_TEST_SUITE_P(SmallTableResizeTest, InsertIntoSmallTable, ResizeGrowSmallTables, ResizeReduceSmallTables); -using SmallTableTypes = ::testing::Types<IntTable, TransferableIntTable>; +using SmallTableTypes = + ::testing::Types<IntTable, TransferableIntTable, SooIntTable>; INSTANTIATE_TYPED_TEST_SUITE_P(InstanceSmallTableResizeTest, SmallTableResizeTest, SmallTableTypes); @@ -863,14 +924,14 @@ EXPECT_THAT(*it, Pair("abc", "ABC")); } -TEST(Table, ContainsEmpty) { - IntTable t; +TYPED_TEST_P(SooTest, ContainsEmpty) { + TypeParam t; EXPECT_FALSE(t.contains(0)); } -TEST(Table, Contains1) { - IntTable t; +TYPED_TEST_P(SooTest, Contains1) { + TypeParam t; EXPECT_TRUE(t.insert(0).second); EXPECT_TRUE(t.contains(0)); @@ -880,8 +941,8 @@ EXPECT_FALSE(t.contains(0)); } -TEST(Table, Contains2) { - IntTable t; +TYPED_TEST_P(SooTest, Contains2) { + TypeParam t; EXPECT_TRUE(t.insert(0).second); EXPECT_TRUE(t.contains(0)); @@ -1178,8 +1239,8 @@ } } -TEST(Table, InsertEraseStressTest) { - IntTable t; +TYPED_TEST_P(SooTest, InsertEraseStressTest) { + TypeParam t; const size_t kMinElementCount = 250; std::deque<int> keys; size_t i = 0; @@ -1207,32 +1268,33 @@ Pair("DEF", "!!!"))); } -TEST(Table, LargeTable) { - IntTable t; +TYPED_TEST_P(SooTest, LargeTable) { + TypeParam t; for (int64_t i = 0; i != 100000; ++i) t.emplace(i << 40); - for (int64_t i = 0; i != 100000; ++i) ASSERT_EQ(i << 40, *t.find(i << 40)); + for (int64_t i = 0; i != 100000; ++i) + ASSERT_EQ(i << 40, static_cast<int64_t>(*t.find(i << 40))); } // Timeout if copy is quadratic as it was in Rust. -TEST(Table, EnsureNonQuadraticAsInRust) { +TYPED_TEST_P(SooTest, EnsureNonQuadraticAsInRust) { static const size_t kLargeSize = 1 << 15; - IntTable t; + TypeParam t; for (size_t i = 0; i != kLargeSize; ++i) { t.insert(i); } // If this is quadratic, the test will timeout. - IntTable t2; + TypeParam t2; for (const auto& entry : t) t2.insert(entry); } -TEST(Table, ClearBug) { +TYPED_TEST_P(SooTest, ClearBug) { if (SwisstableGenerationsEnabled()) { GTEST_SKIP() << "Generations being enabled causes extra rehashes."; } - IntTable t; + TypeParam t; constexpr size_t capacity = container_internal::Group::kWidth - 1; constexpr size_t max_size = capacity / 2 + 1; for (size_t i = 0; i < max_size; ++i) { @@ -1251,11 +1313,11 @@ // that they are probably still in the same group. This is not strictly // guaranteed. 
EXPECT_LT(static_cast<size_t>(std::abs(original - second)), - capacity * sizeof(IntTable::value_type)); + capacity * sizeof(typename TypeParam::value_type)); } -TEST(Table, Erase) { - IntTable t; +TYPED_TEST_P(SooTest, Erase) { + TypeParam t; EXPECT_TRUE(t.find(0) == t.end()); auto res = t.emplace(0); EXPECT_TRUE(res.second); @@ -1265,8 +1327,8 @@ EXPECT_TRUE(t.find(0) == t.end()); } -TEST(Table, EraseMaintainsValidIterator) { - IntTable t; +TYPED_TEST_P(SooTest, EraseMaintainsValidIterator) { + TypeParam t; const int kNumElements = 100; for (int i = 0; i < kNumElements; i++) { EXPECT_TRUE(t.emplace(i).second); @@ -1284,8 +1346,8 @@ EXPECT_EQ(num_erase_calls, kNumElements); } -TEST(Table, EraseBeginEnd) { - IntTable t; +TYPED_TEST_P(SooTest, EraseBeginEnd) { + TypeParam t; for (int i = 0; i < 10; ++i) t.insert(i); EXPECT_EQ(t.size(), 10); t.erase(t.begin(), t.end()); @@ -1684,8 +1746,8 @@ EXPECT_THAT(t, UnorderedElementsAre(1, 10, 3, 11, 12)); } -TEST(Table, Clear) { - IntTable t; +TYPED_TEST_P(SooTest, Clear) { + TypeParam t; EXPECT_TRUE(t.find(0) == t.end()); t.clear(); EXPECT_TRUE(t.find(0) == t.end()); @@ -1697,13 +1759,13 @@ EXPECT_TRUE(t.find(0) == t.end()); } -TEST(Table, Swap) { - IntTable t; +TYPED_TEST_P(SooTest, Swap) { + TypeParam t; EXPECT_TRUE(t.find(0) == t.end()); auto res = t.emplace(0); EXPECT_TRUE(res.second); EXPECT_EQ(1, t.size()); - IntTable u; + TypeParam u; t.swap(u); EXPECT_EQ(0, t.size()); EXPECT_EQ(1, u.size()); @@ -1711,8 +1773,8 @@ EXPECT_THAT(*u.find(0), 0); } -TEST(Table, Rehash) { - IntTable t; +TYPED_TEST_P(SooTest, Rehash) { + TypeParam t; EXPECT_TRUE(t.find(0) == t.end()); t.emplace(0); t.emplace(1); @@ -1723,8 +1785,8 @@ EXPECT_THAT(*t.find(1), 1); } -TEST(Table, RehashDoesNotRehashWhenNotNecessary) { - IntTable t; +TYPED_TEST_P(SooTest, RehashDoesNotRehashWhenNotNecessary) { + TypeParam t; t.emplace(0); t.emplace(1); auto* p = &*t.find(0); @@ -1732,14 +1794,15 @@ EXPECT_EQ(p, &*t.find(0)); } +// Following two tests use non-SOO table because they test for 0 capacity. 
TEST(Table, RehashZeroDoesNotAllocateOnEmptyTable) { - IntTable t; + NonSooIntTable t; t.rehash(0); EXPECT_EQ(0, t.bucket_count()); } TEST(Table, RehashZeroDeallocatesEmptyTable) { - IntTable t; + NonSooIntTable t; t.emplace(0); t.clear(); EXPECT_NE(0, t.bucket_count()); @@ -1747,8 +1810,8 @@ EXPECT_EQ(0, t.bucket_count()); } -TEST(Table, RehashZeroForcesRehash) { - IntTable t; +TYPED_TEST_P(SooTest, RehashZeroForcesRehash) { + TypeParam t; t.emplace(0); t.emplace(1); auto* p = &*t.find(0); @@ -1764,33 +1827,33 @@ StringTable t = {P(), Q(), {}, {{}, {}}}; } -TEST(Table, CopyConstruct) { - IntTable t; +TYPED_TEST_P(SooTest, CopyConstruct) { + TypeParam t; t.emplace(0); EXPECT_EQ(1, t.size()); { - IntTable u(t); + TypeParam u(t); EXPECT_EQ(1, u.size()); EXPECT_THAT(*u.find(0), 0); } { - IntTable u{t}; + TypeParam u{t}; EXPECT_EQ(1, u.size()); EXPECT_THAT(*u.find(0), 0); } { - IntTable u = t; + TypeParam u = t; EXPECT_EQ(1, u.size()); EXPECT_THAT(*u.find(0), 0); } } -TEST(Table, CopyDifferentSizes) { - IntTable t; +TYPED_TEST_P(SooTest, CopyDifferentSizes) { + TypeParam t; for (int i = 0; i < 100; ++i) { t.emplace(i); - IntTable c = t; + TypeParam c = t; for (int j = 0; j <= i; ++j) { ASSERT_TRUE(c.find(j) != c.end()) << "i=" << i << " j=" << j; } @@ -1799,16 +1862,16 @@ } } -TEST(Table, CopyDifferentCapacities) { +TYPED_TEST_P(SooTest, CopyDifferentCapacities) { for (int cap = 1; cap < 100; cap = cap * 2 + 1) { - IntTable t; + TypeParam t; t.reserve(static_cast<size_t>(cap)); for (int i = 0; i <= cap; ++i) { t.emplace(i); if (i != cap && i % 5 != 0) { continue; } - IntTable c = t; + TypeParam c = t; for (int j = 0; j <= i; ++j) { ASSERT_TRUE(c.find(j) != c.end()) << "cap=" << cap << " i=" << i << " j=" << j; @@ -1948,8 +2011,8 @@ EXPECT_NE(u, t); } -TEST(Table, NumDeletedRegression) { - IntTable t; +TYPED_TEST_P(SooTest, NumDeletedRegression) { + TypeParam t; t.emplace(0); t.erase(t.find(0)); // construct over a deleted slot. @@ -1957,8 +2020,8 @@ t.clear(); } -TEST(Table, FindFullDeletedRegression) { - IntTable t; +TYPED_TEST_P(SooTest, FindFullDeletedRegression) { + TypeParam t; for (int i = 0; i < 1000; ++i) { t.emplace(i); t.erase(t.find(i)); @@ -1966,17 +2029,17 @@ EXPECT_EQ(0, t.size()); } -TEST(Table, ReplacingDeletedSlotDoesNotRehash) { +TYPED_TEST_P(SooTest, ReplacingDeletedSlotDoesNotRehash) { size_t n; { // Compute n such that n is the maximum number of elements before rehash. 
- IntTable t; + TypeParam t; t.emplace(0); size_t c = t.bucket_count(); for (n = 1; c == t.bucket_count(); ++n) t.emplace(n); --n; } - IntTable t; + TypeParam t; t.rehash(n); const size_t c = t.bucket_count(); for (size_t i = 0; i != n; ++i) t.emplace(i); @@ -2227,8 +2290,8 @@ EXPECT_FALSE(node); // NOLINT(bugprone-use-after-move) } -TEST(Nodes, HintInsert) { - IntTable t = {1, 2, 3}; +TYPED_TEST_P(SooTest, HintInsert) { + TypeParam t = {1, 2, 3}; auto node = t.extract(1); EXPECT_THAT(t, UnorderedElementsAre(2, 3)); auto it = t.insert(t.begin(), std::move(node)); @@ -2247,14 +2310,18 @@ EXPECT_TRUE(node); // NOLINT(bugprone-use-after-move) } -IntTable MakeSimpleTable(size_t size) { - IntTable t; +template <typename T> +T MakeSimpleTable(size_t size) { + T t; while (t.size() < size) t.insert(t.size()); return t; } -std::vector<int> OrderOfIteration(const IntTable& t) { - return {t.begin(), t.end()}; +template <typename T> +std::vector<int> OrderOfIteration(const T& t) { + std::vector<int> res; + for (auto i : t) res.push_back(static_cast<int>(i)); + return res; } // These IterationOrderChanges tests depend on non-deterministic behavior. @@ -2263,15 +2330,15 @@ // we are touching different memory pages to cause the ordering to change. // We also need to keep the old tables around to avoid getting the same memory // blocks over and over. -TEST(Table, IterationOrderChangesByInstance) { +TYPED_TEST_P(SooTest, IterationOrderChangesByInstance) { for (size_t size : {2, 6, 12, 20}) { - const auto reference_table = MakeSimpleTable(size); + const auto reference_table = MakeSimpleTable<TypeParam>(size); const auto reference = OrderOfIteration(reference_table); - std::vector<IntTable> tables; + std::vector<TypeParam> tables; bool found_difference = false; for (int i = 0; !found_difference && i < 5000; ++i) { - tables.push_back(MakeSimpleTable(size)); + tables.push_back(MakeSimpleTable<TypeParam>(size)); found_difference = OrderOfIteration(tables.back()) != reference; } if (!found_difference) { @@ -2282,7 +2349,7 @@ } } -TEST(Table, IterationOrderChangesOnRehash) { +TYPED_TEST_P(SooTest, IterationOrderChangesOnRehash) { // We test different sizes with many small numbers, because small table // resize has a different codepath. // Note: iteration order for size() <= 1 is always the same. @@ -2291,10 +2358,10 @@ size_t{0}, // Force rehash is guaranteed. size * 10 // Rehash to the larger capacity is guaranteed. }) { - std::vector<IntTable> garbage; + std::vector<TypeParam> garbage; bool ok = false; for (int i = 0; i < 5000; ++i) { - auto t = MakeSimpleTable(size); + auto t = MakeSimpleTable<TypeParam>(size); const auto reference = OrderOfIteration(t); // Force rehash. t.rehash(rehash_size); @@ -2315,8 +2382,8 @@ // Verify that pointers are invalidated as soon as a second element is inserted. // This prevents dependency on pointer stability on small tables. -TEST(Table, UnstablePointers) { - IntTable table; +TYPED_TEST_P(SooTest, UnstablePointers) { + TypeParam table; const auto addr = [&](int i) { return reinterpret_cast<uintptr_t>(&*table.find(i)); @@ -2335,11 +2402,11 @@ if (!IsAssertEnabled() && !SwisstableGenerationsEnabled()) GTEST_SKIP() << "Assertions not enabled."; - IntTable t; + NonSooIntTable t; // Extra simple "regexp" as regexp support is highly varied across platforms. EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()), "erase.* called on end.. 
iterator."); - typename IntTable::iterator iter; + typename NonSooIntTable::iterator iter; EXPECT_DEATH_IF_SUPPORTED( ++iter, "operator.* called on default-constructed iterator."); t.insert(0); @@ -2353,6 +2420,22 @@ EXPECT_DEATH_IF_SUPPORTED(++iter, kErasedDeathMessage); } +TEST(TableDeathTest, InvalidIteratorAssertsSoo) { + if (!IsAssertEnabled() && !SwisstableGenerationsEnabled()) + GTEST_SKIP() << "Assertions not enabled."; + + SooIntTable t; + // Extra simple "regexp" as regexp support is highly varied across platforms. + EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()), + "erase.* called on end.. iterator."); + typename SooIntTable::iterator iter; + EXPECT_DEATH_IF_SUPPORTED( + ++iter, "operator.* called on default-constructed iterator."); + + // We can't detect the erased iterator case as invalid in SOO mode because + // the control is static constant. +} + // Invalid iterator use can trigger use-after-free in asan/hwasan, // use-of-uninitialized-value in msan, or invalidated iterator assertions. constexpr const char* kInvalidIteratorDeathMessage = @@ -2366,11 +2449,11 @@ constexpr bool kMsvc = false; #endif -TEST(TableDeathTest, IteratorInvalidAssertsEqualityOperator) { +TYPED_TEST_P(SooTest, IteratorInvalidAssertsEqualityOperator) { if (!IsAssertEnabled() && !SwisstableGenerationsEnabled()) GTEST_SKIP() << "Assertions not enabled."; - IntTable t; + TypeParam t; t.insert(1); t.insert(2); t.insert(3); @@ -2389,35 +2472,50 @@ t.erase(iter2); EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kErasedDeathMessage); - IntTable t1, t2; + TypeParam t1, t2; t1.insert(0); t2.insert(0); iter1 = t1.begin(); iter2 = t2.begin(); const char* const kContainerDiffDeathMessage = SwisstableGenerationsEnabled() - ? "Invalid iterator comparison.*iterators from different hashtables" + ? "Invalid iterator comparison.*iterators from different.* hashtables" : "Invalid iterator comparison.*may be from different " ".*containers.*config=asan"; EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kContainerDiffDeathMessage); EXPECT_DEATH_IF_SUPPORTED(void(iter2 == iter1), kContainerDiffDeathMessage); +} - for (int i = 0; i < 10; ++i) t1.insert(i); - // There should have been a rehash in t1. - if (kMsvc) return; // MSVC doesn't support | in regex. +TYPED_TEST_P(SooTest, IteratorInvalidAssertsEqualityOperatorRehash) { + if (!IsAssertEnabled() && !SwisstableGenerationsEnabled()) + GTEST_SKIP() << "Assertions not enabled."; + if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regex."; +#ifdef ABSL_HAVE_THREAD_SANITIZER + GTEST_SKIP() << "ThreadSanitizer test runs fail on use-after-free even in " + "EXPECT_DEATH."; +#endif - // NOTE(b/293887834): After rehashing, iterators will contain pointers to - // freed memory, which may be detected by ThreadSanitizer. + TypeParam t; + t.insert(0); + auto iter = t.begin(); + + // Trigger a rehash in t. + for (int i = 0; i < 10; ++i) t.insert(i); + const char* const kRehashedDeathMessage = SwisstableGenerationsEnabled() ? 
kInvalidIteratorDeathMessage - : "Invalid iterator comparison.*might have rehashed.*config=asan" - "|ThreadSanitizer: heap-use-after-free"; - EXPECT_DEATH_IF_SUPPORTED(void(iter1 == t1.begin()), kRehashedDeathMessage); + : "Invalid iterator comparison.*might have rehashed.*config=asan"; + EXPECT_DEATH_IF_SUPPORTED(void(iter == t.begin()), kRehashedDeathMessage); } #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) -TEST(RawHashSamplerTest, Sample) { +template <typename T> +class RawHashSamplerTest : public testing::Test {}; +TYPED_TEST_SUITE_P(RawHashSamplerTest); + +TYPED_TEST_P(RawHashSamplerTest, Sample) { + constexpr bool soo_enabled = std::is_same<SooIntTable, TypeParam>::value; // Enable the feature even if the prod default is off. SetHashtablezEnabled(true); SetHashtablezSampleParameter(100); @@ -2430,7 +2528,7 @@ ++start_size; }); - std::vector<IntTable> tables; + std::vector<TypeParam> tables; for (int i = 0; i < 1000000; ++i) { tables.emplace_back(); @@ -2459,15 +2557,20 @@ std::memory_order_relaxed)]++; reservations[info.max_reserve.load(std::memory_order_relaxed)]++; } - EXPECT_EQ(info.inline_element_size, sizeof(int64_t)); + EXPECT_EQ(info.inline_element_size, sizeof(typename TypeParam::value_type)); ++end_size; }); EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()), 0.01, 0.005); - EXPECT_EQ(observed_checksums.size(), 5); - for (const auto& [_, count] : observed_checksums) { - EXPECT_NEAR((100 * count) / static_cast<double>(tables.size()), 0.2, 0.05); + if (soo_enabled) { + EXPECT_EQ(observed_checksums.size(), 9); + } else { + EXPECT_EQ(observed_checksums.size(), 5); + for (const auto& [_, count] : observed_checksums) { + EXPECT_NEAR((100 * count) / static_cast<double>(tables.size()), 0.2, + 0.05); + } } EXPECT_EQ(reservations.size(), 10); @@ -2479,6 +2582,10 @@ << reservation; } } + +REGISTER_TYPED_TEST_SUITE_P(RawHashSamplerTest, Sample); +using RawHashSamplerTestTypes = ::testing::Types<SooIntTable, NonSooIntTable>; +INSTANTIATE_TYPED_TEST_SUITE_P(My, RawHashSamplerTest, RawHashSamplerTestTypes); #endif // ABSL_INTERNAL_HASHTABLEZ_SAMPLE TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) { @@ -2531,9 +2638,10 @@ INSTANTIATE_TYPED_TEST_SUITE_P(InstanceSanitizerTest, SanitizerTest, SanitizerTableTypes); +// TODO(b/289225379): poison inline space when empty SOO. TEST(Sanitizer, PoisoningOnErase) { - IntTable t; - int64_t& v = *t.insert(0).first; + NonSooIntTable t; + auto& v = *t.insert(0).first; EXPECT_FALSE(__asan_address_is_poisoned(&v)); t.erase(0); @@ -2581,7 +2689,7 @@ if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled."; if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp."; - IntTable t; + NonSooIntTable t; // Start with 1 element so that `it` is never an end iterator. t.insert(-1); for (int i = 0; i < 10; ++i) { @@ -2628,11 +2736,11 @@ if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled."; if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp."; - IntTable t1, t2; + NonSooIntTable t1, t2; t1.insert(1); auto it = t1.begin(); // ptr will become invalidated on rehash. 
- const int64_t* ptr = &*it; + const auto* ptr = &*it; (void)ptr; t2 = std::move(t1); @@ -2640,12 +2748,12 @@ EXPECT_DEATH_IF_SUPPORTED(void(it == t2.begin()), kInvalidIteratorDeathMessage); #ifdef ABSL_HAVE_ADDRESS_SANITIZER - EXPECT_DEATH_IF_SUPPORTED(std::cout << *ptr, "heap-use-after-free"); + EXPECT_DEATH_IF_SUPPORTED(std::cout << **ptr, "heap-use-after-free"); #endif } -TEST(Table, ReservedGrowthUpdatesWhenTableDoesntGrow) { - IntTable t; +TYPED_TEST_P(SooTest, ReservedGrowthUpdatesWhenTableDoesntGrow) { + TypeParam t; for (int i = 0; i < 8; ++i) t.insert(i); // Want to insert twice without invalidating iterators so reserve. const size_t cap = t.capacity(); @@ -2687,7 +2795,7 @@ if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled."; if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp."; - IntTable t; + NonSooIntTable t; for (int i = 0; i < 1000; ++i) t.insert(i); t.reserve(t.size() + 100); @@ -2705,24 +2813,24 @@ GTEST_SKIP() << "MSan fails to detect some of these rehashes."; #endif - IntTable t; + NonSooIntTable t; t.insert(0); // Rehashing is guaranteed on every insertion while capacity is less than // RehashProbabilityConstant(). - int64_t i = 0; + int i = 0; while (t.capacity() <= RehashProbabilityConstant()) { // ptr will become invalidated on rehash. - const int64_t* ptr = &*t.begin(); + const auto* ptr = &*t.begin(); t.insert(++i); - EXPECT_DEATH_IF_SUPPORTED(std::cout << *ptr, "use-after-free") << i; + EXPECT_DEATH_IF_SUPPORTED(std::cout << **ptr, "use-after-free") << i; } } TEST(Iterator, InvalidComparisonDifferentTables) { if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled."; - IntTable t1, t2; - IntTable::iterator default_constructed_iter; + NonSooIntTable t1, t2; + NonSooIntTable::iterator default_constructed_iter; // We randomly use one of N empty generations for generations from empty // hashtables. In general, we won't always detect when iterators from // different empty hashtables are compared, but in this test case, we @@ -2813,10 +2921,11 @@ } } +// IterateOverFullSlots doesn't support SOO. 
TEST(Table, IterateOverFullSlotsEmpty) { - IntTable t; - auto fail_if_any = [](const ctrl_t*, int64_t* i) { - FAIL() << "expected no slots " << i; + NonSooIntTable t; + auto fail_if_any = [](const ctrl_t*, auto* i) { + FAIL() << "expected no slots " << **i; }; container_internal::IterateOverFullSlots( RawHashSetTestOnlyAccess::GetCommon(t), @@ -2830,7 +2939,7 @@ } TEST(Table, IterateOverFullSlotsFull) { - IntTable t; + NonSooIntTable t; std::vector<int64_t> expected_slots; for (int64_t i = 0; i < 128; ++i) { @@ -2841,17 +2950,106 @@ container_internal::IterateOverFullSlots( RawHashSetTestOnlyAccess::GetCommon(t), RawHashSetTestOnlyAccess::GetSlots(t), - [&t, &slots](const ctrl_t* ctrl, int64_t* i) { + [&t, &slots](const ctrl_t* ctrl, auto* i) { ptrdiff_t ctrl_offset = ctrl - RawHashSetTestOnlyAccess::GetCommon(t).control(); ptrdiff_t slot_offset = i - RawHashSetTestOnlyAccess::GetSlots(t); ASSERT_EQ(ctrl_offset, slot_offset); - slots.push_back(*i); + slots.push_back(**i); }); EXPECT_THAT(slots, testing::UnorderedElementsAreArray(expected_slots)); } } +template <typename T> +class SooTable : public testing::Test {}; +TYPED_TEST_SUITE_P(SooTable); + +TYPED_TEST_P(SooTable, Basic) { + bool frozen = true; + TypeParam t{FreezableAlloc<typename TypeParam::value_type>(&frozen)}; + if (t.capacity() != SooCapacity()) { + EXPECT_LT(sizeof(void*), 8); + GTEST_SKIP() << "not SOO on this platform"; + } + + t.insert(0); + EXPECT_EQ(t.capacity(), 1); + auto it = t.find(0); + EXPECT_EQ(it, t.begin()); + ASSERT_NE(it, t.end()); + EXPECT_EQ(*it, 0); + EXPECT_EQ(++it, t.end()); + EXPECT_EQ(t.find(1), t.end()); + EXPECT_EQ(t.size(), 1); + + t.erase(0); + EXPECT_EQ(t.size(), 0); + t.insert(1); + it = t.find(1); + EXPECT_EQ(it, t.begin()); + ASSERT_NE(it, t.end()); + EXPECT_EQ(*it, 1); + + t.clear(); + EXPECT_EQ(t.size(), 0); +} + +REGISTER_TYPED_TEST_SUITE_P(SooTable, Basic); +using FreezableSooTableTypes = + ::testing::Types<FreezableSizedValueSooTable<8>, + FreezableSizedValueSooTable<16>>; +INSTANTIATE_TYPED_TEST_SUITE_P(My, SooTable, FreezableSooTableTypes); + +TEST(Table, RehashToSoo) { + SooIntTable t; + if (t.capacity() != SooCapacity()) { + EXPECT_LT(sizeof(void*), 8); + GTEST_SKIP() << "not SOO on this platform"; + } + + t.reserve(10); + t.insert(0); + EXPECT_EQ(*t.begin(), 0); + + t.rehash(0); // Rehash back down to SOO table. 
+ + EXPECT_EQ(t.capacity(), SooCapacity()); + EXPECT_EQ(t.size(), 1); + EXPECT_EQ(*t.begin(), 0); + EXPECT_EQ(t.find(0), t.begin()); + EXPECT_EQ(t.find(1), t.end()); +} + +TEST(Table, ResizeToNonSoo) { + for (int reserve_capacity : {8, 100000}) { + SooIntTable t; + t.insert(0); + + t.reserve(reserve_capacity); + + EXPECT_EQ(t.find(0), t.begin()); + EXPECT_EQ(t.size(), 1); + EXPECT_EQ(*t.begin(), 0); + EXPECT_EQ(t.find(1), t.end()); + } +} + +REGISTER_TYPED_TEST_SUITE_P( + SooTest, Empty, Clear, ClearBug, Contains1, Contains2, ContainsEmpty, + CopyConstruct, CopyDifferentCapacities, CopyDifferentSizes, + EnsureNonQuadraticAsInRust, Erase, EraseBeginEnd, EraseInSmallTables, + EraseMaintainsValidIterator, FindFullDeletedRegression, HintInsert, Insert1, + Insert2, InsertEraseStressTest, InsertWithinCapacity, + IterationOrderChangesByInstance, IterationOrderChangesOnRehash, + IteratorInvalidAssertsEqualityOperator, + IteratorInvalidAssertsEqualityOperatorRehash, LargeTable, LookupEmpty, + NumDeletedRegression, Rehash, RehashDoesNotRehashWhenNotNecessary, + RehashZeroForcesRehash, ReplacingDeletedSlotDoesNotRehash, + ReservedGrowthUpdatesWhenTableDoesntGrow, Swap, UnstablePointers); +using SooTableTypes = ::testing::Types<SooIntTable, NonSooIntTable>; +INSTANTIATE_TYPED_TEST_SUITE_P(My, SooTest, SooTableTypes); + } // namespace } // namespace container_internal ABSL_NAMESPACE_END
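The externally observable contract these typed tests pin down is compact. A hedged sketch of the core growth behavior, reusing the test-local SooIntTable and SooCapacity() helpers and assuming a platform where int64_t is stored inline (the tests above skip otherwise):

TEST(SooSketch, GrowsOutOfSooOnSecondInsert) {
  SooIntTable t;
  t.insert(1);
  // Still inline: capacity equals the SOO capacity; no backing array yet.
  EXPECT_EQ(t.capacity(), SooCapacity());
  t.insert(2);
  // A second element no longer fits inline, forcing a heap-backed resize.
  EXPECT_GT(t.capacity(), SooCapacity());
  EXPECT_THAT(t, ::testing::UnorderedElementsAre(1, 2));
}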
diff --git a/absl/container/sample_element_size_test.cc b/absl/container/sample_element_size_test.cc index b23626b..22470b4 100644 --- a/absl/container/sample_element_size_test.cc +++ b/absl/container/sample_element_size_test.cc
@@ -12,6 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. +#include <cstddef> +#include <unordered_set> +#include <utility> +#include <vector> + #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/container/flat_hash_map.h" @@ -38,15 +43,16 @@ // set cannot be flat_hash_set, however, since that would introduce a mutex // deadlock. std::unordered_set<const HashtablezInfo*>& preexisting_info, // NOLINT - std::vector<Table>& tables, const typename Table::value_type& elt, + std::vector<Table>& tables, + const std::vector<typename Table::value_type>& values, size_t expected_element_size) { for (int i = 0; i < 10; ++i) { // We create a new table and must store it somewhere so that when we store // a pointer to the resulting `HashtablezInfo` into `preexisting_info` // that we aren't storing a dangling pointer. tables.emplace_back(); - // We must insert an element to get a hashtablez to instantiate. - tables.back().insert(elt); + // We must insert elements to get a hashtablez to instantiate. + tables.back().insert(values.begin(), values.end()); } size_t new_count = 0; sampler.Iterate([&](const HashtablezInfo& info) { @@ -82,6 +88,9 @@ std::vector<flat_hash_set<bigstruct>> flat_set_tables; std::vector<node_hash_map<int, bigstruct>> node_map_tables; std::vector<node_hash_set<bigstruct>> node_set_tables; + std::vector<bigstruct> set_values = {bigstruct{{0}}, bigstruct{{1}}}; + std::vector<std::pair<const int, bigstruct>> map_values = {{0, bigstruct{}}, + {1, bigstruct{}}}; // It takes thousands of new tables after changing the sampling parameters // before you actually get some instrumentation. And if you must actually @@ -97,14 +106,14 @@ std::unordered_set<const HashtablezInfo*> preexisting_info; // NOLINT sampler.Iterate( [&](const HashtablezInfo& info) { preexisting_info.insert(&info); }); - TestInlineElementSize(sampler, preexisting_info, flat_map_tables, - {0, bigstruct{}}, sizeof(int) + sizeof(bigstruct)); - TestInlineElementSize(sampler, preexisting_info, node_map_tables, - {0, bigstruct{}}, sizeof(void*)); - TestInlineElementSize(sampler, preexisting_info, flat_set_tables, // - bigstruct{}, sizeof(bigstruct)); - TestInlineElementSize(sampler, preexisting_info, node_set_tables, // - bigstruct{}, sizeof(void*)); + TestInlineElementSize(sampler, preexisting_info, flat_map_tables, map_values, + sizeof(int) + sizeof(bigstruct)); + TestInlineElementSize(sampler, preexisting_info, node_map_tables, map_values, + sizeof(void*)); + TestInlineElementSize(sampler, preexisting_info, flat_set_tables, set_values, + sizeof(bigstruct)); + TestInlineElementSize(sampler, preexisting_info, node_set_tables, set_values, + sizeof(void*)); #endif }
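The switch from a single element to a vector of values lines up with the SOO work: a single-element table may now stay inline and never allocate a backing array, and (as the updated comment notes) it takes inserted elements, plural, for a hashtablez record to instantiate. Inserting a small batch keeps the sampling expectations valid; for instance, the flat map case from the diff now reads:

std::vector<std::pair<const int, bigstruct>> map_values = {{0, bigstruct{}},
                                                           {1, bigstruct{}}};
TestInlineElementSize(sampler, preexisting_info, flat_map_tables, map_values,
                      sizeof(int) + sizeof(bigstruct));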
diff --git a/absl/functional/bind_front.h b/absl/functional/bind_front.h index a956eb0..885f24b 100644 --- a/absl/functional/bind_front.h +++ b/absl/functional/bind_front.h
@@ -34,6 +34,8 @@ #include <functional> // For std::bind_front. #endif // defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L +#include <utility> + #include "absl/functional/internal/front_binder.h" #include "absl/utility/utility.h" @@ -182,8 +184,7 @@ constexpr functional_internal::bind_front_t<F, BoundArgs...> bind_front( F&& func, BoundArgs&&... args) { return functional_internal::bind_front_t<F, BoundArgs...>( - absl::in_place, absl::forward<F>(func), - absl::forward<BoundArgs>(args)...); + absl::in_place, std::forward<F>(func), std::forward<BoundArgs>(args)...); } #endif // defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L
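The edit above is mechanical (absl::forward and absl::move are aliases for their std counterparts), but it sits in absl::bind_front's argument forwarding, so a brief usage example may help; Greet and StrCat are used here only for illustration:

#include <string>

#include "absl/functional/bind_front.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"

std::string Greet(absl::string_view greeting, absl::string_view name) {
  return absl::StrCat(greeting, " ", name);
}

void Demo() {
  // "Hello" is bound now and forwarded into the returned functor; "world" is
  // forwarded at each call site. Both paths go through std::forward above.
  auto hello = absl::bind_front(Greet, "Hello");
  std::string s = hello("world");  // "Hello world"
}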
diff --git a/absl/functional/internal/front_binder.h b/absl/functional/internal/front_binder.h index 45f52de..44a5492 100644 --- a/absl/functional/internal/front_binder.h +++ b/absl/functional/internal/front_binder.h
@@ -34,8 +34,8 @@ template <class R, class Tuple, size_t... Idx, class... Args> R Apply(Tuple&& bound, absl::index_sequence<Idx...>, Args&&... free) { return base_internal::invoke( - absl::forward<Tuple>(bound).template get<Idx>()..., - absl::forward<Args>(free)...); + std::forward<Tuple>(bound).template get<Idx>()..., + std::forward<Args>(free)...); } template <class F, class... BoundArgs> @@ -48,13 +48,13 @@ public: template <class... Ts> constexpr explicit FrontBinder(absl::in_place_t, Ts&&... ts) - : bound_args_(absl::forward<Ts>(ts)...) {} + : bound_args_(std::forward<Ts>(ts)...) {} template <class... FreeArgs, class R = base_internal::invoke_result_t< F&, BoundArgs&..., FreeArgs&&...>> R operator()(FreeArgs&&... free_args) & { return functional_internal::Apply<R>(bound_args_, Idx(), - absl::forward<FreeArgs>(free_args)...); + std::forward<FreeArgs>(free_args)...); } template <class... FreeArgs, @@ -62,7 +62,7 @@ const F&, const BoundArgs&..., FreeArgs&&...>> R operator()(FreeArgs&&... free_args) const& { return functional_internal::Apply<R>(bound_args_, Idx(), - absl::forward<FreeArgs>(free_args)...); + std::forward<FreeArgs>(free_args)...); } template <class... FreeArgs, class R = base_internal::invoke_result_t< @@ -70,8 +70,8 @@ R operator()(FreeArgs&&... free_args) && { // This overload is called when *this is an rvalue. If some of the bound // arguments are stored by value or rvalue reference, we move them. - return functional_internal::Apply<R>(absl::move(bound_args_), Idx(), - absl::forward<FreeArgs>(free_args)...); + return functional_internal::Apply<R>(std::move(bound_args_), Idx(), + std::forward<FreeArgs>(free_args)...); } template <class... FreeArgs, @@ -80,8 +80,8 @@ R operator()(FreeArgs&&... free_args) const&& { // This overload is called when *this is an rvalue. If some of the bound // arguments are stored by value or rvalue reference, we move them. - return functional_internal::Apply<R>(absl::move(bound_args_), Idx(), - absl::forward<FreeArgs>(free_args)...); + return functional_internal::Apply<R>(std::move(bound_args_), Idx(), + std::forward<FreeArgs>(free_args)...); } };
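FrontBinder defines four call operators that differ only in ref-qualification so that invoking an rvalue binder can move the bound arguments out; the std::move edits above sit in exactly those overloads. A minimal, hypothetical illustration of the pattern:

#include <string>
#include <utility>

struct BoundGreeter {
  std::string greeting;

  // Lvalue call: the bound state must stay usable afterwards, so copy it.
  std::string operator()(const std::string& name) const& {
    return greeting + " " + name;
  }
  // Rvalue call: *this is expiring, so the bound state may be moved from.
  std::string operator()(const std::string& name) && {
    return std::move(greeting) + " " + name;
  }
};

// BoundGreeter g{"Hello"}; g("a") copies the bound string,
// std::move(g)("b") moves it.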
diff --git a/absl/functional/overload.h b/absl/functional/overload.h index 4651f14..732795f 100644 --- a/absl/functional/overload.h +++ b/absl/functional/overload.h
@@ -35,7 +35,14 @@ // [](double) -> absl::string_view { return "double"; }), // v) == "int"); // -// Note: This requires C++17. +// One of the lambdas may handle several types via a generic lambda. +// +// absl::variant<std::string, int32_t, int64_t> v(int32_t{1}); +// assert(std::visit(absl::Overload( +// [](const std::string& s) { return s.size(); }, +// [](const auto& s) { return sizeof(s); }), v) == 4); +// +// Note: absl::Overload requires C++17. #ifndef ABSL_FUNCTIONAL_OVERLOAD_H_ #define ABSL_FUNCTIONAL_OVERLOAD_H_
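The behavior documented above (a generic lambda acting as a fallback for several types) falls out of the C++17 overload-set pattern that absl::Overload builds on. A sketch of that pattern, not necessarily Abseil's exact definition:

template <class... Ts>
struct OverloadSketch : Ts... {
  using Ts::operator()...;  // merge every functor's operator() into one set
};
template <class... Ts>
OverloadSketch(Ts...) -> OverloadSketch<Ts...>;  // C++17 deduction guide

// OverloadSketch{[](int) { return 1; }, [](auto) { return 0; }}(3) == 1:
// ordinary overload resolution prefers the exact match over the generic
// lambda, which is what makes the fallback behavior work.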
diff --git a/absl/functional/overload_test.cc b/absl/functional/overload_test.cc index 739c4c4..040bf83 100644 --- a/absl/functional/overload_test.cc +++ b/absl/functional/overload_test.cc
@@ -30,7 +30,7 @@ namespace { -TEST(OverloadTest, DispatchConsidersType) { +TEST(OverloadTest, DispatchConsidersTypeWithAutoFallback) { auto overloaded = absl::Overload( [](int v) -> std::string { return absl::StrCat("int ", v); }, // [](double v) -> std::string { return absl::StrCat("double ", v); }, // @@ -103,6 +103,35 @@ static_assert(!std::is_invocable_v<decltype(overloaded), int>); } +TEST(OverloadTest, AmbiguousConversionWithAutoNotInvocable) { + auto overloaded = absl::Overload( // + [](auto a) { return a; }, // + [](auto c) { return c; } // + ); + static_assert(!std::is_invocable_v<decltype(overloaded), int>); +} + +#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L + +TEST(OverloadTest, AmbiguousConversionWithAutoAndTemplateNotInvocable) { + auto overloaded = absl::Overload( // + [](auto a) { return a; }, // + []<class T>(T c) { return c; } // + ); + static_assert(!std::is_invocable_v<decltype(overloaded), int>); +} + +TEST(OverloadTest, DispatchConsidersTypeWithTemplateFallback) { + auto overloaded = absl::Overload( // + [](int a) { return a; }, // + []<class T>(T c) { return c * 2; } // + ); + EXPECT_EQ(7, overloaded(7)); + EXPECT_EQ(14.0, overloaded(7.0)); +} + +#endif // ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L + TEST(OverloadTest, DispatchConsidersSfinae) { auto overloaded = absl::Overload( // [](auto a) -> decltype(a + 1) { return a + 1; } // @@ -125,6 +154,19 @@ EXPECT_EQ("string", absl::visit(overloaded, v)); } +TEST(OverloadTest, VariantVisitWithAutoFallbackDispatchesCorrectly) { + absl::variant<std::string, int32_t, int64_t> v(int32_t{1}); + auto overloaded = absl::Overload( + [](const std::string& s) { return s.size(); }, // + [](const auto& s) { return sizeof(s); } // + ); + EXPECT_EQ(4, absl::visit(overloaded, v)); + v = int64_t{1}; + EXPECT_EQ(8, absl::visit(overloaded, v)); + v = std::string("hello"); + EXPECT_EQ(5, absl::visit(overloaded, v)); +} + } // namespace #endif
diff --git a/absl/log/internal/log_impl.h b/absl/log/internal/log_impl.h
index b44ed06..99de6db 100644
--- a/absl/log/internal/log_impl.h
+++ b/absl/log/internal/log_impl.h
@@ -35,11 +35,11 @@
 #ifndef NDEBUG
 #define ABSL_LOG_INTERNAL_DLOG_IMPL(severity)            \
   ABSL_LOG_INTERNAL_CONDITION##severity(STATELESS, true) \
-      ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
 #else
 #define ABSL_LOG_INTERNAL_DLOG_IMPL(severity)             \
   ABSL_LOG_INTERNAL_CONDITION##severity(STATELESS, false) \
-      ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
 #endif
 
 // The `switch` ensures that this expansion is the beginning of a statement (as
@@ -58,7 +58,7 @@
   switch (const int absl_logging_internal_verbose_level = (verbose_level)) \
   case 0:                                                                  \
   default:                                                                 \
-    ABSL_LOG_INTERNAL_DLOG_IF_IMPL(                                        \
+    ABSL_LOG_INTERNAL_LOG_IF_IMPL(                                         \
         _INFO, ABSL_VLOG_IS_ON(absl_logging_internal_verbose_level))       \
         .WithVerbosity(absl_logging_internal_verbose_level)
 #else
@@ -66,7 +66,7 @@
   switch (const int absl_logging_internal_verbose_level = (verbose_level)) \
   case 0:                                                                  \
   default:                                                                 \
-    ABSL_LOG_INTERNAL_DLOG_IF_IMPL(                                        \
+    ABSL_LOG_INTERNAL_LOG_IF_IMPL(                                         \
         _INFO, false && ABSL_VLOG_IS_ON(absl_logging_internal_verbose_level)) \
         .WithVerbosity(absl_logging_internal_verbose_level)
 #endif
@@ -82,11 +82,11 @@
 #ifndef NDEBUG
 #define ABSL_LOG_INTERNAL_DLOG_IF_IMPL(severity, condition)   \
   ABSL_LOG_INTERNAL_CONDITION##severity(STATELESS, condition) \
-      ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
 #else
 #define ABSL_LOG_INTERNAL_DLOG_IF_IMPL(severity, condition)              \
   ABSL_LOG_INTERNAL_CONDITION##severity(STATELESS, false && (condition)) \
-      ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
 #endif
 
 // ABSL_LOG_EVERY_N
@@ -132,36 +132,36 @@
 #ifndef NDEBUG
 #define ABSL_LOG_INTERNAL_DLOG_EVERY_N_IMPL(severity, n) \
   ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, true)       \
-  (EveryN, n) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
+  (EveryN, n) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
 
 #define ABSL_LOG_INTERNAL_DLOG_FIRST_N_IMPL(severity, n) \
   ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, true)       \
-  (FirstN, n) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
+  (FirstN, n) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
 
 #define ABSL_LOG_INTERNAL_DLOG_EVERY_POW_2_IMPL(severity) \
   ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, true)        \
-  (EveryPow2) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
+  (EveryPow2) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
 
 #define ABSL_LOG_INTERNAL_DLOG_EVERY_N_SEC_IMPL(severity, n_seconds) \
   ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, true)                   \
-  (EveryNSec, n_seconds) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
+  (EveryNSec, n_seconds) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
 
 #else  // def NDEBUG
 #define ABSL_LOG_INTERNAL_DLOG_EVERY_N_IMPL(severity, n) \
   ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, false)      \
-  (EveryN, n) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
+  (EveryN, n) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
 
 #define ABSL_LOG_INTERNAL_DLOG_FIRST_N_IMPL(severity, n) \
   ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, false)      \
-  (FirstN, n) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
+  (FirstN, n) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
 
 #define ABSL_LOG_INTERNAL_DLOG_EVERY_POW_2_IMPL(severity) \
   ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, false)       \
-  (EveryPow2) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
+  (EveryPow2) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
 
 #define ABSL_LOG_INTERNAL_DLOG_EVERY_N_SEC_IMPL(severity, n_seconds) \
   ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, false)                  \
-  (EveryNSec, n_seconds) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
+  (EveryNSec, n_seconds) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
 #endif  // def NDEBUG
 
 #define ABSL_LOG_INTERNAL_VLOG_EVERY_N_IMPL(verbose_level, n) \
@@ -243,40 +243,40 @@
 #ifndef NDEBUG
 #define ABSL_LOG_INTERNAL_DLOG_IF_EVERY_N_IMPL(severity, condition, n)  \
   ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(EveryN, n) \
-      ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
 
 #define ABSL_LOG_INTERNAL_DLOG_IF_FIRST_N_IMPL(severity, condition, n)  \
   ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(FirstN, n) \
-      ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
 
 #define ABSL_LOG_INTERNAL_DLOG_IF_EVERY_POW_2_IMPL(severity, condition) \
   ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(EveryPow2) \
-      ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
 
 #define ABSL_LOG_INTERNAL_DLOG_IF_EVERY_N_SEC_IMPL(severity, condition, \
                                                    n_seconds)           \
   ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(EveryNSec, \
                                                              n_seconds) \
-      ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
 
 #else  // def NDEBUG
 #define ABSL_LOG_INTERNAL_DLOG_IF_EVERY_N_IMPL(severity, condition, n)   \
   ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, false && (condition))( \
-      EveryN, n) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
+      EveryN, n) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
 
 #define ABSL_LOG_INTERNAL_DLOG_IF_FIRST_N_IMPL(severity, condition, n)   \
   ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, false && (condition))( \
-      FirstN, n) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
+      FirstN, n) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
 
 #define ABSL_LOG_INTERNAL_DLOG_IF_EVERY_POW_2_IMPL(severity, condition)  \
   ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, false && (condition))( \
-      EveryPow2) ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
+      EveryPow2) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
 
 #define ABSL_LOG_INTERNAL_DLOG_IF_EVERY_N_SEC_IMPL(severity, condition,  \
                                                    n_seconds)            \
   ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, false && (condition))( \
       EveryNSec, n_seconds)                                              \
-      ABSL_LOGGING_INTERNAL_DLOG##severity.InternalStream()
+      ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
 #endif  // def NDEBUG
 
 #endif  // ABSL_LOG_INTERNAL_LOG_IMPL_H_
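A note on the idiom these expansions rely on: `DLOG` arguments must still type-check in release builds, so the `NDEBUG` branches keep the full stream expression but gate it behind a constant-false condition the optimizer deletes. A minimal standalone sketch of the same pattern, with illustrative names that are not Abseil's:

#include <iostream>

// Sketch of the `false && (condition)` dead-stripping idiom used above.
// The stream arguments still compile when NDEBUG is defined, but the branch
// is trivially dead and the optimizer removes the whole statement.
#ifndef NDEBUG
#define MY_DLOG_IF(condition) \
  if ((condition)) std::cerr
#else
#define MY_DLOG_IF(condition) \
  if (false && (condition)) std::cerr
#endif

int main() {
  int i = 3;
  MY_DLOG_IF(i > 2) << "i is " << i << '\n';  // no-op in NDEBUG builds
  return 0;
}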
diff --git a/absl/log/internal/log_message.cc b/absl/log/internal/log_message.cc
index e8a8111..10ac245 100644
--- a/absl/log/internal/log_message.cc
+++ b/absl/log/internal/log_message.cc
@@ -240,8 +240,6 @@
     : LogMessage(file, line, absl::LogSeverity::kWarning) {}
 LogMessage::LogMessage(const char* file, int line, ErrorTag)
     : LogMessage(file, line, absl::LogSeverity::kError) {}
-LogMessage::LogMessage(const char* file, int line, FatalTag)
-    : LogMessage(file, line, absl::LogSeverity::kFatal) {}
 
 LogMessage::~LogMessage() {
 #ifdef ABSL_MIN_LOG_LEVEL
@@ -589,8 +587,8 @@
   *this << "Check failed: " << failure_msg << " ";
 }
 
-// We intentionally don't return from these destructors. Disable MSVC's warning
-// about the destructor never returning as we do so intentionally here.
+// ABSL_ATTRIBUTE_NORETURN doesn't seem to work on destructors with msvc, so
+// disable msvc's warning about the d'tor never returning.
 #if defined(_MSC_VER) && !defined(__clang__)
 #pragma warning(push)
 #pragma warning(disable : 4722)
@@ -599,26 +597,28 @@
   Flush();
   FailWithoutStackTrace();
 }
+#if defined(_MSC_VER) && !defined(__clang__)
+#pragma warning(pop)
+#endif
 
-LogMessageQuietly::LogMessageQuietly(const char* file, int line)
+LogMessageQuietlyFatal::LogMessageQuietlyFatal(const char* file, int line)
     : LogMessage(file, line, absl::LogSeverity::kFatal) {
   SetFailQuietly();
 }
 
-LogMessageQuietly::~LogMessageQuietly() {
-  Flush();
-  FailQuietly();
-}
-
-LogMessageQuietlyFatal::LogMessageQuietlyFatal(const char* file, int line)
-    : LogMessageQuietly(file, line) {}
-
 LogMessageQuietlyFatal::LogMessageQuietlyFatal(const char* file, int line,
                                                absl::string_view failure_msg)
-    : LogMessageQuietly(file, line) {
-  *this << "Check failed: " << failure_msg << " ";
+    : LogMessage(file, line, absl::LogSeverity::kFatal) {
+  SetFailQuietly();
+  *this << "Check failed: " << failure_msg << " ";
 }
 
+// ABSL_ATTRIBUTE_NORETURN doesn't seem to work on destructors with msvc, so
+// disable msvc's warning about the d'tor never returning.
+#if defined(_MSC_VER) && !defined(__clang__)
+#pragma warning(push)
+#pragma warning(disable : 4722)
+#endif
 LogMessageQuietlyFatal::~LogMessageQuietlyFatal() {
   Flush();
   FailQuietly();
diff --git a/absl/log/internal/log_message.h b/absl/log/internal/log_message.h
index 737efa1..4ecb8a1 100644
--- a/absl/log/internal/log_message.h
+++ b/absl/log/internal/log_message.h
@@ -54,7 +54,6 @@
   struct InfoTag {};
   struct WarningTag {};
   struct ErrorTag {};
-  struct FatalTag {};
 
   // Used for `LOG`.
   LogMessage(const char* file, int line,
@@ -67,8 +66,6 @@
              WarningTag) ABSL_ATTRIBUTE_COLD ABSL_ATTRIBUTE_NOINLINE;
   LogMessage(const char* file, int line,
              ErrorTag) ABSL_ATTRIBUTE_COLD ABSL_ATTRIBUTE_NOINLINE;
-  LogMessage(const char* file, int line,
-             FatalTag) ABSL_ATTRIBUTE_COLD ABSL_ATTRIBUTE_NOINLINE;
   LogMessage(const LogMessage&) = delete;
   LogMessage& operator=(const LogMessage&) = delete;
   ~LogMessage() ABSL_ATTRIBUTE_COLD;
@@ -360,17 +357,7 @@
   ABSL_ATTRIBUTE_NORETURN ~LogMessageFatal();
 };
 
-class LogMessageQuietly : public LogMessage {
- public:
-  // All instances of LogMessageQuietly are FATAL. DLOG(QFATAL) calls this
-  // directly instead of LogMessageQuietlyFatal to make sure the destructor is
-  // not [[noreturn]] even if this is always FATAL.
-  LogMessageQuietly(const char* file, int line) ABSL_ATTRIBUTE_COLD;
-  ~LogMessageQuietly();
-};
-
-// Used for LOG(QFATAL) to make sure it's properly understood as [[noreturn]].
-class LogMessageQuietlyFatal final : public LogMessageQuietly {
+class LogMessageQuietlyFatal final : public LogMessage {
  public:
   LogMessageQuietlyFatal(const char* file, int line) ABSL_ATTRIBUTE_COLD;
   LogMessageQuietlyFatal(const char* file, int line,
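The deleted `LogMessageQuietly` existed only so fatal `DLOG`s would avoid a `[[noreturn]]` destructor; after this change the quiet-fatal path derives from `LogMessage` directly. For context, a hedged sketch (not Abseil code) of what a `[[noreturn]]` destructor buys callers:

// Illustrative only. If the temporary's destructor is known not to return,
// the compiler can prove the fatal path never falls through, so a function
// like this needs no unreachable return statement after it.
struct FatalStream {
  [[noreturn]] ~FatalStream();  // defined elsewhere; terminates the process
};

int ValueOrDie(bool ok, int v) {
  if (ok) return v;
  FatalStream{};  // fatal path; control does not continue past this line
}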
diff --git a/absl/log/internal/strip.h b/absl/log/internal/strip.h
index f944f44..f8d2786 100644
--- a/absl/log/internal/strip.h
+++ b/absl/log/internal/strip.h
@@ -38,11 +38,6 @@
   ::absl::log_internal::NullStreamMaybeFatal(::absl::kLogDebugFatal)
 #define ABSL_LOGGING_INTERNAL_LOG_LEVEL(severity) \
   ::absl::log_internal::NullStreamMaybeFatal(absl_log_internal_severity)
-// Fatal `DLOG`s expand a little differently to avoid being `[[noreturn]]`.
-#define ABSL_LOGGING_INTERNAL_DLOG_QFATAL \
-  ::absl::log_internal::NullStreamMaybeFatal(::absl::LogSeverity::kFatal)
-#define ABSL_LOGGING_INTERNAL_DLOG_FATAL \
-  ::absl::log_internal::NullStreamMaybeFatal(::absl::LogSeverity::kFatal)
 #define ABSL_LOG_INTERNAL_CHECK(failure_message) ABSL_LOGGING_INTERNAL_LOG_FATAL
 #define ABSL_LOG_INTERNAL_QCHECK(failure_message) \
   ABSL_LOGGING_INTERNAL_LOG_QFATAL
@@ -65,14 +60,6 @@
 #define ABSL_LOGGING_INTERNAL_LOG_LEVEL(severity) \
   ::absl::log_internal::LogMessage(__FILE__, __LINE__, \
                                    absl_log_internal_severity)
-
-// Fatal `DLOG`s expand a little differently to avoid being `[[noreturn]]`.
-#define ABSL_LOGGING_INTERNAL_DLOG_QFATAL \
-  ::absl::log_internal::LogMessageQuietly(__FILE__, __LINE__)
-#define ABSL_LOGGING_INTERNAL_DLOG_FATAL \
-  ::absl::log_internal::LogMessage(__FILE__, __LINE__, \
-                                   ::absl::log_internal::LogMessage::FatalTag{})
-
 // These special cases dispatch to special-case constructors that allow us to
 // avoid an extra function call and shrink non-LTO binaries by a percent or so.
 #define ABSL_LOG_INTERNAL_CHECK(failure_message) \
@@ -82,11 +69,4 @@
                                            failure_message)
 #endif  // !defined(STRIP_LOG) || !STRIP_LOG
 
-// This part of a non-fatal `DLOG`s expands the same as `LOG`.
-#define ABSL_LOGGING_INTERNAL_DLOG_INFO ABSL_LOGGING_INTERNAL_LOG_INFO
-#define ABSL_LOGGING_INTERNAL_DLOG_WARNING ABSL_LOGGING_INTERNAL_LOG_WARNING
-#define ABSL_LOGGING_INTERNAL_DLOG_ERROR ABSL_LOGGING_INTERNAL_LOG_ERROR
-#define ABSL_LOGGING_INTERNAL_DLOG_DFATAL ABSL_LOGGING_INTERNAL_LOG_DFATAL
-#define ABSL_LOGGING_INTERNAL_DLOG_LEVEL ABSL_LOGGING_INTERNAL_LOG_LEVEL
-
 #endif  // ABSL_LOG_INTERNAL_STRIP_H_
diff --git a/absl/random/bit_gen_ref.h b/absl/random/bit_gen_ref.h
index e475221..2160a8d 100644
--- a/absl/random/bit_gen_ref.h
+++ b/absl/random/bit_gen_ref.h
@@ -28,6 +28,7 @@
 #include <type_traits>
 #include <utility>
 
+#include "absl/base/attributes.h"
 #include "absl/base/internal/fast_type_id.h"
 #include "absl/base/macros.h"
 #include "absl/meta/type_traits.h"
@@ -114,7 +115,7 @@
                 (!std::is_same<URBG, BitGenRef>::value &&
                  random_internal::is_urbg<URBG>::value &&
                  !HasInvokeMock<URBG>::value)>* = nullptr>
-  BitGenRef(URBG& gen)  // NOLINT
+  BitGenRef(URBG& gen ABSL_ATTRIBUTE_LIFETIME_BOUND)  // NOLINT
       : t_erased_gen_ptr_(reinterpret_cast<uintptr_t>(&gen)),
         mock_call_(NotAMock),
         generate_impl_fn_(ImplFn<URBG>) {}
@@ -123,7 +124,7 @@
             typename absl::enable_if_t<(!std::is_same<URBG, BitGenRef>::value &&
                                         random_internal::is_urbg<URBG>::value &&
                                         HasInvokeMock<URBG>::value)>* = nullptr>
-  BitGenRef(URBG& gen)  // NOLINT
+  BitGenRef(URBG& gen ABSL_ATTRIBUTE_LIFETIME_BOUND)  // NOLINT
       : t_erased_gen_ptr_(reinterpret_cast<uintptr_t>(&gen)),
         mock_call_(&MockCall<URBG>),
         generate_impl_fn_(ImplFn<URBG>) {}
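`ABSL_ATTRIBUTE_LIFETIME_BOUND` expands to `[[clang::lifetimebound]]` where supported, so Clang can now flag a `BitGenRef` that outlives the URBG it wraps. A hedged example of the bug class the annotation is meant to catch (`MakeDanglingRef` is hypothetical):

#include "absl/random/bit_gen_ref.h"
#include "absl/random/random.h"

// Deliberately buggy, for illustration: the returned BitGenRef refers to a
// generator that dies when the function returns. With the lifetimebound
// annotation on the constructor parameter, Clang can diagnose this call site.
absl::BitGenRef MakeDanglingRef() {
  absl::BitGen gen;
  return absl::BitGenRef(gen);  // expected warning: dangling reference
}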
diff --git a/absl/status/status_test.cc b/absl/status/status_test.cc
index 585e780..c3327ad 100644
--- a/absl/status/status_test.cc
+++ b/absl/status/status_test.cc
@@ -497,8 +497,8 @@
 
   {
     absl::Status status(absl::StatusCode::kInvalidArgument, "message");
    absl::Status copy(status);
-    status = static_cast<absl::Status&&>(status);
-    EXPECT_EQ(status, copy);
+    assignee = static_cast<absl::Status&&>(status);
+    EXPECT_EQ(assignee, copy);
   }
 }
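Moving into a separate `assignee` (declared earlier in the test, outside this hunk) rather than self-move-assigning sidesteps a diagnostic: recent Clang flags `x = std::move(x)` with `-Wself-move`, and the library only guarantees a valid but unspecified state after a self-move, so the old equality check was not portable. A minimal reproduction of the warning:

#include <string>
#include <utility>

int main() {
  std::string s = "x";
  s = std::move(s);  // Clang: -Wself-move; s is left valid but unspecified
  return 0;
}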
diff --git a/absl/strings/BUILD.bazel b/absl/strings/BUILD.bazel
index 942c532..8b30783 100644
--- a/absl/strings/BUILD.bazel
+++ b/absl/strings/BUILD.bazel
@@ -899,6 +899,7 @@
 cc_test(
     name = "cord_test",
     size = "medium",
+    timeout = "long",
     srcs = ["cord_test.cc"],
     copts = ABSL_TEST_COPTS,
     visibility = ["//visibility:private"],
@@ -1377,6 +1378,7 @@
 cc_test(
     name = "str_format_convert_test",
     size = "medium",
+    timeout = "long",
     srcs = ["internal/str_format/convert_test.cc"],
     copts = ABSL_TEST_COPTS,
     visibility = ["//visibility:private"],
diff --git a/absl/strings/internal/str_split_internal.h b/absl/strings/internal/str_split_internal.h
index 081ad85..11ea96f 100644
--- a/absl/strings/internal/str_split_internal.h
+++ b/absl/strings/internal/str_split_internal.h
@@ -30,6 +30,7 @@
 #define ABSL_STRINGS_INTERNAL_STR_SPLIT_INTERNAL_H_
 
 #include <array>
+#include <cstddef>
 #include <initializer_list>
 #include <iterator>
 #include <tuple>
@@ -402,7 +403,10 @@
         ar[index].size = it->size();
         ++it;
       } while (++index != ar.size() && !it.at_end());
-      v.insert(v.end(), ar.begin(), ar.begin() + index);
+      // We static_cast index to a signed type to work around overzealous
+      // compiler warnings about signedness.
+      v.insert(v.end(), ar.begin(),
+               ar.begin() + static_cast<ptrdiff_t>(index));
     }
     return v;
   }
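For context on the new cast, a hedged illustration with a hypothetical helper: the index is unsigned, while random-access iterator arithmetic is defined on the signed `difference_type`, so `ar.begin() + index` implicitly converts and trips sign-conversion warnings on some compilers.

#include <cstddef>
#include <vector>

// Hypothetical helper showing the same pattern. The commented-out form can
// draw -Wsign-conversion style warnings; the cast is equivalent and quiet.
void AppendPrefix(std::vector<int>& dst, const std::vector<int>& src,
                  size_t count) {
  // dst.insert(dst.end(), src.begin(), src.begin() + count);
  dst.insert(dst.end(), src.begin(),
             src.begin() + static_cast<ptrdiff_t>(count));
}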
diff --git a/absl/strings/str_cat.h b/absl/strings/str_cat.h
index bc8ea7d..33355bf 100644
--- a/absl/strings/str_cat.h
+++ b/absl/strings/str_cat.h
@@ -606,18 +606,17 @@
   ptrdiff_t n;  // The length of the current argument
   typename String::pointer pos = &(*str)[old_size];
   using SomeTrivialEmptyType = std::false_type;
-  // Ugly code due to the lack of C++14 fold expression makes us.
-  const SomeTrivialEmptyType dummy1;
-  for (const SomeTrivialEmptyType& dummy2 :
-       {(/* Comma expressions are poor man's C++17 fold expression for C++14 */
-         (void)(n = lengths[i]),
-         (void)(n < 0 ? (void)(*pos++ = '-'), (n = ~n) : 0),
-         (void)absl::numbers_internal::FastIntToBufferBackward(
-             absl::numbers_internal::UnsignedAbsoluteValue(std::move(args)),
-             pos += n, static_cast<uint32_t>(n)),
-         (void)++i, dummy1)...}) {
-    (void)dummy2;  // Remove & migrate to fold expressions in C++17
-  }
+  const SomeTrivialEmptyType dummy;
+  // Ugly code due to the lack of C++17 fold expressions
+  const SomeTrivialEmptyType dummies[] = {
+      (/* Comma expressions are poor man's C++17 fold expression for C++14 */
+       (void)(n = lengths[i]),
+       (void)(n < 0 ? (void)(*pos++ = '-'), (n = ~n) : 0),
+       (void)absl::numbers_internal::FastIntToBufferBackward(
+           absl::numbers_internal::UnsignedAbsoluteValue(std::move(args)),
+           pos += n, static_cast<uint32_t>(n)),
+       (void)++i, dummy)...};
+  (void)dummies;  // Remove & migrate to fold expressions in C++17
 }
 
 // Helper function for the future StrCat default floating-point format, %.6g
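The rewrite keeps the same C++14 workaround in a simpler shape: expanding a parameter pack inside a braced array initializer evaluates a comma expression once per argument, left to right. A self-contained toy version of the idiom (not Abseil's code):

#include <cstdio>

// C++14 stand-in for the C++17 fold expression ((void)f(args), ...).
// Brace-init elements are evaluated in order; the leading 0 keeps the array
// non-empty if the pack is empty.
template <typename... Args>
void PrintAll(Args... args) {
  const int dummies[] = {0, ((void)std::printf("%d\n", args), 0)...};
  (void)dummies;  // suppress unused-variable warnings
}

int main() {
  PrintAll(1, 2, 3);  // prints 1, 2, and 3, one per line
  return 0;
}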
diff --git a/absl/synchronization/internal/graphcycles.cc b/absl/synchronization/internal/graphcycles.cc
index 39b1848..61b4ec0 100644
--- a/absl/synchronization/internal/graphcycles.cc
+++ b/absl/synchronization/internal/graphcycles.cc
@@ -333,7 +333,7 @@
 
  private:
   // Number of buckets in hash table for pointer lookups.
-  static constexpr uint32_t kHashTableSize = 8171;  // should be prime
+  static constexpr uint32_t kHashTableSize = 262139;  // should be prime
 
   const Vec<Node*>* nodes_;
   std::array<int32_t, kHashTableSize> table_;
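A hedged aside on the `should be prime` comment: the table hashes raw pointer bits down to a bucket, and heap pointers tend to share low-bit alignment patterns, so a prime bucket count spreads them more evenly than a power of two would; the change itself only grows the table from 8171 to 262139 buckets, both prime. The general shape, not this file's actual hash function:

#include <cstdint>

constexpr uint32_t kHashTableSize = 262139;  // prime

// Toy bucket function: with allocator-aligned pointers, a prime modulus
// avoids collapsing many pointers onto a handful of buckets.
uint32_t Bucket(const void* p) {
  return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(p)) %
         kHashTableSize;
}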
diff --git a/absl/synchronization/internal/per_thread_sem_test.cc b/absl/synchronization/internal/per_thread_sem_test.cc
index 24a6b54..c66bca8 100644
--- a/absl/synchronization/internal/per_thread_sem_test.cc
+++ b/absl/synchronization/internal/per_thread_sem_test.cc
@@ -159,7 +159,11 @@
   const absl::Duration elapsed = absl::Now() - start;
   // Allow for a slight early return, to account for quality of implementation
   // issues on various platforms.
-  const absl::Duration slop = absl::Milliseconds(1);
+  absl::Duration slop = absl::Milliseconds(1);
+#ifdef _MSC_VER
+  // Use higher slop on MSVC due to flaky test failures.
+  slop = absl::Milliseconds(4);
+#endif
   EXPECT_LE(delay - slop, elapsed)
       << "Wait returned " << delay - elapsed << " early (with " << slop
       << " slop), start time was " << start;
diff --git a/absl/types/internal/optional.h b/absl/types/internal/optional.h
index a96d260..5731a5b 100644
--- a/absl/types/internal/optional.h
+++ b/absl/types/internal/optional.h
@@ -81,7 +81,7 @@
 
   template <typename... Args>
   constexpr explicit optional_data_dtor_base(in_place_t, Args&&... args)
-      : engaged_(true), data_(absl::forward<Args>(args)...) {}
+      : engaged_(true), data_(std::forward<Args>(args)...) {}
 
   ~optional_data_dtor_base() { destruct(); }
 };
@@ -110,7 +110,7 @@
 
   template <typename... Args>
   constexpr explicit optional_data_dtor_base(in_place_t, Args&&... args)
-      : engaged_(true), data_(absl::forward<Args>(args)...) {}
+      : engaged_(true), data_(std::forward<Args>(args)...) {}
 };
 
 template <typename T>
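This hunk and most of the remaining ones are a single mechanical migration: `absl::forward` and `absl::move` were C++11-era shims, kept because `std::forward` and `std::move` were not `constexpr` until C++14 (as the notes removed from variant.h below say), and they behave identically to the `std` versions on the toolchains Abseil now requires. The before/after shape, as a standalone sketch with a hypothetical factory:

#include <utility>

// Hypothetical factory illustrating the one-for-one swap.
template <typename T, typename... Args>
T Make(Args&&... args) {
  // Before: return T(absl::forward<Args>(args)...);
  return T(std::forward<Args>(args)...);
}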
diff --git a/absl/types/internal/variant.h b/absl/types/internal/variant.h
index 263d7b0..40f57c4 100644
--- a/absl/types/internal/variant.h
+++ b/absl/types/internal/variant.h
@@ -26,6 +26,7 @@
 #include <stdexcept>
 #include <tuple>
 #include <type_traits>
+#include <utility>
 
 #include "absl/base/config.h"
 #include "absl/base/internal/identity.h"
@@ -214,7 +215,7 @@
       std::is_same<ReturnType, decltype(std::declval<FunctionObject>()(
                                    SizeT<Indices>()...))>::value,
       "Not all visitation overloads have the same return type.");
-  return absl::forward<FunctionObject>(function)(SizeT<Indices>()...);
+  return std::forward<FunctionObject>(function)(SizeT<Indices>()...);
 }
 
 template <class ReturnType, class FunctionObject, std::size_t... BoundIndices>
@@ -284,7 +285,7 @@
     assert(false);  // NOLINT
 
     // Hack to silence potential no return warning -- cause an infinite loop.
-    return Run(absl::forward<Op>(op));
+    return Run(std::forward<Op>(op));
 #endif  // Checks for __builtin_unreachable
   }
 };
@@ -292,7 +293,7 @@
 template <class Op, std::size_t I>
 struct ReachableSwitchCase {
   static VisitIndicesResultT<Op, std::size_t> Run(Op&& op) {
-    return absl::base_internal::invoke(absl::forward<Op>(op), SizeT<I>());
+    return absl::base_internal::invoke(std::forward<Op>(op), SizeT<I>());
   }
 };
 
@@ -357,74 +358,74 @@
   static VisitIndicesResultT<Op, std::size_t> Run(Op&& op, std::size_t i) {
     switch (i) {
       case 0:
-        return PickCase<Op, 0, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 0, EndIndex>::Run(std::forward<Op>(op));
       case 1:
-        return PickCase<Op, 1, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 1, EndIndex>::Run(std::forward<Op>(op));
       case 2:
-        return PickCase<Op, 2, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 2, EndIndex>::Run(std::forward<Op>(op));
       case 3:
-        return PickCase<Op, 3, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 3, EndIndex>::Run(std::forward<Op>(op));
       case 4:
-        return PickCase<Op, 4, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 4, EndIndex>::Run(std::forward<Op>(op));
       case 5:
-        return PickCase<Op, 5, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 5, EndIndex>::Run(std::forward<Op>(op));
      case 6:
-        return PickCase<Op, 6, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 6, EndIndex>::Run(std::forward<Op>(op));
       case 7:
-        return PickCase<Op, 7, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 7, EndIndex>::Run(std::forward<Op>(op));
       case 8:
-        return PickCase<Op, 8, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 8, EndIndex>::Run(std::forward<Op>(op));
       case 9:
-        return PickCase<Op, 9, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 9, EndIndex>::Run(std::forward<Op>(op));
       case 10:
-        return PickCase<Op, 10, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 10, EndIndex>::Run(std::forward<Op>(op));
       case 11:
-        return PickCase<Op, 11, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 11, EndIndex>::Run(std::forward<Op>(op));
       case 12:
-        return PickCase<Op, 12, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 12, EndIndex>::Run(std::forward<Op>(op));
       case 13:
-        return PickCase<Op, 13, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 13, EndIndex>::Run(std::forward<Op>(op));
       case 14:
-        return PickCase<Op, 14, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 14, EndIndex>::Run(std::forward<Op>(op));
       case 15:
-        return PickCase<Op, 15, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 15, EndIndex>::Run(std::forward<Op>(op));
       case 16:
-        return PickCase<Op, 16, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 16, EndIndex>::Run(std::forward<Op>(op));
       case 17:
-        return PickCase<Op, 17, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 17, EndIndex>::Run(std::forward<Op>(op));
       case 18:
-        return PickCase<Op, 18, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 18, EndIndex>::Run(std::forward<Op>(op));
       case 19:
-        return PickCase<Op, 19, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 19, EndIndex>::Run(std::forward<Op>(op));
       case 20:
-        return PickCase<Op, 20, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 20, EndIndex>::Run(std::forward<Op>(op));
       case 21:
-        return PickCase<Op, 21, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 21, EndIndex>::Run(std::forward<Op>(op));
       case 22:
-        return PickCase<Op, 22, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 22, EndIndex>::Run(std::forward<Op>(op));
       case 23:
-        return PickCase<Op, 23, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 23, EndIndex>::Run(std::forward<Op>(op));
       case 24:
-        return PickCase<Op, 24, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 24, EndIndex>::Run(std::forward<Op>(op));
       case 25:
-        return PickCase<Op, 25, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 25, EndIndex>::Run(std::forward<Op>(op));
       case 26:
-        return PickCase<Op, 26, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 26, EndIndex>::Run(std::forward<Op>(op));
       case 27:
-        return PickCase<Op, 27, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 27, EndIndex>::Run(std::forward<Op>(op));
       case 28:
-        return PickCase<Op, 28, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 28, EndIndex>::Run(std::forward<Op>(op));
       case 29:
-        return PickCase<Op, 29, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 29, EndIndex>::Run(std::forward<Op>(op));
       case 30:
-        return PickCase<Op, 30, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 30, EndIndex>::Run(std::forward<Op>(op));
       case 31:
-        return PickCase<Op, 31, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 31, EndIndex>::Run(std::forward<Op>(op));
       case 32:
-        return PickCase<Op, 32, EndIndex>::Run(absl::forward<Op>(op));
+        return PickCase<Op, 32, EndIndex>::Run(std::forward<Op>(op));
       default:
         ABSL_ASSERT(i == variant_npos);
-        return absl::base_internal::invoke(absl::forward<Op>(op), NPos());
+        return absl::base_internal::invoke(std::forward<Op>(op), NPos());
     }
   }
 };
@@ -437,7 +438,7 @@
         MakeVisitationMatrix<VisitIndicesResultT<Op, SizeT...>, Op,
                              index_sequence<(EndIndices + 1)...>,
                              index_sequence<>>::Run(),
-        (indices + 1)...)(absl::forward<Op>(op));
+        (indices + 1)...)(std::forward<Op>(op));
   }
 };
 
@@ -489,7 +490,7 @@
     VisitIndicesResultT<Op, decltype(EndIndices)...> operator()(
         SizeT<I> /*index*/) && {
       return base_internal::invoke(
-          absl::forward<Op>(op),
+          std::forward<Op>(op),
          SizeT<UnflattenIndex<I, N, (EndIndices + 1)...>::value -
                 std::size_t{1}>()...);
     }
@@ -501,7 +502,7 @@
   static VisitIndicesResultT<Op, decltype(EndIndices)...> Run(Op&& op,
                                                               SizeType... i) {
     return VisitIndicesSwitch<NumCasesOfSwitch<EndIndices...>::value>::Run(
-        FlattenedOp<Op>{absl::forward<Op>(op)},
+        FlattenedOp<Op>{std::forward<Op>(op)},
         FlattenIndices<(EndIndices + std::size_t{1})...>::Run(
             (i + std::size_t{1})...));
   }
@@ -612,7 +613,7 @@
       TypedThrowBadVariantAccess<VariantAccessResult<I, Variant>>();
     }
 
-    return Access<I>(absl::forward<Variant>(self));
+    return Access<I>(std::forward<Variant>(self));
   }
 
   // The implementation of the move-assignment operation for a variant.
@@ -684,7 +685,7 @@
 
     void operator()(SizeT<NewIndex::value> /*old_i*/
                     ) const {
-      Access<NewIndex::value>(*left) = absl::forward<QualifiedNew>(other);
+      Access<NewIndex::value>(*left) = std::forward<QualifiedNew>(other);
     }
 
     template <std::size_t OldIndex>
@@ -695,13 +696,13 @@
       if (std::is_nothrow_constructible<New, QualifiedNew>::value ||
          !std::is_nothrow_move_constructible<New>::value) {
         left->template emplace<NewIndex::value>(
-            absl::forward<QualifiedNew>(other));
+            std::forward<QualifiedNew>(other));
       } else {
         // the standard says "equivalent to
         // operator=(variant(std::forward<T>(t)))", but we use `emplace` here
         // because the variant's move assignment operator could be deleted.
         left->template emplace<NewIndex::value>(
-            New(absl::forward<QualifiedNew>(other)));
+            New(std::forward<QualifiedNew>(other)));
       }
     }
 
@@ -712,7 +713,7 @@
   template <class Left, class QualifiedNew>
   static ConversionAssignVisitor<Left, QualifiedNew>
   MakeConversionAssignVisitor(Left* left, QualifiedNew&& qual) {
-    return {left, absl::forward<QualifiedNew>(qual)};
+    return {left, std::forward<QualifiedNew>(qual)};
   }
 
   // Backend for operations for `emplace()` which destructs `*self` then
@@ -723,7 +724,7 @@
     Destroy(*self);
     using New = typename absl::variant_alternative<NewIndex, Self>::type;
     New* const result = ::new (static_cast<void*>(&self->state_))
-        New(absl::forward<Args>(args)...);
+        New(std::forward<Args>(args)...);
     self->index_ = NewIndex;
     return *result;
   }
@@ -919,9 +920,9 @@
                            Is, QualifiedVariants>...)>>::value,
         "All visitation overloads must have the same return type.");
     return absl::base_internal::invoke(
-        absl::forward<Op>(op),
+        std::forward<Op>(op),
         VariantCoreAccess::Access<Is>(
-            absl::forward<QualifiedVariants>(std::get<TupIs>(variant_tup)))...);
+            std::forward<QualifiedVariants>(std::get<TupIs>(variant_tup)))...);
   }
 
   template <std::size_t... TupIs, std::size_t... Is>
@@ -969,11 +970,11 @@
 
   template <class... P>
   explicit constexpr Union(EmplaceTag<0>, P&&... args)
-      : head(absl::forward<P>(args)...) {}
+      : head(std::forward<P>(args)...) {}
 
   template <std::size_t I, class... P>
   explicit constexpr Union(EmplaceTag<I>, P&&... args)
-      : tail(EmplaceTag<I - 1>{}, absl::forward<P>(args)...) {}
+      : tail(EmplaceTag<I - 1>{}, std::forward<P>(args)...) {}
 
   Head head;
   TailUnion tail;
@@ -1001,11 +1002,11 @@
 
   template <class... P>
   explicit constexpr DestructibleUnionImpl(EmplaceTag<0>, P&&... args)
-      : head(absl::forward<P>(args)...) {}
+      : head(std::forward<P>(args)...) {}
 
   template <std::size_t I, class... P>
   explicit constexpr DestructibleUnionImpl(EmplaceTag<I>, P&&... args)
-      : tail(EmplaceTag<I - 1>{}, absl::forward<P>(args)...) {}
+      : tail(EmplaceTag<I - 1>{}, std::forward<P>(args)...) {}
 
   ~DestructibleUnionImpl() {}
 
@@ -1036,7 +1037,7 @@
 
   template <std::size_t I, class... P>
   explicit constexpr VariantStateBase(EmplaceTag<I> tag, P&&... args)
-      : state_(tag, absl::forward<P>(args)...), index_(I) {}
+      : state_(tag, std::forward<P>(args)...), index_(I) {}
 
   explicit constexpr VariantStateBase(NoopConstructorTag)
       : state_(NoopConstructorTag()), index_(variant_npos) {}
@@ -1321,7 +1322,7 @@
     using Alternative =
         typename absl::variant_alternative<I, variant<T...>>::type;
     ::new (static_cast<void*>(&self->state_)) Alternative(
-        variant_internal::AccessUnion(absl::move(other->state_), i));
+        variant_internal::AccessUnion(std::move(other->state_), i));
   }
 
   void operator()(SizeT<absl::variant_npos> /*i*/) const {}
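The 33-case `switch` edited above is a standard trick for converting a runtime index into a compile-time constant without a table of function pointers. A toy version of the dispatch, far smaller than Abseil's and illustrative only:

#include <cstddef>
#include <type_traits>
#include <utility>

// Each case hands the visitor a distinct integral_constant type, so the
// callee sees the index as a compile-time value. All arms must agree on the
// return type, mirroring the static_assert in the real code.
template <class Op>
auto VisitIndex(Op&& op, std::size_t i)
    -> decltype(op(std::integral_constant<std::size_t, 0>{})) {
  switch (i) {
    case 0:
      return std::forward<Op>(op)(std::integral_constant<std::size_t, 0>{});
    case 1:
      return std::forward<Op>(op)(std::integral_constant<std::size_t, 1>{});
    default:
      return std::forward<Op>(op)(std::integral_constant<std::size_t, 2>{});
  }
}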
diff --git a/absl/types/optional.h b/absl/types/optional.h
index 395fe62..cf7249c 100644
--- a/absl/types/optional.h
+++ b/absl/types/optional.h
@@ -151,7 +151,7 @@
                 std::is_same<InPlaceT, in_place_t>,
                 std::is_constructible<T, Args&&...> >::value>* = nullptr>
   constexpr explicit optional(InPlaceT, Args&&... args)
-      : data_base(in_place_t(), absl::forward<Args>(args)...) {}
+      : data_base(in_place_t(), std::forward<Args>(args)...) {}
 
   // Constructs a non-empty `optional` direct-initialized value of type `T` from
   // the arguments of an initializer_list and `std::forward<Args>(args)...`.
@@ -162,8 +162,7 @@
                 T, std::initializer_list<U>&, Args&&...>::value>::type>
   constexpr explicit optional(in_place_t, std::initializer_list<U> il,
                               Args&&... args)
-      : data_base(in_place_t(), il, absl::forward<Args>(args)...) {
-  }
+      : data_base(in_place_t(), il, std::forward<Args>(args)...) {}
 
   // Value constructor (implicit)
   template <
@@ -176,21 +175,21 @@
                 std::is_convertible<U&&, T>,
                 std::is_constructible<T, U&&> >::value,
             bool>::type = false>
-  constexpr optional(U&& v) : data_base(in_place_t(), absl::forward<U>(v)) {}
+  constexpr optional(U&& v) : data_base(in_place_t(), std::forward<U>(v)) {}
 
   // Value constructor (explicit)
   template <
       typename U = T,
       typename std::enable_if<
           absl::conjunction<absl::negation<std::is_same<
-                                in_place_t, typename std::decay<U>::type>>,
+                                in_place_t, typename std::decay<U>::type> >,
                             absl::negation<std::is_same<
-                                optional<T>, typename std::decay<U>::type>>,
-                            absl::negation<std::is_convertible<U&&, T>>,
-                            std::is_constructible<T, U&&>>::value,
+                                optional<T>, typename std::decay<U>::type> >,
+                            absl::negation<std::is_convertible<U&&, T> >,
+                            std::is_constructible<T, U&&> >::value,
             bool>::type = false>
   explicit constexpr optional(U&& v)
-      : data_base(in_place_t(), absl::forward<U>(v)) {}
+      : data_base(in_place_t(), std::forward<U>(v)) {}
 
   // Converting copy constructor (implicit)
   template <typename U,
@@ -437,7 +436,7 @@
     return reference();
   }
   constexpr const T&& operator*() const&& ABSL_ATTRIBUTE_LIFETIME_BOUND {
-    return ABSL_HARDENING_ASSERT(this->engaged_), absl::move(reference());
+    return ABSL_HARDENING_ASSERT(this->engaged_), std::move(reference());
   }
   T&& operator*() && ABSL_ATTRIBUTE_LIFETIME_BOUND {
     ABSL_HARDENING_ASSERT(this->engaged_);
@@ -492,7 +491,7 @@
   }
   constexpr const T&& value() const&&
       ABSL_ATTRIBUTE_LIFETIME_BOUND {  // NOLINT(build/c++11)
-    return absl::move(
+    return std::move(
         static_cast<bool>(*this)
             ? reference()
             : (optional_internal::throw_bad_optional_access(), reference()));
@@ -511,9 +510,8 @@
                   "optional<T>::value_or: T must be copy constructible");
     static_assert(std::is_convertible<U&&, value_type>::value,
                   "optional<T>::value_or: U must be convertible to T");
-    return static_cast<bool>(*this)
-               ? **this
-               : static_cast<T>(absl::forward<U>(v));
+    return static_cast<bool>(*this) ? **this
+                                    : static_cast<T>(std::forward<U>(v));
   }
   template <typename U>
   T value_or(U&& v) && {  // NOLINT(build/c++11)
@@ -573,19 +571,18 @@
 //   static_assert(opt.value() == 1, "");
 template <typename T>
 constexpr optional<typename std::decay<T>::type> make_optional(T&& v) {
-  return optional<typename std::decay<T>::type>(absl::forward<T>(v));
+  return optional<typename std::decay<T>::type>(std::forward<T>(v));
 }
 
 template <typename T, typename... Args>
 constexpr optional<T> make_optional(Args&&... args) {
-  return optional<T>(in_place_t(), absl::forward<Args>(args)...);
+  return optional<T>(in_place_t(), std::forward<Args>(args)...);
 }
 
 template <typename T, typename U, typename... Args>
 constexpr optional<T> make_optional(std::initializer_list<U> il,
                                     Args&&... args) {
-  return optional<T>(in_place_t(), il,
-                     absl::forward<Args>(args)...);
+  return optional<T>(in_place_t(), il, std::forward<Args>(args)...);
 }
 
 // Relational operators [optional.relops]
diff --git a/absl/types/variant.h b/absl/types/variant.h
index ac93464..56a7e05 100644
--- a/absl/types/variant.h
+++ b/absl/types/variant.h
@@ -303,11 +303,10 @@
 }
 
 // Overload for getting a variant's rvalue by type.
-// Note: `absl::move()` is required to allow use of constexpr in C++11.
 template <class T, class... Types>
 constexpr T&& get(variant<Types...>&& v) {
   return variant_internal::VariantCoreAccess::CheckedAccess<
-      variant_internal::IndexOf<T, Types...>::value>(absl::move(v));
+      variant_internal::IndexOf<T, Types...>::value>(std::move(v));
 }
 
 // Overload for getting a variant's const lvalue by type.
@@ -318,11 +317,10 @@
 }
 
 // Overload for getting a variant's const rvalue by type.
-// Note: `absl::move()` is required to allow use of constexpr in C++11.
 template <class T, class... Types>
 constexpr const T&& get(const variant<Types...>&& v) {
   return variant_internal::VariantCoreAccess::CheckedAccess<
-      variant_internal::IndexOf<T, Types...>::value>(absl::move(v));
+      variant_internal::IndexOf<T, Types...>::value>(std::move(v));
 }
 
 // Overload for getting a variant's lvalue by index.
@@ -333,11 +331,10 @@
 }
 
 // Overload for getting a variant's rvalue by index.
-// Note: `absl::move()` is required to allow use of constexpr in C++11.
 template <std::size_t I, class... Types>
 constexpr variant_alternative_t<I, variant<Types...>>&& get(
     variant<Types...>&& v) {
-  return variant_internal::VariantCoreAccess::CheckedAccess<I>(absl::move(v));
+  return variant_internal::VariantCoreAccess::CheckedAccess<I>(std::move(v));
 }
 
 // Overload for getting a variant's const lvalue by index.
@@ -348,11 +345,10 @@
 }
 
 // Overload for getting a variant's const rvalue by index.
-// Note: `absl::move()` is required to allow use of constexpr in C++11.
 template <std::size_t I, class... Types>
 constexpr const variant_alternative_t<I, variant<Types...>>&& get(
     const variant<Types...>&& v) {
-  return variant_internal::VariantCoreAccess::CheckedAccess<I>(absl::move(v));
+  return variant_internal::VariantCoreAccess::CheckedAccess<I>(std::move(v));
 }
 
 // get_if()
@@ -432,8 +428,8 @@
   return variant_internal::
       VisitIndices<variant_size<absl::decay_t<Variants> >::value...>::Run(
          variant_internal::PerformVisitation<Visitor, Variants...>{
-              std::forward_as_tuple(absl::forward<Variants>(vars)...),
-              absl::forward<Visitor>(vis)},
+              std::forward_as_tuple(std::forward<Variants>(vars)...),
+              std::forward<Visitor>(vis)},
           vars.index()...);
 }
 
@@ -504,13 +500,12 @@
       class T,
       std::size_t I = std::enable_if<
          variant_internal::IsNeitherSelfNorInPlace<variant,
-                                                    absl::decay_t<T>>::value,
-          variant_internal::IndexOfConstructedType<variant, T>>::type::value,
+                                                    absl::decay_t<T> >::value,
+          variant_internal::IndexOfConstructedType<variant, T> >::type::value,
       class Tj = absl::variant_alternative_t<I, variant>,
-      absl::enable_if_t<std::is_constructible<Tj, T>::value>* =
-          nullptr>
+      absl::enable_if_t<std::is_constructible<Tj, T>::value>* = nullptr>
   constexpr variant(T&& t) noexcept(std::is_nothrow_constructible<Tj, T>::value)
-      : Base(variant_internal::EmplaceTag<I>(), absl::forward<T>(t)) {}
+      : Base(variant_internal::EmplaceTag<I>(), std::forward<T>(t)) {}
 
   // Constructs a variant of an alternative type from the arguments through
   // direct-initialization.
@@ -524,7 +519,7 @@
   constexpr explicit variant(in_place_type_t<T>, Args&&... args)
       : Base(variant_internal::EmplaceTag<
                  variant_internal::UnambiguousIndexOf<variant, T>::value>(),
-             absl::forward<Args>(args)...) {}
+             std::forward<Args>(args)...) {}
 
   // Constructs a variant of an alternative type from an initializer list
   // and other arguments through direct-initialization.
@@ -539,7 +534,7 @@
                              Args&&... args)
       : Base(variant_internal::EmplaceTag<
                  variant_internal::UnambiguousIndexOf<variant, T>::value>(),
-             il, absl::forward<Args>(args)...) {}
+             il, std::forward<Args>(args)...) {}
 
   // Constructs a variant of an alternative type from a provided index,
   // through value-initialization using the provided forwarded arguments.
@@ -548,7 +543,7 @@
                 variant_internal::VariantAlternativeSfinaeT<I, variant>,
                 Args...>::value>::type* = nullptr>
   constexpr explicit variant(in_place_index_t<I>, Args&&... args)
-      : Base(variant_internal::EmplaceTag<I>(), absl::forward<Args>(args)...) {}
+      : Base(variant_internal::EmplaceTag<I>(), std::forward<Args>(args)...) {}
 
   // Constructs a variant of an alternative type from a provided index,
   // through value-initialization of an initializer list and the provided
@@ -560,7 +555,7 @@
   constexpr explicit variant(in_place_index_t<I>, std::initializer_list<U> il,
                              Args&&... args)
       : Base(variant_internal::EmplaceTag<I>(), il,
-             absl::forward<Args>(args)...) {}
+             std::forward<Args>(args)...) {}
 
   // Destructors
 
@@ -595,7 +590,7 @@
       std::is_nothrow_constructible<Tj, T>::value) {
     variant_internal::VisitIndices<sizeof...(Tn) + 1>::Run(
         variant_internal::VariantCoreAccess::MakeConversionAssignVisitor(
-            this, absl::forward<T>(t)),
+            this, std::forward<T>(t)),
         index());
 
     return *this;
@@ -623,7 +618,7 @@
   T& emplace(Args&&... args) {
     return variant_internal::VariantCoreAccess::Replace<
         variant_internal::UnambiguousIndexOf<variant, T>::value>(
-        this, absl::forward<Args>(args)...);
+        this, std::forward<Args>(args)...);
   }
 
   // Constructs a value of the given alternative type T within the variant using
@@ -644,7 +639,7 @@
   T& emplace(std::initializer_list<U> il, Args&&... args) {
     return variant_internal::VariantCoreAccess::Replace<
         variant_internal::UnambiguousIndexOf<variant, T>::value>(
-        this, il, absl::forward<Args>(args)...);
+        this, il, std::forward<Args>(args)...);
   }
 
   // Destroys the current value of the variant (provided that
@@ -663,7 +658,7 @@
                 Args...>::value>::type* = nullptr>
   absl::variant_alternative_t<I, variant>& emplace(Args&&... args) {
     return variant_internal::VariantCoreAccess::Replace<I>(
-        this, absl::forward<Args>(args)...);
+        this, std::forward<Args>(args)...);
   }
 
   // Destroys the current value of the variant (provided that
@@ -681,7 +676,7 @@
   absl::variant_alternative_t<I, variant>& emplace(std::initializer_list<U> il,
                                                    Args&&... args) {
     return variant_internal::VariantCoreAccess::Replace<I>(
-        this, il, absl::forward<Args>(args)...);
+        this, il, std::forward<Args>(args)...);
   }
 
   // variant::valueless_by_exception()
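The rvalue `get` overloads touched above are what make it cheap to move an alternative out of an expiring variant; with the C++11 constraint gone they can call `std::move` directly. A hedged usage sketch:

#include <string>
#include <utility>

#include "absl/types/variant.h"

int main() {
  absl::variant<int, std::string> v = std::string("payload");
  // The rvalue overload returns std::string&&, so this moves, not copies.
  std::string s = absl::get<std::string>(std::move(v));
  return static_cast<int>(s.size());
}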
diff --git a/absl/types/variant_test.cc b/absl/types/variant_test.cc
index 4cd5b7a..91b142c 100644
--- a/absl/types/variant_test.cc
+++ b/absl/types/variant_test.cc
@@ -389,7 +389,7 @@
   using V = variant<MoveOnly<class A>, MoveOnly<class B>, MoveOnly<class C>>;
 
   V v(in_place_index<1>, 10);
-  V v2 = absl::move(v);
+  V v2 = std::move(v);
   EXPECT_EQ(10, absl::get<1>(v2).value);
 }
 
@@ -1209,60 +1209,60 @@
     Var v(absl::in_place_index<0>, 0);
 
     using LValueGetType = decltype(absl::get<0>(v));
-    using RValueGetType = decltype(absl::get<0>(absl::move(v)));
+    using RValueGetType = decltype(absl::get<0>(std::move(v)));
 
     EXPECT_TRUE((std::is_same<LValueGetType, int&>::value));
     EXPECT_TRUE((std::is_same<RValueGetType, int&&>::value));
 
     EXPECT_EQ(absl::get<0>(v), 0);
-    EXPECT_EQ(absl::get<0>(absl::move(v)), 0);
+    EXPECT_EQ(absl::get<0>(std::move(v)), 0);
 
     const Var& const_v = v;
     using ConstLValueGetType = decltype(absl::get<0>(const_v));
-    using ConstRValueGetType = decltype(absl::get<0>(absl::move(const_v)));
+    using ConstRValueGetType = decltype(absl::get<0>(std::move(const_v)));
     EXPECT_TRUE((std::is_same<ConstLValueGetType, const int&>::value));
     EXPECT_TRUE((std::is_same<ConstRValueGetType, const int&&>::value));
     EXPECT_EQ(absl::get<0>(const_v), 0);
-    EXPECT_EQ(absl::get<0>(absl::move(const_v)), 0);
+    EXPECT_EQ(absl::get<0>(std::move(const_v)), 0);
   }
 
   {
     Var v = std::string("Hello");
 
     using LValueGetType = decltype(absl::get<1>(v));
-    using RValueGetType = decltype(absl::get<1>(absl::move(v)));
+    using RValueGetType = decltype(absl::get<1>(std::move(v)));
 
     EXPECT_TRUE((std::is_same<LValueGetType, std::string&>::value));
     EXPECT_TRUE((std::is_same<RValueGetType, std::string&&>::value));
 
     EXPECT_EQ(absl::get<1>(v), "Hello");
-    EXPECT_EQ(absl::get<1>(absl::move(v)), "Hello");
+    EXPECT_EQ(absl::get<1>(std::move(v)), "Hello");
 
     const Var& const_v = v;
     using ConstLValueGetType = decltype(absl::get<1>(const_v));
-    using ConstRValueGetType = decltype(absl::get<1>(absl::move(const_v)));
+    using ConstRValueGetType = decltype(absl::get<1>(std::move(const_v)));
     EXPECT_TRUE((std::is_same<ConstLValueGetType, const std::string&>::value));
     EXPECT_TRUE((std::is_same<ConstRValueGetType, const std::string&&>::value));
     EXPECT_EQ(absl::get<1>(const_v), "Hello");
-    EXPECT_EQ(absl::get<1>(absl::move(const_v)), "Hello");
+    EXPECT_EQ(absl::get<1>(std::move(const_v)), "Hello");
   }
 
   {
     Var v = 2.0;
 
     using LValueGetType = decltype(absl::get<2>(v));
-    using RValueGetType = decltype(absl::get<2>(absl::move(v)));
+    using RValueGetType = decltype(absl::get<2>(std::move(v)));
 
     EXPECT_TRUE((std::is_same<LValueGetType, double&>::value));
     EXPECT_TRUE((std::is_same<RValueGetType, double&&>::value));
 
     EXPECT_EQ(absl::get<2>(v), 2.);
-    EXPECT_EQ(absl::get<2>(absl::move(v)), 2.);
+    EXPECT_EQ(absl::get<2>(std::move(v)), 2.);
 
     const Var& const_v = v;
     using ConstLValueGetType = decltype(absl::get<2>(const_v));
-    using ConstRValueGetType = decltype(absl::get<2>(absl::move(const_v)));
+    using ConstRValueGetType = decltype(absl::get<2>(std::move(const_v)));
     EXPECT_TRUE((std::is_same<ConstLValueGetType, const double&>::value));
     EXPECT_TRUE((std::is_same<ConstRValueGetType, const double&&>::value));
     EXPECT_EQ(absl::get<2>(const_v), 2.);
-    EXPECT_EQ(absl::get<2>(absl::move(const_v)), 2.);
+    EXPECT_EQ(absl::get<2>(std::move(const_v)), 2.);
   }
 
   {
@@ -1270,20 +1270,20 @@
     v.emplace<3>(1);
 
     using LValueGetType = decltype(absl::get<3>(v));
-    using RValueGetType = decltype(absl::get<3>(absl::move(v)));
+    using RValueGetType = decltype(absl::get<3>(std::move(v)));
 
     EXPECT_TRUE((std::is_same<LValueGetType, int&>::value));
     EXPECT_TRUE((std::is_same<RValueGetType, int&&>::value));
 
     EXPECT_EQ(absl::get<3>(v), 1);
-    EXPECT_EQ(absl::get<3>(absl::move(v)), 1);
+    EXPECT_EQ(absl::get<3>(std::move(v)), 1);
 
     const Var& const_v = v;
     using ConstLValueGetType = decltype(absl::get<3>(const_v));
-    using ConstRValueGetType = decltype(absl::get<3>(absl::move(const_v)));
+    using ConstRValueGetType = decltype(absl::get<3>(std::move(const_v)));
     EXPECT_TRUE((std::is_same<ConstLValueGetType, const int&>::value));
     EXPECT_TRUE((std::is_same<ConstRValueGetType, const int&&>::value));
     EXPECT_EQ(absl::get<3>(const_v), 1);
-    EXPECT_EQ(absl::get<3>(absl::move(const_v)), 1);  // NOLINT
+    EXPECT_EQ(absl::get<3>(std::move(const_v)), 1);  // NOLINT
   }
 }
 
@@ -1322,60 +1322,60 @@
     Var v = 1;
 
     using LValueGetType = decltype(absl::get<int>(v));
-    using RValueGetType = decltype(absl::get<int>(absl::move(v)));
+    using RValueGetType = decltype(absl::get<int>(std::move(v)));
 
     EXPECT_TRUE((std::is_same<LValueGetType, int&>::value));
     EXPECT_TRUE((std::is_same<RValueGetType, int&&>::value));
 
     EXPECT_EQ(absl::get<int>(v), 1);
-    EXPECT_EQ(absl::get<int>(absl::move(v)), 1);
+    EXPECT_EQ(absl::get<int>(std::move(v)), 1);
 
     const Var& const_v = v;
     using ConstLValueGetType = decltype(absl::get<int>(const_v));
-    using ConstRValueGetType = decltype(absl::get<int>(absl::move(const_v)));
+    using ConstRValueGetType = decltype(absl::get<int>(std::move(const_v)));
     EXPECT_TRUE((std::is_same<ConstLValueGetType, const int&>::value));
     EXPECT_TRUE((std::is_same<ConstRValueGetType, const int&&>::value));
     EXPECT_EQ(absl::get<int>(const_v), 1);
-    EXPECT_EQ(absl::get<int>(absl::move(const_v)), 1);
+    EXPECT_EQ(absl::get<int>(std::move(const_v)), 1);
   }
 
   {
     Var v = std::string("Hello");
 
     using LValueGetType = decltype(absl::get<1>(v));
-    using RValueGetType = decltype(absl::get<1>(absl::move(v)));
+    using RValueGetType = decltype(absl::get<1>(std::move(v)));
 
     EXPECT_TRUE((std::is_same<LValueGetType, std::string&>::value));
     EXPECT_TRUE((std::is_same<RValueGetType, std::string&&>::value));
 
     EXPECT_EQ(absl::get<std::string>(v), "Hello");
-    EXPECT_EQ(absl::get<std::string>(absl::move(v)), "Hello");
+    EXPECT_EQ(absl::get<std::string>(std::move(v)), "Hello");
 
     const Var& const_v = v;
     using ConstLValueGetType = decltype(absl::get<1>(const_v));
-    using ConstRValueGetType = decltype(absl::get<1>(absl::move(const_v)));
+    using ConstRValueGetType = decltype(absl::get<1>(std::move(const_v)));
     EXPECT_TRUE((std::is_same<ConstLValueGetType, const std::string&>::value));
     EXPECT_TRUE((std::is_same<ConstRValueGetType, const std::string&&>::value));
     EXPECT_EQ(absl::get<std::string>(const_v), "Hello");
-    EXPECT_EQ(absl::get<std::string>(absl::move(const_v)), "Hello");
+    EXPECT_EQ(absl::get<std::string>(std::move(const_v)), "Hello");
   }
 
   {
     Var v = 2.0;
 
     using LValueGetType = decltype(absl::get<2>(v));
-    using RValueGetType = decltype(absl::get<2>(absl::move(v)));
+    using RValueGetType = decltype(absl::get<2>(std::move(v)));
 
     EXPECT_TRUE((std::is_same<LValueGetType, double&>::value));
     EXPECT_TRUE((std::is_same<RValueGetType, double&&>::value));
 
     EXPECT_EQ(absl::get<double>(v), 2.);
-    EXPECT_EQ(absl::get<double>(absl::move(v)), 2.);
+    EXPECT_EQ(absl::get<double>(std::move(v)), 2.);
 
     const Var& const_v = v;
     using ConstLValueGetType = decltype(absl::get<2>(const_v));
-    using ConstRValueGetType = decltype(absl::get<2>(absl::move(const_v)));
+    using ConstRValueGetType = decltype(absl::get<2>(std::move(const_v)));
     EXPECT_TRUE((std::is_same<ConstLValueGetType, const double&>::value));
     EXPECT_TRUE((std::is_same<ConstRValueGetType, const double&&>::value));
     EXPECT_EQ(absl::get<double>(const_v), 2.);
-    EXPECT_EQ(absl::get<double>(absl::move(const_v)), 2.);
+    EXPECT_EQ(absl::get<double>(std::move(const_v)), 2.);
   }
 }
 
@@ -1825,13 +1825,13 @@
     int operator()(std::string&&, std::string&&) const { return 3; }  // NOLINT
   };
   EXPECT_FALSE(absl::visit(Visitor{}, v));
-  EXPECT_TRUE(absl::visit(Visitor{}, absl::move(v)));
+  EXPECT_TRUE(absl::visit(Visitor{}, std::move(v)));
 
   // Also test the variadic overload.
   EXPECT_EQ(0, absl::visit(Visitor{}, v, v));
-  EXPECT_EQ(1, absl::visit(Visitor{}, v, absl::move(v)));
-  EXPECT_EQ(2, absl::visit(Visitor{}, absl::move(v), v));
-  EXPECT_EQ(3, absl::visit(Visitor{}, absl::move(v), absl::move(v)));
+  EXPECT_EQ(1, absl::visit(Visitor{}, v, std::move(v)));
+  EXPECT_EQ(2, absl::visit(Visitor{}, std::move(v), v));
+  EXPECT_EQ(3, absl::visit(Visitor{}, std::move(v), std::move(v)));
 }
 
 TEST(VariantTest, VisitRValueVisitor) {
@@ -1862,12 +1862,12 @@
       (std::is_same<LValue_LValue, decltype(absl::visit(visitor, v))>::value));
   EXPECT_TRUE(
       (std::is_same<RValue_LValue,
-                    decltype(absl::visit(visitor, absl::move(v)))>::value));
+                    decltype(absl::visit(visitor, std::move(v)))>::value));
   EXPECT_TRUE((
       std::is_same<LValue_RValue, decltype(absl::visit(Visitor{}, v))>::value));
   EXPECT_TRUE(
       (std::is_same<RValue_RValue,
-                    decltype(absl::visit(Visitor{}, absl::move(v)))>::value));
+                    decltype(absl::visit(Visitor{}, std::move(v)))>::value));
 }
 
 TEST(VariantTest, VisitVariadic) {
@@ -2225,7 +2225,7 @@
   EXPECT_TRUE(absl::holds_alternative<std::unique_ptr<int>>(v));
 
   // Construct a variant by moving from another variant.
-  Variant v2(absl::move(v));
+  Variant v2(std::move(v));
   ASSERT_TRUE(absl::holds_alternative<std::unique_ptr<int>>(v2));
   ASSERT_NE(nullptr, absl::get<std::unique_ptr<int>>(v2));
   EXPECT_EQ(10, *absl::get<std::unique_ptr<int>>(v2));
@@ -2242,7 +2242,7 @@
   EXPECT_EQ("foo", *absl::get<std::unique_ptr<std::string>>(v));
 
   // Move-assign a variant.
-  v2 = absl::move(v);
+  v2 = std::move(v);
   ASSERT_TRUE(absl::holds_alternative<std::unique_ptr<std::string>>(v2));
   EXPECT_EQ("foo", *absl::get<std::unique_ptr<std::string>>(v2));
   EXPECT_TRUE(absl::holds_alternative<std::unique_ptr<std::string>>(v));
@@ -2568,7 +2568,7 @@
   vec.push_back(absl::make_unique<int>(42));
   vec.emplace_back("Hello");
   vec.reserve(3);
-  auto another_vec = absl::move(vec);
+  auto another_vec = std::move(vec);
   // As a sanity check, verify vector contents.
   ASSERT_EQ(2u, another_vec.size());
   EXPECT_EQ(42, *absl::get<std::unique_ptr<int>>(another_vec[0]));