From f7d2b13ef2466a5a3b7bcc535c49d3962a5c89f9 Mon Sep 17 00:00:00 2001 From: Abseil Team Date: Mon, 22 Jan 2024 09:09:23 -0800 Subject: Remove code pieces for no longer supported GCC versions. The minimum supported version today is GCC 7 (`__GNUC__ >= 7`). PiperOrigin-RevId: 600475215 Change-Id: I1aa46384f1e75f268649a48dbe2b42f3475bb07f --- absl/container/internal/compressed_tuple_test.cc | 2 -- 1 file changed, 2 deletions(-) (limited to 'absl/container/internal/compressed_tuple_test.cc') diff --git a/absl/container/internal/compressed_tuple_test.cc b/absl/container/internal/compressed_tuple_test.cc index 74111f97..da07baab 100644 --- a/absl/container/internal/compressed_tuple_test.cc +++ b/absl/container/internal/compressed_tuple_test.cc @@ -358,7 +358,6 @@ TEST(CompressedTupleTest, Constexpr) { EXPECT_EQ(x2, 5); EXPECT_EQ(x3, CallType::kConstRef); -#if !defined(__GNUC__) || defined(__clang__) || __GNUC__ > 4 constexpr CompressedTuple, TrivialStruct, int> trivial = {}; constexpr CallType trivial0 = trivial.get<0>().value(); constexpr int trivial1 = trivial.get<1>().value(); @@ -367,7 +366,6 @@ TEST(CompressedTupleTest, Constexpr) { EXPECT_EQ(trivial0, CallType::kConstRef); EXPECT_EQ(trivial1, 0); EXPECT_EQ(trivial2, 0); -#endif constexpr CompressedTuple, NonTrivialStruct, absl::optional> non_trivial = {}; -- cgit v1.2.3 From d802708117c6ef6b9783efe499b2a2d0d0536c77 Mon Sep 17 00:00:00 2001 From: Derek Mauro Date: Mon, 11 Mar 2024 09:14:45 -0700 Subject: Replace usages of absl::move, absl::forward, and absl::exchange with their std:: equivalents PiperOrigin-RevId: 614687225 Change-Id: I07421db08ee9c221e561f42e3bf8345fb5321401 --- absl/cleanup/cleanup_test.cc | 2 +- absl/container/internal/btree.h | 4 +- absl/container/internal/compressed_tuple.h | 19 ++-- absl/container/internal/compressed_tuple_test.cc | 5 +- absl/container/internal/layout.h | 2 +- absl/functional/bind_front.h | 5 +- absl/functional/internal/front_binder.h | 18 ++-- absl/types/internal/optional.h | 4 +- absl/types/internal/variant.h | 109 ++++++++++++----------- absl/types/optional.h | 33 ++++--- absl/types/variant.h | 43 ++++----- absl/types/variant_test.cc | 76 ++++++++-------- 12 files changed, 157 insertions(+), 163 deletions(-) (limited to 'absl/container/internal/compressed_tuple_test.cc') diff --git a/absl/cleanup/cleanup_test.cc b/absl/cleanup/cleanup_test.cc index 46b88589..72d7ff2a 100644 --- a/absl/cleanup/cleanup_test.cc +++ b/absl/cleanup/cleanup_test.cc @@ -48,7 +48,7 @@ class FunctorClass { explicit FunctorClass(Callback callback) : callback_(std::move(callback)) {} FunctorClass(FunctorClass&& other) - : callback_(absl::exchange(other.callback_, Callback())) {} + : callback_(std::exchange(other.callback_, Callback())) {} FunctorClass(const FunctorClass&) = delete; diff --git a/absl/container/internal/btree.h b/absl/container/internal/btree.h index 91df57a3..fd7860da 100644 --- a/absl/container/internal/btree.h +++ b/absl/container/internal/btree.h @@ -1407,9 +1407,9 @@ class btree { copy_or_move_values_in_order(other); } btree(btree &&other) noexcept - : root_(absl::exchange(other.root_, EmptyNode())), + : root_(std::exchange(other.root_, EmptyNode())), rightmost_(std::move(other.rightmost_)), - size_(absl::exchange(other.size_, 0u)) { + size_(std::exchange(other.size_, 0u)) { other.mutable_rightmost() = EmptyNode(); } btree(btree &&other, const allocator_type &alloc) diff --git a/absl/container/internal/compressed_tuple.h b/absl/container/internal/compressed_tuple.h index 59e70eb2..f05a1fdc 100644 --- 
a/absl/container/internal/compressed_tuple.h +++ b/absl/container/internal/compressed_tuple.h @@ -87,10 +87,10 @@ struct Storage { constexpr Storage() = default; template explicit constexpr Storage(absl::in_place_t, V&& v) - : value(absl::forward(v)) {} + : value(std::forward(v)) {} constexpr const T& get() const& { return value; } T& get() & { return value; } - constexpr const T&& get() const&& { return absl::move(*this).value; } + constexpr const T&& get() const&& { return std::move(*this).value; } T&& get() && { return std::move(*this).value; } }; @@ -99,12 +99,11 @@ struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage : T { constexpr Storage() = default; template - explicit constexpr Storage(absl::in_place_t, V&& v) - : T(absl::forward(v)) {} + explicit constexpr Storage(absl::in_place_t, V&& v) : T(std::forward(v)) {} constexpr const T& get() const& { return *this; } T& get() & { return *this; } - constexpr const T&& get() const&& { return absl::move(*this); } + constexpr const T&& get() const&& { return std::move(*this); } T&& get() && { return std::move(*this); } }; @@ -123,7 +122,7 @@ struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl< constexpr CompressedTupleImpl() = default; template explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args) - : Storage(absl::in_place, absl::forward(args))... {} + : Storage(absl::in_place, std::forward(args))... {} friend CompressedTuple; }; @@ -135,7 +134,7 @@ struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl< constexpr CompressedTupleImpl() = default; template explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args) - : Storage(absl::in_place, absl::forward(args))... {} + : Storage(absl::in_place, std::forward(args))... {} friend CompressedTuple; }; @@ -234,8 +233,8 @@ class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple bool> = true> explicit constexpr CompressedTuple(First&& first, Vs&&... base) : CompressedTuple::CompressedTupleImpl(absl::in_place, - absl::forward(first), - absl::forward(base)...) {} + std::forward(first), + std::forward(base)...) {} template ElemT& get() & { @@ -254,7 +253,7 @@ class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple template constexpr const ElemT&& get() const&& { - return absl::move(*this).StorageT::get(); + return std::move(*this).StorageT::get(); } }; diff --git a/absl/container/internal/compressed_tuple_test.cc b/absl/container/internal/compressed_tuple_test.cc index da07baab..49818fb8 100644 --- a/absl/container/internal/compressed_tuple_test.cc +++ b/absl/container/internal/compressed_tuple_test.cc @@ -16,6 +16,7 @@ #include #include +#include #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -384,8 +385,8 @@ TEST(CompressedTupleTest, Constexpr) { #if defined(__clang__) // An apparent bug in earlier versions of gcc claims these are ambiguous. - constexpr int x2m = absl::move(x.get<2>()).get<0>(); - constexpr CallType x3m = absl::move(x).get<3>().value(); + constexpr int x2m = std::move(x.get<2>()).get<0>(); + constexpr CallType x3m = std::move(x).get<3>().value(); EXPECT_EQ(x2m, 5); EXPECT_EQ(x3m, CallType::kConstMove); #endif diff --git a/absl/container/internal/layout.h b/absl/container/internal/layout.h index a4ba6101..1bf739cc 100644 --- a/absl/container/internal/layout.h +++ b/absl/container/internal/layout.h @@ -706,7 +706,7 @@ class Layout : public internal_layout::LayoutType { template static constexpr PartialType Partial(Sizes&&... 
sizes) { static_assert(sizeof...(Sizes) <= sizeof...(Ts), ""); - return PartialType(absl::forward(sizes)...); + return PartialType(std::forward(sizes)...); } // Creates a layout with the sizes of all arrays specified. If you know diff --git a/absl/functional/bind_front.h b/absl/functional/bind_front.h index a956eb02..885f24b8 100644 --- a/absl/functional/bind_front.h +++ b/absl/functional/bind_front.h @@ -34,6 +34,8 @@ #include // For std::bind_front. #endif // defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L +#include + #include "absl/functional/internal/front_binder.h" #include "absl/utility/utility.h" @@ -182,8 +184,7 @@ template constexpr functional_internal::bind_front_t bind_front( F&& func, BoundArgs&&... args) { return functional_internal::bind_front_t( - absl::in_place, absl::forward(func), - absl::forward(args)...); + absl::in_place, std::forward(func), std::forward(args)...); } #endif // defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L diff --git a/absl/functional/internal/front_binder.h b/absl/functional/internal/front_binder.h index 45f52de7..44a54928 100644 --- a/absl/functional/internal/front_binder.h +++ b/absl/functional/internal/front_binder.h @@ -34,8 +34,8 @@ namespace functional_internal { template R Apply(Tuple&& bound, absl::index_sequence, Args&&... free) { return base_internal::invoke( - absl::forward(bound).template get()..., - absl::forward(free)...); + std::forward(bound).template get()..., + std::forward(free)...); } template @@ -48,13 +48,13 @@ class FrontBinder { public: template constexpr explicit FrontBinder(absl::in_place_t, Ts&&... ts) - : bound_args_(absl::forward(ts)...) {} + : bound_args_(std::forward(ts)...) {} template > R operator()(FreeArgs&&... free_args) & { return functional_internal::Apply(bound_args_, Idx(), - absl::forward(free_args)...); + std::forward(free_args)...); } template > R operator()(FreeArgs&&... free_args) const& { return functional_internal::Apply(bound_args_, Idx(), - absl::forward(free_args)...); + std::forward(free_args)...); } template (absl::move(bound_args_), Idx(), - absl::forward(free_args)...); + return functional_internal::Apply(std::move(bound_args_), Idx(), + std::forward(free_args)...); } template (absl::move(bound_args_), Idx(), - absl::forward(free_args)...); + return functional_internal::Apply(std::move(bound_args_), Idx(), + std::forward(free_args)...); } }; diff --git a/absl/types/internal/optional.h b/absl/types/internal/optional.h index a96d260a..5731a5bc 100644 --- a/absl/types/internal/optional.h +++ b/absl/types/internal/optional.h @@ -81,7 +81,7 @@ class optional_data_dtor_base { template constexpr explicit optional_data_dtor_base(in_place_t, Args&&... args) - : engaged_(true), data_(absl::forward(args)...) {} + : engaged_(true), data_(std::forward(args)...) {} ~optional_data_dtor_base() { destruct(); } }; @@ -110,7 +110,7 @@ class optional_data_dtor_base { template constexpr explicit optional_data_dtor_base(in_place_t, Args&&... args) - : engaged_(true), data_(absl::forward(args)...) {} + : engaged_(true), data_(std::forward(args)...) 
{} }; template diff --git a/absl/types/internal/variant.h b/absl/types/internal/variant.h index 263d7b09..40f57c40 100644 --- a/absl/types/internal/variant.h +++ b/absl/types/internal/variant.h @@ -26,6 +26,7 @@ #include #include #include +#include #include "absl/base/config.h" #include "absl/base/internal/identity.h" @@ -214,7 +215,7 @@ constexpr ReturnType call_with_indices(FunctionObject&& function) { std::is_same()( SizeT()...))>::value, "Not all visitation overloads have the same return type."); - return absl::forward(function)(SizeT()...); + return std::forward(function)(SizeT()...); } template @@ -284,7 +285,7 @@ struct UnreachableSwitchCase { assert(false); // NOLINT // Hack to silence potential no return warning -- cause an infinite loop. - return Run(absl::forward(op)); + return Run(std::forward(op)); #endif // Checks for __builtin_unreachable } }; @@ -292,7 +293,7 @@ struct UnreachableSwitchCase { template struct ReachableSwitchCase { static VisitIndicesResultT Run(Op&& op) { - return absl::base_internal::invoke(absl::forward(op), SizeT()); + return absl::base_internal::invoke(std::forward(op), SizeT()); } }; @@ -357,74 +358,74 @@ struct VisitIndicesSwitch { static VisitIndicesResultT Run(Op&& op, std::size_t i) { switch (i) { case 0: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 1: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 2: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 3: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 4: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 5: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 6: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 7: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 8: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 9: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 10: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 11: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 12: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 13: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 14: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 15: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 16: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 17: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 18: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 19: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 20: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 21: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 22: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 23: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 24: - return 
PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 25: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 26: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 27: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 28: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 29: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 30: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 31: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); case 32: - return PickCase::Run(absl::forward(op)); + return PickCase::Run(std::forward(op)); default: ABSL_ASSERT(i == variant_npos); - return absl::base_internal::invoke(absl::forward(op), NPos()); + return absl::base_internal::invoke(std::forward(op), NPos()); } } }; @@ -437,7 +438,7 @@ struct VisitIndicesFallback { MakeVisitationMatrix, Op, index_sequence<(EndIndices + 1)...>, index_sequence<>>::Run(), - (indices + 1)...)(absl::forward(op)); + (indices + 1)...)(std::forward(op)); } }; @@ -489,7 +490,7 @@ struct VisitIndicesVariadicImpl, EndIndices...> { VisitIndicesResultT operator()( SizeT /*index*/) && { return base_internal::invoke( - absl::forward(op), + std::forward(op), SizeT::value - std::size_t{1}>()...); } @@ -501,7 +502,7 @@ struct VisitIndicesVariadicImpl, EndIndices...> { static VisitIndicesResultT Run(Op&& op, SizeType... i) { return VisitIndicesSwitch::value>::Run( - FlattenedOp{absl::forward(op)}, + FlattenedOp{std::forward(op)}, FlattenIndices<(EndIndices + std::size_t{1})...>::Run( (i + std::size_t{1})...)); } @@ -612,7 +613,7 @@ struct VariantCoreAccess { TypedThrowBadVariantAccess>(); } - return Access(absl::forward(self)); + return Access(std::forward(self)); } // The implementation of the move-assignment operation for a variant. @@ -684,7 +685,7 @@ struct VariantCoreAccess { void operator()(SizeT /*old_i*/ ) const { - Access(*left) = absl::forward(other); + Access(*left) = std::forward(other); } template @@ -695,13 +696,13 @@ struct VariantCoreAccess { if (std::is_nothrow_constructible::value || !std::is_nothrow_move_constructible::value) { left->template emplace( - absl::forward(other)); + std::forward(other)); } else { // the standard says "equivalent to // operator=(variant(std::forward(t)))", but we use `emplace` here // because the variant's move assignment operator could be deleted. 
left->template emplace( - New(absl::forward(other))); + New(std::forward(other))); } } @@ -712,7 +713,7 @@ struct VariantCoreAccess { template static ConversionAssignVisitor MakeConversionAssignVisitor(Left* left, QualifiedNew&& qual) { - return {left, absl::forward(qual)}; + return {left, std::forward(qual)}; } // Backend for operations for `emplace()` which destructs `*self` then @@ -723,7 +724,7 @@ struct VariantCoreAccess { Destroy(*self); using New = typename absl::variant_alternative::type; New* const result = ::new (static_cast(&self->state_)) - New(absl::forward(args)...); + New(std::forward(args)...); self->index_ = NewIndex; return *result; } @@ -919,9 +920,9 @@ struct PerformVisitation { Is, QualifiedVariants>...)>>::value, "All visitation overloads must have the same return type."); return absl::base_internal::invoke( - absl::forward(op), + std::forward(op), VariantCoreAccess::Access( - absl::forward(std::get(variant_tup)))...); + std::forward(std::get(variant_tup)))...); } template @@ -969,11 +970,11 @@ union Union { template explicit constexpr Union(EmplaceTag<0>, P&&... args) - : head(absl::forward

<P>(args)...) {} + : head(std::forward<P>(args)...) {} template <std::size_t I, class... P> explicit constexpr Union(EmplaceTag<I>, P&&... args) - : tail(EmplaceTag<I - 1>{}, absl::forward<P>(args)...) {} + : tail(EmplaceTag<I - 1>{}, std::forward<P>(args)...) {} Head head; TailUnion tail; @@ -1001,11 +1002,11 @@ union DestructibleUnionImpl { template <class... P> explicit constexpr DestructibleUnionImpl(EmplaceTag<0>, P&&... args) - : head(absl::forward<P>(args)...) {} + : head(std::forward<P>(args)...) {} template <std::size_t I, class... P> explicit constexpr DestructibleUnionImpl(EmplaceTag<I>, P&&... args) - : tail(EmplaceTag<I - 1>{}, absl::forward<P>(args)...) {} + : tail(EmplaceTag<I - 1>{}, std::forward<P>(args)...) {} ~DestructibleUnionImpl() {} @@ -1036,7 +1037,7 @@ class VariantStateBase { template <std::size_t I, class... P> explicit constexpr VariantStateBase(EmplaceTag<I> tag, P&&... args) - : state_(tag, absl::forward<P>(args)...), index_(I) {} + : state_(tag, std::forward<P>
(args)...), index_(I) {} explicit constexpr VariantStateBase(NoopConstructorTag) : state_(NoopConstructorTag()), index_(variant_npos) {} @@ -1321,7 +1322,7 @@ class VariantMoveBaseNontrivial : protected VariantStateBaseDestructor { using Alternative = typename absl::variant_alternative>::type; ::new (static_cast(&self->state_)) Alternative( - variant_internal::AccessUnion(absl::move(other->state_), i)); + variant_internal::AccessUnion(std::move(other->state_), i)); } void operator()(SizeT /*i*/) const {} diff --git a/absl/types/optional.h b/absl/types/optional.h index 395fe62f..cf7249cb 100644 --- a/absl/types/optional.h +++ b/absl/types/optional.h @@ -151,7 +151,7 @@ class optional : private optional_internal::optional_data, std::is_same, std::is_constructible >::value>* = nullptr> constexpr explicit optional(InPlaceT, Args&&... args) - : data_base(in_place_t(), absl::forward(args)...) {} + : data_base(in_place_t(), std::forward(args)...) {} // Constructs a non-empty `optional` direct-initialized value of type `T` from // the arguments of an initializer_list and `std::forward(args)...`. @@ -162,8 +162,7 @@ class optional : private optional_internal::optional_data, T, std::initializer_list&, Args&&...>::value>::type> constexpr explicit optional(in_place_t, std::initializer_list il, Args&&... args) - : data_base(in_place_t(), il, absl::forward(args)...) { - } + : data_base(in_place_t(), il, std::forward(args)...) {} // Value constructor (implicit) template < @@ -176,21 +175,21 @@ class optional : private optional_internal::optional_data, std::is_convertible, std::is_constructible >::value, bool>::type = false> - constexpr optional(U&& v) : data_base(in_place_t(), absl::forward(v)) {} + constexpr optional(U&& v) : data_base(in_place_t(), std::forward(v)) {} // Value constructor (explicit) template < typename U = T, typename std::enable_if< absl::conjunction::type>>, + in_place_t, typename std::decay::type> >, absl::negation, typename std::decay::type>>, - absl::negation>, - std::is_constructible>::value, + optional, typename std::decay::type> >, + absl::negation >, + std::is_constructible >::value, bool>::type = false> explicit constexpr optional(U&& v) - : data_base(in_place_t(), absl::forward(v)) {} + : data_base(in_place_t(), std::forward(v)) {} // Converting copy constructor (implicit) template , return reference(); } constexpr const T&& operator*() const&& ABSL_ATTRIBUTE_LIFETIME_BOUND { - return ABSL_HARDENING_ASSERT(this->engaged_), absl::move(reference()); + return ABSL_HARDENING_ASSERT(this->engaged_), std::move(reference()); } T&& operator*() && ABSL_ATTRIBUTE_LIFETIME_BOUND { ABSL_HARDENING_ASSERT(this->engaged_); @@ -492,7 +491,7 @@ class optional : private optional_internal::optional_data, } constexpr const T&& value() const&& ABSL_ATTRIBUTE_LIFETIME_BOUND { // NOLINT(build/c++11) - return absl::move( + return std::move( static_cast(*this) ? reference() : (optional_internal::throw_bad_optional_access(), reference())); @@ -511,9 +510,8 @@ class optional : private optional_internal::optional_data, "optional::value_or: T must be copy constructible"); static_assert(std::is_convertible::value, "optional::value_or: U must be convertible to T"); - return static_cast(*this) - ? **this - : static_cast(absl::forward(v)); + return static_cast(*this) ? 
**this + : static_cast(std::forward(v)); } template T value_or(U&& v) && { // NOLINT(build/c++11) @@ -573,19 +571,18 @@ void swap(optional& a, optional& b) noexcept(noexcept(a.swap(b))) { // static_assert(opt.value() == 1, ""); template constexpr optional::type> make_optional(T&& v) { - return optional::type>(absl::forward(v)); + return optional::type>(std::forward(v)); } template constexpr optional make_optional(Args&&... args) { - return optional(in_place_t(), absl::forward(args)...); + return optional(in_place_t(), std::forward(args)...); } template constexpr optional make_optional(std::initializer_list il, Args&&... args) { - return optional(in_place_t(), il, - absl::forward(args)...); + return optional(in_place_t(), il, std::forward(args)...); } // Relational operators [optional.relops] diff --git a/absl/types/variant.h b/absl/types/variant.h index ac93464b..56a7e05e 100644 --- a/absl/types/variant.h +++ b/absl/types/variant.h @@ -303,11 +303,10 @@ constexpr T& get(variant& v) { // NOLINT } // Overload for getting a variant's rvalue by type. -// Note: `absl::move()` is required to allow use of constexpr in C++11. template constexpr T&& get(variant&& v) { return variant_internal::VariantCoreAccess::CheckedAccess< - variant_internal::IndexOf::value>(absl::move(v)); + variant_internal::IndexOf::value>(std::move(v)); } // Overload for getting a variant's const lvalue by type. @@ -318,11 +317,10 @@ constexpr const T& get(const variant& v) { } // Overload for getting a variant's const rvalue by type. -// Note: `absl::move()` is required to allow use of constexpr in C++11. template constexpr const T&& get(const variant&& v) { return variant_internal::VariantCoreAccess::CheckedAccess< - variant_internal::IndexOf::value>(absl::move(v)); + variant_internal::IndexOf::value>(std::move(v)); } // Overload for getting a variant's lvalue by index. @@ -333,11 +331,10 @@ constexpr variant_alternative_t>& get( } // Overload for getting a variant's rvalue by index. -// Note: `absl::move()` is required to allow use of constexpr in C++11. template constexpr variant_alternative_t>&& get( variant&& v) { - return variant_internal::VariantCoreAccess::CheckedAccess(absl::move(v)); + return variant_internal::VariantCoreAccess::CheckedAccess(std::move(v)); } // Overload for getting a variant's const lvalue by index. @@ -348,11 +345,10 @@ constexpr const variant_alternative_t>& get( } // Overload for getting a variant's const rvalue by index. -// Note: `absl::move()` is required to allow use of constexpr in C++11. 
template constexpr const variant_alternative_t>&& get( const variant&& v) { - return variant_internal::VariantCoreAccess::CheckedAccess(absl::move(v)); + return variant_internal::VariantCoreAccess::CheckedAccess(std::move(v)); } // get_if() @@ -432,8 +428,8 @@ variant_internal::VisitResult visit(Visitor&& vis, return variant_internal:: VisitIndices >::value...>::Run( variant_internal::PerformVisitation{ - std::forward_as_tuple(absl::forward(vars)...), - absl::forward(vis)}, + std::forward_as_tuple(std::forward(vars)...), + std::forward(vis)}, vars.index()...); } @@ -504,13 +500,12 @@ class variant : private variant_internal::VariantBase { class T, std::size_t I = std::enable_if< variant_internal::IsNeitherSelfNorInPlace>::value, - variant_internal::IndexOfConstructedType>::type::value, + absl::decay_t >::value, + variant_internal::IndexOfConstructedType >::type::value, class Tj = absl::variant_alternative_t, - absl::enable_if_t::value>* = - nullptr> + absl::enable_if_t::value>* = nullptr> constexpr variant(T&& t) noexcept(std::is_nothrow_constructible::value) - : Base(variant_internal::EmplaceTag(), absl::forward(t)) {} + : Base(variant_internal::EmplaceTag(), std::forward(t)) {} // Constructs a variant of an alternative type from the arguments through // direct-initialization. @@ -524,7 +519,7 @@ class variant : private variant_internal::VariantBase { constexpr explicit variant(in_place_type_t, Args&&... args) : Base(variant_internal::EmplaceTag< variant_internal::UnambiguousIndexOf::value>(), - absl::forward(args)...) {} + std::forward(args)...) {} // Constructs a variant of an alternative type from an initializer list // and other arguments through direct-initialization. @@ -539,7 +534,7 @@ class variant : private variant_internal::VariantBase { Args&&... args) : Base(variant_internal::EmplaceTag< variant_internal::UnambiguousIndexOf::value>(), - il, absl::forward(args)...) {} + il, std::forward(args)...) {} // Constructs a variant of an alternative type from a provided index, // through value-initialization using the provided forwarded arguments. @@ -548,7 +543,7 @@ class variant : private variant_internal::VariantBase { variant_internal::VariantAlternativeSfinaeT, Args...>::value>::type* = nullptr> constexpr explicit variant(in_place_index_t, Args&&... args) - : Base(variant_internal::EmplaceTag(), absl::forward(args)...) {} + : Base(variant_internal::EmplaceTag(), std::forward(args)...) {} // Constructs a variant of an alternative type from a provided index, // through value-initialization of an initializer list and the provided @@ -560,7 +555,7 @@ class variant : private variant_internal::VariantBase { constexpr explicit variant(in_place_index_t, std::initializer_list il, Args&&... args) : Base(variant_internal::EmplaceTag(), il, - absl::forward(args)...) {} + std::forward(args)...) {} // Destructors @@ -595,7 +590,7 @@ class variant : private variant_internal::VariantBase { std::is_nothrow_constructible::value) { variant_internal::VisitIndices::Run( variant_internal::VariantCoreAccess::MakeConversionAssignVisitor( - this, absl::forward(t)), + this, std::forward(t)), index()); return *this; @@ -623,7 +618,7 @@ class variant : private variant_internal::VariantBase { T& emplace(Args&&... 
args) { return variant_internal::VariantCoreAccess::Replace< variant_internal::UnambiguousIndexOf::value>( - this, absl::forward(args)...); + this, std::forward(args)...); } // Constructs a value of the given alternative type T within the variant using @@ -644,7 +639,7 @@ class variant : private variant_internal::VariantBase { T& emplace(std::initializer_list il, Args&&... args) { return variant_internal::VariantCoreAccess::Replace< variant_internal::UnambiguousIndexOf::value>( - this, il, absl::forward(args)...); + this, il, std::forward(args)...); } // Destroys the current value of the variant (provided that @@ -663,7 +658,7 @@ class variant : private variant_internal::VariantBase { Args...>::value>::type* = nullptr> absl::variant_alternative_t& emplace(Args&&... args) { return variant_internal::VariantCoreAccess::Replace( - this, absl::forward(args)...); + this, std::forward(args)...); } // Destroys the current value of the variant (provided that @@ -681,7 +676,7 @@ class variant : private variant_internal::VariantBase { absl::variant_alternative_t& emplace(std::initializer_list il, Args&&... args) { return variant_internal::VariantCoreAccess::Replace( - this, il, absl::forward(args)...); + this, il, std::forward(args)...); } // variant::valueless_by_exception() diff --git a/absl/types/variant_test.cc b/absl/types/variant_test.cc index 4cd5b7a3..91b142ca 100644 --- a/absl/types/variant_test.cc +++ b/absl/types/variant_test.cc @@ -389,7 +389,7 @@ TEST(VariantTest, TestMoveConstruct) { using V = variant, MoveOnly, MoveOnly>; V v(in_place_index<1>, 10); - V v2 = absl::move(v); + V v2 = std::move(v); EXPECT_EQ(10, absl::get<1>(v2).value); } @@ -1209,60 +1209,60 @@ TEST(VariantTest, GetIndex) { Var v(absl::in_place_index<0>, 0); using LValueGetType = decltype(absl::get<0>(v)); - using RValueGetType = decltype(absl::get<0>(absl::move(v))); + using RValueGetType = decltype(absl::get<0>(std::move(v))); EXPECT_TRUE((std::is_same::value)); EXPECT_TRUE((std::is_same::value)); EXPECT_EQ(absl::get<0>(v), 0); - EXPECT_EQ(absl::get<0>(absl::move(v)), 0); + EXPECT_EQ(absl::get<0>(std::move(v)), 0); const Var& const_v = v; using ConstLValueGetType = decltype(absl::get<0>(const_v)); - using ConstRValueGetType = decltype(absl::get<0>(absl::move(const_v))); + using ConstRValueGetType = decltype(absl::get<0>(std::move(const_v))); EXPECT_TRUE((std::is_same::value)); EXPECT_TRUE((std::is_same::value)); EXPECT_EQ(absl::get<0>(const_v), 0); - EXPECT_EQ(absl::get<0>(absl::move(const_v)), 0); + EXPECT_EQ(absl::get<0>(std::move(const_v)), 0); } { Var v = std::string("Hello"); using LValueGetType = decltype(absl::get<1>(v)); - using RValueGetType = decltype(absl::get<1>(absl::move(v))); + using RValueGetType = decltype(absl::get<1>(std::move(v))); EXPECT_TRUE((std::is_same::value)); EXPECT_TRUE((std::is_same::value)); EXPECT_EQ(absl::get<1>(v), "Hello"); - EXPECT_EQ(absl::get<1>(absl::move(v)), "Hello"); + EXPECT_EQ(absl::get<1>(std::move(v)), "Hello"); const Var& const_v = v; using ConstLValueGetType = decltype(absl::get<1>(const_v)); - using ConstRValueGetType = decltype(absl::get<1>(absl::move(const_v))); + using ConstRValueGetType = decltype(absl::get<1>(std::move(const_v))); EXPECT_TRUE((std::is_same::value)); EXPECT_TRUE((std::is_same::value)); EXPECT_EQ(absl::get<1>(const_v), "Hello"); - EXPECT_EQ(absl::get<1>(absl::move(const_v)), "Hello"); + EXPECT_EQ(absl::get<1>(std::move(const_v)), "Hello"); } { Var v = 2.0; using LValueGetType = decltype(absl::get<2>(v)); - using RValueGetType = 
decltype(absl::get<2>(absl::move(v))); + using RValueGetType = decltype(absl::get<2>(std::move(v))); EXPECT_TRUE((std::is_same::value)); EXPECT_TRUE((std::is_same::value)); EXPECT_EQ(absl::get<2>(v), 2.); - EXPECT_EQ(absl::get<2>(absl::move(v)), 2.); + EXPECT_EQ(absl::get<2>(std::move(v)), 2.); const Var& const_v = v; using ConstLValueGetType = decltype(absl::get<2>(const_v)); - using ConstRValueGetType = decltype(absl::get<2>(absl::move(const_v))); + using ConstRValueGetType = decltype(absl::get<2>(std::move(const_v))); EXPECT_TRUE((std::is_same::value)); EXPECT_TRUE((std::is_same::value)); EXPECT_EQ(absl::get<2>(const_v), 2.); - EXPECT_EQ(absl::get<2>(absl::move(const_v)), 2.); + EXPECT_EQ(absl::get<2>(std::move(const_v)), 2.); } { @@ -1270,20 +1270,20 @@ TEST(VariantTest, GetIndex) { v.emplace<3>(1); using LValueGetType = decltype(absl::get<3>(v)); - using RValueGetType = decltype(absl::get<3>(absl::move(v))); + using RValueGetType = decltype(absl::get<3>(std::move(v))); EXPECT_TRUE((std::is_same::value)); EXPECT_TRUE((std::is_same::value)); EXPECT_EQ(absl::get<3>(v), 1); - EXPECT_EQ(absl::get<3>(absl::move(v)), 1); + EXPECT_EQ(absl::get<3>(std::move(v)), 1); const Var& const_v = v; using ConstLValueGetType = decltype(absl::get<3>(const_v)); - using ConstRValueGetType = decltype(absl::get<3>(absl::move(const_v))); + using ConstRValueGetType = decltype(absl::get<3>(std::move(const_v))); EXPECT_TRUE((std::is_same::value)); EXPECT_TRUE((std::is_same::value)); EXPECT_EQ(absl::get<3>(const_v), 1); - EXPECT_EQ(absl::get<3>(absl::move(const_v)), 1); // NOLINT + EXPECT_EQ(absl::get<3>(std::move(const_v)), 1); // NOLINT } } @@ -1322,60 +1322,60 @@ TEST(VariantTest, GetType) { Var v = 1; using LValueGetType = decltype(absl::get(v)); - using RValueGetType = decltype(absl::get(absl::move(v))); + using RValueGetType = decltype(absl::get(std::move(v))); EXPECT_TRUE((std::is_same::value)); EXPECT_TRUE((std::is_same::value)); EXPECT_EQ(absl::get(v), 1); - EXPECT_EQ(absl::get(absl::move(v)), 1); + EXPECT_EQ(absl::get(std::move(v)), 1); const Var& const_v = v; using ConstLValueGetType = decltype(absl::get(const_v)); - using ConstRValueGetType = decltype(absl::get(absl::move(const_v))); + using ConstRValueGetType = decltype(absl::get(std::move(const_v))); EXPECT_TRUE((std::is_same::value)); EXPECT_TRUE((std::is_same::value)); EXPECT_EQ(absl::get(const_v), 1); - EXPECT_EQ(absl::get(absl::move(const_v)), 1); + EXPECT_EQ(absl::get(std::move(const_v)), 1); } { Var v = std::string("Hello"); using LValueGetType = decltype(absl::get<1>(v)); - using RValueGetType = decltype(absl::get<1>(absl::move(v))); + using RValueGetType = decltype(absl::get<1>(std::move(v))); EXPECT_TRUE((std::is_same::value)); EXPECT_TRUE((std::is_same::value)); EXPECT_EQ(absl::get(v), "Hello"); - EXPECT_EQ(absl::get(absl::move(v)), "Hello"); + EXPECT_EQ(absl::get(std::move(v)), "Hello"); const Var& const_v = v; using ConstLValueGetType = decltype(absl::get<1>(const_v)); - using ConstRValueGetType = decltype(absl::get<1>(absl::move(const_v))); + using ConstRValueGetType = decltype(absl::get<1>(std::move(const_v))); EXPECT_TRUE((std::is_same::value)); EXPECT_TRUE((std::is_same::value)); EXPECT_EQ(absl::get(const_v), "Hello"); - EXPECT_EQ(absl::get(absl::move(const_v)), "Hello"); + EXPECT_EQ(absl::get(std::move(const_v)), "Hello"); } { Var v = 2.0; using LValueGetType = decltype(absl::get<2>(v)); - using RValueGetType = decltype(absl::get<2>(absl::move(v))); + using RValueGetType = decltype(absl::get<2>(std::move(v))); 
EXPECT_TRUE((std::is_same::value)); EXPECT_TRUE((std::is_same::value)); EXPECT_EQ(absl::get(v), 2.); - EXPECT_EQ(absl::get(absl::move(v)), 2.); + EXPECT_EQ(absl::get(std::move(v)), 2.); const Var& const_v = v; using ConstLValueGetType = decltype(absl::get<2>(const_v)); - using ConstRValueGetType = decltype(absl::get<2>(absl::move(const_v))); + using ConstRValueGetType = decltype(absl::get<2>(std::move(const_v))); EXPECT_TRUE((std::is_same::value)); EXPECT_TRUE((std::is_same::value)); EXPECT_EQ(absl::get(const_v), 2.); - EXPECT_EQ(absl::get(absl::move(const_v)), 2.); + EXPECT_EQ(absl::get(std::move(const_v)), 2.); } } @@ -1825,13 +1825,13 @@ TEST(VariantTest, VisitRValue) { int operator()(std::string&&, std::string&&) const { return 3; } // NOLINT }; EXPECT_FALSE(absl::visit(Visitor{}, v)); - EXPECT_TRUE(absl::visit(Visitor{}, absl::move(v))); + EXPECT_TRUE(absl::visit(Visitor{}, std::move(v))); // Also test the variadic overload. EXPECT_EQ(0, absl::visit(Visitor{}, v, v)); - EXPECT_EQ(1, absl::visit(Visitor{}, v, absl::move(v))); - EXPECT_EQ(2, absl::visit(Visitor{}, absl::move(v), v)); - EXPECT_EQ(3, absl::visit(Visitor{}, absl::move(v), absl::move(v))); + EXPECT_EQ(1, absl::visit(Visitor{}, v, std::move(v))); + EXPECT_EQ(2, absl::visit(Visitor{}, std::move(v), v)); + EXPECT_EQ(3, absl::visit(Visitor{}, std::move(v), std::move(v))); } TEST(VariantTest, VisitRValueVisitor) { @@ -1862,12 +1862,12 @@ TEST(VariantTest, VisitResultTypeDifferent) { (std::is_same::value)); EXPECT_TRUE( (std::is_same::value)); + decltype(absl::visit(visitor, std::move(v)))>::value)); EXPECT_TRUE(( std::is_same::value)); EXPECT_TRUE( (std::is_same::value)); + decltype(absl::visit(Visitor{}, std::move(v)))>::value)); } TEST(VariantTest, VisitVariadic) { @@ -2225,7 +2225,7 @@ TEST(VariantTest, TestMoveSemantics) { EXPECT_TRUE(absl::holds_alternative>(v)); // Construct a variant by moving from another variant. - Variant v2(absl::move(v)); + Variant v2(std::move(v)); ASSERT_TRUE(absl::holds_alternative>(v2)); ASSERT_NE(nullptr, absl::get>(v2)); EXPECT_EQ(10, *absl::get>(v2)); @@ -2242,7 +2242,7 @@ TEST(VariantTest, TestMoveSemantics) { EXPECT_EQ("foo", *absl::get>(v)); // Move-assign a variant. - v2 = absl::move(v); + v2 = std::move(v); ASSERT_TRUE(absl::holds_alternative>(v2)); EXPECT_EQ("foo", *absl::get>(v2)); EXPECT_TRUE(absl::holds_alternative>(v)); @@ -2568,7 +2568,7 @@ TEST(VariantTest, TestVectorOfMoveonlyVariant) { vec.push_back(absl::make_unique(42)); vec.emplace_back("Hello"); vec.reserve(3); - auto another_vec = absl::move(vec); + auto another_vec = std::move(vec); // As a sanity check, verify vector contents. ASSERT_EQ(2u, another_vec.size()); EXPECT_EQ(42, *absl::get>(another_vec[0])); -- cgit v1.2.3 From 6ab5b0aad86dc08d257f6b567611c231c6b8ac31 Mon Sep 17 00:00:00 2001 From: Vitaly Goldshteyn Date: Mon, 20 May 2024 11:57:11 -0700 Subject: Move `prepare_insert` out of the line as type erased `PrepareInsertNonSoo`. This significantly reduces binary size of big binaries and creates a single hot function instead of many cold. That is decreasing cash misses during code execution. We also avoid size related computations for tables with no deleted slots, when resize is necessary. 
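For readers unfamiliar with the pattern, here is a minimal, self-contained sketch of the type-erasure idea this change applies. It is simplified and uses hypothetical names (PolicyFunctions is trimmed down, PrepareInsertErased and TinySet are invented for illustration); it is not the actual raw_hash_set code. Per-type behavior is packaged into a small table of plain function pointers, so the large, cold insertion path can be emitted once and shared by every container instantiation.

// Sketch only: simplified, hypothetical names; not the Abseil implementation.
#include <cstddef>
#include <cstdio>

struct PolicyFunctions {  // simplified stand-in for the real policy table
  std::size_t slot_size;
  void (*transfer)(void* dst, void* src);
};

// One out-of-line, non-template function shared by all element types.
std::size_t PrepareInsertErased(std::size_t hash, std::size_t capacity,
                                const PolicyFunctions& policy) {
  // A real implementation would probe, grow, or drop tombstones here; the
  // point is only that all type-specific work is reached through `policy`.
  (void)policy;
  return hash & (capacity - 1);
}

template <class T>
struct TinySet {
  static void Transfer(void* dst, void* src) {
    *static_cast<T*>(dst) = *static_cast<T*>(src);
  }
  static const PolicyFunctions& Policy() {
    static constexpr PolicyFunctions kPolicy = {sizeof(T), &Transfer};
    return kPolicy;
  }
  // The templated entry point stays tiny; the heavy lifting is type-erased.
  std::size_t PrepareInsert(std::size_t hash) {
    return PrepareInsertErased(hash, /*capacity=*/16, Policy());
  }
};

int main() {
  TinySet<int> s;
  std::printf("slot offset: %zu\n", s.PrepareInsert(42));
}

In the actual change below, raw_hash_set routes resize and the new PrepareInsertNonSoo through the PolicyFunctions table, which is what collapses the many per-instantiation copies of prepare_insert into one shared hot function.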
PiperOrigin-RevId: 635527119 Change-Id: I763b135f1f6089051e62e348a07b33536af265ab --- absl/container/internal/compressed_tuple_test.cc | 28 +++ absl/container/internal/raw_hash_set.cc | 168 +++++++++++++++- absl/container/internal/raw_hash_set.h | 244 ++++++++--------------- absl/container/internal/raw_hash_set_test.cc | 16 ++ 4 files changed, 295 insertions(+), 161 deletions(-) (limited to 'absl/container/internal/compressed_tuple_test.cc') diff --git a/absl/container/internal/compressed_tuple_test.cc b/absl/container/internal/compressed_tuple_test.cc index 49818fb8..3cd9e18b 100644 --- a/absl/container/internal/compressed_tuple_test.cc +++ b/absl/container/internal/compressed_tuple_test.cc @@ -15,8 +15,11 @@ #include "absl/container/internal/compressed_tuple.h" #include +#include #include +#include #include +#include #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -55,6 +58,7 @@ namespace { using absl::test_internal::CopyableMovableInstance; using absl::test_internal::InstanceTracker; +using ::testing::Each; TEST(CompressedTupleTest, Sizeof) { EXPECT_EQ(sizeof(int), sizeof(CompressedTuple)); @@ -71,6 +75,30 @@ TEST(CompressedTupleTest, Sizeof) { sizeof(CompressedTuple, NotEmpty, Empty<1>>)); } +TEST(CompressedTupleTest, PointerToEmpty) { + auto to_void_ptrs = [](const auto&... objs) { + return std::vector{static_cast(&objs)...}; + }; + { + using Tuple = CompressedTuple>; + EXPECT_EQ(sizeof(int), sizeof(Tuple)); + Tuple t; + EXPECT_THAT(to_void_ptrs(t.get<1>()), Each(&t)); + } + { + using Tuple = CompressedTuple, Empty<1>>; + EXPECT_EQ(sizeof(int), sizeof(Tuple)); + Tuple t; + EXPECT_THAT(to_void_ptrs(t.get<1>(), t.get<2>()), Each(&t)); + } + { + using Tuple = CompressedTuple, Empty<1>, Empty<2>>; + EXPECT_EQ(sizeof(int), sizeof(Tuple)); + Tuple t; + EXPECT_THAT(to_void_ptrs(t.get<1>(), t.get<2>(), t.get<3>()), Each(&t)); + } +} + TEST(CompressedTupleTest, OneMoveOnRValueConstructionTemp) { InstanceTracker tracker; CompressedTuple x1(CopyableMovableInstance(1)); diff --git a/absl/container/internal/raw_hash_set.cc b/absl/container/internal/raw_hash_set.cc index c23c1f3b..9d399a1b 100644 --- a/absl/container/internal/raw_hash_set.cc +++ b/absl/container/internal/raw_hash_set.cc @@ -23,7 +23,9 @@ #include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/base/dynamic_annotations.h" +#include "absl/base/optimization.h" #include "absl/container/internal/container_memory.h" +#include "absl/container/internal/hashtablez_sampler.h" #include "absl/hash/hash.h" namespace absl { @@ -157,6 +159,8 @@ FindInfo find_first_non_full_outofline(const CommonFields& common, return find_first_non_full(common, hash); } +namespace { + // Returns the address of the slot just after slot assuming each slot has the // specified size. static inline void* NextSlot(void* slot, size_t slot_size) { @@ -169,8 +173,22 @@ static inline void* PrevSlot(void* slot, size_t slot_size) { return reinterpret_cast(reinterpret_cast(slot) - slot_size); } -void DropDeletesWithoutResize(CommonFields& common, const void* hash_fn, - const PolicyFunctions& policy, void* tmp_space) { +// Finds guaranteed to exists empty slot from the given position. +// NOTE: this function is almost never triggered inside of the +// DropDeletesWithoutResize, so we keep it simple. +// The table is rather sparse, so empty slot will be found very quickly. 
+size_t FindEmptySlot(size_t start, size_t end, const ctrl_t* ctrl) { + for (size_t i = start; i < end; ++i) { + if (IsEmpty(ctrl[i])) { + return i; + } + } + assert(false && "no empty slot"); + return ~size_t{}; +} + +void DropDeletesWithoutResize(CommonFields& common, + const PolicyFunctions& policy) { void* set = &common; void* slot_array = common.slot_array(); const size_t capacity = common.capacity(); @@ -194,15 +212,26 @@ void DropDeletesWithoutResize(CommonFields& common, const void* hash_fn, // repeat procedure for current slot with moved from element (target) ctrl_t* ctrl = common.control(); ConvertDeletedToEmptyAndFullToDeleted(ctrl, capacity); + const void* hash_fn = policy.hash_fn(common); auto hasher = policy.hash_slot; auto transfer = policy.transfer; const size_t slot_size = policy.slot_size; size_t total_probe_length = 0; void* slot_ptr = SlotAddress(slot_array, 0, slot_size); + + // The index of an empty slot that can be used as temporary memory for + // the swap operation. + constexpr size_t kUnknownId = ~size_t{}; + size_t tmp_space_id = kUnknownId; + for (size_t i = 0; i != capacity; ++i, slot_ptr = NextSlot(slot_ptr, slot_size)) { assert(slot_ptr == SlotAddress(slot_array, i, slot_size)); + if (IsEmpty(ctrl[i])) { + tmp_space_id = i; + continue; + } if (!IsDeleted(ctrl[i])) continue; const size_t hash = (*hasher)(hash_fn, slot_ptr); const FindInfo target = find_first_non_full(common, hash); @@ -231,16 +260,26 @@ void DropDeletesWithoutResize(CommonFields& common, const void* hash_fn, SetCtrl(common, new_i, H2(hash), slot_size); (*transfer)(set, new_slot_ptr, slot_ptr); SetCtrl(common, i, ctrl_t::kEmpty, slot_size); + // Initialize or change empty space id. + tmp_space_id = i; } else { assert(IsDeleted(ctrl[new_i])); SetCtrl(common, new_i, H2(hash), slot_size); // Until we are done rehashing, DELETED marks previously FULL slots. + if (tmp_space_id == kUnknownId) { + tmp_space_id = FindEmptySlot(i + 1, capacity, ctrl); + } + void* tmp_space = SlotAddress(slot_array, tmp_space_id, slot_size); + SanitizerUnpoisonMemoryRegion(tmp_space, slot_size); + // Swap i and new_i elements. (*transfer)(set, tmp_space, new_slot_ptr); (*transfer)(set, new_slot_ptr, slot_ptr); (*transfer)(set, slot_ptr, tmp_space); + SanitizerPoisonMemoryRegion(tmp_space, slot_size); + // repeat the processing of the ith slot --i; slot_ptr = PrevSlot(slot_ptr, slot_size); @@ -267,6 +306,8 @@ static bool WasNeverFull(CommonFields& c, size_t index) { Group::kWidth; } +} // namespace + void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size) { assert(IsFull(c.control()[index]) && "erasing a dangling iterator"); c.decrement_size(); @@ -424,6 +465,129 @@ void HashSetResizeHelper::TransferSlotAfterSoo(CommonFields& c, PoisonSingleGroupEmptySlots(c, slot_size); } +namespace { + +// Called whenever the table needs to vacate empty slots either by removing +// tombstones via rehash or growth. +ABSL_ATTRIBUTE_NOINLINE +FindInfo FindInsertPositionWithGrowthOrRehash(CommonFields& common, size_t hash, + const PolicyFunctions& policy) { + const size_t cap = common.capacity(); + if (cap > Group::kWidth && + // Do these calculations in 64-bit to avoid overflow. + common.size() * uint64_t{32} <= cap * uint64_t{25}) { + // Squash DELETED without growing if there is enough capacity. + // + // Rehash in place if the current size is <= 25/32 of capacity. + // Rationale for such a high factor: 1) DropDeletesWithoutResize() is + // faster than resize, and 2) it takes quite a bit of work to add + // tombstones. 
In the worst case, seems to take approximately 4 + // insert/erase pairs to create a single tombstone and so if we are + // rehashing because of tombstones, we can afford to rehash-in-place as + // long as we are reclaiming at least 1/8 the capacity without doing more + // than 2X the work. (Where "work" is defined to be size() for rehashing + // or rehashing in place, and 1 for an insert or erase.) But rehashing in + // place is faster per operation than inserting or even doubling the size + // of the table, so we actually afford to reclaim even less space from a + // resize-in-place. The decision is to rehash in place if we can reclaim + // at about 1/8th of the usable capacity (specifically 3/28 of the + // capacity) which means that the total cost of rehashing will be a small + // fraction of the total work. + // + // Here is output of an experiment using the BM_CacheInSteadyState + // benchmark running the old case (where we rehash-in-place only if we can + // reclaim at least 7/16*capacity) vs. this code (which rehashes in place + // if we can recover 3/32*capacity). + // + // Note that although in the worst-case number of rehashes jumped up from + // 15 to 190, but the number of operations per second is almost the same. + // + // Abridged output of running BM_CacheInSteadyState benchmark from + // raw_hash_set_benchmark. N is the number of insert/erase operations. + // + // | OLD (recover >= 7/16 | NEW (recover >= 3/32) + // size | N/s LoadFactor NRehashes | N/s LoadFactor NRehashes + // 448 | 145284 0.44 18 | 140118 0.44 19 + // 493 | 152546 0.24 11 | 151417 0.48 28 + // 538 | 151439 0.26 11 | 151152 0.53 38 + // 583 | 151765 0.28 11 | 150572 0.57 50 + // 628 | 150241 0.31 11 | 150853 0.61 66 + // 672 | 149602 0.33 12 | 150110 0.66 90 + // 717 | 149998 0.35 12 | 149531 0.70 129 + // 762 | 149836 0.37 13 | 148559 0.74 190 + // 807 | 149736 0.39 14 | 151107 0.39 14 + // 852 | 150204 0.42 15 | 151019 0.42 15 + DropDeletesWithoutResize(common, policy); + } else { + // Otherwise grow the container. + policy.resize(common, NextCapacity(cap), HashtablezInfoHandle{}); + } + // This function is typically called with tables containing deleted slots. + // The table will be big and `FindFirstNonFullAfterResize` will always + // fallback to `find_first_non_full`. So using `find_first_non_full` directly. + return find_first_non_full(common, hash); +} + +} // namespace + +const void* GetHashRefForEmptyHasher(const CommonFields& common) { + // Empty base optimization typically make the empty base class address to be + // the same as the first address of the derived class object. + // But we generally assume that for empty hasher we can return any valid + // pointer. + return &common; +} + +size_t PrepareInsertNonSoo(CommonFields& common, size_t hash, FindInfo target, + const PolicyFunctions& policy) { + // When there are no deleted slots in the table + // and growth_left is positive, we can insert at the first + // empty slot in the probe sequence (target). + const bool use_target_hint = + // Optimization is disabled when generations are enabled. + // We have to rehash even sparse tables randomly in such mode. + !SwisstableGenerationsEnabled() && + common.growth_info().HasNoDeletedAndGrowthLeft(); + if (ABSL_PREDICT_FALSE(!use_target_hint)) { + // Notes about optimized mode when generations are disabled: + // We do not enter this branch if table has no deleted slots + // and growth_left is positive. + // We enter this branch in the following cases listed in decreasing + // frequency: + // 1. 
Table without deleted slots (>95% cases) that needs to be resized. + // 2. Table with deleted slots that has space for the inserting element. + // 3. Table with deleted slots that needs to be rehashed or resized. + if (ABSL_PREDICT_TRUE(common.growth_info().HasNoGrowthLeftAndNoDeleted())) { + const size_t old_capacity = common.capacity(); + policy.resize(common, NextCapacity(old_capacity), HashtablezInfoHandle{}); + target = HashSetResizeHelper::FindFirstNonFullAfterResize( + common, old_capacity, hash); + } else { + // Note: the table may have no deleted slots here when generations + // are enabled. + const bool rehash_for_bug_detection = + common.should_rehash_for_bug_detection_on_insert(); + if (rehash_for_bug_detection) { + // Move to a different heap allocation in order to detect bugs. + const size_t cap = common.capacity(); + policy.resize(common, + common.growth_left() > 0 ? cap : NextCapacity(cap), + HashtablezInfoHandle{}); + } + if (ABSL_PREDICT_TRUE(common.growth_left() > 0)) { + target = find_first_non_full(common, hash); + } else { + target = FindInsertPositionWithGrowthOrRehash(common, hash, policy); + } + } + } + PrepareInsertCommon(common); + common.growth_info().OverwriteControlAsFull(common.control()[target.offset]); + SetCtrl(common, target.offset, H2(hash), policy.slot_size); + common.infoz().RecordInsert(hash, target.probe_length); + return target.offset; +} + } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h index 1d2e2d14..a64133a7 100644 --- a/absl/container/internal/raw_hash_set.h +++ b/absl/container/internal/raw_hash_set.h @@ -1126,6 +1126,13 @@ class GrowthInfo { return static_cast>(growth_left_info_) > 0; } + // Returns true if the table satisfies two properties: + // 1. Guaranteed to have no kDeleted slots. + // 2. There is no growth left. + bool HasNoGrowthLeftAndNoDeleted() const { + return growth_left_info_ == 0; + } + // Returns true if table guaranteed to have no k bool HasNoDeleted() const { return static_cast>(growth_left_info_) >= 0; @@ -1374,6 +1381,8 @@ class CommonFields : public CommonFieldsGenerationInfo { // This is stored in the heap allocation before the control bytes. // TODO(b/289225379): experiment with moving growth_info back inline to // increase room for SOO. + size_t growth_left() const { return growth_info().GetGrowthLeft(); } + GrowthInfo& growth_info() { auto* gl_ptr = reinterpret_cast(control()) - 1; assert(reinterpret_cast(gl_ptr) % alignof(GrowthInfo) == 0); @@ -1933,8 +1942,7 @@ class HashSetResizeHelper { // will be no performance benefit. // It has implicit assumption that `resize` will call // `GrowSizeIntoSingleGroup*` in case `IsGrowingIntoSingleGroupApplicable`. - // Falls back to `find_first_non_full` in case of big groups, so it is - // safe to use after `rehash_and_grow_if_necessary`. + // Falls back to `find_first_non_full` in case of big groups. static FindInfo FindFirstNonFullAfterResize(const CommonFields& c, size_t old_capacity, size_t hash) { @@ -2216,14 +2224,22 @@ size_t PrepareInsertAfterSoo(size_t hash, size_t slot_size, struct PolicyFunctions { size_t slot_size; + // Returns the pointer to the hash function stored in the set. + const void* (*hash_fn)(const CommonFields& common); + // Returns the hash of the pointed-to slot. size_t (*hash_slot)(const void* hash_fn, void* slot); - // Transfer the contents of src_slot to dst_slot. + // Transfers the contents of src_slot to dst_slot. 
void (*transfer)(void* set, void* dst_slot, void* src_slot); - // Deallocate the backing store from common. + // Deallocates the backing store from common. void (*dealloc)(CommonFields& common, const PolicyFunctions& policy); + + // Resizes set to the new capacity. + // Arguments are used as in raw_hash_set::resize_impl. + void (*resize)(CommonFields& common, size_t new_capacity, + HashtablezInfoHandle forced_infoz); }; // ClearBackingArray clears the backing array, either modifying it in place, @@ -2261,9 +2277,26 @@ ABSL_ATTRIBUTE_NOINLINE void TransferRelocatable(void*, void* dst, void* src) { memcpy(dst, src, SizeOfSlot); } -// Type-erased version of raw_hash_set::drop_deletes_without_resize. -void DropDeletesWithoutResize(CommonFields& common, const void* hash_fn, - const PolicyFunctions& policy, void* tmp_space); +// Type erased raw_hash_set::get_hash_ref_fn for the empty hash function case. +const void* GetHashRefForEmptyHasher(const CommonFields& common); + +// Given the hash of a value not currently in the table and the first empty +// slot in the probe sequence, finds a viable slot index to insert it at. +// +// In case there's no space left, the table can be resized or rehashed +// (for tables with deleted slots, see FindInsertPositionWithGrowthOrRehash). +// +// In the case of absence of deleted slots and positive growth_left, the element +// can be inserted in the provided `target` position. +// +// When the table has deleted slots (according to GrowthInfo), the target +// position will be searched one more time using `find_first_non_full`. +// +// REQUIRES: Table is not SOO. +// REQUIRES: At least one non-full slot available. +// REQUIRES: `target` is a valid empty position to insert. +size_t PrepareInsertNonSoo(CommonFields& common, size_t hash, FindInfo target, + const PolicyFunctions& policy); // A SwissTable. // @@ -3533,7 +3566,7 @@ class raw_hash_set { // common(), old_capacity, hash) // can be called right after `resize`. void resize(size_t new_capacity) { - resize_impl(new_capacity, HashtablezInfoHandle{}); + raw_hash_set::resize_impl(common(), new_capacity, HashtablezInfoHandle{}); } // As above, except that we also accept a pre-sampled, forced infoz for @@ -3541,19 +3574,24 @@ class raw_hash_set { // store the infoz. void resize_with_soo_infoz(HashtablezInfoHandle forced_infoz) { assert(forced_infoz.IsSampled()); - resize_impl(NextCapacity(SooCapacity()), forced_infoz); + raw_hash_set::resize_impl(common(), NextCapacity(SooCapacity()), + forced_infoz); } - ABSL_ATTRIBUTE_NOINLINE void resize_impl( - size_t new_capacity, HashtablezInfoHandle forced_infoz) { + // Resizes set to the new capacity. + // It is a static function in order to use its pointer in GetPolicyFunctions. + ABSL_ATTRIBUTE_NOINLINE static void resize_impl( + CommonFields& common, size_t new_capacity, + HashtablezInfoHandle forced_infoz) { + raw_hash_set* set = reinterpret_cast(&common); assert(IsValidCapacity(new_capacity)); - assert(!fits_in_soo(new_capacity)); - const bool was_soo = is_soo(); - const bool had_soo_slot = was_soo && !empty(); + assert(!set->fits_in_soo(new_capacity)); + const bool was_soo = set->is_soo(); + const bool had_soo_slot = was_soo && !set->empty(); const ctrl_t soo_slot_h2 = - had_soo_slot ? static_cast(H2(hash_of(soo_slot()))) + had_soo_slot ? 
static_cast(H2(set->hash_of(set->soo_slot()))) : ctrl_t::kEmpty; - HashSetResizeHelper resize_helper(common(), was_soo, had_soo_slot, + HashSetResizeHelper resize_helper(common, was_soo, had_soo_slot, forced_infoz); // Initialize HashSetResizeHelper::old_heap_or_soo_. We can't do this in // HashSetResizeHelper constructor because it can't transfer slots when @@ -3561,11 +3599,12 @@ class raw_hash_set { // TODO(b/289225379): try to handle more of the SOO cases inside // InitializeSlots. See comment on cl/555990034 snapshot #63. if (PolicyTraits::transfer_uses_memcpy() || !had_soo_slot) { - resize_helper.old_heap_or_soo() = common().heap_or_soo(); + resize_helper.old_heap_or_soo() = common.heap_or_soo(); } else { - transfer(to_slot(resize_helper.old_soo_data()), soo_slot()); + set->transfer(set->to_slot(resize_helper.old_soo_data()), + set->soo_slot()); } - common().set_capacity(new_capacity); + common.set_capacity(new_capacity); // Note that `InitializeSlots` does different number initialization steps // depending on the values of `transfer_uses_memcpy` and capacities. // Refer to the comment in `InitializeSlots` for more details. @@ -3573,7 +3612,7 @@ class raw_hash_set { resize_helper.InitializeSlots( - common(), CharAlloc(alloc_ref()), soo_slot_h2, sizeof(key_type), + common, CharAlloc(set->alloc_ref()), soo_slot_h2, sizeof(key_type), sizeof(value_type)); // In the SooEnabled() case, capacity is never 0 so we don't check. @@ -3585,30 +3624,30 @@ class raw_hash_set { // Nothing more to do in this case. if (was_soo && !had_soo_slot) return; - slot_type* new_slots = slot_array(); + slot_type* new_slots = set->slot_array(); if (grow_single_group) { if (PolicyTraits::transfer_uses_memcpy()) { // InitializeSlots did all the work. return; } if (was_soo) { - transfer(new_slots + resize_helper.SooSlotIndex(), - to_slot(resize_helper.old_soo_data())); + set->transfer(new_slots + resize_helper.SooSlotIndex(), + to_slot(resize_helper.old_soo_data())); return; } else { // We want GrowSizeIntoSingleGroup to be called here in order to make // InitializeSlots not depend on PolicyTraits. - resize_helper.GrowSizeIntoSingleGroup(common(), - alloc_ref()); + resize_helper.GrowSizeIntoSingleGroup(common, + set->alloc_ref()); } } else { // InitializeSlots prepares control bytes to correspond to empty table. const auto insert_slot = [&](slot_type* slot) { - size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, + size_t hash = PolicyTraits::apply(HashElement{set->hash_ref()}, PolicyTraits::element(slot)); - auto target = find_first_non_full(common(), hash); - SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type)); - transfer(new_slots + target.offset, slot); + auto target = find_first_non_full(common, hash); + SetCtrl(common, target.offset, H2(hash), sizeof(slot_type)); + set->transfer(new_slots + target.offset, slot); return target.probe_length; }; if (was_soo) { @@ -3623,80 +3662,13 @@ class raw_hash_set { total_probe_length += insert_slot(old_slots + i); } } - infoz().RecordRehash(total_probe_length); + common.infoz().RecordRehash(total_probe_length); } } - resize_helper.DeallocateOld(CharAlloc(alloc_ref()), + resize_helper.DeallocateOld(CharAlloc(set->alloc_ref()), sizeof(slot_type)); } - // Prunes control bytes to remove as many tombstones as possible. - // - // See the comment on `rehash_and_grow_if_necessary()`. - inline void drop_deletes_without_resize() { - // Stack-allocate space for swapping elements. 
- alignas(slot_type) unsigned char tmp[sizeof(slot_type)]; - DropDeletesWithoutResize(common(), &hash_ref(), GetPolicyFunctions(), tmp); - } - - // Called whenever the table *might* need to conditionally grow. - // - // This function is an optimization opportunity to perform a rehash even when - // growth is unnecessary, because vacating tombstones is beneficial for - // performance in the long-run. - void rehash_and_grow_if_necessary() { - const size_t cap = capacity(); - if (cap > Group::kWidth && - // Do these calculations in 64-bit to avoid overflow. - size() * uint64_t{32} <= cap * uint64_t{25}) { - // Squash DELETED without growing if there is enough capacity. - // - // Rehash in place if the current size is <= 25/32 of capacity. - // Rationale for such a high factor: 1) drop_deletes_without_resize() is - // faster than resize, and 2) it takes quite a bit of work to add - // tombstones. In the worst case, seems to take approximately 4 - // insert/erase pairs to create a single tombstone and so if we are - // rehashing because of tombstones, we can afford to rehash-in-place as - // long as we are reclaiming at least 1/8 the capacity without doing more - // than 2X the work. (Where "work" is defined to be size() for rehashing - // or rehashing in place, and 1 for an insert or erase.) But rehashing in - // place is faster per operation than inserting or even doubling the size - // of the table, so we actually afford to reclaim even less space from a - // resize-in-place. The decision is to rehash in place if we can reclaim - // at about 1/8th of the usable capacity (specifically 3/28 of the - // capacity) which means that the total cost of rehashing will be a small - // fraction of the total work. - // - // Here is output of an experiment using the BM_CacheInSteadyState - // benchmark running the old case (where we rehash-in-place only if we can - // reclaim at least 7/16*capacity) vs. this code (which rehashes in place - // if we can recover 3/32*capacity). - // - // Note that although in the worst-case number of rehashes jumped up from - // 15 to 190, but the number of operations per second is almost the same. - // - // Abridged output of running BM_CacheInSteadyState benchmark from - // raw_hash_set_benchmark. N is the number of insert/erase operations. - // - // | OLD (recover >= 7/16 | NEW (recover >= 3/32) - // size | N/s LoadFactor NRehashes | N/s LoadFactor NRehashes - // 448 | 145284 0.44 18 | 140118 0.44 19 - // 493 | 152546 0.24 11 | 151417 0.48 28 - // 538 | 151439 0.26 11 | 151152 0.53 38 - // 583 | 151765 0.28 11 | 150572 0.57 50 - // 628 | 150241 0.31 11 | 150853 0.61 66 - // 672 | 149602 0.33 12 | 150110 0.66 90 - // 717 | 149998 0.35 12 | 149531 0.70 129 - // 762 | 149836 0.37 13 | 148559 0.74 190 - // 807 | 149736 0.39 14 | 151107 0.39 14 - // 852 | 150204 0.42 15 | 151019 0.42 15 - drop_deletes_without_resize(); - } else { - // Otherwise grow the container. - resize(NextCapacity(cap)); - } - } - // Casting directly from e.g. char* to slot_type* can cause compilation errors // on objective-C. This function converts to void* first, avoiding the issue. 
static slot_type* to_slot(void* buf) { @@ -3817,6 +3789,7 @@ class raw_hash_set { template std::pair find_or_prepare_insert_non_soo(const K& key) { + assert(!is_soo()); prefetch_heap_block(); auto hash = hash_ref()(key); auto seq = probe(common(), hash); @@ -3833,9 +3806,10 @@ class raw_hash_set { if (ABSL_PREDICT_TRUE(mask_empty)) { size_t target = seq.offset( GetInsertionOffset(mask_empty, capacity(), hash, control())); - return { - iterator_at(prepare_insert(hash, FindInfo{target, seq.index()})), - true}; + return {iterator_at(PrepareInsertNonSoo(common(), hash, + FindInfo{target, seq.index()}, + GetPolicyFunctions())), + true}; } seq.next(); assert(seq.index() <= capacity() && "full table!"); @@ -3852,63 +3826,6 @@ class raw_hash_set { return find_or_prepare_insert_non_soo(key); } - // Given the hash of a value not currently in the table and the first empty - // slot in the probe sequence, finds the viable slot index to insert it at. - // - // In case there's no space left, the table can be resized or rehashed - // (see rehash_and_grow_if_necessary). - // - // In case of absence of deleted slots and positive growth_left, element can - // be inserted in the provided `target` position. - // - // When table has deleted slots (according to GrowthInfo), target position - // will be searched one more time using `find_first_non_full`. - // - // REQUIRES: At least one non-full slot available. - // REQUIRES: `target` is a valid empty position to insert. - size_t prepare_insert(size_t hash, FindInfo target) ABSL_ATTRIBUTE_NOINLINE { - assert(!is_soo()); - // When there are no deleted slots in the table - // and growth_left is positive, we can insert at the first - // empty slot in the probe sequence (target). - bool use_target_hint = false; - // Optimization is disabled on enabled generations. - // We have to rehash even sparse tables randomly in such mode. -#ifndef ABSL_SWISSTABLE_ENABLE_GENERATIONS - use_target_hint = growth_info().HasNoDeletedAndGrowthLeft(); -#endif - if (ABSL_PREDICT_FALSE(!use_target_hint)) { - const bool rehash_for_bug_detection = - common().should_rehash_for_bug_detection_on_insert(); - if (rehash_for_bug_detection) { - // Move to a different heap allocation in order to detect bugs. - const size_t cap = capacity(); - resize(growth_left() > 0 ? cap : NextCapacity(cap)); - } - if (!rehash_for_bug_detection && - ABSL_PREDICT_FALSE(growth_left() == 0)) { - const size_t old_capacity = capacity(); - rehash_and_grow_if_necessary(); - // NOTE: It is safe to use `FindFirstNonFullAfterResize` after - // `rehash_and_grow_if_necessary`, whether capacity changes or not. - // `rehash_and_grow_if_necessary` may *not* call `resize` - // and perform `drop_deletes_without_resize` instead. But this - // could happen only on big tables and will not change capacity. - // For big tables `FindFirstNonFullAfterResize` will always - // fallback to normal `find_first_non_full`. - target = HashSetResizeHelper::FindFirstNonFullAfterResize( - common(), old_capacity, hash); - } else { - target = find_first_non_full(common(), hash); - } - } - PrepareInsertCommon(common()); - growth_info().OverwriteControlAsFull(control()[target.offset]); - SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type)); - infoz().RecordInsert(hash, target.probe_length); - return target.offset; - } - // Constructs the value in the space pointed by the iterator. This only works // after an unsuccessful find_or_prepare_insert() and before any other // modifications happen in the raw_hash_set. 
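The hunks above fold the templated `prepare_insert` into the shared, type-erased `PrepareInsertNonSoo`, which reaches back into the concrete table through the `PolicyFunctions` table of function pointers. Below is a minimal, self-contained sketch of that general pattern, not Abseil code: every name in it is invented for illustration, and it assumes the type-erased common state is the first member of the container so a callback can recover the container from it (mirroring how `resize_impl` casts `CommonFields` back to `raw_hash_set`).

#include <cstddef>
#include <cstdio>

// Type-erased state shared by all instantiations (loosely analogous to
// CommonFields in the diff above).
struct CommonState {
  std::size_t capacity = 0;
};

// Table of function pointers (loosely analogous to PolicyFunctions): the
// per-type operations a shared routine may need to call back into.
struct OpTable {
  std::size_t slot_size;
  void (*resize)(CommonState& common, std::size_t new_capacity);
};

// A shared, non-templated routine. It sees only CommonState and OpTable, so a
// single copy of its code can serve every container instantiation.
void GrowIfFull(CommonState& common, std::size_t size, const OpTable& ops) {
  if (size >= common.capacity) {
    ops.resize(common, common.capacity == 0 ? 1 : common.capacity * 2);
  }
}

template <class T>
class TinyContainer {
 public:
  void MaybeGrow(std::size_t size) { GrowIfFull(common_, size, Ops()); }
  std::size_t capacity() const { return common_.capacity; }

 private:
  // Static so its address fits in OpTable; it recovers the container from the
  // common state. This relies on common_ being the first member.
  static void ResizeImpl(CommonState& common, std::size_t new_capacity) {
    auto* self = reinterpret_cast<TinyContainer*>(&common);
    (void)self;  // a real container would reallocate and move its slots here
    common.capacity = new_capacity;
  }

  static const OpTable& Ops() {
    static constexpr OpTable kOps = {sizeof(T), &TinyContainer::ResizeImpl};
    return kOps;
  }

  CommonState common_;  // first member: see ResizeImpl
};

int main() {
  TinyContainer<int> c;
  c.MaybeGrow(1);
  std::printf("capacity after growth: %zu\n", c.capacity());
  return 0;
}
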
@@ -3949,7 +3866,7 @@ class raw_hash_set { // See `CapacityToGrowth()`. size_t growth_left() const { assert(!is_soo()); - return growth_info().GetGrowthLeft(); + return common().growth_left(); } GrowthInfo& growth_info() { @@ -4009,6 +3926,10 @@ class raw_hash_set { return settings_.template get<3>(); } + static const void* get_hash_ref_fn(const CommonFields& common) { + auto* h = reinterpret_cast(&common); + return &h->hash_ref(); + } static void transfer_slot_fn(void* set, void* dst, void* src) { auto* h = static_cast(set); h->transfer(static_cast(dst), static_cast(src)); @@ -4030,6 +3951,10 @@ class raw_hash_set { static const PolicyFunctions& GetPolicyFunctions() { static constexpr PolicyFunctions value = { sizeof(slot_type), + // TODO(b/328722020): try to type erase + // for standard layout and alignof(Hash) <= alignof(CommonFields). + std::is_empty::value ? &GetHashRefForEmptyHasher + : &raw_hash_set::get_hash_ref_fn, PolicyTraits::template get_hash_slot_fn(), PolicyTraits::transfer_uses_memcpy() ? TransferRelocatable @@ -4037,6 +3962,7 @@ class raw_hash_set { (std::is_same>::value ? &DeallocateStandard : &raw_hash_set::dealloc_fn), + &raw_hash_set::resize_impl, }; return value; } diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc index c4e05d60..10f793ef 100644 --- a/absl/container/internal/raw_hash_set_test.cc +++ b/absl/container/internal/raw_hash_set_test.cc @@ -126,6 +126,22 @@ TEST(GrowthInfoTest, HasNoDeletedAndGrowthLeft) { EXPECT_TRUE(gi.HasNoDeletedAndGrowthLeft()); } +TEST(GrowthInfoTest, HasNoGrowthLeftAndNoDeleted) { + GrowthInfo gi; + gi.InitGrowthLeftNoDeleted(1); + EXPECT_FALSE(gi.HasNoGrowthLeftAndNoDeleted()); + gi.OverwriteEmptyAsFull(); + EXPECT_TRUE(gi.HasNoGrowthLeftAndNoDeleted()); + gi.OverwriteFullAsDeleted(); + EXPECT_FALSE(gi.HasNoGrowthLeftAndNoDeleted()); + gi.OverwriteFullAsEmpty(); + EXPECT_FALSE(gi.HasNoGrowthLeftAndNoDeleted()); + gi.InitGrowthLeftNoDeleted(0); + EXPECT_TRUE(gi.HasNoGrowthLeftAndNoDeleted()); + gi.OverwriteFullAsEmpty(); + EXPECT_FALSE(gi.HasNoGrowthLeftAndNoDeleted()); +} + TEST(GrowthInfoTest, OverwriteFullAsEmpty) { GrowthInfo gi; gi.InitGrowthLeftNoDeleted(5); -- cgit v1.2.3 From f36d33317ce3ca0a2212ffd264a26fd18e57a509 Mon Sep 17 00:00:00 2001 From: Abseil Team Date: Mon, 1 Jul 2024 23:23:14 -0700 Subject: Make mutable CompressedTuple::get() constexpr. This change makes the mutable overloads of CompressedTuple::get() constexpr. This is consistent with std::get(std::tuple), which is constexpr since C++14. 
PiperOrigin-RevId: 648603141
Change-Id: Icbd61809f7a06723cf581dbed5488b7bae998cc9
---
 absl/container/internal/compressed_tuple.h       | 12 +++----
 absl/container/internal/compressed_tuple_test.cc | 42 ++++++++++++++++++++++--
 2 files changed, 45 insertions(+), 9 deletions(-)

(limited to 'absl/container/internal/compressed_tuple_test.cc')

diff --git a/absl/container/internal/compressed_tuple.h b/absl/container/internal/compressed_tuple.h
index f05a1fdc..6db0468d 100644
--- a/absl/container/internal/compressed_tuple.h
+++ b/absl/container/internal/compressed_tuple.h
@@ -89,9 +89,9 @@ struct Storage {
   explicit constexpr Storage(absl::in_place_t, V&& v)
       : value(std::forward<V>(v)) {}
   constexpr const T& get() const& { return value; }
-  T& get() & { return value; }
+  constexpr T& get() & { return value; }
   constexpr const T&& get() const&& { return std::move(*this).value; }
-  T&& get() && { return std::move(*this).value; }
+  constexpr T&& get() && { return std::move(*this).value; }
 };
 
 template <typename T>
@@ -102,9 +102,9 @@ struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage<T, true> : T {
   explicit constexpr Storage(absl::in_place_t, V&& v) : T(std::forward<V>(v)) {}
 
   constexpr const T& get() const& { return *this; }
-  T& get() & { return *this; }
+  constexpr T& get() & { return *this; }
   constexpr const T&& get() const&& { return std::move(*this); }
-  T&& get() && { return std::move(*this); }
+  constexpr T&& get() && { return std::move(*this); }
 };
 
 template
@@ -237,7 +237,7 @@ class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
           std::forward<Vs>(base)...) {}
 
   template <int I>
-  ElemT<I>& get() & {
+  constexpr ElemT<I>& get() & {
     return StorageT<I>::get();
   }
 
@@ -247,7 +247,7 @@ class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
   }
 
   template <int I>
-  ElemT<I>&& get() && {
+  constexpr ElemT<I>&& get() && {
     return std::move(*this).StorageT<I>::get();
   }
 
diff --git a/absl/container/internal/compressed_tuple_test.cc b/absl/container/internal/compressed_tuple_test.cc
index 3cd9e18b..c3edf542 100644
--- a/absl/container/internal/compressed_tuple_test.cc
+++ b/absl/container/internal/compressed_tuple_test.cc
@@ -31,14 +31,22 @@
 // These are declared at global scope purely so that error messages
 // are smaller and easier to understand.
-enum class CallType { kConstRef, kConstMove };
+enum class CallType { kMutableRef, kConstRef, kMutableMove, kConstMove };
 
 template <int>
 struct Empty {
+  constexpr CallType value() & { return CallType::kMutableRef; }
   constexpr CallType value() const& { return CallType::kConstRef; }
+  constexpr CallType value() && { return CallType::kMutableMove; }
   constexpr CallType value() const&& { return CallType::kConstMove; }
 };
 
+// Unconditionally return an lvalue reference to `t`.
+template <typename T>
+constexpr T& AsLValue(T&& t) {
+  return t;
+}
+
 template <typename T>
 struct NotEmpty {
   T value;
@@ -375,8 +383,24 @@ TEST(CompressedTupleTest, Constexpr) {
     constexpr int value() const { return v; }
     int v;
   };
-  constexpr CompressedTuple<int, double, CompressedTuple<int>, Empty<0>> x(
-      7, 1.25, CompressedTuple<int>(5), {});
+
+  using Tuple = CompressedTuple<int, double, CompressedTuple<int>, Empty<0>>;
+
+  constexpr int r0 =
+      AsLValue(Tuple(1, 0.75, CompressedTuple<int>(9), {})).get<0>();
+  constexpr double r1 =
+      AsLValue(Tuple(1, 0.75, CompressedTuple<int>(9), {})).get<1>();
+  constexpr int r2 =
+      AsLValue(Tuple(1, 0.75, CompressedTuple<int>(9), {})).get<2>().get<0>();
+  constexpr CallType r3 =
+      AsLValue(Tuple(1, 0.75, CompressedTuple<int>(9), {})).get<3>().value();
+
+  EXPECT_EQ(r0, 1);
+  EXPECT_EQ(r1, 0.75);
+  EXPECT_EQ(r2, 9);
+  EXPECT_EQ(r3, CallType::kMutableRef);
+
+  constexpr Tuple x(7, 1.25, CompressedTuple<int>(5), {});
   constexpr int x0 = x.get<0>();
   constexpr double x1 = x.get<1>();
   constexpr int x2 = x.get<2>().get<0>();
@@ -387,6 +411,18 @@
   EXPECT_EQ(x2, 5);
   EXPECT_EQ(x3, CallType::kConstRef);
 
+  constexpr int m0 = Tuple(5, 0.25, CompressedTuple<int>(3), {}).get<0>();
+  constexpr double m1 = Tuple(5, 0.25, CompressedTuple<int>(3), {}).get<1>();
+  constexpr int m2 =
+      Tuple(5, 0.25, CompressedTuple<int>(3), {}).get<2>().get<0>();
+  constexpr CallType m3 =
+      Tuple(5, 0.25, CompressedTuple<int>(3), {}).get<3>().value();
+
+  EXPECT_EQ(m0, 5);
+  EXPECT_EQ(m1, 0.25);
+  EXPECT_EQ(m2, 3);
+  EXPECT_EQ(m3, CallType::kMutableMove);
+
   constexpr CompressedTuple<Empty<0>, TrivialStruct, int> trivial = {};
   constexpr CallType trivial0 = trivial.get<0>().value();
   constexpr int trivial1 = trivial.get<1>().value();
-- cgit v1.2.3
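With the mutable get() overloads now constexpr, a local CompressedTuple can be built and modified entirely at compile time. The snippet below is a minimal sketch of what that enables, not part of Abseil: `BuildPair` is an invented name, CompressedTuple is an internal Abseil type (used here only to illustrate the commit), and the code assumes a checkout that already contains the change above.

#include <utility>

#include "absl/container/internal/compressed_tuple.h"

namespace {

using absl::container_internal::CompressedTuple;

// Mutating a local CompressedTuple through get() inside a constexpr function
// was ill-formed before the mutable get() overloads became constexpr.
constexpr std::pair<int, double> BuildPair() {
  CompressedTuple<int, double> t(1, 2.5);
  t.get<0>() += 41;  // uses the now-constexpr `get() &` overload
  return {t.get<0>(), t.get<1>()};
}

static_assert(BuildPair().first == 42,
              "mutable CompressedTuple::get() is usable at compile time");

}  // namespace

int main() { return BuildPair().first == 42 ? 0 : 1; }
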