author     Arthur O'Dwyer <arthur.j.odwyer@gmail.com>    2024-02-21 09:17:54 -0800
committer  Copybara-Service <copybara-worker@google.com>  2024-02-21 09:18:57 -0800
commit     92c8575d24cb5c27718e1cd1ed21a9847351a11b (patch)
tree       3720389d85a5175ca9a42f1b3354118207e9302d /absl
parent     b0f85e2355b173d3f89dee29a7f817b52f8e72a2 (diff)
PR #1618: inlined_vector: Use trivial relocation for `SwapInlinedElements`
Imported from GitHub PR https://github.com/abseil/abseil-cpp/pull/1618

I noticed while working on #1615 that `inlined_vector` could use the trivial relocatability trait here, too. Here the memcpy codepath already exists; we just have to opt in to using it.

Merge 567a1dd9b6b3352f649e900b24834b59e39cfa14 into a7012a5bfcf26a41b9dd32d4c429004773503dd6

Merging this change closes #1618

COPYBARA_INTEGRATE_REVIEW=https://github.com/abseil/abseil-cpp/pull/1618 from Quuxplusone:trivial-swap 567a1dd9b6b3352f649e900b24834b59e39cfa14
PiperOrigin-RevId: 609019296
Change-Id: I4055ab790245752179e405b490fcd479e7389726
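To see why trivial relocatability licenses the memcpy codepath, here is a minimal standalone sketch, not the Abseil implementation: the `SmallBuf` type and `SwapInline` helper are hypothetical, and only `absl::is_trivially_relocatable` is real Abseil API.

// Byte-wise swap of inline storage, valid when T is trivially relocatable:
// swapping the raw bytes is equivalent to relocating a's elements into a
// temporary buffer, b's into a, and the temporary's into b, with no T
// constructor or destructor running in between.
#include <cstddef>
#include <cstring>
#include <utility>

#include "absl/meta/type_traits.h"  // absl::is_trivially_relocatable

template <typename T, size_t N>
struct SmallBuf {                                 // hypothetical
  alignas(T) unsigned char bytes[N * sizeof(T)];  // inline element storage
  size_t size = 0;                                // number of live elements
};

template <typename T, size_t N>
void SwapInline(SmallBuf<T, N>& a, SmallBuf<T, N>& b) {
  if constexpr (absl::is_trivially_relocatable<T>::value) {
    // Fast path: swap the whole inline buffers byte by byte.
    unsigned char tmp[N * sizeof(T)];
    std::memcpy(tmp, a.bytes, sizeof(tmp));
    std::memcpy(a.bytes, b.bytes, sizeof(tmp));
    std::memcpy(b.bytes, tmp, sizeof(tmp));
  } else {
    // Slow path: element-wise swap, analogous to ElementwiseSwapPolicy in
    // the real code (handling of unequal sizes omitted for brevity).
    T* pa = reinterpret_cast<T*>(a.bytes);
    T* pb = reinterpret_cast<T*>(b.bytes);
    for (size_t i = 0; i < a.size && i < b.size; ++i) std::swap(pa[i], pb[i]);
  }
  std::swap(a.size, b.size);
}

Whether `std::unique_ptr` actually takes the fast path depends on the toolchain: Abseil's trait consults the compiler's `__is_trivially_relocatable` builtin where available, and otherwise falls back to a trivially-copyable check, which `unique_ptr` fails. The `UniquePtr.Swap` test added below passes under either policy, which is the point of exercising it.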
Diffstat (limited to 'absl')
-rw-r--r--  absl/container/inlined_vector_test.cc     33
-rw-r--r--  absl/container/internal/inlined_vector.h  31
2 files changed, 47 insertions(+), 17 deletions(-)
diff --git a/absl/container/inlined_vector_test.cc b/absl/container/inlined_vector_test.cc
index 241389ae..5ecf88a9 100644
--- a/absl/container/inlined_vector_test.cc
+++ b/absl/container/inlined_vector_test.cc
@@ -304,6 +304,35 @@ TEST(UniquePtr, MoveAssign) {
}
}
+// Swapping containers of unique pointers should work fine, with no
+// leaks, despite the fact that unique pointers are trivially relocatable but
+// not trivially destructible.
+// TODO(absl-team): Using unique_ptr here is technically correct, but
+// a trivially relocatable struct would be less semantically confusing.
+TEST(UniquePtr, Swap) {
+ for (size_t size1 = 0; size1 < 5; ++size1) {
+ for (size_t size2 = 0; size2 < 5; ++size2) {
+ absl::InlinedVector<std::unique_ptr<size_t>, 2> a;
+ absl::InlinedVector<std::unique_ptr<size_t>, 2> b;
+ for (size_t i = 0; i < size1; ++i) {
+ a.push_back(std::make_unique<size_t>(i + 10));
+ }
+ for (size_t i = 0; i < size2; ++i) {
+ b.push_back(std::make_unique<size_t>(i + 20));
+ }
+ a.swap(b);
+ ASSERT_THAT(a, SizeIs(size2));
+ ASSERT_THAT(b, SizeIs(size1));
+ for (size_t i = 0; i < a.size(); ++i) {
+ ASSERT_THAT(a[i], Pointee(i + 20));
+ }
+ for (size_t i = 0; i < b.size(); ++i) {
+ ASSERT_THAT(b[i], Pointee(i + 10));
+ }
+ }
+ }
+}
+
// At the end of this test loop, the elements between [erase_begin, erase_end)
// should have reference counts == 0, and all others elements should have
// reference counts == 1.
@@ -783,7 +812,9 @@ TEST(OverheadTest, Storage) {
// The union should be absorbing some of the allocation bookkeeping overhead
// in the larger vectors, leaving only the size_ field as overhead.
- struct T { void* val; };
+ struct T {
+ void* val;
+ };
size_t expected_overhead = sizeof(T);
EXPECT_EQ((2 * expected_overhead),
diff --git a/absl/container/internal/inlined_vector.h b/absl/container/internal/inlined_vector.h
index 0eb9c34d..90a74dc7 100644
--- a/absl/container/internal/inlined_vector.h
+++ b/absl/container/internal/inlined_vector.h
@@ -322,14 +322,13 @@ class Storage {
// The policy to be used specifically when swapping inlined elements.
using SwapInlinedElementsPolicy = absl::conditional_t<
- // Fast path: if the value type can be trivially move constructed/assigned
- // and destroyed, and we know the allocator doesn't do anything fancy,
- // then it's safe for us to simply swap the bytes in the inline storage.
- // It's as if we had move-constructed a temporary vector, move-assigned
- // one to the other, then move-assigned the first from the temporary.
- absl::conjunction<absl::is_trivially_move_constructible<ValueType<A>>,
- absl::is_trivially_move_assignable<ValueType<A>>,
- absl::is_trivially_destructible<ValueType<A>>,
+ // Fast path: if the value type can be trivially relocated, and we
+ // know the allocator doesn't do anything fancy, then it's safe for us
+ // to simply swap the bytes in the inline storage. It's as if we had
+ // relocated the first vector's elements into temporary storage,
+ // relocated the second's elements into the (now-empty) first's,
+ // and then relocated from temporary storage into the second.
+ absl::conjunction<absl::is_trivially_relocatable<ValueType<A>>,
std::is_same<A, std::allocator<ValueType<A>>>>::value,
MemcpyPolicy,
absl::conditional_t<IsSwapOk<A>::value, ElementwiseSwapPolicy,
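As a standalone illustration of the policy selection above, a sketch in which the `IsSwappableType` alias and `Pinned` type are hypothetical stand-ins, not Abseil code:

#include <memory>
#include <type_traits>

#include "absl/meta/type_traits.h"

struct MemcpyPolicy {};
struct ElementwiseSwapPolicy {};
struct ElementwiseConstructPolicy {};

// Hypothetical stand-in for the real IsSwapOk<A> check.
template <typename T>
using IsSwappableType = std::is_swappable<T>;

template <typename T, typename A>
using SwapPolicy = absl::conditional_t<
    absl::conjunction<absl::is_trivially_relocatable<T>,
                      std::is_same<A, std::allocator<T>>>::value,
    MemcpyPolicy,
    absl::conditional_t<IsSwappableType<T>::value, ElementwiseSwapPolicy,
                        ElementwiseConstructPolicy>>;

// A user-provided move constructor rules out trivial relocatability
// (absent [[clang::trivial_abi]]), forcing the element-wise fallback.
struct Pinned {
  Pinned() = default;
  Pinned(Pinned&&) noexcept {}
  Pinned& operator=(Pinned&&) noexcept { return *this; }
};

static_assert(std::is_same<SwapPolicy<int, std::allocator<int>>,
                           MemcpyPolicy>::value,
              "trivially relocatable + std::allocator => byte-wise swap");
static_assert(std::is_same<SwapPolicy<Pinned, std::allocator<Pinned>>,
                           ElementwiseSwapPolicy>::value,
              "otherwise swap element by element");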
@@ -624,8 +623,8 @@ void Storage<T, N, A>::InitFrom(const Storage& other) {
template <typename T, size_t N, typename A>
template <typename ValueAdapter>
-auto Storage<T, N, A>::Initialize(ValueAdapter values, SizeType<A> new_size)
- -> void {
+auto Storage<T, N, A>::Initialize(ValueAdapter values,
+ SizeType<A> new_size) -> void {
// Only callable from constructors!
ABSL_HARDENING_ASSERT(!GetIsAllocated());
ABSL_HARDENING_ASSERT(GetSize() == 0);
@@ -656,8 +655,8 @@ auto Storage<T, N, A>::Initialize(ValueAdapter values, SizeType<A> new_size)
template <typename T, size_t N, typename A>
template <typename ValueAdapter>
-auto Storage<T, N, A>::Assign(ValueAdapter values, SizeType<A> new_size)
- -> void {
+auto Storage<T, N, A>::Assign(ValueAdapter values,
+ SizeType<A> new_size) -> void {
StorageView<A> storage_view = MakeStorageView();
AllocationTransaction<A> allocation_tx(GetAllocator());
@@ -699,8 +698,8 @@ auto Storage<T, N, A>::Assign(ValueAdapter values, SizeType<A> new_size)
template <typename T, size_t N, typename A>
template <typename ValueAdapter>
-auto Storage<T, N, A>::Resize(ValueAdapter values, SizeType<A> new_size)
- -> void {
+auto Storage<T, N, A>::Resize(ValueAdapter values,
+ SizeType<A> new_size) -> void {
StorageView<A> storage_view = MakeStorageView();
Pointer<A> const base = storage_view.data;
const SizeType<A> size = storage_view.size;
@@ -885,8 +884,8 @@ auto Storage<T, N, A>::EmplaceBackSlow(Args&&... args) -> Reference<A> {
}
template <typename T, size_t N, typename A>
-auto Storage<T, N, A>::Erase(ConstIterator<A> from, ConstIterator<A> to)
- -> Iterator<A> {
+auto Storage<T, N, A>::Erase(ConstIterator<A> from,
+ ConstIterator<A> to) -> Iterator<A> {
StorageView<A> storage_view = MakeStorageView();
auto erase_size = static_cast<SizeType<A>>(std::distance(from, to));