 absl/container/internal/layout.h            | 245
 absl/container/internal/layout_benchmark.cc | 178
 absl/container/internal/layout_test.cc      | 396
 3 files changed, 753 insertions(+), 66 deletions(-)
diff --git a/absl/container/internal/layout.h b/absl/container/internal/layout.h
index 341c8262..7f2b83a2 100644
--- a/absl/container/internal/layout.h
+++ b/absl/container/internal/layout.h
@@ -81,9 +81,30 @@
// }
//
// The layout we used above combines fixed-size with dynamically-sized fields.
-// This is quite common. Layout is optimized for this use case and generates
-// optimal code. All computations that can be performed at compile time are
-// indeed performed at compile time.
+// This is quite common. Layout is optimized for this use case and attempts to
+// generate optimal code. To help the compiler do that in more cases, you can
+// specify the fixed sizes using `WithStaticSizes`. This ensures that all
+// computations that can be performed at compile time are indeed performed at
+// compile time. E.g.:
+//
+// using SL = L::WithStaticSizes<1, 1>;
+//
+// void Use(unsigned char* p) {
+// // First, extract N and M.
+// // Using `prefix` we can access the first three arrays but not more.
+// //
+//     // More details: The first array always has offset 0. `SL` knows
+//     // the offsets of the second and third arrays from the sizes of the
+//     // first and second arrays, which were specified via `WithStaticSizes`.
+// constexpr auto prefix = SL::Partial();
+// size_t n = *prefix.Pointer<0>(p);
+// size_t m = *prefix.Pointer<1>(p);
+//
+// // Now we can get a pointer to the final payload.
+// const SL layout(n, m);
+// double* a = layout.Pointer<double>(p);
+// int* b = layout.Pointer<int>(p);
+// }
//
// Efficiency tip: The order of fields matters. In `Layout<T1, ..., TN>` try to
// ensure that `alignof(T1) >= ... >= alignof(TN)`. This way you'll have no
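For reference, the `L` in the example above is the one defined earlier in this
header's documentation (outside this hunk); the assumed setup is:

    // size_t[1] (N), size_t[1] (M), double[N], int[M].
    using L = Layout<size_t, size_t, double, int>;

With `WithStaticSizes<1, 1>`, the offsets of the first three arrays depend only
on the two static sizes, so `SL::Partial()` can locate them without any runtime
arguments; only the offset of the final `int` array still depends on `n`.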
@@ -107,7 +128,7 @@
// CompactString(const char* s = "") {
// const size_t size = strlen(s);
// // size_t[1] followed by char[size + 1].
-// const L layout(1, size + 1);
+// const L layout(size + 1);
// p_.reset(new unsigned char[layout.AllocSize()]);
// // If running under ASAN, mark the padding bytes, if any, to catch
// // memory errors.
@@ -125,14 +146,13 @@
//
// const char* c_str() const {
// // Equivalent to reinterpret_cast<char*>(p.get() + sizeof(size_t)).
-// // The argument in Partial(1) specifies that we have size_t[1] in front
-// // of the characters.
-// return L::Partial(1).Pointer<char>(p_.get());
+// return L::Partial().Pointer<char>(p_.get());
// }
//
// private:
-// // Our heap allocation contains a size_t followed by an array of chars.
-// using L = Layout<size_t, char>;
+// // Our heap allocation contains a single size_t followed by an array of
+// // chars.
+// using L = Layout<size_t, char>::WithStaticSizes<1>;
// std::unique_ptr<unsigned char[]> p_;
// };
//
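The static size is what lets `c_str()` drop the old `Partial(1)` argument: with
the `size_t[1]` prefix fixed at compile time, the offset of the character array
is a compile-time constant. A sketch of that property:

    using L = Layout<size_t, char>::WithStaticSizes<1>;
    static_assert(L::Partial().Offset<1>() == sizeof(size_t), "");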
@@ -146,11 +166,12 @@
//
// The interface exported by this file consists of:
// - class `Layout<>` and its public members.
-// - The public members of class `internal_layout::LayoutImpl<>`. That class
-// isn't intended to be used directly, and its name and template parameter
-// list are internal implementation details, but the class itself provides
-// most of the functionality in this file. See comments on its members for
-// detailed documentation.
+// - The public members of classes `internal_layout::LayoutWithStaticSizes<>`
+// and `internal_layout::LayoutImpl<>`. Those classes aren't intended to be
+// used directly, and their name and template parameter list are internal
+// implementation details, but the classes themselves provide most of the
+// functionality in this file. See comments on their members for detailed
+// documentation.
//
// `Layout<T1,... Tn>::Partial(count1,..., countm)` (where `m` <= `n`) returns a
// `LayoutImpl<>` object. `Layout<T1,..., Tn> layout(count1,..., countn)`
@@ -164,7 +185,7 @@
#include <stddef.h>
#include <stdint.h>
-#include <ostream>
+#include <array>
#include <string>
#include <tuple>
#include <type_traits>
@@ -210,9 +231,6 @@ struct NotAligned<const Aligned<T, N>> {
template <size_t>
using IntToSize = size_t;
-template <class>
-using TypeToSize = size_t;
-
template <class T>
struct Type : NotAligned<T> {
using type = T;
@@ -309,7 +327,8 @@ using IsLegalElementType = std::integral_constant<
!std::is_volatile<typename Type<T>::type>::value &&
adl_barrier::IsPow2(AlignOf<T>::value)>;
-template <class Elements, class SizeSeq, class OffsetSeq>
+template <class Elements, class StaticSizeSeq, class RuntimeSizeSeq,
+ class SizeSeq, class OffsetSeq>
class LayoutImpl;
// Public base class of `Layout` and the result type of `Layout::Partial()`.
@@ -317,31 +336,49 @@ class LayoutImpl;
// `Elements...` contains all template arguments of `Layout` that created this
// instance.
//
-// `SizeSeq...` is `[0, NumSizes)` where `NumSizes` is the number of arguments
-// passed to `Layout::Partial()` or `Layout::Layout()`.
+// `StaticSizeSeq...` is an index_sequence containing the sizes specified at
+// compile-time.
+//
+// `RuntimeSizeSeq...` is `[0, NumRuntimeSizes)`, where `NumRuntimeSizes` is the
+// number of arguments passed to `Layout::Partial()` or `Layout::Layout()`.
+//
+// `SizeSeq...` is `[0, NumSizes)` where `NumSizes` is `NumRuntimeSizes` plus
+// the number of sizes in `StaticSizeSeq`.
//
// `OffsetSeq...` is `[0, NumOffsets)` where `NumOffsets` is
// `Min(sizeof...(Elements), NumSizes + 1)` (the number of arrays for which we
// can compute offsets).
-template <class... Elements, size_t... SizeSeq, size_t... OffsetSeq>
-class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
- absl::index_sequence<OffsetSeq...>> {
+template <class... Elements, size_t... StaticSizeSeq, size_t... RuntimeSizeSeq,
+ size_t... SizeSeq, size_t... OffsetSeq>
+class LayoutImpl<
+ std::tuple<Elements...>, absl::index_sequence<StaticSizeSeq...>,
+ absl::index_sequence<RuntimeSizeSeq...>, absl::index_sequence<SizeSeq...>,
+ absl::index_sequence<OffsetSeq...>> {
private:
static_assert(sizeof...(Elements) > 0, "At least one field is required");
static_assert(absl::conjunction<IsLegalElementType<Elements>...>::value,
"Invalid element type (see IsLegalElementType)");
+ static_assert(sizeof...(StaticSizeSeq) <= sizeof...(Elements),
+ "Too many static sizes specified");
enum {
NumTypes = sizeof...(Elements),
+ NumStaticSizes = sizeof...(StaticSizeSeq),
+ NumRuntimeSizes = sizeof...(RuntimeSizeSeq),
NumSizes = sizeof...(SizeSeq),
NumOffsets = sizeof...(OffsetSeq),
};
// These are guaranteed by `Layout`.
+ static_assert(NumStaticSizes + NumRuntimeSizes == NumSizes, "Internal error");
+ static_assert(NumSizes <= NumTypes, "Internal error");
static_assert(NumOffsets == adl_barrier::Min(NumTypes, NumSizes + 1),
"Internal error");
static_assert(NumTypes > 0, "Internal error");
+ static constexpr std::array<size_t, sizeof...(StaticSizeSeq)> kStaticSizes = {
+ StaticSizeSeq...};
+
// Returns the index of `T` in `Elements...`. Results in a compilation error
// if `Elements...` doesn't contain exactly one instance of `T`.
template <class T>
@@ -364,7 +401,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
template <size_t N>
using ElementType = typename std::tuple_element<N, ElementTypes>::type;
- constexpr explicit LayoutImpl(IntToSize<SizeSeq>... sizes)
+ constexpr explicit LayoutImpl(IntToSize<RuntimeSizeSeq>... sizes)
: size_{sizes...} {}
// Alignment of the layout, equal to the strictest alignment of all elements.
@@ -390,7 +427,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
constexpr size_t Offset() const {
static_assert(N < NumOffsets, "Index out of bounds");
return adl_barrier::Align(
- Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * size_[N - 1],
+ Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * Size<N - 1>(),
ElementAlignment<N>::value);
}
@@ -412,8 +449,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
return {{Offset<OffsetSeq>()...}};
}
- // The number of elements in the Nth array. This is the Nth argument of
- // `Layout::Partial()` or `Layout::Layout()` (zero-based).
+ // The number of elements in the Nth array (zero-based).
//
// // int[3], 4 bytes of padding, double[4].
// Layout<int, double> x(3, 4);
@@ -421,10 +457,15 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
// assert(x.Size<1>() == 4);
//
// Requires: `N < NumSizes`.
- template <size_t N>
+ template <size_t N, EnableIf<(N < NumStaticSizes)> = 0>
+ constexpr size_t Size() const {
+ return kStaticSizes[N];
+ }
+
+ template <size_t N, EnableIf<(N >= NumStaticSizes)> = 0>
constexpr size_t Size() const {
static_assert(N < NumSizes, "Index out of bounds");
- return size_[N];
+ return size_[N - NumStaticSizes];
}
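The two overloads above split the lookup: indices below `NumStaticSizes` read
the compile-time `kStaticSizes` array, and the remaining indices read the
runtime `size_` array, shifted down by `NumStaticSizes`. A minimal sketch of
the resulting behavior (element types chosen arbitrarily):

    using SL = Layout<char, int, double>::WithStaticSizes<3>;
    constexpr SL layout(5, 7);                 // runtime sizes of int[], double[]
    static_assert(layout.Size<0>() == 3, "");  // served from kStaticSizes[0]
    static_assert(layout.Size<1>() == 5, "");  // served from size_[0]
    static_assert(layout.Size<2>() == 7, "");  // served from size_[1]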
// The number of elements in the array with the specified element type.
@@ -579,7 +620,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
constexpr size_t AllocSize() const {
static_assert(NumTypes == NumSizes, "You must specify sizes of all fields");
return Offset<NumTypes - 1>() +
- SizeOf<ElementType<NumTypes - 1>>::value * size_[NumTypes - 1];
+ SizeOf<ElementType<NumTypes - 1>>::value * Size<NumTypes - 1>();
}
// If built with --config=asan, poisons padding bytes (if any) in the
@@ -603,7 +644,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
// The `if` is an optimization. It doesn't affect the observable behaviour.
if (ElementAlignment<N - 1>::value % ElementAlignment<N>::value) {
size_t start =
- Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * size_[N - 1];
+ Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * Size<N - 1>();
ASAN_POISON_MEMORY_REGION(p + start, Offset<N>() - start);
}
#endif
@@ -632,47 +673,66 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
adl_barrier::TypeName<ElementType<OffsetSeq>>()...};
std::string res = absl::StrCat("@0", types[0], "(", sizes[0], ")");
for (size_t i = 0; i != NumOffsets - 1; ++i) {
- absl::StrAppend(&res, "[", size_[i], "]; @", offsets[i + 1], types[i + 1],
- "(", sizes[i + 1], ")");
+ absl::StrAppend(&res, "[", DebugSize(i), "]; @", offsets[i + 1],
+ types[i + 1], "(", sizes[i + 1], ")");
}
// NumSizes is a constant that may be zero. Some compilers cannot see that
// inside the if statement "size_[NumSizes - 1]" must be valid.
int last = static_cast<int>(NumSizes) - 1;
if (NumTypes == NumSizes && last >= 0) {
- absl::StrAppend(&res, "[", size_[last], "]");
+ absl::StrAppend(&res, "[", DebugSize(static_cast<size_t>(last)), "]");
}
return res;
}
private:
+ size_t DebugSize(size_t n) const {
+ if (n < NumStaticSizes) {
+ return kStaticSizes[n];
+ } else {
+ return size_[n - NumStaticSizes];
+ }
+ }
+
// Arguments of `Layout::Partial()` or `Layout::Layout()`.
- size_t size_[NumSizes > 0 ? NumSizes : 1];
+ size_t size_[NumRuntimeSizes > 0 ? NumRuntimeSizes : 1];
};
-template <size_t NumSizes, class... Ts>
-using LayoutType = LayoutImpl<
- std::tuple<Ts...>, absl::make_index_sequence<NumSizes>,
- absl::make_index_sequence<adl_barrier::Min(sizeof...(Ts), NumSizes + 1)>>;
+// An out-of-class definition of a constexpr static data member is redundant
+// and deprecated in C++17, but required in C++14.
+template <class... Elements, size_t... StaticSizeSeq, size_t... RuntimeSizeSeq,
+ size_t... SizeSeq, size_t... OffsetSeq>
+constexpr std::array<size_t, sizeof...(StaticSizeSeq)> LayoutImpl<
+ std::tuple<Elements...>, absl::index_sequence<StaticSizeSeq...>,
+ absl::index_sequence<RuntimeSizeSeq...>, absl::index_sequence<SizeSeq...>,
+ absl::index_sequence<OffsetSeq...>>::kStaticSizes;
-} // namespace internal_layout
+template <class StaticSizeSeq, size_t NumRuntimeSizes, class... Ts>
+using LayoutType = LayoutImpl<
+ std::tuple<Ts...>, StaticSizeSeq,
+ absl::make_index_sequence<NumRuntimeSizes>,
+ absl::make_index_sequence<NumRuntimeSizes + StaticSizeSeq::size()>,
+ absl::make_index_sequence<adl_barrier::Min(
+ sizeof...(Ts), NumRuntimeSizes + StaticSizeSeq::size() + 1)>>;
+
+template <class StaticSizeSeq, class... Ts>
+class LayoutWithStaticSizes
+ : public LayoutType<StaticSizeSeq,
+ sizeof...(Ts) - adl_barrier::Min(sizeof...(Ts),
+ StaticSizeSeq::size()),
+ Ts...> {
+ private:
+ using Super =
+ LayoutType<StaticSizeSeq,
+ sizeof...(Ts) -
+ adl_barrier::Min(sizeof...(Ts), StaticSizeSeq::size()),
+ Ts...>;
-// Descriptor of arrays of various types and sizes laid out in memory one after
-// another. See the top of the file for documentation.
-//
-// Check out the public API of internal_layout::LayoutImpl above. The type is
-// internal to the library but its methods are public, and they are inherited
-// by `Layout`.
-template <class... Ts>
-class Layout : public internal_layout::LayoutType<sizeof...(Ts), Ts...> {
public:
- static_assert(sizeof...(Ts) > 0, "At least one field is required");
- static_assert(
- absl::conjunction<internal_layout::IsLegalElementType<Ts>...>::value,
- "Invalid element type (see IsLegalElementType)");
-
// The result type of `Partial()` with `NumSizes` arguments.
template <size_t NumSizes>
- using PartialType = internal_layout::LayoutType<NumSizes, Ts...>;
+ using PartialType =
+ internal_layout::LayoutType<StaticSizeSeq, NumSizes, Ts...>;
// `Layout` knows the element types of the arrays we want to lay out in
// memory but not the number of elements in each array.
@@ -698,14 +758,18 @@ class Layout : public internal_layout::LayoutType<sizeof...(Ts), Ts...> {
// Note: The sizes of the arrays must be specified in number of elements,
// not in bytes.
//
- // Requires: `sizeof...(Sizes) <= sizeof...(Ts)`.
+ // Requires: `sizeof...(Sizes) + NumStaticSizes <= sizeof...(Ts)`.
// Requires: all arguments are convertible to `size_t`.
template <class... Sizes>
static constexpr PartialType<sizeof...(Sizes)> Partial(Sizes&&... sizes) {
- static_assert(sizeof...(Sizes) <= sizeof...(Ts), "");
- return PartialType<sizeof...(Sizes)>(std::forward<Sizes>(sizes)...);
+ static_assert(sizeof...(Sizes) + StaticSizeSeq::size() <= sizeof...(Ts),
+ "");
+ return PartialType<sizeof...(Sizes)>(
+ static_cast<size_t>(std::forward<Sizes>(sizes))...);
}
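In other words, once some sizes are static, the runtime arguments of
`Partial()` describe only the arrays that follow the statically sized ones. A
short sketch:

    using SL = Layout<char, int, double>::WithStaticSizes<3, 5>;
    constexpr auto prefix = SL::Partial();  // all three offsets already known
    constexpr auto whole = SL::Partial(7);  // additionally knows Size<2>()
    static_assert(prefix.Offset<2>() == whole.Offset<2>(), "");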
+ // Inherit LayoutType's constructor.
+ //
// Creates a layout with the sizes of all arrays specified. If you know
// only the sizes of the first N arrays (where N can be zero), you can use
// `Partial()` defined above. The constructor is essentially equivalent to
@@ -714,8 +778,69 @@ class Layout : public internal_layout::LayoutType<sizeof...(Ts), Ts...> {
//
// Note: The sizes of the arrays must be specified in number of elements,
// not in bytes.
- constexpr explicit Layout(internal_layout::TypeToSize<Ts>... sizes)
- : internal_layout::LayoutType<sizeof...(Ts), Ts...>(sizes...) {}
+ //
+ // Implementation note: we do this via a `using` declaration instead of
+ // defining our own explicit constructor because the signature of LayoutType's
+ // constructor depends on RuntimeSizeSeq, which we don't have access to here.
+ // If we defined our own constructor here, it would have to use a parameter
+ // pack and then cast the arguments to size_t when calling the superclass
+ // constructor, similar to what Partial() does. But that would suffer from the
+ // same problem that Partial() has, which is that the parameter types are
+ // inferred from the arguments, which may be signed types, which must then be
+ // cast to size_t. This can lead to negative values being silently (i.e. with
+ // no compiler warnings) cast to an unsigned type. Having a constructor with
+ // size_t parameters helps the compiler generate better warnings about
+ // potential bad casts, while avoiding false warnings when positive literal
+ // arguments are used. If an argument is a positive literal integer (e.g.
+ // `1`), the compiler will understand that it can be safely converted to
+ // size_t, and hence not generate a warning. But if a negative literal (e.g.
+ // `-1`) or a variable with signed type is used, then it can generate a
+ // warning about a potentially unsafe implicit cast. It would be great if we
+ // could do this for Partial() too, but unfortunately as of C++23 there seems
+ // to be no way to define a function with a variable number of paramters of a
+ // certain type, a.k.a. homogenous function parameter packs. So we're forced
+ // to choose between explicitly casting the arguments to size_t, which
+ // suppresses all warnings, even potentially valid ones, or implicitly casting
+ // them to size_t, which generates bogus warnings whenever literal arguments
+ // are used, even if they're positive.
+ using Super::Super;
+};
+
+} // namespace internal_layout
+
+// Descriptor of arrays of various types and sizes laid out in memory one after
+// another. See the top of the file for documentation.
+//
+// Check out the public API of internal_layout::LayoutWithStaticSizes and
+// internal_layout::LayoutImpl above. Those types are internal to the library
+// but their methods are public, and they are inherited by `Layout`.
+template <class... Ts>
+class Layout : public internal_layout::LayoutWithStaticSizes<
+ absl::make_index_sequence<0>, Ts...> {
+ private:
+ using Super =
+ internal_layout::LayoutWithStaticSizes<absl::make_index_sequence<0>,
+ Ts...>;
+
+ public:
+ // If you know the sizes of some or all of the arrays at compile time, you can
+ // use `WithStaticSizes` or `WithStaticSizeSequence` to create a `Layout` type
+ // with those sizes baked in. This can help the compiler generate optimal code
+ // for calculating array offsets and AllocSize().
+ //
+ // Like `Partial()`, the N sizes you specify are for the first N arrays, and
+ // they specify the number of elements in each array, not the number of bytes.
+ template <class StaticSizeSeq>
+ using WithStaticSizeSequence =
+ internal_layout::LayoutWithStaticSizes<StaticSizeSeq, Ts...>;
+
+ template <size_t... StaticSizes>
+ using WithStaticSizes =
+ WithStaticSizeSequence<std::index_sequence<StaticSizes...>>;
+
+ // Inherit LayoutWithStaticSizes's constructor, which requires you to specify
+ // all the array sizes.
+ using Super::Super;
};
} // namespace container_internal
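The trade-off described in the implementation note can be seen directly in a
sketch (the variable `n` is illustrative):

    using SL = Layout<size_t, char>::WithStaticSizes<1>;
    int n = 42;
    SL a(n);                  // inherited ctor takes size_t: the compiler can
                              // warn about the signed-to-unsigned conversion
    auto p = SL::Partial(n);  // argument is static_cast internally, so the
                              // same conversion happens silently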
diff --git a/absl/container/internal/layout_benchmark.cc b/absl/container/internal/layout_benchmark.cc
index 3af35e33..a8fbfa7b 100644
--- a/absl/container/internal/layout_benchmark.cc
+++ b/absl/container/internal/layout_benchmark.cc
@@ -15,6 +15,9 @@
// Every benchmark should have the same performance as the corresponding
// headroom benchmark.
+#include <cstddef>
+#include <cstdint>
+
#include "absl/base/internal/raw_logging.h"
#include "absl/container/internal/layout.h"
#include "benchmark/benchmark.h"
@@ -28,6 +31,8 @@ using ::benchmark::DoNotOptimize;
using Int128 = int64_t[2];
+constexpr size_t MyAlign(size_t n, size_t m) { return (n + m - 1) & ~(m - 1); }
+
// This benchmark provides the upper bound on performance for BM_OffsetConstant.
template <size_t Offset, class... Ts>
void BM_OffsetConstantHeadroom(benchmark::State& state) {
@@ -37,6 +42,15 @@ void BM_OffsetConstantHeadroom(benchmark::State& state) {
}
template <size_t Offset, class... Ts>
+void BM_OffsetConstantStatic(benchmark::State& state) {
+ using L = typename Layout<Ts...>::template WithStaticSizes<3, 5, 7>;
+ ABSL_RAW_CHECK(L::Partial().template Offset<3>() == Offset, "Invalid offset");
+ for (auto _ : state) {
+ DoNotOptimize(L::Partial().template Offset<3>());
+ }
+}
+
+template <size_t Offset, class... Ts>
void BM_OffsetConstant(benchmark::State& state) {
using L = Layout<Ts...>;
ABSL_RAW_CHECK(L::Partial(3, 5, 7).template Offset<3>() == Offset,
@@ -46,14 +60,75 @@ void BM_OffsetConstant(benchmark::State& state) {
}
}
+template <size_t Offset, class... Ts>
+void BM_OffsetConstantIndirect(benchmark::State& state) {
+ using L = Layout<Ts...>;
+ auto p = L::Partial(3, 5, 7);
+ ABSL_RAW_CHECK(p.template Offset<3>() == Offset, "Invalid offset");
+ for (auto _ : state) {
+ DoNotOptimize(p);
+ DoNotOptimize(p.template Offset<3>());
+ }
+}
+
+template <class... Ts>
+size_t PartialOffset(size_t k);
+
+template <>
+size_t PartialOffset<int8_t, int16_t, int32_t, Int128>(size_t k) {
+ constexpr size_t o = MyAlign(MyAlign(3 * 1, 2) + 5 * 2, 4);
+ // return Align(o + k * 4, 8);
+ return (o + k * 4 + 7) & ~7U;
+}
+
+template <>
+size_t PartialOffset<Int128, int32_t, int16_t, int8_t>(size_t k) {
+ // No alignment is necessary.
+ return 3 * 16 + 5 * 4 + k * 2;
+}
+
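Worked through with the values the benchmarks check (k = 7):

    o = MyAlign(MyAlign(3 * 1, 2) + 5 * 2, 4) = MyAlign(14, 4) = 16
    (o + 7 * 4 + 7) & ~7U = (44 + 7) & ~7U = 48    // first specialization
    3 * 16 + 5 * 4 + 7 * 2 = 82                    // reversed layout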
+// This benchmark provides the upper bound on performance for BM_OffsetVariable.
+template <size_t Offset, class... Ts>
+void BM_OffsetPartialHeadroom(benchmark::State& state) {
+ size_t k = 7;
+ ABSL_RAW_CHECK(PartialOffset<Ts...>(k) == Offset, "Invalid offset");
+ for (auto _ : state) {
+ DoNotOptimize(k);
+ DoNotOptimize(PartialOffset<Ts...>(k));
+ }
+}
+
+template <size_t Offset, class... Ts>
+void BM_OffsetPartialStatic(benchmark::State& state) {
+ using L = typename Layout<Ts...>::template WithStaticSizes<3, 5>;
+ size_t k = 7;
+ ABSL_RAW_CHECK(L::Partial(k).template Offset<3>() == Offset,
+ "Invalid offset");
+ for (auto _ : state) {
+ DoNotOptimize(k);
+ DoNotOptimize(L::Partial(k).template Offset<3>());
+ }
+}
+
+template <size_t Offset, class... Ts>
+void BM_OffsetPartial(benchmark::State& state) {
+ using L = Layout<Ts...>;
+ size_t k = 7;
+ ABSL_RAW_CHECK(L::Partial(3, 5, k).template Offset<3>() == Offset,
+ "Invalid offset");
+ for (auto _ : state) {
+ DoNotOptimize(k);
+ DoNotOptimize(L::Partial(3, 5, k).template Offset<3>());
+ }
+}
+
template <class... Ts>
size_t VariableOffset(size_t n, size_t m, size_t k);
template <>
size_t VariableOffset<int8_t, int16_t, int32_t, Int128>(size_t n, size_t m,
size_t k) {
- auto Align = [](size_t n, size_t m) { return (n + m - 1) & ~(m - 1); };
- return Align(Align(Align(n * 1, 2) + m * 2, 4) + k * 4, 8);
+ return MyAlign(MyAlign(MyAlign(n * 1, 2) + m * 2, 4) + k * 4, 8);
}
template <>
@@ -94,6 +169,75 @@ void BM_OffsetVariable(benchmark::State& state) {
}
}
+template <class... Ts>
+size_t AllocSize(size_t x);
+
+template <>
+size_t AllocSize<int8_t, int16_t, int32_t, Int128>(size_t x) {
+ constexpr size_t o =
+ Layout<int8_t, int16_t, int32_t, Int128>::Partial(3, 5, 7)
+ .template Offset<Int128>();
+ return o + sizeof(Int128) * x;
+}
+
+template <>
+size_t AllocSize<Int128, int32_t, int16_t, int8_t>(size_t x) {
+ constexpr size_t o =
+ Layout<Int128, int32_t, int16_t, int8_t>::Partial(3, 5, 7)
+ .template Offset<int8_t>();
+ return o + sizeof(int8_t) * x;
+}
+
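With x = 9, as used below, these reduce to the expected benchmark constants:

    48 + sizeof(Int128) * 9 = 48 + 16 * 9 = 192
    82 + sizeof(int8_t) * 9 = 82 + 9 = 91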
+// This benchmark provides the upper bound on performance for BM_AllocSize
+template <size_t Size, class... Ts>
+void BM_AllocSizeHeadroom(benchmark::State& state) {
+ size_t x = 9;
+ ABSL_RAW_CHECK(AllocSize<Ts...>(x) == Size, "Invalid size");
+ for (auto _ : state) {
+ DoNotOptimize(x);
+ DoNotOptimize(AllocSize<Ts...>(x));
+ }
+}
+
+template <size_t Size, class... Ts>
+void BM_AllocSizeStatic(benchmark::State& state) {
+ using L = typename Layout<Ts...>::template WithStaticSizes<3, 5, 7>;
+ size_t x = 9;
+  ABSL_RAW_CHECK(L(x).AllocSize() == Size, "Invalid size");
+ for (auto _ : state) {
+ DoNotOptimize(x);
+ DoNotOptimize(L(x).AllocSize());
+ }
+}
+
+template <size_t Size, class... Ts>
+void BM_AllocSize(benchmark::State& state) {
+ using L = Layout<Ts...>;
+ size_t n = 3;
+ size_t m = 5;
+ size_t k = 7;
+ size_t x = 9;
+  ABSL_RAW_CHECK(L(n, m, k, x).AllocSize() == Size, "Invalid size");
+ for (auto _ : state) {
+ DoNotOptimize(n);
+ DoNotOptimize(m);
+ DoNotOptimize(k);
+ DoNotOptimize(x);
+ DoNotOptimize(L(n, m, k, x).AllocSize());
+ }
+}
+
+template <size_t Size, class... Ts>
+void BM_AllocSizeIndirect(benchmark::State& state) {
+ using L = Layout<Ts...>;
+ auto l = L(3, 5, 7, 9);
+  ABSL_RAW_CHECK(l.AllocSize() == Size, "Invalid size");
+ for (auto _ : state) {
+ DoNotOptimize(l);
+ DoNotOptimize(l.AllocSize());
+ }
+}
+
// Run all benchmarks in two modes:
//
// Layout with padding: int8_t[3], int16_t[5], int32_t[7], Int128[?].
@@ -106,16 +250,46 @@ void BM_OffsetVariable(benchmark::State& state) {
OFFSET_BENCHMARK(BM_OffsetConstantHeadroom, 48, int8_t, int16_t, int32_t,
Int128);
+OFFSET_BENCHMARK(BM_OffsetConstantStatic, 48, int8_t, int16_t, int32_t, Int128);
OFFSET_BENCHMARK(BM_OffsetConstant, 48, int8_t, int16_t, int32_t, Int128);
+OFFSET_BENCHMARK(BM_OffsetConstantIndirect, 48, int8_t, int16_t, int32_t,
+ Int128);
+
OFFSET_BENCHMARK(BM_OffsetConstantHeadroom, 82, Int128, int32_t, int16_t,
int8_t);
+OFFSET_BENCHMARK(BM_OffsetConstantStatic, 82, Int128, int32_t, int16_t, int8_t);
OFFSET_BENCHMARK(BM_OffsetConstant, 82, Int128, int32_t, int16_t, int8_t);
+OFFSET_BENCHMARK(BM_OffsetConstantIndirect, 82, Int128, int32_t, int16_t,
+ int8_t);
+
+OFFSET_BENCHMARK(BM_OffsetPartialHeadroom, 48, int8_t, int16_t, int32_t,
+ Int128);
+OFFSET_BENCHMARK(BM_OffsetPartialStatic, 48, int8_t, int16_t, int32_t, Int128);
+OFFSET_BENCHMARK(BM_OffsetPartial, 48, int8_t, int16_t, int32_t, Int128);
+
+OFFSET_BENCHMARK(BM_OffsetPartialHeadroom, 82, Int128, int32_t, int16_t,
+ int8_t);
+OFFSET_BENCHMARK(BM_OffsetPartialStatic, 82, Int128, int32_t, int16_t, int8_t);
+OFFSET_BENCHMARK(BM_OffsetPartial, 82, Int128, int32_t, int16_t, int8_t);
+
OFFSET_BENCHMARK(BM_OffsetVariableHeadroom, 48, int8_t, int16_t, int32_t,
Int128);
OFFSET_BENCHMARK(BM_OffsetVariable, 48, int8_t, int16_t, int32_t, Int128);
+
OFFSET_BENCHMARK(BM_OffsetVariableHeadroom, 82, Int128, int32_t, int16_t,
int8_t);
OFFSET_BENCHMARK(BM_OffsetVariable, 82, Int128, int32_t, int16_t, int8_t);
+
+OFFSET_BENCHMARK(BM_AllocSizeHeadroom, 192, int8_t, int16_t, int32_t, Int128);
+OFFSET_BENCHMARK(BM_AllocSizeStatic, 192, int8_t, int16_t, int32_t, Int128);
+OFFSET_BENCHMARK(BM_AllocSize, 192, int8_t, int16_t, int32_t, Int128);
+OFFSET_BENCHMARK(BM_AllocSizeIndirect, 192, int8_t, int16_t, int32_t, Int128);
+
+OFFSET_BENCHMARK(BM_AllocSizeHeadroom, 91, Int128, int32_t, int16_t, int8_t);
+OFFSET_BENCHMARK(BM_AllocSizeStatic, 91, Int128, int32_t, int16_t, int8_t);
+OFFSET_BENCHMARK(BM_AllocSize, 91, Int128, int32_t, int16_t, int8_t);
+OFFSET_BENCHMARK(BM_AllocSizeIndirect, 91, Int128, int32_t, int16_t, int8_t);
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/container/internal/layout_test.cc b/absl/container/internal/layout_test.cc
index ae55cf7e..47fc9f33 100644
--- a/absl/container/internal/layout_test.cc
+++ b/absl/container/internal/layout_test.cc
@@ -68,9 +68,7 @@ struct alignas(8) Int128 {
// int64_t is *not* 8-byte aligned on all platforms!
struct alignas(8) Int64 {
int64_t a;
- friend bool operator==(Int64 lhs, Int64 rhs) {
- return lhs.a == rhs.a;
- }
+ friend bool operator==(Int64 lhs, Int64 rhs) { return lhs.a == rhs.a; }
};
// Properties of types that this test relies on.
@@ -271,6 +269,35 @@ TEST(Layout, Offsets) {
}
}
+TEST(Layout, StaticOffsets) {
+ using L = Layout<int8_t, int32_t, Int128>;
+ {
+ using SL = L::WithStaticSizes<>;
+ EXPECT_THAT(SL::Partial().Offsets(), ElementsAre(0));
+ EXPECT_THAT(SL::Partial(5).Offsets(), ElementsAre(0, 8));
+ EXPECT_THAT(SL::Partial(5, 3, 1).Offsets(), ElementsAre(0, 8, 24));
+ EXPECT_THAT(SL(5, 3, 1).Offsets(), ElementsAre(0, 8, 24));
+ }
+ {
+ using SL = L::WithStaticSizes<5>;
+ EXPECT_THAT(SL::Partial().Offsets(), ElementsAre(0, 8));
+ EXPECT_THAT(SL::Partial(3).Offsets(), ElementsAre(0, 8, 24));
+ EXPECT_THAT(SL::Partial(3, 1).Offsets(), ElementsAre(0, 8, 24));
+ EXPECT_THAT(SL(3, 1).Offsets(), ElementsAre(0, 8, 24));
+ }
+ {
+ using SL = L::WithStaticSizes<5, 3>;
+ EXPECT_THAT(SL::Partial().Offsets(), ElementsAre(0, 8, 24));
+ EXPECT_THAT(SL::Partial(1).Offsets(), ElementsAre(0, 8, 24));
+ EXPECT_THAT(SL(1).Offsets(), ElementsAre(0, 8, 24));
+ }
+ {
+ using SL = L::WithStaticSizes<5, 3, 1>;
+ EXPECT_THAT(SL::Partial().Offsets(), ElementsAre(0, 8, 24));
+ EXPECT_THAT(SL().Offsets(), ElementsAre(0, 8, 24));
+ }
+}
+
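The expected offsets are identical in all four blocks because they depend only
on the sizes (5, 3, 1), not on how many of those sizes are static:

    int8_t[5]  at 0
    int32_t[3] at Align(5 * 1, 4) = 8
    Int128[1]  at Align(8 + 3 * 4, 8) = 24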
TEST(Layout, AllocSize) {
{
using L = Layout<int32_t>;
@@ -295,6 +322,30 @@ TEST(Layout, AllocSize) {
}
}
+TEST(Layout, StaticAllocSize) {
+ using L = Layout<int8_t, int32_t, Int128>;
+ {
+ using SL = L::WithStaticSizes<>;
+ EXPECT_EQ(136, SL::Partial(3, 5, 7).AllocSize());
+ EXPECT_EQ(136, SL(3, 5, 7).AllocSize());
+ }
+ {
+ using SL = L::WithStaticSizes<3>;
+ EXPECT_EQ(136, SL::Partial(5, 7).AllocSize());
+ EXPECT_EQ(136, SL(5, 7).AllocSize());
+ }
+ {
+ using SL = L::WithStaticSizes<3, 5>;
+ EXPECT_EQ(136, SL::Partial(7).AllocSize());
+ EXPECT_EQ(136, SL(7).AllocSize());
+ }
+ {
+ using SL = L::WithStaticSizes<3, 5, 7>;
+ EXPECT_EQ(136, SL::Partial().AllocSize());
+ EXPECT_EQ(136, SL().AllocSize());
+ }
+}
+
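Likewise, the allocation size is independent of which sizes are static: with
sizes (3, 5, 7), the Int128 array starts at Align(Align(3, 4) + 5 * 4, 8) = 24,
so AllocSize() is 24 + 7 * 16 = 136 in every block.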
TEST(Layout, SizeByIndex) {
{
using L = Layout<int32_t>;
@@ -370,6 +421,27 @@ TEST(Layout, Sizes) {
}
}
+TEST(Layout, StaticSize) {
+ using L = Layout<int8_t, int32_t, Int128>;
+ {
+ using SL = L::WithStaticSizes<>;
+ EXPECT_THAT(SL::Partial().Sizes(), ElementsAre());
+ EXPECT_THAT(SL::Partial(3).Size<0>(), 3);
+ EXPECT_THAT(SL::Partial(3).Size<int8_t>(), 3);
+ EXPECT_THAT(SL::Partial(3).Sizes(), ElementsAre(3));
+ EXPECT_THAT(SL::Partial(3, 5, 7).Size<0>(), 3);
+ EXPECT_THAT(SL::Partial(3, 5, 7).Size<int8_t>(), 3);
+ EXPECT_THAT(SL::Partial(3, 5, 7).Size<2>(), 7);
+ EXPECT_THAT(SL::Partial(3, 5, 7).Size<Int128>(), 7);
+ EXPECT_THAT(SL::Partial(3, 5, 7).Sizes(), ElementsAre(3, 5, 7));
+ EXPECT_THAT(SL(3, 5, 7).Size<0>(), 3);
+ EXPECT_THAT(SL(3, 5, 7).Size<int8_t>(), 3);
+ EXPECT_THAT(SL(3, 5, 7).Size<2>(), 7);
+ EXPECT_THAT(SL(3, 5, 7).Size<Int128>(), 7);
+ EXPECT_THAT(SL(3, 5, 7).Sizes(), ElementsAre(3, 5, 7));
+ }
+}
+
TEST(Layout, PointerByIndex) {
alignas(max_align_t) const unsigned char p[100] = {0};
{
@@ -720,6 +792,61 @@ TEST(Layout, MutablePointers) {
}
}
+TEST(Layout, StaticPointers) {
+ alignas(max_align_t) const unsigned char p[100] = {0};
+ using L = Layout<int8_t, int8_t, Int128>;
+ {
+ const auto x = L::WithStaticSizes<>::Partial();
+ EXPECT_EQ(std::make_tuple(x.Pointer<0>(p)),
+ Type<std::tuple<const int8_t*>>(x.Pointers(p)));
+ }
+ {
+ const auto x = L::WithStaticSizes<>::Partial(1);
+ EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)),
+ (Type<std::tuple<const int8_t*, const int8_t*>>(x.Pointers(p))));
+ }
+ {
+ const auto x = L::WithStaticSizes<1>::Partial();
+ EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)),
+ (Type<std::tuple<const int8_t*, const int8_t*>>(x.Pointers(p))));
+ }
+ {
+ const auto x = L::WithStaticSizes<>::Partial(1, 2, 3);
+ EXPECT_EQ(
+ std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
+ (Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
+ x.Pointers(p))));
+ }
+ {
+ const auto x = L::WithStaticSizes<1>::Partial(2, 3);
+ EXPECT_EQ(
+ std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
+ (Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
+ x.Pointers(p))));
+ }
+ {
+ const auto x = L::WithStaticSizes<1, 2>::Partial(3);
+ EXPECT_EQ(
+ std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
+ (Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
+ x.Pointers(p))));
+ }
+ {
+ const auto x = L::WithStaticSizes<1, 2, 3>::Partial();
+ EXPECT_EQ(
+ std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
+ (Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
+ x.Pointers(p))));
+ }
+ {
+ const L::WithStaticSizes<1, 2, 3> x;
+ EXPECT_EQ(
+ std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
+ (Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
+ x.Pointers(p))));
+ }
+}
+
TEST(Layout, SliceByIndexSize) {
alignas(max_align_t) const unsigned char p[100] = {0};
{
@@ -769,7 +896,6 @@ TEST(Layout, SliceByTypeSize) {
EXPECT_EQ(7, L(3, 5, 7).Slice<Int128>(p).size());
}
}
-
TEST(Layout, MutableSliceByIndexSize) {
alignas(max_align_t) unsigned char p[100] = {0};
{
@@ -820,6 +946,39 @@ TEST(Layout, MutableSliceByTypeSize) {
}
}
+TEST(Layout, StaticSliceSize) {
+ alignas(max_align_t) const unsigned char cp[100] = {0};
+ alignas(max_align_t) unsigned char p[100] = {0};
+ using L = Layout<int8_t, int32_t, Int128>;
+ using SL = L::WithStaticSizes<3, 5>;
+
+ EXPECT_EQ(3, SL::Partial().Slice<0>(cp).size());
+ EXPECT_EQ(3, SL::Partial().Slice<int8_t>(cp).size());
+ EXPECT_EQ(3, SL::Partial(7).Slice<0>(cp).size());
+ EXPECT_EQ(3, SL::Partial(7).Slice<int8_t>(cp).size());
+
+ EXPECT_EQ(5, SL::Partial().Slice<1>(cp).size());
+ EXPECT_EQ(5, SL::Partial().Slice<int32_t>(cp).size());
+ EXPECT_EQ(5, SL::Partial(7).Slice<1>(cp).size());
+ EXPECT_EQ(5, SL::Partial(7).Slice<int32_t>(cp).size());
+
+ EXPECT_EQ(7, SL::Partial(7).Slice<2>(cp).size());
+ EXPECT_EQ(7, SL::Partial(7).Slice<Int128>(cp).size());
+
+ EXPECT_EQ(3, SL::Partial().Slice<0>(p).size());
+ EXPECT_EQ(3, SL::Partial().Slice<int8_t>(p).size());
+ EXPECT_EQ(3, SL::Partial(7).Slice<0>(p).size());
+ EXPECT_EQ(3, SL::Partial(7).Slice<int8_t>(p).size());
+
+ EXPECT_EQ(5, SL::Partial().Slice<1>(p).size());
+ EXPECT_EQ(5, SL::Partial().Slice<int32_t>(p).size());
+ EXPECT_EQ(5, SL::Partial(7).Slice<1>(p).size());
+ EXPECT_EQ(5, SL::Partial(7).Slice<int32_t>(p).size());
+
+ EXPECT_EQ(7, SL::Partial(7).Slice<2>(p).size());
+ EXPECT_EQ(7, SL::Partial(7).Slice<Int128>(p).size());
+}
+
TEST(Layout, SliceByIndexData) {
alignas(max_align_t) const unsigned char p[100] = {0};
{
@@ -1230,6 +1389,39 @@ TEST(Layout, MutableSliceByTypeData) {
}
}
+TEST(Layout, StaticSliceData) {
+ alignas(max_align_t) const unsigned char cp[100] = {0};
+ alignas(max_align_t) unsigned char p[100] = {0};
+ using L = Layout<int8_t, int32_t, Int128>;
+ using SL = L::WithStaticSizes<3, 5>;
+
+ EXPECT_EQ(0, Distance(cp, SL::Partial().Slice<0>(cp).data()));
+ EXPECT_EQ(0, Distance(cp, SL::Partial().Slice<int8_t>(cp).data()));
+ EXPECT_EQ(0, Distance(cp, SL::Partial(7).Slice<0>(cp).data()));
+ EXPECT_EQ(0, Distance(cp, SL::Partial(7).Slice<int8_t>(cp).data()));
+
+ EXPECT_EQ(4, Distance(cp, SL::Partial().Slice<1>(cp).data()));
+ EXPECT_EQ(4, Distance(cp, SL::Partial().Slice<int32_t>(cp).data()));
+ EXPECT_EQ(4, Distance(cp, SL::Partial(7).Slice<1>(cp).data()));
+ EXPECT_EQ(4, Distance(cp, SL::Partial(7).Slice<int32_t>(cp).data()));
+
+ EXPECT_EQ(24, Distance(cp, SL::Partial(7).Slice<2>(cp).data()));
+ EXPECT_EQ(24, Distance(cp, SL::Partial(7).Slice<Int128>(cp).data()));
+
+ EXPECT_EQ(0, Distance(p, SL::Partial().Slice<0>(p).data()));
+ EXPECT_EQ(0, Distance(p, SL::Partial().Slice<int8_t>(p).data()));
+ EXPECT_EQ(0, Distance(p, SL::Partial(7).Slice<0>(p).data()));
+ EXPECT_EQ(0, Distance(p, SL::Partial(7).Slice<int8_t>(p).data()));
+
+ EXPECT_EQ(4, Distance(p, SL::Partial().Slice<1>(p).data()));
+ EXPECT_EQ(4, Distance(p, SL::Partial().Slice<int32_t>(p).data()));
+ EXPECT_EQ(4, Distance(p, SL::Partial(7).Slice<1>(p).data()));
+ EXPECT_EQ(4, Distance(p, SL::Partial(7).Slice<int32_t>(p).data()));
+
+ EXPECT_EQ(24, Distance(p, SL::Partial(7).Slice<2>(p).data()));
+ EXPECT_EQ(24, Distance(p, SL::Partial(7).Slice<Int128>(p).data()));
+}
+
MATCHER_P(IsSameSlice, slice, "") {
return arg.size() == slice.size() && arg.data() == slice.data();
}
@@ -1339,6 +1531,43 @@ TEST(Layout, MutableSlices) {
}
}
+TEST(Layout, StaticSlices) {
+ alignas(max_align_t) const unsigned char cp[100] = {0};
+ alignas(max_align_t) unsigned char p[100] = {0};
+ using SL = Layout<int8_t, int8_t, Int128>::WithStaticSizes<1, 2>;
+ {
+ const auto x = SL::Partial();
+ EXPECT_THAT(
+ (Type<std::tuple<Span<const int8_t>, Span<const int8_t>>>(
+ x.Slices(cp))),
+ Tuple(IsSameSlice(x.Slice<0>(cp)), IsSameSlice(x.Slice<1>(cp))));
+ EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>>>(x.Slices(p))),
+ Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p))));
+ }
+ {
+ const auto x = SL::Partial(3);
+ EXPECT_THAT((Type<std::tuple<Span<const int8_t>, Span<const int8_t>,
+ Span<const Int128>>>(x.Slices(cp))),
+ Tuple(IsSameSlice(x.Slice<0>(cp)), IsSameSlice(x.Slice<1>(cp)),
+ IsSameSlice(x.Slice<2>(cp))));
+ EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(
+ x.Slices(p))),
+ Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
+ IsSameSlice(x.Slice<2>(p))));
+ }
+ {
+ const SL x(3);
+ EXPECT_THAT((Type<std::tuple<Span<const int8_t>, Span<const int8_t>,
+ Span<const Int128>>>(x.Slices(cp))),
+ Tuple(IsSameSlice(x.Slice<0>(cp)), IsSameSlice(x.Slice<1>(cp)),
+ IsSameSlice(x.Slice<2>(cp))));
+ EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(
+ x.Slices(p))),
+ Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
+ IsSameSlice(x.Slice<2>(p))));
+ }
+}
+
TEST(Layout, UnalignedTypes) {
constexpr Layout<unsigned char, unsigned char, unsigned char> x(1, 2, 3);
alignas(max_align_t) unsigned char p[x.AllocSize() + 1];
@@ -1377,6 +1606,36 @@ TEST(Layout, Alignment) {
static_assert(Layout<int32_t, Int64, int8_t>::Alignment() == 8, "");
static_assert(Layout<Int64, int8_t, int32_t>::Alignment() == 8, "");
static_assert(Layout<Int64, int32_t, int8_t>::Alignment() == 8, "");
+  static_assert(
+      Layout<Aligned<int8_t, 64>>::WithStaticSizes<>::Alignment() == 64, "");
+  static_assert(
+      Layout<Aligned<int8_t, 64>>::WithStaticSizes<2>::Alignment() == 64, "");
+}
+
+TEST(Layout, StaticAlignment) {
+  static_assert(Layout<int8_t>::WithStaticSizes<>::Alignment() == 1, "");
+  static_assert(Layout<int8_t>::WithStaticSizes<0>::Alignment() == 1, "");
+  static_assert(Layout<int8_t>::WithStaticSizes<7>::Alignment() == 1, "");
+  static_assert(Layout<int32_t>::WithStaticSizes<>::Alignment() == 4, "");
+  static_assert(Layout<int32_t>::WithStaticSizes<0>::Alignment() == 4, "");
+  static_assert(Layout<int32_t>::WithStaticSizes<3>::Alignment() == 4, "");
+  static_assert(
+      Layout<Aligned<int8_t, 64>>::WithStaticSizes<>::Alignment() == 64, "");
+  static_assert(
+      Layout<Aligned<int8_t, 64>>::WithStaticSizes<0>::Alignment() == 64, "");
+  static_assert(
+      Layout<Aligned<int8_t, 64>>::WithStaticSizes<2>::Alignment() == 64, "");
+  static_assert(
+      Layout<int32_t, Int64, int8_t>::WithStaticSizes<>::Alignment() == 8, "");
+  static_assert(
+      Layout<int32_t, Int64, int8_t>::WithStaticSizes<0, 0, 0>::Alignment() ==
+          8,
+      "");
+  static_assert(
+      Layout<int32_t, Int64, int8_t>::WithStaticSizes<1, 1, 1>::Alignment() ==
+          8,
+      "");
}
TEST(Layout, ConstexprPartial) {
@@ -1384,6 +1643,15 @@ TEST(Layout, ConstexprPartial) {
constexpr Layout<unsigned char, Aligned<unsigned char, 2 * M>> x(1, 3);
static_assert(x.Partial(1).template Offset<1>() == 2 * M, "");
}
+
+TEST(Layout, StaticConstexpr) {
+ constexpr size_t M = alignof(max_align_t);
+ using L = Layout<unsigned char, Aligned<unsigned char, 2 * M>>;
+ using SL = L::WithStaticSizes<1, 3>;
+ constexpr SL x;
+ static_assert(x.Offset<1>() == 2 * M, "");
+}
+
// [from, to)
struct Region {
size_t from;
@@ -1458,6 +1726,41 @@ TEST(Layout, PoisonPadding) {
}
}
+TEST(Layout, StaticPoisonPadding) {
+ using L = Layout<int8_t, Int64, int32_t, Int128>;
+ using SL = L::WithStaticSizes<1, 2>;
+
+ constexpr size_t n = L::Partial(1, 2, 3, 4).AllocSize();
+ {
+ constexpr auto x = SL::Partial();
+ alignas(max_align_t) const unsigned char c[n] = {};
+ x.PoisonPadding(c);
+ EXPECT_EQ(x.Slices(c), x.Slices(c));
+ ExpectPoisoned(c, {{1, 8}});
+ }
+ {
+ constexpr auto x = SL::Partial(3);
+ alignas(max_align_t) const unsigned char c[n] = {};
+ x.PoisonPadding(c);
+ EXPECT_EQ(x.Slices(c), x.Slices(c));
+ ExpectPoisoned(c, {{1, 8}, {36, 40}});
+ }
+ {
+ constexpr auto x = SL::Partial(3, 4);
+ alignas(max_align_t) const unsigned char c[n] = {};
+ x.PoisonPadding(c);
+ EXPECT_EQ(x.Slices(c), x.Slices(c));
+ ExpectPoisoned(c, {{1, 8}, {36, 40}});
+ }
+ {
+ constexpr SL x(3, 4);
+ alignas(max_align_t) const unsigned char c[n] = {};
+ x.PoisonPadding(c);
+ EXPECT_EQ(x.Slices(c), x.Slices(c));
+ ExpectPoisoned(c, {{1, 8}, {36, 40}});
+ }
+}
+
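The poisoned regions follow from the offsets of this layout with static sizes
(1, 2): int8_t[1] ends at byte 1 and Int64[2] starts at 8, giving {1, 8}; once
the int32_t count 3 is known, int32_t[3] ends at 24 + 12 = 36 and Int128 starts
at 40, giving {36, 40}.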
TEST(Layout, DebugString) {
{
constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial();
@@ -1500,6 +1803,62 @@ TEST(Layout, DebugString) {
}
}
+TEST(Layout, StaticDebugString) {
+ {
+ constexpr auto x =
+ Layout<int8_t, int32_t, int8_t, Int128>::WithStaticSizes<>::Partial();
+ EXPECT_EQ("@0<signed char>(1)", x.DebugString());
+ }
+ {
+ constexpr auto x =
+ Layout<int8_t, int32_t, int8_t, Int128>::WithStaticSizes<>::Partial(1);
+ EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)", x.DebugString());
+ }
+ {
+ constexpr auto x =
+ Layout<int8_t, int32_t, int8_t, Int128>::WithStaticSizes<1>::Partial();
+ EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)", x.DebugString());
+ }
+ {
+ constexpr auto x =
+ Layout<int8_t, int32_t, int8_t, Int128>::WithStaticSizes<>::Partial(1,
+ 2);
+ EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)",
+ x.DebugString());
+ }
+ {
+ constexpr auto x =
+ Layout<int8_t, int32_t, int8_t, Int128>::WithStaticSizes<1>::Partial(2);
+ EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)",
+ x.DebugString());
+ }
+ {
+ constexpr auto x = Layout<int8_t, int32_t, int8_t,
+ Int128>::WithStaticSizes<1, 2>::Partial();
+ EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)",
+ x.DebugString());
+ }
+ {
+ constexpr auto x = Layout<int8_t, int32_t, int8_t,
+ Int128>::WithStaticSizes<1, 2, 3, 4>::Partial();
+ EXPECT_EQ(
+ "@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
+ "@16" +
+ Int128::Name() + "(16)[4]",
+ x.DebugString());
+ }
+ {
+ constexpr Layout<int8_t, int32_t, int8_t, Int128>::WithStaticSizes<1, 2, 3,
+ 4>
+ x;
+ EXPECT_EQ(
+ "@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
+ "@16" +
+ Int128::Name() + "(16)[4]",
+ x.DebugString());
+ }
+}
+
TEST(Layout, CharTypes) {
constexpr Layout<int32_t> x(1);
alignas(max_align_t) char c[x.AllocSize()] = {};
@@ -1638,6 +1997,35 @@ TEST(CompactString, Works) {
EXPECT_STREQ("hello", s.c_str());
}
+// Same as the previous CompactString example, except we set the first array
+// size to 1 statically, since we know it is always 1. This allows us to compute
+// the offset of the character array at compile time.
+class StaticCompactString {
+ public:
+ StaticCompactString(const char* s = "") { // NOLINT
+ const size_t size = strlen(s);
+ const SL layout(size + 1);
+ p_.reset(new unsigned char[layout.AllocSize()]);
+ layout.PoisonPadding(p_.get());
+ *layout.Pointer<size_t>(p_.get()) = size;
+ memcpy(layout.Pointer<char>(p_.get()), s, size + 1);
+ }
+
+ size_t size() const { return *SL::Partial().Pointer<size_t>(p_.get()); }
+
+ const char* c_str() const { return SL::Partial().Pointer<char>(p_.get()); }
+
+ private:
+ using SL = Layout<size_t, char>::WithStaticSizes<1>;
+ std::unique_ptr<unsigned char[]> p_;
+};
+
+TEST(StaticCompactString, Works) {
+ StaticCompactString s = "hello";
+ EXPECT_EQ(5, s.size());
+ EXPECT_STREQ("hello", s.c_str());
+}
+
} // namespace example
} // namespace