author    Abseil Team <absl-team@google.com>  2024-07-01 06:47:59 -0700
committer Copybara-Service <copybara-worker@google.com>  2024-07-01 06:49:10 -0700
commit    4eb81046b2df1804220ef5af3fda54b6fa614c27 (patch)
tree      5d0985a6725b7b046604e3d284eafcca8775a6b1
parent    a7c5f985f6b7915b51b5d5799c690c99a04afdc5 (diff)
Use static_cast instead of reinterpret_cast for raw hash set slots, as casting from void* to T* is well defined.

PiperOrigin-RevId: 648352837
Change-Id: I082cd0c007706ae8baa8f26cdc85d51b69bffd54
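For context, the pattern this change relies on can be shown with a minimal standalone sketch. The Slot type and ToSlot helper below are illustrative only, not part of the Abseil sources; they mirror the to_slot/slot_array casts touched in the diff. A static_cast from void* back to the slot type is well defined whenever the pointer really does refer to suitably aligned storage holding an object of that type, so the stronger reinterpret_cast is unnecessary here.

#include <new>

struct Slot { int value; };

// Hypothetical helper mirroring raw_hash_set's to_slot(): recover a typed
// pointer from type-erased backing storage with an ordinary static_cast.
inline Slot* ToSlot(void* buf) { return static_cast<Slot*>(buf); }

int main() {
  alignas(Slot) unsigned char storage[sizeof(Slot)];
  void* erased = ::new (static_cast<void*>(storage)) Slot{42};  // placement-new a Slot
  Slot* slot = ToSlot(erased);   // well-defined void* -> Slot* conversion
  int ok = (slot->value == 42);
  slot->~Slot();                 // trivial here, shown for completeness
  return ok ? 0 : 1;
}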
-rw-r--r--  absl/container/internal/raw_hash_set.h | 34
1 file changed, 13 insertions(+), 21 deletions(-)
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index 02e389b9..d4fe8f5c 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -878,7 +878,7 @@ struct GroupPortableImpl {
// Note: this includes: kEmpty, kDeleted, kSentinel.
// It is useful in contexts when kSentinel is not present.
auto MaskNonFull() const {
- return BitMask<uint64_t, kWidth, 3>(ctrl & kMsbs8Bytes);
+ return BitMask<uint64_t, kWidth, 3>(ctrl & kMsbs8Bytes);
}
NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
@@ -1134,9 +1134,7 @@ class GrowthInfo {
// Returns true if the table satisfies two properties:
// 1. Guaranteed to have no kDeleted slots.
// 2. There is no growth left.
- bool HasNoGrowthLeftAndNoDeleted() const {
- return growth_left_info_ == 0;
- }
+ bool HasNoGrowthLeftAndNoDeleted() const { return growth_left_info_ == 0; }
// Returns true if table guaranteed to have no kDeleted slots.
bool HasNoDeleted() const {
@@ -1144,9 +1142,7 @@ class GrowthInfo {
}
// Returns the number of elements left to grow.
- size_t GetGrowthLeft() const {
- return growth_left_info_ & kGrowthLeftMask;
- }
+ size_t GetGrowthLeft() const { return growth_left_info_ & kGrowthLeftMask; }
private:
static constexpr size_t kGrowthLeftMask = ((~size_t{}) >> 1);
@@ -1421,8 +1417,8 @@ class CommonFields : public CommonFieldsGenerationInfo {
should_rehash_for_bug_detection_on_insert(control(), capacity());
}
bool should_rehash_for_bug_detection_on_move() const {
- return CommonFieldsGenerationInfo::
- should_rehash_for_bug_detection_on_move(control(), capacity());
+ return CommonFieldsGenerationInfo::should_rehash_for_bug_detection_on_move(
+ control(), capacity());
}
void reset_reserved_growth(size_t reservation) {
CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size());
@@ -1855,8 +1851,8 @@ constexpr size_t BackingArrayAlignment(size_t align_of_slot) {
// Returns the address of the ith slot in slots where each slot occupies
// slot_size.
inline void* SlotAddress(void* slot_array, size_t slot, size_t slot_size) {
- return reinterpret_cast<void*>(reinterpret_cast<char*>(slot_array) +
- (slot * slot_size));
+ return static_cast<void*>(static_cast<char*>(slot_array) +
+ (slot * slot_size));
}
// Iterates over all full slots and calls `cb(const ctrl_t*, SlotType*)`.
@@ -2100,8 +2096,8 @@ class HashSetResizeHelper {
using slot_type = typename PolicyTraits::slot_type;
assert(is_single_group(c.capacity()));
- auto* new_slots = reinterpret_cast<slot_type*>(c.slot_array());
- auto* old_slots_ptr = reinterpret_cast<slot_type*>(old_slots());
+ auto* new_slots = static_cast<slot_type*>(c.slot_array());
+ auto* old_slots_ptr = static_cast<slot_type*>(old_slots());
size_t shuffle_bit = old_capacity_ / 2 + 1;
for (size_t i = 0; i < old_capacity_; ++i) {
@@ -3675,8 +3671,7 @@ class raw_hash_set {
insert_slot(to_slot(resize_helper.old_soo_data()));
return;
} else {
- auto* old_slots =
- reinterpret_cast<slot_type*>(resize_helper.old_slots());
+ auto* old_slots = static_cast<slot_type*>(resize_helper.old_slots());
size_t total_probe_length = 0;
for (size_t i = 0; i != resize_helper.old_capacity(); ++i) {
if (IsFull(resize_helper.old_ctrl()[i])) {
@@ -3692,9 +3687,7 @@ class raw_hash_set {
// Casting directly from e.g. char* to slot_type* can cause compilation errors
// on objective-C. This function converts to void* first, avoiding the issue.
- static slot_type* to_slot(void* buf) {
- return reinterpret_cast<slot_type*>(buf);
- }
+ static slot_type* to_slot(void* buf) { return static_cast<slot_type*>(buf); }
// Requires that lhs does not have a full SOO slot.
static void move_common(bool that_is_full_soo, allocator_type& rhs_alloc,
@@ -3735,7 +3728,7 @@ class raw_hash_set {
}
}
- template<bool propagate_alloc>
+ template <bool propagate_alloc>
raw_hash_set& assign_impl(raw_hash_set&& that) {
// We don't bother checking for this/that aliasing. We just need to avoid
// breaking the invariants in that case.
@@ -3880,8 +3873,7 @@ class raw_hash_set {
}
// We only do validation for small tables so that it's constant time.
if (capacity() > 16) return;
- IterateOverFullSlots(
- common(), slot_array(), assert_consistent);
+ IterateOverFullSlots(common(), slot_array(), assert_consistent);
#endif
}
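As a side note on the SlotAddress hunk above, the i-th slot address is still computed with plain byte arithmetic on a char* view of the backing array, and both conversions involved (void* to char*, and the result back to void*) are ordinary static_casts. Below is a small usage sketch; SlotAddressSketch is an assumed name, not the Abseil function, and it simply checks the arithmetic against ordinary array indexing.

#include <cassert>
#include <cstddef>

// Same shape as SlotAddress in the diff: offset a char* view of the slot
// array by slot * slot_size bytes.
inline void* SlotAddressSketch(void* slot_array, std::size_t slot,
                               std::size_t slot_size) {
  return static_cast<void*>(static_cast<char*>(slot_array) + slot * slot_size);
}

int main() {
  double slots[4] = {};
  for (std::size_t i = 0; i < 4; ++i) {
    // Each computed address coincides with the ordinary &slots[i].
    assert(SlotAddressSketch(slots, i, sizeof(double)) == &slots[i]);
  }
  return 0;
}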