Diffstat (limited to 'absl/random/internal')
-rw-r--r--  absl/random/internal/BUILD.bazel            10
-rw-r--r--  absl/random/internal/fast_uniform_bits.h     3
-rw-r--r--  absl/random/internal/nonsecure_base.h        2
-rw-r--r--  absl/random/internal/pcg_engine.h           25
-rw-r--r--  absl/random/internal/pool_urbg.cc           18
-rw-r--r--  absl/random/internal/seed_material.cc        4
6 files changed, 23 insertions, 39 deletions
diff --git a/absl/random/internal/BUILD.bazel b/absl/random/internal/BUILD.bazel
index fd5b6195..81ca669b 100644
--- a/absl/random/internal/BUILD.bazel
+++ b/absl/random/internal/BUILD.bazel
@@ -24,9 +24,11 @@ load(
"absl_random_randen_copts_init",
)
-package(default_visibility = [
+default_package_visibility = [
"//absl/random:__pkg__",
-])
+]
+
+package(default_visibility = default_package_visibility)
licenses(["notice"])
@@ -248,6 +250,8 @@ cc_library(
hdrs = ["randen_engine.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
+ visibility = default_package_visibility + [
+ ],
deps = [
":iostream_state_saver",
":randen",
@@ -389,7 +393,7 @@ ABSL_RANDOM_NONPORTABLE_TAGS = [
"no_test_darwin_x86_64",
"no_test_ios_x86_64",
"no_test_loonix",
- "no_test_msvc_x64",
+ "no_test_lexan",
"no_test_wasm",
]
diff --git a/absl/random/internal/fast_uniform_bits.h b/absl/random/internal/fast_uniform_bits.h
index f3a5c00f..8d8ed045 100644
--- a/absl/random/internal/fast_uniform_bits.h
+++ b/absl/random/internal/fast_uniform_bits.h
@@ -151,7 +151,8 @@ FastUniformBits<UIntType>::Generate(URBG& g, // NOLINT(runtime/references)
result_type r = static_cast<result_type>(g() - kMin);
for (size_t n = 1; n < kIters; ++n) {
- r = (r << kShift) + static_cast<result_type>(g() - kMin);
+ r = static_cast<result_type>(r << kShift) +
+ static_cast<result_type>(g() - kMin);
}
return r;
}
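
For context on the fast_uniform_bits.h change: when result_type is narrower than int, `r << kShift` is computed in int after integer promotion, so the extra cast makes the narrowing back to result_type explicit. A minimal standalone sketch of the same accumulation pattern (the names and the toy generator are illustrative, not the Abseil implementation):

    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <random>

    // Illustrative sketch: accumulate the output of an 8-bit source into a
    // wider result, `shift` bits at a time. With a narrow ResultType the
    // expression `r << shift` is performed in int after integer promotion,
    // so the cast makes the conversion back to ResultType explicit.
    template <typename ResultType>
    ResultType GenerateBits(std::minstd_rand& g, size_t iters, size_t shift) {
      ResultType r = static_cast<ResultType>(g() & 0xff);
      for (size_t n = 1; n < iters; ++n) {
        r = static_cast<ResultType>(r << shift) +
            static_cast<ResultType>(g() & 0xff);
      }
      return r;
    }

    int main() {
      std::minstd_rand g(1);
      std::cout << GenerateBits<uint16_t>(g, 2, 8) << "\n";
    }
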
diff --git a/absl/random/internal/nonsecure_base.h b/absl/random/internal/nonsecure_base.h
index c7d7fa4b..c3b80335 100644
--- a/absl/random/internal/nonsecure_base.h
+++ b/absl/random/internal/nonsecure_base.h
@@ -44,7 +44,7 @@ class RandenPoolSeedSeq {
// Generate random unsigned values directly into the buffer.
template <typename Contiguous>
void generate_impl(ContiguousTag, Contiguous begin, Contiguous end) {
- const size_t n = std::distance(begin, end);
+ const size_t n = static_cast<size_t>(std::distance(begin, end));
auto* a = &(*begin);
RandenPool<uint8_t>::Fill(
absl::MakeSpan(reinterpret_cast<uint8_t*>(a), sizeof(*a) * n));
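
The cast here reflects that std::distance returns the iterator's signed difference_type (ptrdiff_t for pointers and contiguous iterators), while the byte count handed to MakeSpan wants size_t. A small sketch of the same conversion, outside Abseil:

    #include <cstddef>
    #include <iostream>
    #include <iterator>
    #include <vector>

    int main() {
      std::vector<int> v(16);
      // std::distance returns a signed difference_type, so storing it in
      // size_t is a sign conversion; the explicit cast mirrors the change
      // above and documents that the range is known to be non-negative.
      const size_t n = static_cast<size_t>(std::distance(v.begin(), v.end()));
      std::cout << n << "\n";  // 16
    }
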
diff --git a/absl/random/internal/pcg_engine.h b/absl/random/internal/pcg_engine.h
index 4ab44c94..e1f4ef33 100644
--- a/absl/random/internal/pcg_engine.h
+++ b/absl/random/internal/pcg_engine.h
@@ -221,47 +221,26 @@ class pcg_engine {
template <uint64_t kMultA, uint64_t kMultB, uint64_t kIncA, uint64_t kIncB>
class pcg128_params {
public:
-#if ABSL_HAVE_INTRINSIC_INT128
- using state_type = __uint128_t;
- static inline constexpr state_type make_u128(uint64_t a, uint64_t b) {
- return (static_cast<__uint128_t>(a) << 64) | b;
- }
-#else
using state_type = absl::uint128;
- static inline constexpr state_type make_u128(uint64_t a, uint64_t b) {
- return absl::MakeUint128(a, b);
- }
-#endif
-
static inline constexpr state_type multiplier() {
- return make_u128(kMultA, kMultB);
+ return absl::MakeUint128(kMultA, kMultB);
}
static inline constexpr state_type increment() {
- return make_u128(kIncA, kIncB);
+ return absl::MakeUint128(kIncA, kIncB);
}
};
// Implementation of the PCG xsl_rr_128_64 128-bit mixing function, which
// accepts an input of state_type and mixes it into an output of result_type.
struct pcg_xsl_rr_128_64 {
-#if ABSL_HAVE_INTRINSIC_INT128
- using state_type = __uint128_t;
-#else
using state_type = absl::uint128;
-#endif
using result_type = uint64_t;
inline uint64_t operator()(state_type state) {
// This is equivalent to the xsl_rr_128_64 mixing function.
-#if ABSL_HAVE_INTRINSIC_INT128
uint64_t rotate = static_cast<uint64_t>(state >> 122u);
state ^= state >> 64;
uint64_t s = static_cast<uint64_t>(state);
-#else
- uint64_t h = Uint128High64(state);
- uint64_t rotate = h >> 58u;
- uint64_t s = Uint128Low64(state) ^ h;
-#endif
return rotr(s, static_cast<int>(rotate));
}
};
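
The pcg_engine.h hunk drops the __uint128_t branch and keeps only the absl::uint128 form, which supports the same shifts, xor, and narrowing to uint64_t. For reference, a standalone sketch of the xsl_rr_128_64 output function on a (hi, lo) pair of 64-bit words, showing why the removed Uint128High64/Uint128Low64 branch computed the same value as the shift/xor form kept above (not Abseil code):

    #include <cstdint>
    #include <iostream>

    // Rotate right that stays well defined for a shift of 0.
    static uint64_t rotr64(uint64_t v, int shift) {
      return (v >> shift) | (v << ((64 - shift) & 63));
    }

    // xsl_rr_128_64 on a 128-bit state split into two 64-bit halves:
    // the rotate count is the top 6 bits of the state (state >> 122),
    // and the rotated word is the xor of the two halves, i.e. the low
    // 64 bits of (state ^ (state >> 64)).
    static uint64_t xsl_rr_128_64(uint64_t hi, uint64_t lo) {
      uint64_t rotate = hi >> 58;
      uint64_t s = lo ^ hi;
      return rotr64(s, static_cast<int>(rotate));
    }

    int main() {
      std::cout << std::hex
                << xsl_rr_128_64(0x0123456789abcdefULL, 0xfedcba9876543210ULL)
                << "\n";
    }
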
diff --git a/absl/random/internal/pool_urbg.cc b/absl/random/internal/pool_urbg.cc
index 725100a4..5aefa7d9 100644
--- a/absl/random/internal/pool_urbg.cc
+++ b/absl/random/internal/pool_urbg.cc
@@ -131,7 +131,7 @@ void RandenPoolEntry::Fill(uint8_t* out, size_t bytes) {
}
// Number of pooled urbg entries.
-static constexpr int kPoolSize = 8;
+static constexpr size_t kPoolSize = 8;
// Shared pool entries.
static absl::once_flag pool_once;
@@ -147,15 +147,15 @@ ABSL_CACHELINE_ALIGNED static RandenPoolEntry* shared_pools[kPoolSize];
// on subsequent runs the order within the same program may be significantly
// different. However, as other thread IDs are not assigned sequentially,
// this is not expected to matter.
-int GetPoolID() {
+size_t GetPoolID() {
static_assert(kPoolSize >= 1,
"At least one urbg instance is required for PoolURBG");
- ABSL_CONST_INIT static std::atomic<int64_t> sequence{0};
+ ABSL_CONST_INIT static std::atomic<uint64_t> sequence{0};
#ifdef ABSL_HAVE_THREAD_LOCAL
- static thread_local int my_pool_id = -1;
- if (ABSL_PREDICT_FALSE(my_pool_id < 0)) {
+ static thread_local size_t my_pool_id = kPoolSize;
+ if (ABSL_PREDICT_FALSE(my_pool_id == kPoolSize)) {
my_pool_id = (sequence++ % kPoolSize);
}
return my_pool_id;
@@ -171,8 +171,8 @@ int GetPoolID() {
// Store the value in the pthread_{get/set}specific. However an uninitialized
// value is 0, so add +1 to distinguish from the null value.
- intptr_t my_pool_id =
- reinterpret_cast<intptr_t>(pthread_getspecific(tid_key));
+ uintptr_t my_pool_id =
+ reinterpret_cast<uintptr_t>(pthread_getspecific(tid_key));
if (ABSL_PREDICT_FALSE(my_pool_id == 0)) {
// No allocated ID, allocate the next value, cache it, and return.
my_pool_id = (sequence++ % kPoolSize) + 1;
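
Both GetPoolID paths implement the same idea: a global atomic counter hands out pool slots round-robin modulo kPoolSize, and each thread caches its slot (in a thread_local, or in pthread-specific storage offset by +1 so 0 can mean "unset"). With the switch to an unsigned id, the thread_local path marks "unassigned" with the out-of-range value kPoolSize instead of -1. A minimal sketch of the thread_local variant, with hypothetical names:

    #include <atomic>
    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <thread>
    #include <vector>

    constexpr size_t kPoolSize = 8;
    std::atomic<uint64_t> sequence{0};

    // Lazily assigns each calling thread a pool slot, round-robin.
    // kPoolSize itself serves as the "not yet assigned" sentinel now that
    // the id is unsigned.
    size_t GetPoolId() {
      static thread_local size_t my_pool_id = kPoolSize;
      if (my_pool_id == kPoolSize) {
        my_pool_id = static_cast<size_t>(sequence++ % kPoolSize);
      }
      return my_pool_id;
    }

    int main() {
      std::vector<size_t> ids(4);
      std::vector<std::thread> threads;
      for (size_t i = 0; i < ids.size(); ++i) {
        threads.emplace_back([&ids, i] { ids[i] = GetPoolId(); });
      }
      for (auto& t : threads) t.join();
      for (size_t id : ids) std::cout << id << " ";
      std::cout << "\n";
    }
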
@@ -194,7 +194,7 @@ RandenPoolEntry* PoolAlignedAlloc() {
// Not all the platforms that we build for have std::aligned_alloc, however
// since we never free these objects, we can over allocate and munge the
// pointers to the correct alignment.
- intptr_t x = reinterpret_cast<intptr_t>(
+ uintptr_t x = reinterpret_cast<uintptr_t>(
new char[sizeof(RandenPoolEntry) + kAlignment]);
auto y = x % kAlignment;
void* aligned = reinterpret_cast<void*>(y == 0 ? x : (x + kAlignment - y));
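
The uintptr_t change sits inside the usual "over-allocate and round up" trick: allocate kAlignment extra bytes and bump the address to the next multiple of kAlignment, with the modulo now done on an unsigned type. A sketch of that trick in isolation (illustrative helper name; as in the original, the raw allocation is never freed):

    #include <cstddef>
    #include <cstdint>
    #include <iostream>

    // Over-allocate by `alignment` bytes, then round the address up to the
    // next multiple of `alignment`. The arithmetic is done on uintptr_t,
    // matching the change above.
    void* AlignedAllocNeverFreed(size_t size, size_t alignment) {
      uintptr_t x = reinterpret_cast<uintptr_t>(new char[size + alignment]);
      uintptr_t y = x % alignment;
      return reinterpret_cast<void*>(y == 0 ? x : (x + alignment - y));
    }

    int main() {
      void* p = AlignedAllocNeverFreed(256, 64);
      std::cout << (reinterpret_cast<uintptr_t>(p) % 64) << "\n";  // 0
    }
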
@@ -215,7 +215,7 @@ void InitPoolURBG() {
absl::MakeSpan(seed_material))) {
random_internal::ThrowSeedGenException();
}
- for (int i = 0; i < kPoolSize; i++) {
+ for (size_t i = 0; i < kPoolSize; i++) {
shared_pools[i] = PoolAlignedAlloc();
shared_pools[i]->Init(
absl::MakeSpan(&seed_material[i * kSeedSize], kSeedSize));
diff --git a/absl/random/internal/seed_material.cc b/absl/random/internal/seed_material.cc
index c03cad85..1041302b 100644
--- a/absl/random/internal/seed_material.cc
+++ b/absl/random/internal/seed_material.cc
@@ -173,12 +173,12 @@ bool ReadSeedMaterialFromDevURandom(absl::Span<uint32_t> values) {
}
while (success && buffer_size > 0) {
- int bytes_read = read(dev_urandom, buffer, buffer_size);
+ ssize_t bytes_read = read(dev_urandom, buffer, buffer_size);
int read_error = errno;
success = (bytes_read > 0);
if (success) {
buffer += bytes_read;
- buffer_size -= bytes_read;
+ buffer_size -= static_cast<size_t>(bytes_read);
} else if (bytes_read == -1 && read_error == EINTR) {
success = true; // Need to try again.
}
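
The seed_material.cc change stores read()'s result in its actual return type, ssize_t, and only folds the known non-negative byte count back into the size_t remainder; a negative return with errno == EINTR simply means retry. A standalone sketch of that loop, with hypothetical names, reading from /dev/urandom:

    #include <cerrno>
    #include <cstddef>
    #include <cstdio>
    #include <fcntl.h>
    #include <unistd.h>

    // Read exactly buffer_size bytes from fd, retrying on EINTR.
    // read() returns ssize_t; only a positive count is subtracted from the
    // size_t remainder, via an explicit cast.
    bool ReadAll(int fd, unsigned char* buffer, size_t buffer_size) {
      while (buffer_size > 0) {
        ssize_t bytes_read = read(fd, buffer, buffer_size);
        if (bytes_read > 0) {
          buffer += bytes_read;
          buffer_size -= static_cast<size_t>(bytes_read);
        } else if (bytes_read == -1 && errno == EINTR) {
          continue;  // interrupted before any data arrived; try again
        } else {
          return false;  // EOF or a real error
        }
      }
      return true;
    }

    int main() {
      unsigned char buf[16];
      int fd = open("/dev/urandom", O_RDONLY);
      if (fd < 0) return 1;
      bool ok = ReadAll(fd, buf, sizeof(buf));
      close(fd);
      std::printf("%s\n", ok ? "read 16 bytes" : "failed");
    }
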