author | Warner Losh <imp@FreeBSD.org> | 2025-02-05 23:20:13 +0000
---|---|---
committer | Warner Losh <imp@FreeBSD.org> | 2025-02-05 23:20:13 +0000
commit | 48ec896efb0b78141df004eaa21288b84590c9da | (patch)
tree | 33799792fd95c266d472ab1ae51d50ab4f942eb3 | /test/unit/prng.c
parent | d28d7fbede216494aa3942af042cc084fcd6098a | (diff)
jemalloc: Import 5.3.0 54eaed1d8b56b1aa528be3bdd1877e59c56fa90c (vendor/jemalloc/5.3.0, vendor/jemalloc)
Import jemalloc 5.3.0.
This import changes how we manage the jemalloc vendor branch (which was
only just started anyway). Starting with 5.3.0, we import a clean tree from
the upstream GitHub repository, removing all the old files that are no
longer present upstream or that we had kept around for some reason. We do
this because we merge from this raw version of jemalloc into the FreeBSD
contrib/jemalloc, then run the autogen step, generate all the .h files with
gmake, and finally remove most of the generated files in contrib/jemalloc
using an update script.
Sponsored by: Netflix
Diffstat (limited to 'test/unit/prng.c')
-rw-r--r-- | test/unit/prng.c | 226 |
1 file changed, 89 insertions, 137 deletions
diff --git a/test/unit/prng.c b/test/unit/prng.c
index b5795c2f44c1..a6d9b014a675 100644
--- a/test/unit/prng.c
+++ b/test/unit/prng.c
@@ -1,44 +1,44 @@
 #include "test/jemalloc_test.h"
 
-static void
-test_prng_lg_range_u32(bool atomic) {
-	atomic_u32_t sa, sb;
+TEST_BEGIN(test_prng_lg_range_u32) {
+	uint32_t sa, sb;
 	uint32_t ra, rb;
 	unsigned lg_range;
 
-	atomic_store_u32(&sa, 42, ATOMIC_RELAXED);
-	ra = prng_lg_range_u32(&sa, 32, atomic);
-	atomic_store_u32(&sa, 42, ATOMIC_RELAXED);
-	rb = prng_lg_range_u32(&sa, 32, atomic);
-	assert_u32_eq(ra, rb,
+	sa = 42;
+	ra = prng_lg_range_u32(&sa, 32);
+	sa = 42;
+	rb = prng_lg_range_u32(&sa, 32);
+	expect_u32_eq(ra, rb,
 	    "Repeated generation should produce repeated results");
 
-	atomic_store_u32(&sb, 42, ATOMIC_RELAXED);
-	rb = prng_lg_range_u32(&sb, 32, atomic);
-	assert_u32_eq(ra, rb,
+	sb = 42;
+	rb = prng_lg_range_u32(&sb, 32);
+	expect_u32_eq(ra, rb,
 	    "Equivalent generation should produce equivalent results");
 
-	atomic_store_u32(&sa, 42, ATOMIC_RELAXED);
-	ra = prng_lg_range_u32(&sa, 32, atomic);
-	rb = prng_lg_range_u32(&sa, 32, atomic);
-	assert_u32_ne(ra, rb,
+	sa = 42;
+	ra = prng_lg_range_u32(&sa, 32);
+	rb = prng_lg_range_u32(&sa, 32);
+	expect_u32_ne(ra, rb,
 	    "Full-width results must not immediately repeat");
 
-	atomic_store_u32(&sa, 42, ATOMIC_RELAXED);
-	ra = prng_lg_range_u32(&sa, 32, atomic);
+	sa = 42;
+	ra = prng_lg_range_u32(&sa, 32);
 	for (lg_range = 31; lg_range > 0; lg_range--) {
-		atomic_store_u32(&sb, 42, ATOMIC_RELAXED);
-		rb = prng_lg_range_u32(&sb, lg_range, atomic);
-		assert_u32_eq((rb & (UINT32_C(0xffffffff) << lg_range)),
+		sb = 42;
+		rb = prng_lg_range_u32(&sb, lg_range);
+		expect_u32_eq((rb & (UINT32_C(0xffffffff) << lg_range)),
 		    0, "High order bits should be 0, lg_range=%u", lg_range);
-		assert_u32_eq(rb, (ra >> (32 - lg_range)),
+		expect_u32_eq(rb, (ra >> (32 - lg_range)),
 		    "Expected high order bits of full-width result, "
 		    "lg_range=%u", lg_range);
 	}
+
 }
+TEST_END
 
-static void
-test_prng_lg_range_u64(void) {
+TEST_BEGIN(test_prng_lg_range_u64) {
 	uint64_t sa, sb, ra, rb;
 	unsigned lg_range;
 
@@ -46,18 +46,18 @@ test_prng_lg_range_u64(void) {
 	ra = prng_lg_range_u64(&sa, 64);
 	sa = 42;
 	rb = prng_lg_range_u64(&sa, 64);
-	assert_u64_eq(ra, rb,
+	expect_u64_eq(ra, rb,
 	    "Repeated generation should produce repeated results");
 
 	sb = 42;
 	rb = prng_lg_range_u64(&sb, 64);
-	assert_u64_eq(ra, rb,
+	expect_u64_eq(ra, rb,
 	    "Equivalent generation should produce equivalent results");
 
 	sa = 42;
 	ra = prng_lg_range_u64(&sa, 64);
 	rb = prng_lg_range_u64(&sa, 64);
-	assert_u64_ne(ra, rb,
+	expect_u64_ne(ra, rb,
 	    "Full-width results must not immediately repeat");
 
 	sa = 42;
@@ -65,173 +65,125 @@ test_prng_lg_range_u64(void) {
 	for (lg_range = 63; lg_range > 0; lg_range--) {
 		sb = 42;
 		rb = prng_lg_range_u64(&sb, lg_range);
-		assert_u64_eq((rb & (UINT64_C(0xffffffffffffffff) << lg_range)),
+		expect_u64_eq((rb & (UINT64_C(0xffffffffffffffff) << lg_range)),
 		    0, "High order bits should be 0, lg_range=%u", lg_range);
-		assert_u64_eq(rb, (ra >> (64 - lg_range)),
+		expect_u64_eq(rb, (ra >> (64 - lg_range)),
 		    "Expected high order bits of full-width result, "
 		    "lg_range=%u", lg_range);
 	}
 }
+TEST_END
 
-static void
-test_prng_lg_range_zu(bool atomic) {
-	atomic_zu_t sa, sb;
+TEST_BEGIN(test_prng_lg_range_zu) {
+	size_t sa, sb;
 	size_t ra, rb;
 	unsigned lg_range;
 
-	atomic_store_zu(&sa, 42, ATOMIC_RELAXED);
-	ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
-	atomic_store_zu(&sa, 42, ATOMIC_RELAXED);
-	rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
-	assert_zu_eq(ra, rb,
+	sa = 42;
+	ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR));
+	sa = 42;
+	rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR));
+	expect_zu_eq(ra, rb,
 	    "Repeated generation should produce repeated results");
 
-	atomic_store_zu(&sb, 42, ATOMIC_RELAXED);
-	rb = prng_lg_range_zu(&sb, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
-	assert_zu_eq(ra, rb,
+	sb = 42;
+	rb = prng_lg_range_zu(&sb, ZU(1) << (3 + LG_SIZEOF_PTR));
+	expect_zu_eq(ra, rb,
 	    "Equivalent generation should produce equivalent results");
 
-	atomic_store_zu(&sa, 42, ATOMIC_RELAXED);
-	ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
-	rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
-	assert_zu_ne(ra, rb,
+	sa = 42;
+	ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR));
+	rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR));
+	expect_zu_ne(ra, rb,
 	    "Full-width results must not immediately repeat");
 
-	atomic_store_zu(&sa, 42, ATOMIC_RELAXED);
-	ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
+	sa = 42;
+	ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR));
 	for (lg_range = (ZU(1) << (3 + LG_SIZEOF_PTR)) - 1; lg_range > 0;
 	    lg_range--) {
-		atomic_store_zu(&sb, 42, ATOMIC_RELAXED);
-		rb = prng_lg_range_zu(&sb, lg_range, atomic);
-		assert_zu_eq((rb & (SIZE_T_MAX << lg_range)),
+		sb = 42;
+		rb = prng_lg_range_zu(&sb, lg_range);
+		expect_zu_eq((rb & (SIZE_T_MAX << lg_range)),
 		    0, "High order bits should be 0, lg_range=%u", lg_range);
-		assert_zu_eq(rb, (ra >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) -
+		expect_zu_eq(rb, (ra >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) -
 		    lg_range)), "Expected high order bits of full-width "
 		    "result, lg_range=%u", lg_range);
 	}
-}
-
-TEST_BEGIN(test_prng_lg_range_u32_nonatomic) {
-	test_prng_lg_range_u32(false);
-}
-TEST_END
-
-TEST_BEGIN(test_prng_lg_range_u32_atomic) {
-	test_prng_lg_range_u32(true);
-}
-TEST_END
-
-TEST_BEGIN(test_prng_lg_range_u64_nonatomic) {
-	test_prng_lg_range_u64();
-}
-TEST_END
-TEST_BEGIN(test_prng_lg_range_zu_nonatomic) {
-	test_prng_lg_range_zu(false);
 }
 TEST_END
 
-TEST_BEGIN(test_prng_lg_range_zu_atomic) {
-	test_prng_lg_range_zu(true);
-}
-TEST_END
-
-static void
-test_prng_range_u32(bool atomic) {
+TEST_BEGIN(test_prng_range_u32) {
 	uint32_t range;
-#define MAX_RANGE 10000000
-#define RANGE_STEP 97
-#define NREPS 10
-	for (range = 2; range < MAX_RANGE; range += RANGE_STEP) {
-		atomic_u32_t s;
+	const uint32_t max_range = 10000000;
+	const uint32_t range_step = 97;
+	const unsigned nreps = 10;
+
+	for (range = 2; range < max_range; range += range_step) {
+		uint32_t s;
 		unsigned rep;
 
-		atomic_store_u32(&s, range, ATOMIC_RELAXED);
-		for (rep = 0; rep < NREPS; rep++) {
-			uint32_t r = prng_range_u32(&s, range, atomic);
+		s = range;
+		for (rep = 0; rep < nreps; rep++) {
+			uint32_t r = prng_range_u32(&s, range);
 
-			assert_u32_lt(r, range, "Out of range");
+			expect_u32_lt(r, range, "Out of range");
 		}
 	}
 }
+TEST_END
 
-static void
-test_prng_range_u64(void) {
+TEST_BEGIN(test_prng_range_u64) {
 	uint64_t range;
-#define MAX_RANGE 10000000
-#define RANGE_STEP 97
-#define NREPS 10
-	for (range = 2; range < MAX_RANGE; range += RANGE_STEP) {
+	const uint64_t max_range = 10000000;
+	const uint64_t range_step = 97;
+	const unsigned nreps = 10;
+
+	for (range = 2; range < max_range; range += range_step) {
 		uint64_t s;
 		unsigned rep;
 
 		s = range;
-		for (rep = 0; rep < NREPS; rep++) {
+		for (rep = 0; rep < nreps; rep++) {
 			uint64_t r = prng_range_u64(&s, range);
 
-			assert_u64_lt(r, range, "Out of range");
+			expect_u64_lt(r, range, "Out of range");
 		}
 	}
 }
+TEST_END
 
-static void
-test_prng_range_zu(bool atomic) {
+TEST_BEGIN(test_prng_range_zu) {
 	size_t range;
-#define MAX_RANGE 10000000
-#define RANGE_STEP 97
-#define NREPS 10
-	for (range = 2; range < MAX_RANGE; range += RANGE_STEP) {
-		atomic_zu_t s;
+	const size_t max_range = 10000000;
+	const size_t range_step = 97;
+	const unsigned nreps = 10;
+
+
+	for (range = 2; range < max_range; range += range_step) {
+		size_t s;
 		unsigned rep;
 
-		atomic_store_zu(&s, range, ATOMIC_RELAXED);
-		for (rep = 0; rep < NREPS; rep++) {
-			size_t r = prng_range_zu(&s, range, atomic);
+		s = range;
+		for (rep = 0; rep < nreps; rep++) {
+			size_t r = prng_range_zu(&s, range);
 
-			assert_zu_lt(r, range, "Out of range");
+			expect_zu_lt(r, range, "Out of range");
 		}
 	}
 }
-
-TEST_BEGIN(test_prng_range_u32_nonatomic) {
-	test_prng_range_u32(false);
-}
-TEST_END
-
-TEST_BEGIN(test_prng_range_u32_atomic) {
-	test_prng_range_u32(true);
-}
-TEST_END
-
-TEST_BEGIN(test_prng_range_u64_nonatomic) {
-	test_prng_range_u64();
-}
-TEST_END
-
-TEST_BEGIN(test_prng_range_zu_nonatomic) {
-	test_prng_range_zu(false);
-}
-TEST_END
-
-TEST_BEGIN(test_prng_range_zu_atomic) {
-	test_prng_range_zu(true);
-}
 TEST_END
 
 int
 main(void) {
-	return test(
-	    test_prng_lg_range_u32_nonatomic,
-	    test_prng_lg_range_u32_atomic,
-	    test_prng_lg_range_u64_nonatomic,
-	    test_prng_lg_range_zu_nonatomic,
-	    test_prng_lg_range_zu_atomic,
-	    test_prng_range_u32_nonatomic,
-	    test_prng_range_u32_atomic,
-	    test_prng_range_u64_nonatomic,
-	    test_prng_range_zu_nonatomic,
-	    test_prng_range_zu_atomic);
+	return test_no_reentrancy(
+	    test_prng_lg_range_u32,
+	    test_prng_lg_range_u64,
+	    test_prng_lg_range_zu,
+	    test_prng_range_u32,
+	    test_prng_range_u64,
+	    test_prng_range_zu);
 }