author     Warner Losh <imp@FreeBSD.org>   2025-02-05 23:20:13 +0000
committer  Warner Losh <imp@FreeBSD.org>   2025-02-05 23:20:13 +0000
commit     48ec896efb0b78141df004eaa21288b84590c9da (patch)
tree       33799792fd95c266d472ab1ae51d50ab4f942eb3 /test/unit/prof_log.c
parent     d28d7fbede216494aa3942af042cc084fcd6098a (diff)
jemalloc: Import 5.3.0 54eaed1d8b56b1aa528be3bdd1877e59c56fa90c (vendor/jemalloc/5.3.0, vendor/jemalloc)
Import jemalloc 5.3.0. This import changes how we manage the jemalloc vendor branch (which was only just started). Starting with 5.3.0, we import a clean tree from upstream GitHub, removing all the old files that are no longer upstream or that we had kept around for one reason or another. We do this because we merge from this raw version of jemalloc into FreeBSD's contrib/jemalloc, run the autogen step, generate all the derived .h files with gmake, and finally remove most of the generated files in contrib/jemalloc using an update script.

Sponsored by: Netflix
Diffstat (limited to 'test/unit/prof_log.c')
-rw-r--r--   test/unit/prof_log.c   65
1 file changed, 34 insertions, 31 deletions
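
Most of the diff below is a mechanical rename of the test harness's assert_* checks to the expect_* spelling used in jemalloc 5.3.0. As a rough, self-contained sketch of the general distinction (hypothetical demo_* macros, not jemalloc's actual test harness), an assert-style check aborts the run on the first mismatch, while an expect-style check records the failure and lets the remaining checks run:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical failure counter; a real harness would track this per test. */
static int test_failures = 0;

/* Assert-style check: abort the whole run on the first mismatch. */
#define demo_assert_eq(a, b, msg) do {					\
	if ((a) != (b)) {						\
		fprintf(stderr, "FATAL: %s (%s:%d)\n", (msg),		\
		    __FILE__, __LINE__);				\
		abort();						\
	}								\
} while (0)

/* Expect-style check: record the mismatch and keep running later checks. */
#define demo_expect_eq(a, b, msg) do {					\
	if ((a) != (b)) {						\
		fprintf(stderr, "FAIL: %s (%s:%d)\n", (msg),		\
		    __FILE__, __LINE__);				\
		test_failures++;					\
	}								\
} while (0)

int
main(void) {
	demo_assert_eq(1 + 1, 2, "sanity check that must hold");
	demo_expect_eq(2 + 2, 5, "deliberately failing check");
	demo_expect_eq(3 * 3, 9, "still runs after the failure above");
	printf("%d failure(s) recorded\n", test_failures);
	return test_failures == 0 ? 0 : 1;
}

The real test below follows that expect-style pattern while driving the prof.log_start and prof.log_stop mallctls, so a single failed check does not hide the results of the rest of the loop.
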
diff --git a/test/unit/prof_log.c b/test/unit/prof_log.c
index 92fbd7cea5a4..5ff208e2d334 100644
--- a/test/unit/prof_log.c
+++ b/test/unit/prof_log.c
@@ -1,18 +1,19 @@
#include "test/jemalloc_test.h"
+#include "jemalloc/internal/prof_log.h"
#define N_PARAM 100
#define N_THREADS 10
-static void assert_rep() {
- assert_b_eq(prof_log_rep_check(), false, "Rep check failed");
+static void expect_rep() {
+ expect_b_eq(prof_log_rep_check(), false, "Rep check failed");
}
-static void assert_log_empty() {
- assert_zu_eq(prof_log_bt_count(), 0,
+static void expect_log_empty() {
+ expect_zu_eq(prof_log_bt_count(), 0,
"The log has backtraces; it isn't empty");
- assert_zu_eq(prof_log_thr_count(), 0,
+ expect_zu_eq(prof_log_thr_count(), 0,
"The log has threads; it isn't empty");
- assert_zu_eq(prof_log_alloc_count(), 0,
+ expect_zu_eq(prof_log_alloc_count(), 0,
"The log has allocations; it isn't empty");
}
@@ -34,22 +35,22 @@ TEST_BEGIN(test_prof_log_many_logs) {
test_skip_if(!config_prof);
for (i = 0; i < N_PARAM; i++) {
- assert_b_eq(prof_log_is_logging(), false,
+ expect_b_eq(prof_log_is_logging(), false,
"Logging shouldn't have started yet");
- assert_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
+ expect_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl failure when starting logging");
- assert_b_eq(prof_log_is_logging(), true,
+ expect_b_eq(prof_log_is_logging(), true,
"Logging should be started by now");
- assert_log_empty();
- assert_rep();
+ expect_log_empty();
+ expect_rep();
f();
- assert_zu_eq(prof_log_thr_count(), 1, "Wrong thread count");
- assert_rep();
- assert_b_eq(prof_log_is_logging(), true,
+ expect_zu_eq(prof_log_thr_count(), 1, "Wrong thread count");
+ expect_rep();
+ expect_b_eq(prof_log_is_logging(), true,
"Logging should still be on");
- assert_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
+ expect_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl failure when stopping logging");
- assert_b_eq(prof_log_is_logging(), false,
+ expect_b_eq(prof_log_is_logging(), false,
"Logging should have turned off");
}
}
@@ -61,7 +62,7 @@ static void *f_thread(void *unused) {
int i;
for (i = 0; i < N_PARAM; i++) {
void *p = malloc(100);
- memset(p, 100, sizeof(char));
+ memset(p, 100, 1);
free(p);
}
@@ -73,7 +74,7 @@ TEST_BEGIN(test_prof_log_many_threads) {
test_skip_if(!config_prof);
int i;
- assert_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
+ expect_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl failure when starting logging");
for (i = 0; i < N_THREADS; i++) {
thd_create(&thr_buf[i], &f_thread, NULL);
@@ -82,10 +83,10 @@ TEST_BEGIN(test_prof_log_many_threads) {
for (i = 0; i < N_THREADS; i++) {
thd_join(thr_buf[i], NULL);
}
- assert_zu_eq(prof_log_thr_count(), N_THREADS,
+ expect_zu_eq(prof_log_thr_count(), N_THREADS,
"Wrong number of thread entries");
- assert_rep();
- assert_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
+ expect_rep();
+ expect_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl failure when stopping logging");
}
TEST_END
@@ -110,19 +111,19 @@ TEST_BEGIN(test_prof_log_many_traces) {
test_skip_if(!config_prof);
- assert_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
+ expect_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl failure when starting logging");
int i;
- assert_rep();
- assert_log_empty();
+ expect_rep();
+ expect_log_empty();
for (i = 0; i < N_PARAM; i++) {
- assert_rep();
+ expect_rep();
f1();
- assert_rep();
+ expect_rep();
f2();
- assert_rep();
+ expect_rep();
f3();
- assert_rep();
+ expect_rep();
}
/*
* There should be 8 total backtraces: two for malloc/free in f1(), two
@@ -131,16 +132,18 @@ TEST_BEGIN(test_prof_log_many_traces) {
* optimizations such as loop unrolling might generate more call sites.
* So >= 8 traces are expected.
*/
- assert_zu_ge(prof_log_bt_count(), 8,
+ expect_zu_ge(prof_log_bt_count(), 8,
"Expect at least 8 backtraces given sample workload");
- assert_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
+ expect_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl failure when stopping logging");
}
TEST_END
int
main(void) {
- prof_log_dummy_set(true);
+ if (config_prof) {
+ prof_log_dummy_set(true);
+ }
return test_no_reentrancy(
test_prof_log_many_logs,
test_prof_log_many_traces,