author	Mateusz Guzik <mjg@FreeBSD.org>	2017-09-10 19:00:38 +0000
committer	Mateusz Guzik <mjg@FreeBSD.org>	2017-09-10 19:00:38 +0000
commit	1c0b34417b0290ab82fabbbbfe354883a08227fa (patch)
tree	33adc98982024174c37e051bf56974d08bb89639 /sys/sys/vmmeter.h
parent	e1275c6805dbbe2d5f63b72c39d01559c303fe92 (diff)
Move vmmeter atomic counters into dedicated cache lines
Prior to the change they were subject to extreme false sharing. In
particular this change shaves about 3 seconds of real time off a -j 80
buildkernel.

Reviewed by:	alc, markj
Differential Revision:	https://reviews.freebsd.org/D12281
Notes:
	svn path=/head/; revision=323393
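The idea behind the commit can be shown in a minimal standalone C sketch.
This is illustrative code only, not FreeBSD source: it assumes a 64-byte
cache line and spells out the GCC/Clang attribute that the kernel's
__aligned() macro (from <sys/cdefs.h>) wraps. Hot atomic counters that sit
in adjacent fields normally share one cache line, so concurrent updates
from different CPUs bounce that line between cores even though no field is
logically shared; aligning each counter to its own line removes the
contention.

	#include <stdatomic.h>

	#define CACHE_LINE_SIZE	64	/* assumed; the kernel knows the real value */
	#define VMMETER_ALIGNED	__attribute__((aligned(CACHE_LINE_SIZE)))

	/*
	 * Packed layout: both hot counters typically land in the same
	 * cache line, so two CPUs updating them force the line to bounce
	 * between cores (false sharing).
	 */
	struct counters_packed {
		atomic_uint a;
		atomic_uint b;
	};

	/*
	 * Aligned layout, as in the diff below: each counter starts on
	 * its own cache line, so updates to different counters do not
	 * interfere.
	 */
	struct counters_aligned {
		atomic_uint a VMMETER_ALIGNED;
		atomic_uint b VMMETER_ALIGNED;
	};

The diff below applies exactly this layout change to the hottest vmmeter
counters.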
Diffstat (limited to 'sys/sys/vmmeter.h')
-rw-r--r--	sys/sys/vmmeter.h	15
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/sys/sys/vmmeter.h b/sys/sys/vmmeter.h
index d769258360d8..bc536f71e7b5 100644
--- a/sys/sys/vmmeter.h
+++ b/sys/sys/vmmeter.h
@@ -60,6 +60,12 @@ struct vmtotal {
 #if defined(_KERNEL) || defined(_WANT_VMMETER)
 #include <sys/counter.h>
 
+#ifdef _KERNEL
+#define	VMMETER_ALIGNED	__aligned(CACHE_LINE_SIZE)
+#else
+#define	VMMETER_ALIGNED
+#endif
+
 /*
  * System wide statistics counters.
  * Locking:
@@ -126,14 +132,15 @@ struct vmmeter {
 	u_int v_free_target;	/* (c) pages desired free */
 	u_int v_free_min;	/* (c) pages desired free */
 	u_int v_free_count;	/* (f) pages free */
-	u_int v_wire_count;	/* (a) pages wired down */
-	u_int v_active_count;	/* (q) pages active */
 	u_int v_inactive_target; /* (c) pages desired inactive */
-	u_int v_inactive_count;	/* (q) pages inactive */
-	u_int v_laundry_count;	/* (q) pages eligible for laundering */
 	u_int v_pageout_free_min;   /* (c) min pages reserved for kernel */
 	u_int v_interrupt_free_min; /* (c) reserved pages for int code */
 	u_int v_free_severe;	/* (c) severe page depletion point */
+	u_int v_wire_count VMMETER_ALIGNED;	/* (a) pages wired down */
+	u_int v_active_count VMMETER_ALIGNED;	/* (a) pages active */
+	u_int v_inactive_count VMMETER_ALIGNED;	/* (a) pages inactive */
+	u_int v_laundry_count VMMETER_ALIGNED;	/* (a) pages eligible for
+						   laundering */
 };
 
 #endif /* _KERNEL || _WANT_VMMETER */
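For readers who want to observe the effect outside the kernel, the
following self-contained userspace sketch is a hypothetical illustration
(all names are invented; it assumes a 64-byte cache line, C11 atomics, and
POSIX threads, with relaxed atomic increments standing in for the kernel's
counter updates). Two threads each increment their own counter; the packed
pair shares a cache line, the aligned pair does not.

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <time.h>

	#define CACHE_LINE_SIZE	64
	#define ALIGNED		__attribute__((aligned(CACHE_LINE_SIZE)))
	#define ITERS		50000000u

	static struct {
		atomic_uint packed_a;		/* shares a cache line with packed_b */
		atomic_uint packed_b;
		atomic_uint aligned_a ALIGNED;	/* each starts its own cache line */
		atomic_uint aligned_b ALIGNED;
	} c;

	/* Hammer one counter with relaxed atomic increments. */
	static void *
	bump(void *arg)
	{
		atomic_uint *p = arg;
		unsigned i;

		for (i = 0; i < ITERS; i++)
			atomic_fetch_add_explicit(p, 1, memory_order_relaxed);
		return (NULL);
	}

	/* Run two threads, one per counter, and report the elapsed time. */
	static void
	run(const char *name, atomic_uint *a, atomic_uint *b)
	{
		struct timespec t0, t1;
		pthread_t ta, tb;

		clock_gettime(CLOCK_MONOTONIC, &t0);
		pthread_create(&ta, NULL, bump, a);
		pthread_create(&tb, NULL, bump, b);
		pthread_join(ta, NULL);
		pthread_join(tb, NULL);
		clock_gettime(CLOCK_MONOTONIC, &t1);
		printf("%-28s %.3f s\n", name,
		    (t1.tv_sec - t0.tv_sec) + (t1.tv_nsec - t0.tv_nsec) / 1e9);
	}

	int
	main(void)
	{
		run("packed (false sharing):", &c.packed_a, &c.packed_b);
		run("aligned (dedicated lines):", &c.aligned_a, &c.aligned_b);
		return (0);
	}

Build with something like "cc -O2 -pthread demo.c". The packed phase is
typically severalfold slower than the aligned one; the exact ratio depends
on the CPU's cache-coherency behavior, but it is the same effect that made
the pre-change vmmeter layout cost about 3 seconds of -j 80 buildkernel
time.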