path: root/sys/sys/vmmeter.h
author	Jeff Roberson <jeff@FreeBSD.org>	2018-02-12 22:53:00 +0000
committer	Jeff Roberson <jeff@FreeBSD.org>	2018-02-12 22:53:00 +0000
commit	e958ad4cf3cc9e2624eba7634b972c18d5079a83 (patch)
tree	a8e1202a0bea71fdfbe01e46adc415772d0f708c /sys/sys/vmmeter.h
parent	487340b00406d7e8fc8e4963e130c3919102076b (diff)
Make v_wire_count a per-cpu counter(9) counter.  This eliminates a
significant source of cache line contention from vm_page_alloc().  Use
accessors and vm_page_unwire_noq() so that the mechanism can be easily
changed in the future.

Reviewed by:	markj
Discussed with:	kib, glebius
Tested by:	pho (earlier version)
Sponsored by:	Netflix, Dell/EMC Isilon
Differential Revision:	https://reviews.freebsd.org/D14273
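For background, a minimal sketch of the counter(9) API this commit moves
v_wire_count onto; the example_* names are hypothetical and not part of
this diff.  Each counter is backed by a per-CPU slot, so updates touch
only the current CPU's cache line instead of a single shared word.

	/*
	 * Hypothetical counter(9) usage sketch (not from this commit).
	 */
	#include <sys/types.h>
	#include <sys/systm.h>
	#include <sys/counter.h>
	#include <sys/malloc.h>

	static counter_u64_t example_count;	/* hypothetical counter */

	static void
	example_init(void)
	{

		example_count = counter_u64_alloc(M_WAITOK);
	}

	static void
	example_hot_path(void)
	{

		/* Cheap on any CPU: updates only the local per-CPU slot. */
		counter_u64_add(example_count, 1);
	}

	static uint64_t
	example_read(void)
	{

		/* Costlier than add: sums the slots of every CPU. */
		return (counter_u64_fetch(example_count));
	}

This is why a fetch is acceptable for occasional reads while updates in
vm_page_alloc() stay contention-free.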
Notes:
    svn path=/head/; revision=329187
Diffstat (limited to 'sys/sys/vmmeter.h')
-rw-r--r--	sys/sys/vmmeter.h	22
1 file changed, 21 insertions(+), 1 deletion(-)
diff --git a/sys/sys/vmmeter.h b/sys/sys/vmmeter.h
index de18d7702400..408cd862c357 100644
--- a/sys/sys/vmmeter.h
+++ b/sys/sys/vmmeter.h
@@ -125,6 +125,7 @@ struct vmmeter {
counter_u64_t v_vforkpages; /* (p) pages affected by vfork() */
counter_u64_t v_rforkpages; /* (p) pages affected by rfork() */
counter_u64_t v_kthreadpages; /* (p) ... and by kernel fork() */
+ counter_u64_t v_wire_count; /* (p) pages wired down */
#define VM_METER_NCOUNTERS \
(offsetof(struct vmmeter, v_page_size) / sizeof(counter_u64_t))
/*
@@ -139,7 +140,6 @@ struct vmmeter {
u_int v_pageout_free_min; /* (c) min pages reserved for kernel */
u_int v_interrupt_free_min; /* (c) reserved pages for int code */
u_int v_free_severe; /* (c) severe page depletion point */
- u_int v_wire_count VMMETER_ALIGNED; /* (a) pages wired down */
};
#endif /* _KERNEL || _WANT_VMMETER */
@@ -155,7 +155,27 @@ extern domainset_t vm_severe_domains;
#define VM_CNT_INC(var) VM_CNT_ADD(var, 1)
#define VM_CNT_FETCH(var) counter_u64_fetch(vm_cnt.var)
+static inline void
+vm_wire_add(int cnt)
+{
+
+ VM_CNT_ADD(v_wire_count, cnt);
+}
+
+static inline void
+vm_wire_sub(int cnt)
+{
+
+ VM_CNT_ADD(v_wire_count, -cnt);
+}
+
u_int vm_free_count(void);
+static inline u_int
+vm_wire_count(void)
+{
+
+ return (VM_CNT_FETCH(v_wire_count));
+}
/*
* Return TRUE if we are under our severe low-free-pages threshold
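For context, a hypothetical caller-side sketch; the example_* functions
are invented and not part of this commit.  Kernel code that previously
manipulated the shared v_wire_count word directly (e.g. with atomic
adds) would instead go through the new accessors, which keeps the
per-CPU mechanism hidden and easy to change later, as the commit
message notes.

	#include <sys/param.h>
	#include <sys/vmmeter.h>

	static void
	example_wire_page(void)
	{

		vm_wire_add(1);		/* bumps this CPU's slot only */
	}

	static void
	example_unwire_page(void)
	{

		vm_wire_sub(1);
	}

	static u_int
	example_wired_snapshot(void)
	{

		/* Sums every CPU's slot; a loosely coherent snapshot. */
		return (vm_wire_count());
	}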