aboutsummaryrefslogtreecommitdiff
path: root/sys/sys/vmmeter.h
diff options
context:
space:
mode:
author     Gleb Smirnoff <glebius@FreeBSD.org>    2017-04-17 17:34:47 +0000
committer  Gleb Smirnoff <glebius@FreeBSD.org>    2017-04-17 17:34:47 +0000
commit     83c9dea1bac40c7c7cbde4ccb3d747134311ab5a (patch)
tree       7679e220e254a60031cd36e1421cb9c844a72521 /sys/sys/vmmeter.h
parent     21d5d37ba4c0131d6c141695366e266e32cc3bc1 (diff)
download   src-83c9dea1bac40c7c7cbde4ccb3d747134311ab5a.tar.gz
           src-83c9dea1bac40c7c7cbde4ccb3d747134311ab5a.zip
- Remove 'struct vmmeter' from 'struct pcpu', leaving only global vmmeter
  in place. To do per-cpu stats, convert all fields that previously were
  maintained in the vmmeters that sit in pcpus to counter(9).
- Since some vmmeter stats may be touched at very early stages of boot,
  before we have set up UMA and we can do counter_u64_alloc(), provide an
  early counter mechanism:
  o Leave one spare uint64_t in struct pcpu, named pc_early_dummy_counter.
  o Point counter(9) fields of vmmeter to pcpu[0].pc_early_dummy_counter,
    so that at early stages of boot, before counters are allocated we
    already point to a counter that can be safely written to.
  o For sparc64 that required a whole dummy pcpu[MAXCPU] array.

Further related changes:
- Don't include vmmeter.h into pcpu.h.
- vm.stats.vm.v_swappgsout and vm.stats.vm.v_swappgsin changed to 64-bit,
  to match kernel representation.
- struct vmmeter hidden under _KERNEL, and only vmstat(1) is an exclusion.

This is based on benno@'s 4-year old patch:
https://lists.freebsd.org/pipermail/freebsd-arch/2013-July/014471.html

Reviewed by:    kib, gallatin, marius, lidl
Differential Revision:    https://reviews.freebsd.org/D10156
Notes
Notes: svn path=/head/; revision=317061
Diffstat (limited to 'sys/sys/vmmeter.h')
-rw-r--r--    sys/sys/vmmeter.h    142
1 files changed, 71 insertions, 71 deletions
diff --git a/sys/sys/vmmeter.h b/sys/sys/vmmeter.h
index b5d0ef148dc8..d769258360d8 100644
--- a/sys/sys/vmmeter.h
+++ b/sys/sys/vmmeter.h
@@ -39,50 +39,84 @@
*/
#define MAXSLP 20
+/* Systemwide totals computed every five seconds. */
+struct vmtotal {
+ int16_t t_rq; /* length of the run queue */
+ int16_t t_dw; /* jobs in ``disk wait'' (neg priority) */
+ int16_t t_pw; /* jobs in page wait */
+ int16_t t_sl; /* jobs sleeping in core */
+ int16_t t_sw; /* swapped out runnable/short block jobs */
+ int32_t t_vm; /* total virtual memory */
+ int32_t t_avm; /* active virtual memory */
+ int32_t t_rm; /* total real memory in use */
+ int32_t t_arm; /* active real memory */
+ int32_t t_vmshr; /* shared virtual memory */
+ int32_t t_avmshr; /* active shared virtual memory */
+ int32_t t_rmshr; /* shared real memory */
+ int32_t t_armshr; /* active shared real memory */
+ int32_t t_free; /* free memory pages */
+};
+
+#if defined(_KERNEL) || defined(_WANT_VMMETER)
+#include <sys/counter.h>
+
/*
* System wide statistics counters.
* Locking:
* a - locked by atomic operations
* c - constant after initialization
* f - locked by vm_page_queue_free_mtx
- * p - locked by being in the PCPU and atomicity respect to interrupts
+ * p - uses counter(9)
* q - changes are synchronized by the corresponding vm_pagequeue lock
*/
struct vmmeter {
/*
* General system activity.
*/
- u_int v_swtch; /* (p) context switches */
- u_int v_trap; /* (p) calls to trap */
- u_int v_syscall; /* (p) calls to syscall() */
- u_int v_intr; /* (p) device interrupts */
- u_int v_soft; /* (p) software interrupts */
+ counter_u64_t v_swtch; /* (p) context switches */
+ counter_u64_t v_trap; /* (p) calls to trap */
+ counter_u64_t v_syscall; /* (p) calls to syscall() */
+ counter_u64_t v_intr; /* (p) device interrupts */
+ counter_u64_t v_soft; /* (p) software interrupts */
/*
* Virtual memory activity.
*/
- u_int v_vm_faults; /* (p) address memory faults */
- u_int v_io_faults; /* (p) page faults requiring I/O */
- u_int v_cow_faults; /* (p) copy-on-writes faults */
- u_int v_cow_optim; /* (p) optimized copy-on-writes faults */
- u_int v_zfod; /* (p) pages zero filled on demand */
- u_int v_ozfod; /* (p) optimized zero fill pages */
- u_int v_swapin; /* (p) swap pager pageins */
- u_int v_swapout; /* (p) swap pager pageouts */
- u_int v_swappgsin; /* (p) swap pager pages paged in */
- u_int v_swappgsout; /* (p) swap pager pages paged out */
- u_int v_vnodein; /* (p) vnode pager pageins */
- u_int v_vnodeout; /* (p) vnode pager pageouts */
- u_int v_vnodepgsin; /* (p) vnode_pager pages paged in */
- u_int v_vnodepgsout; /* (p) vnode pager pages paged out */
- u_int v_intrans; /* (p) intransit blocking page faults */
- u_int v_reactivated; /* (p) pages reactivated by the pagedaemon */
- u_int v_pdwakeups; /* (p) times daemon has awaken from sleep */
- u_int v_pdpages; /* (p) pages analyzed by daemon */
- u_int v_pdshortfalls; /* (p) page reclamation shortfalls */
-
- u_int v_dfree; /* (p) pages freed by daemon */
- u_int v_pfree; /* (p) pages freed by exiting processes */
- u_int v_tfree; /* (p) total pages freed */
+ counter_u64_t v_vm_faults; /* (p) address memory faults */
+ counter_u64_t v_io_faults; /* (p) page faults requiring I/O */
+ counter_u64_t v_cow_faults; /* (p) copy-on-writes faults */
+ counter_u64_t v_cow_optim; /* (p) optimized COW faults */
+ counter_u64_t v_zfod; /* (p) pages zero filled on demand */
+ counter_u64_t v_ozfod; /* (p) optimized zero fill pages */
+ counter_u64_t v_swapin; /* (p) swap pager pageins */
+ counter_u64_t v_swapout; /* (p) swap pager pageouts */
+ counter_u64_t v_swappgsin; /* (p) swap pager pages paged in */
+ counter_u64_t v_swappgsout; /* (p) swap pager pages paged out */
+ counter_u64_t v_vnodein; /* (p) vnode pager pageins */
+ counter_u64_t v_vnodeout; /* (p) vnode pager pageouts */
+ counter_u64_t v_vnodepgsin; /* (p) vnode_pager pages paged in */
+ counter_u64_t v_vnodepgsout; /* (p) vnode pager pages paged out */
+ counter_u64_t v_intrans; /* (p) intransit blocking page faults */
+ counter_u64_t v_reactivated; /* (p) reactivated by the pagedaemon */
+ counter_u64_t v_pdwakeups; /* (p) times daemon has awaken */
+ counter_u64_t v_pdpages; /* (p) pages analyzed by daemon */
+ counter_u64_t v_pdshortfalls; /* (p) page reclamation shortfalls */
+
+ counter_u64_t v_dfree; /* (p) pages freed by daemon */
+ counter_u64_t v_pfree; /* (p) pages freed by processes */
+ counter_u64_t v_tfree; /* (p) total pages freed */
+ /*
+ * Fork/vfork/rfork activity.
+ */
+ counter_u64_t v_forks; /* (p) fork() calls */
+ counter_u64_t v_vforks; /* (p) vfork() calls */
+ counter_u64_t v_rforks; /* (p) rfork() calls */
+ counter_u64_t v_kthreads; /* (p) fork() calls by kernel */
+ counter_u64_t v_forkpages; /* (p) pages affected by fork() */
+ counter_u64_t v_vforkpages; /* (p) pages affected by vfork() */
+ counter_u64_t v_rforkpages; /* (p) pages affected by rfork() */
+ counter_u64_t v_kthreadpages; /* (p) ... and by kernel fork() */
+#define VM_METER_NCOUNTERS \
+ (offsetof(struct vmmeter, v_page_size) / sizeof(counter_u64_t))
/*
* Distribution of page usages.
*/
@@ -100,24 +134,18 @@ struct vmmeter {
u_int v_pageout_free_min; /* (c) min pages reserved for kernel */
u_int v_interrupt_free_min; /* (c) reserved pages for int code */
u_int v_free_severe; /* (c) severe page depletion point */
- /*
- * Fork/vfork/rfork activity.
- */
- u_int v_forks; /* (p) fork() calls */
- u_int v_vforks; /* (p) vfork() calls */
- u_int v_rforks; /* (p) rfork() calls */
- u_int v_kthreads; /* (p) fork() calls by kernel */
- u_int v_forkpages; /* (p) VM pages affected by fork() */
- u_int v_vforkpages; /* (p) VM pages affected by vfork() */
- u_int v_rforkpages; /* (p) VM pages affected by rfork() */
- u_int v_kthreadpages; /* (p) VM pages affected by fork() by kernel */
};
+#endif /* _KERNEL || _WANT_VMMETER */
+
#ifdef _KERNEL
extern struct vmmeter vm_cnt;
-
extern u_int vm_pageout_wakeup_thresh;
+#define VM_CNT_ADD(var, x) counter_u64_add(vm_cnt.var, x)
+#define VM_CNT_INC(var) VM_CNT_ADD(var, 1)
+#define VM_CNT_FETCH(var) counter_u64_fetch(vm_cnt.var)
+
/*
* Return TRUE if we are under our severe low-free-pages threshold
*
@@ -189,33 +217,5 @@ vm_laundry_target(void)
return (vm_paging_target());
}
-
-/*
- * Obtain the value of a per-CPU counter.
- */
-#define VM_METER_PCPU_CNT(member) \
- vm_meter_cnt(__offsetof(struct vmmeter, member))
-
-u_int vm_meter_cnt(size_t);
-
-#endif
-
-/* systemwide totals computed every five seconds */
-struct vmtotal {
- int16_t t_rq; /* length of the run queue */
- int16_t t_dw; /* jobs in ``disk wait'' (neg priority) */
- int16_t t_pw; /* jobs in page wait */
- int16_t t_sl; /* jobs sleeping in core */
- int16_t t_sw; /* swapped out runnable/short block jobs */
- int32_t t_vm; /* total virtual memory */
- int32_t t_avm; /* active virtual memory */
- int32_t t_rm; /* total real memory in use */
- int32_t t_arm; /* active real memory */
- int32_t t_vmshr; /* shared virtual memory */
- int32_t t_avmshr; /* active shared virtual memory */
- int32_t t_rmshr; /* shared real memory */
- int32_t t_armshr; /* active shared real memory */
- int32_t t_free; /* free memory pages */
-};
-
-#endif
+#endif /* _KERNEL */
+#endif /* _SYS_VMMETER_H_ */