aboutsummaryrefslogtreecommitdiff
path: root/sys/sys/vmmeter.h
diff options
context:
space:
mode:
authorAlan Cox <alc@FreeBSD.org>2016-11-09 18:48:37 +0000
committerAlan Cox <alc@FreeBSD.org>2016-11-09 18:48:37 +0000
commitebcddc721771e8c70f98b61576242d2e4983682b (patch)
tree6e4503c31681e0975d80a1904e0f094824a2c83d /sys/sys/vmmeter.h
parentbde951447fbed7c2669f80b5c7b6d3d16bb144f8 (diff)
downloadsrc-ebcddc721771e8c70f98b61576242d2e4983682b.tar.gz
src-ebcddc721771e8c70f98b61576242d2e4983682b.zip
Introduce a new page queue, PQ_LAUNDRY, for storing unreferenced, dirty
pages, specifically, dirty pages that have passed once through the inactive queue. A new, dedicated thread is responsible for both deciding when to launder pages and actually laundering them. The new policy uses the relative sizes of the inactive and laundry queues to determine whether to launder pages at a given point in time. In general, this leads to more intelligent swapping behavior, since the laundry thread will avoid pageouts when the marginal benefit of doing so is low. Previously, without a dedicated queue for dirty pages, the page daemon didn't have the information to determine whether pageout provides any benefit to the system. Thus, the previous policy often resulted in small but steadily increasing amounts of swap usage when the system is under memory pressure, even when the inactive queue consisted mostly of clean pages. This change addresses that issue, and also paves the way for some future virtual memory system improvements by removing the last source of object-cached clean pages, i.e., PG_CACHE pages. The new laundry thread sleeps while waiting for a request from the page daemon thread(s). A request is raised by setting the variable vm_laundry_request and waking the laundry thread. We request launderings for two reasons: to try to balance the inactive and laundry queue sizes ("background laundering"), and to quickly make up for a shortage of free pages and clean inactive pages ("shortfall laundering"). When background laundering is requested, the laundry thread computes the number of page daemon wakeups that have taken place since the last laundering. If this number is large enough relative to the ratio of the laundry and (global) inactive queue sizes, we will launder vm_background_launder_target pages at vm_background_launder_rate KB/s. Otherwise, the laundry thread goes back to sleep without doing any work. When scanning the laundry queue during background laundering, reactivated pages are counted towards the laundry thread's target. 
In contrast, shortfall laundering is requested when an inactive queue scan fails to meet its target. In this case, the laundry thread attempts to launder enough pages to meet v_free_target within 0.5s, which is the inactive queue scan period. A laundry request can be latched while another is currently being serviced. In particular, a shortfall request will immediately preempt a background laundering. This change also redefines the meaning of vm_cnt.v_reactivated and removes the functions vm_page_cache() and vm_page_try_to_cache(). The new meaning of vm_cnt.v_reactivated now better reflects its name. It represents the number of inactive or laundry pages that are returned to the active queue on account of a reference. In collaboration with: markj Reviewed by: kib Tested by: pho Sponsored by: Dell EMC Isilon Differential Revision: https://reviews.freebsd.org/D8302
Notes
Notes: svn path=/head/; revision=308474
Diffstat (limited to 'sys/sys/vmmeter.h')
-rw-r--r--sys/sys/vmmeter.h24
1 files changed, 22 insertions, 2 deletions
diff --git a/sys/sys/vmmeter.h b/sys/sys/vmmeter.h
index 39f03d0d7075..54291f5f0308 100644
--- a/sys/sys/vmmeter.h
+++ b/sys/sys/vmmeter.h
@@ -75,9 +75,10 @@ struct vmmeter {
u_int v_vnodepgsin; /* (p) vnode_pager pages paged in */
u_int v_vnodepgsout; /* (p) vnode pager pages paged out */
u_int v_intrans; /* (p) intransit blocking page faults */
- u_int v_reactivated; /* (f) pages reactivated from free list */
+ u_int v_reactivated; /* (p) pages reactivated by the pagedaemon */
u_int v_pdwakeups; /* (p) times daemon has awaken from sleep */
u_int v_pdpages; /* (p) pages analyzed by daemon */
+ u_int v_pdshortfalls; /* (p) page reclamation shortfalls */
u_int v_tcached; /* (p) total pages cached */
u_int v_dfree; /* (p) pages freed by daemon */
@@ -96,6 +97,7 @@ struct vmmeter {
u_int v_active_count; /* (q) pages active */
u_int v_inactive_target; /* (c) pages desired inactive */
u_int v_inactive_count; /* (q) pages inactive */
+ u_int v_laundry_count; /* (q) pages eligible for laundering */
u_int v_cache_count; /* (f) pages on cache queue */
u_int v_pageout_free_min; /* (c) min pages reserved for kernel */
u_int v_interrupt_free_min; /* (c) reserved pages for int code */
@@ -111,7 +113,6 @@ struct vmmeter {
u_int v_vforkpages; /* (p) VM pages affected by vfork() */
u_int v_rforkpages; /* (p) VM pages affected by rfork() */
u_int v_kthreadpages; /* (p) VM pages affected by fork() by kernel */
- u_int v_spare[2];
};
#ifdef _KERNEL
@@ -184,6 +185,25 @@ vm_paging_needed(void)
vm_pageout_wakeup_thresh);
}
+/*
+ * Return the number of pages we need to launder.
+ * A positive number indicates that we have a shortfall of clean pages.
+ */
+static inline int
+vm_laundry_target(void)
+{
+
+ return (vm_paging_target());
+}
+
+/*
+ * Obtain the value of a per-CPU counter.
+ */
+#define VM_METER_PCPU_CNT(member) \
+ vm_meter_cnt(__offsetof(struct vmmeter, member))
+
+u_int vm_meter_cnt(size_t);
+
#endif
/* systemwide totals computed every five seconds */