aboutsummaryrefslogtreecommitdiff
path: root/sys/sys/vmmeter.h
diff options
context:
space:
mode:
authorJeff Roberson <jeff@FreeBSD.org>2013-08-13 21:56:16 +0000
committerJeff Roberson <jeff@FreeBSD.org>2013-08-13 21:56:16 +0000
commitd9e232109f0de1a30cffdabe17140ca35016eeec (patch)
tree47081a4e187db2ed514b7a42997e9cb33a059764 /sys/sys/vmmeter.h
parent086d23cfd3c3077a278634fe411425b53b81acff (diff)
downloadsrc-d9e232109f0de1a30cffdabe17140ca35016eeec.tar.gz
src-d9e232109f0de1a30cffdabe17140ca35016eeec.zip
Improve pageout flow control to wakeup more frequently and do less work while
maintaining better LRU of active pages.

 - Change v_free_target to include the quantity previously represented by
   v_cache_min so we don't need to add them together everywhere we use them.
 - Add a pageout_wakeup_thresh that sets the free page count trigger for
   waking the page daemon.  Set this 10% above v_free_min so we wakeup before
   any phase transitions in vm users.
 - Adjust down v_free_target now that we're willing to accept more pagedaemon
   wakeups.  This means we process fewer pages in one iteration as well,
   leading to shorter lock hold times and less overall disruption.
 - Eliminate vm_pageout_page_stats().  This was a minor variation on the
   PQ_ACTIVE segment of the normal pageout daemon.  Instead we now process
   1 / vm_pageout_update_period pages every second.  This causes us to visit
   the whole active list every 60 seconds.  Previously we would only maintain
   the active LRU when we were short on pages which would mean it could be
   woefully out of date.

Reviewed by:	alc (slight variant of this)
Discussed with:	alc, kib, jhb
Sponsored by:	EMC / Isilon Storage Division
Notes
Notes: svn path=/head/; revision=254304
Diffstat (limited to 'sys/sys/vmmeter.h')
-rw-r--r--	sys/sys/vmmeter.h	14
1 file changed, 5 insertions, 9 deletions
diff --git a/sys/sys/vmmeter.h b/sys/sys/vmmeter.h
index 59b430d53206..d2ad920a4066 100644
--- a/sys/sys/vmmeter.h
+++ b/sys/sys/vmmeter.h
@@ -98,7 +98,7 @@ struct vmmeter {
u_int v_inactive_count; /* (q) pages inactive */
u_int v_cache_count; /* (f) pages on cache queue */
u_int v_cache_min; /* (c) min pages desired on cache queue */
- u_int v_cache_max; /* (c) max pages in cached obj */
+ u_int v_cache_max; /* (c) max pages in cached obj (unused) */
u_int v_pageout_free_min; /* (c) min pages reserved for kernel */
u_int v_interrupt_free_min; /* (c) reserved pages for int code */
u_int v_free_severe; /* (c) severe page depletion point */
@@ -118,6 +118,8 @@ struct vmmeter {
extern struct vmmeter cnt;
+extern int vm_pageout_wakeup_thresh;
+
/*
* Return TRUE if we are under our severe low-free-pages threshold
*
@@ -170,10 +172,7 @@ static __inline
int
vm_paging_target(void)
{
- return (
- (cnt.v_free_target + cnt.v_cache_min) -
- (cnt.v_free_count + cnt.v_cache_count)
- );
+ return (cnt.v_free_target - (cnt.v_free_count + cnt.v_cache_count));
}
/*
@@ -184,10 +183,7 @@ static __inline
int
vm_paging_needed(void)
{
- return (
- (cnt.v_free_reserved + cnt.v_cache_min) >
- (cnt.v_free_count + cnt.v_cache_count)
- );
+ return (cnt.v_free_count + cnt.v_cache_count < vm_pageout_wakeup_thresh);
}
#endif