Diffstat (limited to 'sys')
-rw-r--r--  sys/sys/vmmeter.h   6
-rw-r--r--  sys/vm/vm_page.c   13
-rw-r--r--  sys/vm/vm_phys.h    4
3 files changed, 12 insertions, 11 deletions
diff --git a/sys/sys/vmmeter.h b/sys/sys/vmmeter.h
index bc536f71e7b5..c36a0addb3b2 100644
--- a/sys/sys/vmmeter.h
+++ b/sys/sys/vmmeter.h
@@ -131,7 +131,6 @@ struct vmmeter {
u_int v_free_reserved; /* (c) pages reserved for deadlock */
u_int v_free_target; /* (c) pages desired free */
u_int v_free_min; /* (c) pages desired free */
- u_int v_free_count; /* (f) pages free */
u_int v_inactive_target; /* (c) pages desired inactive */
u_int v_pageout_free_min; /* (c) min pages reserved for kernel */
u_int v_interrupt_free_min; /* (c) reserved pages for int code */
@@ -141,6 +140,7 @@ struct vmmeter {
u_int v_inactive_count VMMETER_ALIGNED; /* (a) pages inactive */
u_int v_laundry_count VMMETER_ALIGNED; /* (a) pages eligible for
laundering */
+ u_int v_free_count VMMETER_ALIGNED; /* (a) pages free */
};
#endif /* _KERNEL || _WANT_VMMETER */
@@ -208,10 +208,10 @@ vm_paging_target(void)
* Returns TRUE if the pagedaemon needs to be woken up.
*/
static inline int
-vm_paging_needed(void)
+vm_paging_needed(u_int free_count)
{
- return (vm_cnt.v_free_count < vm_pageout_wakeup_thresh);
+ return (free_count < vm_pageout_wakeup_thresh);
}
/*
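[Note on the vmmeter.h hunks above: VMMETER_ALIGNED expands in-kernel to
__aligned(CACHE_LINE_SIZE), so the relocated v_free_count now sits with the
other frequently-written, cache-line-aligned counters rather than sharing a
line with the read-mostly watermarks above it. A minimal sketch of the same
layout idea in standard C11 follows; the struct and field names are
illustrative only, not FreeBSD's.]

#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>

#define CACHE_LINE_SIZE 64	/* typical x86 value; FreeBSD defines this per-arch */

struct meter {
	/* Read-mostly watermarks: sharing one cache line is harmless. */
	unsigned int free_target;
	unsigned int free_min;

	/* Hot counter: alignas pushes it onto its own cache line. */
	alignas(CACHE_LINE_SIZE) unsigned int free_count;
};

int
main(void)
{
	printf("free_count lives at offset %zu\n",
	    offsetof(struct meter, free_count));
	return (0);
}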
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 18e83635d2e2..31553bdc380a 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -1588,6 +1588,7 @@ vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex, int req,
{
vm_page_t m;
int flags, req_class;
+ u_int free_count;
KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
(object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
@@ -1655,7 +1656,7 @@ vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex, int req,
* At this point we had better have found a good page.
*/
KASSERT(m != NULL, ("missing page"));
- vm_phys_freecnt_adj(m, -1);
+ free_count = vm_phys_freecnt_adj(m, -1);
mtx_unlock(&vm_page_queue_free_mtx);
vm_page_alloc_check(m);
@@ -1713,7 +1714,7 @@ vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex, int req,
* Don't wakeup too often - wakeup the pageout daemon when
* we would be nearly out of memory.
*/
- if (vm_paging_needed())
+ if (vm_paging_needed(free_count))
pagedaemon_wakeup();
return (m);
@@ -1899,7 +1900,7 @@ retry:
pmap_page_set_memattr(m, memattr);
pindex++;
}
- if (vm_paging_needed())
+ if (vm_paging_needed(vm_cnt.v_free_count))
pagedaemon_wakeup();
return (m_ret);
}
@@ -1948,7 +1949,7 @@ vm_page_t
vm_page_alloc_freelist(int flind, int req)
{
vm_page_t m;
- u_int flags;
+ u_int flags, free_count;
int req_class;
req_class = req & VM_ALLOC_CLASS_MASK;
@@ -1980,7 +1981,7 @@ vm_page_alloc_freelist(int flind, int req)
mtx_unlock(&vm_page_queue_free_mtx);
return (NULL);
}
- vm_phys_freecnt_adj(m, -1);
+ free_count = vm_phys_freecnt_adj(m, -1);
mtx_unlock(&vm_page_queue_free_mtx);
vm_page_alloc_check(m);
@@ -2002,7 +2003,7 @@ vm_page_alloc_freelist(int flind, int req)
}
/* Unmanaged pages don't use "act_count". */
m->oflags = VPO_UNMANAGED;
- if (vm_paging_needed())
+ if (vm_paging_needed(free_count))
pagedaemon_wakeup();
return (m);
}
diff --git a/sys/vm/vm_phys.h b/sys/vm/vm_phys.h
index ef0a95e4933f..4a08be7a42d1 100644
--- a/sys/vm/vm_phys.h
+++ b/sys/vm/vm_phys.h
@@ -112,13 +112,13 @@ vm_phys_domain(vm_page_t m)
#endif
}
-static inline void
+static inline u_int
vm_phys_freecnt_adj(vm_page_t m, int adj)
{
mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
- vm_cnt.v_free_count += adj;
vm_phys_domain(m)->vmd_free_count += adj;
+ return (vm_cnt.v_free_count += adj);
}
#endif /* _KERNEL */
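[Taken together, the three files implement one pattern: vm_phys_freecnt_adj()
now hands back the post-adjustment free-page count computed while
vm_page_queue_free_mtx is held, and vm_paging_needed() becomes a pure function
of that snapshot, so the pagedaemon-wakeup test no longer re-reads
vm_cnt.v_free_count after the lock is dropped. Below is a minimal userland
sketch of the same idea, with a pthread mutex standing in for the free-queue
mutex; all names here are illustrative, not FreeBSD's.]

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int free_count_global = 128;	/* plays vm_cnt.v_free_count */
static const unsigned int wakeup_thresh = 64;	/* plays vm_pageout_wakeup_thresh */

/*
 * Like the reworked vm_phys_freecnt_adj(): adjust the counter and
 * return the post-adjustment value, all while the lock is held.
 */
static unsigned int
freecnt_adj(int adj)
{
	return (free_count_global += adj);
}

/* Like the reworked vm_paging_needed(): a pure check on a snapshot. */
static bool
paging_needed(unsigned int free_count)
{
	return (free_count < wakeup_thresh);
}

static void
alloc_one_page(void)
{
	unsigned int free_count;

	pthread_mutex_lock(&free_lock);
	free_count = freecnt_adj(-1);	/* snapshot taken under the lock */
	pthread_mutex_unlock(&free_lock);

	/*
	 * Test the snapshot, not the global: an unlocked re-read of
	 * free_count_global here could race with concurrent updates.
	 */
	if (paging_needed(free_count))
		printf("wake the pagedaemon\n");
}

int
main(void)
{
	for (int i = 0; i < 70; i++)
		alloc_one_page();
	return (0);
}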