author     Mark Johnston <markj@FreeBSD.org>    2019-08-28 16:08:06 +0000
committer  Mark Johnston <markj@FreeBSD.org>    2019-08-28 16:08:06 +0000
commit     b5d239cb970f97e96deed8bcde7d689b61e47bcf (patch)
tree       b09b7761b1c2c208c546243ef174b5a34a5dd04b
parent     d8deeff04d58117f45068bae81935973eed3f7e7 (diff)
Wire pages in vm_page_grab() when appropriate.
uiomove_object_page() and exec_map_first_page() would previously wire a
page after having grabbed it.  Ask vm_page_grab() to perform the wiring
instead: this removes some redundant code, and is cheaper in the case
where the requested page is not resident since the page allocator can be
asked to initialize the page as wired, whereas a separate vm_page_wire()
call requires the page lock.

In vm_imgact_hold_page(), use vm_page_unwire_noq() instead of
vm_page_unwire(PQ_NONE).  The latter ensures that the page is dequeued
before returning, but this is unnecessary since vm_page_free() will
trigger a batched dequeue of the page.

Reviewed by:    alc, kib
Tested by:      pho (part of a larger patch)
MFC after:      1 week
Sponsored by:   Netflix
Differential Revision:  https://reviews.freebsd.org/D21440
Notes
    svn path=/head/; revision=351569
-rw-r--r--  sys/kern/kern_exec.c  9
-rw-r--r--  sys/kern/uipc_shm.c   7
-rw-r--r--  sys/vm/vm_glue.c      2
3 files changed, 9 insertions, 9 deletions
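
A minimal sketch of the pattern this commit applies, distilled from the diff
below; "obj", "idx", and "m" are placeholder names rather than identifiers
from the patch, and the busying/validation logic between grab and use is
elided:

    /* Before: grab the page, then wire it separately under the page lock. */
    m = vm_page_grab(obj, idx, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
    /* ... validate or page in m ... */
    vm_page_lock(m);
    vm_page_wire(m);
    vm_page_unlock(m);

    /* After: have vm_page_grab() return the page already wired. */
    m = vm_page_grab(obj, idx, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
        VM_ALLOC_WIRED);
    /* ... */

    /*
     * Error path: drop the grab-time wiring without forcing a dequeue;
     * vm_page_free() will trigger a batched dequeue of the page itself.
     */
    vm_page_lock(m);
    vm_page_unwire_noq(m);
    vm_page_free(m);
    vm_page_unlock(m);
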
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index e148e0b1f20d..01472c4dc695 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -972,11 +972,13 @@ exec_map_first_page(struct image_params *imgp)
#if VM_NRESERVLEVEL > 0
vm_object_color(object, 0);
#endif
- ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
+ ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
+ VM_ALLOC_WIRED);
if (ma[0]->valid != VM_PAGE_BITS_ALL) {
vm_page_xbusy(ma[0]);
if (!vm_pager_has_page(object, 0, NULL, &after)) {
vm_page_lock(ma[0]);
+ vm_page_unwire_noq(ma[0]);
vm_page_free(ma[0]);
vm_page_unlock(ma[0]);
VM_OBJECT_WUNLOCK(object);
@@ -1004,6 +1006,8 @@ exec_map_first_page(struct image_params *imgp)
if (rv != VM_PAGER_OK) {
for (i = 0; i < initial_pagein; i++) {
vm_page_lock(ma[i]);
+ if (i == 0)
+ vm_page_unwire_noq(ma[i]);
vm_page_free(ma[i]);
vm_page_unlock(ma[i]);
}
@@ -1014,9 +1018,6 @@ exec_map_first_page(struct image_params *imgp)
for (i = 1; i < initial_pagein; i++)
vm_page_readahead_finish(ma[i]);
}
- vm_page_lock(ma[0]);
- vm_page_wire(ma[0]);
- vm_page_unlock(ma[0]);
VM_OBJECT_WUNLOCK(object);
imgp->firstpage = sf_buf_alloc(ma[0], 0);
diff --git a/sys/kern/uipc_shm.c b/sys/kern/uipc_shm.c
index d393f7818387..3980453b1366 100644
--- a/sys/kern/uipc_shm.c
+++ b/sys/kern/uipc_shm.c
@@ -188,7 +188,8 @@ uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
* lock to page out tobj's pages because tobj is a OBJT_SWAP
* type object.
*/
- m = vm_page_grab(obj, idx, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
+ m = vm_page_grab(obj, idx, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
+ VM_ALLOC_WIRED);
if (m->valid != VM_PAGE_BITS_ALL) {
vm_page_xbusy(m);
if (vm_pager_has_page(obj, idx, NULL, NULL)) {
@@ -198,6 +199,7 @@ uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
"uiomove_object: vm_obj %p idx %jd valid %x pager error %d\n",
obj, idx, m->valid, rv);
vm_page_lock(m);
+ vm_page_unwire_noq(m);
vm_page_free(m);
vm_page_unlock(m);
VM_OBJECT_WUNLOCK(obj);
@@ -207,9 +209,6 @@ uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
vm_page_zero_invalid(m, TRUE);
vm_page_xunbusy(m);
}
- vm_page_lock(m);
- vm_page_wire(m);
- vm_page_unlock(m);
VM_OBJECT_WUNLOCK(obj);
error = uiomove_fromphys(&m, offset, tlen, uio);
if (uio->uio_rw == UIO_WRITE && error == 0) {
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 4a7bbc9770e9..2ff1940f1aa6 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -230,7 +230,7 @@ vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
if (rv != VM_PAGER_OK) {
vm_page_lock(m);
- vm_page_unwire(m, PQ_NONE);
+ vm_page_unwire_noq(m);
vm_page_free(m);
vm_page_unlock(m);
m = NULL;