author      Attilio Rao <attilio@FreeBSD.org>  2013-08-04 21:07:24 +0000
committer   Attilio Rao <attilio@FreeBSD.org>  2013-08-04 21:07:24 +0000
commit      3b6714cacb30c4ded64d5580426cf7acc9dfcd32 (patch)
tree        66d78aa520f99833b11e6eca180f8fa7216b21f8 /sys/kern
parent      b9fdaa9b19b3382d087c564ed1661f7581d7155e (diff)
The page hold mechanism is fast, but it has a couple of fallouts:

- It does not let pages respect the LRU policy.
- It bloats the active/inactive queues with a few unreclaimable pages.

Try to avoid it as much as possible, with the long-term target to completely remove it. Use the soft-busy mechanism to protect page content accesses during short-term operations (like uiomove_fromphys()).

After this change, only vm_fault_quick_hold_pages() still uses the hold mechanism for page content access. There is an additional complexity there, as the quick path cannot immediately access the page object to busy the page, while the slow path cannot busy more than one page at a time (to avoid deadlocks). Fixing that primitive would allow complete removal of the page hold mechanism.

Sponsored by:   EMC / Isilon storage division
Discussed with: alc
Reviewed by:    jeff
Tested by:      pho
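For orientation, here is a minimal sketch (assumed, not committed code) contrasting the two access patterns this change trades between. The individual calls all appear in the diffs below; the helper names and surrounding structure are illustrative only.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>

/*
 * Old pattern: hold pins the page with only the page lock held.
 * Fast, but the held page cannot follow normal queue management.
 */
static void
access_held(vm_page_t m)
{
	vm_page_lock(m);
	vm_page_hold(m);		/* pin the page in place */
	vm_page_unlock(m);
	/* ... short-term access to the page contents ... */
	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);
}

/*
 * New pattern: the page arrives soft-busied (e.g. returned by
 * vm_fault_handle() with VM_FAULT_IOBUSY) and is released under
 * the object lock, leaving LRU/queue management undisturbed.
 */
static void
access_sbusied(vm_page_t m)
{
	/* ... short-term access to the page contents ... */
	VM_OBJECT_WLOCK(m->object);
	vm_page_io_finish(m);		/* drop soft-busy, wake waiters */
	VM_OBJECT_WUNLOCK(m->object);
}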
Notes:
svn path=/head/; revision=253939
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/imgact_elf.c    6
-rw-r--r--  sys/kern/kern_exec.c     4
-rw-r--r--  sys/kern/sys_process.c  11
3 files changed, 11 insertions(+), 10 deletions(-)
diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
index 61a2aefe4e06..9533635e7f9a 100644
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -378,7 +378,7 @@ __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
off = offset - trunc_page(offset);
error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start,
end - start);
- vm_imgact_unmap_page(sf);
+ vm_imgact_unmap_page(object, sf);
if (error) {
return (KERN_FAILURE);
}
@@ -433,7 +433,7 @@ __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
sz = PAGE_SIZE - off;
error = copyout((caddr_t)sf_buf_kva(sf) + off,
(caddr_t)start, sz);
- vm_imgact_unmap_page(sf);
+ vm_imgact_unmap_page(object, sf);
if (error) {
return (KERN_FAILURE);
}
@@ -553,7 +553,7 @@ __elfN(load_section)(struct image_params *imgp, vm_offset_t offset,
trunc_page(offset + filsz);
error = copyout((caddr_t)sf_buf_kva(sf) + off,
(caddr_t)map_addr, copy_len);
- vm_imgact_unmap_page(sf);
+ vm_imgact_unmap_page(object, sf);
if (error) {
return (error);
}
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index c0e143595396..0c8b0c112e2f 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -973,7 +973,7 @@ exec_map_first_page(imgp)
vm_page_wakeup(ma[0]);
}
vm_page_lock(ma[0]);
- vm_page_hold(ma[0]);
+ vm_page_wire(ma[0]);
vm_page_unlock(ma[0]);
VM_OBJECT_WUNLOCK(object);
@@ -994,7 +994,7 @@ exec_unmap_first_page(imgp)
sf_buf_free(imgp->firstpage);
imgp->firstpage = NULL;
vm_page_lock(m);
- vm_page_unhold(m);
+ vm_page_unwire(m, 0);
vm_page_unlock(m);
}
}
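exec keeps its first page mapped through imgp->firstpage from exec_map_first_page() until exec_unmap_first_page(), far longer than a busy state should be held, so this caller switches from hold to a wiring instead of to soft-busy. The pairing below sketches that long-lived reference pattern; the helper names are illustrative assumptions.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

/* Illustrative helpers, not committed code. */
static void
firstpage_pin(vm_page_t m)
{
	vm_page_lock(m);
	vm_page_wire(m);	/* page can no longer be freed or paged out */
	vm_page_unlock(m);
}

static void
firstpage_unpin(vm_page_t m)
{
	vm_page_lock(m);
	vm_page_unwire(m, 0);	/* 0: requeue as inactive on last unwire */
	vm_page_unlock(m);
}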
diff --git a/sys/kern/sys_process.c b/sys/kern/sys_process.c
index 5508dcff15fb..33fac71c3f01 100644
--- a/sys/kern/sys_process.c
+++ b/sys/kern/sys_process.c
@@ -263,6 +263,7 @@ proc_rwmem(struct proc *p, struct uio *uio)
writing = uio->uio_rw == UIO_WRITE;
reqprot = writing ? VM_PROT_COPY | VM_PROT_READ : VM_PROT_READ;
fault_flags = writing ? VM_FAULT_DIRTY : VM_FAULT_NORMAL;
+ fault_flags |= VM_FAULT_IOBUSY;

/*
* Only map in one page at a time. We don't have to, but it
@@ -287,9 +288,9 @@ proc_rwmem(struct proc *p, struct uio *uio)
len = min(PAGE_SIZE - page_offset, uio->uio_resid);

/*
- * Fault and hold the page on behalf of the process.
+ * Fault and busy the page on behalf of the process.
*/
- error = vm_fault_hold(map, pageno, reqprot, fault_flags, &m);
+ error = vm_fault_handle(map, pageno, reqprot, fault_flags, &m);
if (error != KERN_SUCCESS) {
if (error == KERN_RESOURCE_SHORTAGE)
error = ENOMEM;
@@ -315,9 +316,9 @@ proc_rwmem(struct proc *p, struct uio *uio)
/*
* Release the page.
*/
- vm_page_lock(m);
- vm_page_unhold(m);
- vm_page_unlock(m);
+ VM_OBJECT_WLOCK(m->object);
+ vm_page_io_finish(m);
+ VM_OBJECT_WUNLOCK(m->object);
} while (error == 0 && uio->uio_resid > 0);
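Taken together, the three sys_process.c hunks leave the per-page loop of proc_rwmem() looking roughly like the sketch below: the fault returns the page soft-busied instead of held, so the release side moves from the page lock to the object lock. The condensed structure and the EFAULT fallback are assumptions drawn from the surrounding context (variable declarations as in proc_rwmem()), not lines of this diff.

do {
	/* Fault in and soft-busy the page on behalf of the process. */
	error = vm_fault_handle(map, pageno, reqprot, fault_flags, &m);
	if (error != KERN_SUCCESS) {
		error = (error == KERN_RESOURCE_SHORTAGE) ?
		    ENOMEM : EFAULT;
		break;
	}

	/* The soft-busy keeps the page identity stable across the copy. */
	error = uiomove_fromphys(&m, page_offset, len, uio);

	/* Release: unbusy under the object lock (was page lock + unhold). */
	VM_OBJECT_WLOCK(m->object);
	vm_page_io_finish(m);
	VM_OBJECT_WUNLOCK(m->object);
} while (error == 0 && uio->uio_resid > 0);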