aboutsummaryrefslogtreecommitdiff
path: root/sys/kern/uipc_shm.c
diff options
context:
space:
mode:
authorJeff Roberson <jeff@FreeBSD.org>2019-10-15 03:35:11 +0000
committerJeff Roberson <jeff@FreeBSD.org>2019-10-15 03:35:11 +0000
commit63e9755548e4feebf798686ab8bce0cdaaaf7b46 (patch)
tree73004f9ecd43d157304327e6d0feb4ddf93012af /sys/kern/uipc_shm.c
parentf44e7436797617b6c6a42a280befb312f1ebf50f (diff)
downloadsrc-63e9755548e4feebf798686ab8bce0cdaaaf7b46.tar.gz
src-63e9755548e4feebf798686ab8bce0cdaaaf7b46.zip
(1/6) Replace busy checks with acquires where it is trivial to do so.
This is the first in a series of patches that promotes the page busy field to a first class lock that no longer requires the object lock for consistency. Reviewed by: kib, markj Tested by: pho Sponsored by: Netflix, Intel Differential Revision: https://reviews.freebsd.org/D21548
Notes
Notes: svn path=/head/; revision=353535
Diffstat (limited to 'sys/kern/uipc_shm.c')
-rw-r--r--sys/kern/uipc_shm.c7
1 file changed, 3 insertions, 4 deletions
diff --git a/sys/kern/uipc_shm.c b/sys/kern/uipc_shm.c
index e0c6be1f0fab..b0aaac0659a5 100644
--- a/sys/kern/uipc_shm.c
+++ b/sys/kern/uipc_shm.c
@@ -457,10 +457,9 @@ shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
if (base != 0) {
idx = OFF_TO_IDX(length);
retry:
- m = vm_page_lookup(object, idx);
+ m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
if (m != NULL) {
- if (vm_page_sleep_if_busy(m, "shmtrc"))
- goto retry;
+ MPASS(m->valid == VM_PAGE_BITS_ALL);
} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
m = vm_page_alloc(object, idx,
VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
@@ -478,7 +477,6 @@ retry:
* as an access.
*/
vm_page_launder(m);
- vm_page_xunbusy(m);
} else {
vm_page_free(m);
VM_OBJECT_WUNLOCK(object);
@@ -490,6 +488,7 @@ retry:
KASSERT(m->valid == VM_PAGE_BITS_ALL,
("shm_dotruncate: page %p is invalid", m));
vm_page_dirty(m);
+ vm_page_xunbusy(m);
vm_pager_page_unswapped(m);
}
}