author      Jeff Roberson <jeff@FreeBSD.org>    2019-10-15 03:35:11 +0000
committer   Jeff Roberson <jeff@FreeBSD.org>    2019-10-15 03:35:11 +0000
commit      63e9755548e4feebf798686ab8bce0cdaaaf7b46 (patch)
tree        73004f9ecd43d157304327e6d0feb4ddf93012af /sys/kern
parent      f44e7436797617b6c6a42a280befb312f1ebf50f (diff)
download    src-63e9755548e4feebf798686ab8bce0cdaaaf7b46.tar.gz
            src-63e9755548e4feebf798686ab8bce0cdaaaf7b46.zip
(1/6) Replace busy checks with acquires where it is trivial to do so.

This is the first in a series of patches that promotes the page busy
field to a first class lock that no longer requires the object lock
for consistency.

Reviewed by:    kib, markj
Tested by:      pho
Sponsored by:   Netflix, Intel
Differential Revision:  https://reviews.freebsd.org/D21548
Notes:
    svn path=/head/; revision=353535
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/kern_exec.c    |  6
-rw-r--r--  sys/kern/uipc_shm.c     |  7
-rw-r--r--  sys/kern/vfs_bio.c      | 48
-rw-r--r--  sys/kern/vfs_cluster.c  | 18
4 files changed, 40 insertions(+), 39 deletions(-)
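The heart of the change is visible in miniature in the vfs_bio.c hunks below: a sleep-until-unbusied loop becomes an acquire/release pair. A condensed before-and-after sketch (m stands for any vm_page_t the caller must hold shared-busy; the wait channel string is the one used in vfs_vmio_invalidate()):

    /* Before: busy is a flag; poll it and sleep under the object lock. */
    while (vm_page_xbusied(m))
            vm_page_sleep_if_xbusy(m, "mbncsh");

    /* After: busy is a lock; acquire it shared, release it explicitly. */
    vm_page_busy_acquire(m, VM_ALLOC_SBUSY);
    /* ... examine or modify the page ... */
    vm_page_sunbusy(m);

The acquire sleeps internally while the page is exclusively busied, so callers no longer need a check-and-sleep loop, nor the object lock to stabilize the busy state.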
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index 5abced8fbbaa..e1c647ca4f00 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -976,10 +976,14 @@ exec_map_first_page(struct image_params *imgp)
#if VM_NRESERVLEVEL > 0
vm_object_color(object, 0);
#endif
+retry:
ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
VM_ALLOC_WIRED);
if (ma[0]->valid != VM_PAGE_BITS_ALL) {
- vm_page_xbusy(ma[0]);
+ if (vm_page_busy_acquire(ma[0], VM_ALLOC_WAITFAIL) == 0) {
+ vm_page_unwire_noq(ma[0]);
+ goto retry;
+ }
if (!vm_pager_has_page(object, 0, NULL, &after)) {
if (vm_page_unwire_noq(ma[0]))
vm_page_free(ma[0]);
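The VM_ALLOC_WAITFAIL variant used here serves callers that hold state they must unwind before sleeping: vm_page_busy_acquire() sleeps once, then returns 0 rather than looping internally, so exec_map_first_page() can drop the wiring taken by vm_page_grab() and restart instead of accumulating an extra wire on every retry. Distilled from the hunk above, with ma[0] shortened to m:

    retry:
            m = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
                VM_ALLOC_WIRED);
            if (m->valid != VM_PAGE_BITS_ALL) {
                    /* Lost the busy race: undo the wire, revalidate. */
                    if (vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL) == 0) {
                            vm_page_unwire_noq(m);
                            goto retry;
                    }
                    /* Here the page is wired and exclusively busied. */
            }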
diff --git a/sys/kern/uipc_shm.c b/sys/kern/uipc_shm.c
index e0c6be1f0fab..b0aaac0659a5 100644
--- a/sys/kern/uipc_shm.c
+++ b/sys/kern/uipc_shm.c
@@ -457,10 +457,9 @@ shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
if (base != 0) {
idx = OFF_TO_IDX(length);
retry:
- m = vm_page_lookup(object, idx);
+ m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
if (m != NULL) {
- if (vm_page_sleep_if_busy(m, "shmtrc"))
- goto retry;
+ MPASS(m->valid == VM_PAGE_BITS_ALL);
} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
m = vm_page_alloc(object, idx,
VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
@@ -478,7 +477,6 @@ retry:
* as an access.
*/
vm_page_launder(m);
- vm_page_xunbusy(m);
} else {
vm_page_free(m);
VM_OBJECT_WUNLOCK(object);
@@ -490,6 +488,7 @@ retry:
KASSERT(m->valid == VM_PAGE_BITS_ALL,
("shm_dotruncate: page %p is invalid", m));
vm_page_dirty(m);
+ vm_page_xunbusy(m);
vm_pager_page_unswapped(m);
}
}
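In shm_dotruncate_locked() the lookup-then-vm_page_sleep_if_busy() retry is replaced by vm_page_grab() with VM_ALLOC_NOCREAT, which returns an existing page already exclusively busied, or NULL without creating one. Because the caller now owns the busy lock, the vm_page_xunbusy() moves from before the page update to after it. The resulting shape, condensed from the hunk (the MPASS documents the invariant that a resident shm page is fully valid):

    m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
    if (m != NULL) {
            /* Grab returned the page exclusively busied. */
            MPASS(m->valid == VM_PAGE_BITS_ALL);
            /* ... zero the truncated tail, launder or free as needed ... */
            vm_page_dirty(m);
            vm_page_xunbusy(m);     /* release only after the update */
    }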
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 8e14592b402d..baeaf2e32dc0 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -2945,10 +2945,10 @@ vfs_vmio_invalidate(struct buf *bp)
presid = resid > (PAGE_SIZE - poffset) ?
(PAGE_SIZE - poffset) : resid;
KASSERT(presid >= 0, ("brelse: extra page"));
- while (vm_page_xbusied(m))
- vm_page_sleep_if_xbusy(m, "mbncsh");
+ vm_page_busy_acquire(m, VM_ALLOC_SBUSY);
if (pmap_page_wired_mappings(m) == 0)
vm_page_set_invalid(m, poffset, presid);
+ vm_page_sunbusy(m);
vm_page_release_locked(m, flags);
resid -= presid;
poffset = 0;
@@ -3651,7 +3651,7 @@ vfs_clean_pages_dirty_buf(struct buf *bp)
("vfs_clean_pages_dirty_buf: no buffer offset"));
VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
- vfs_drain_busy_pages(bp);
+ vfs_busy_pages_acquire(bp);
vfs_setdirty_locked_object(bp);
for (i = 0; i < bp->b_npages; i++) {
noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
@@ -3663,6 +3663,7 @@ vfs_clean_pages_dirty_buf(struct buf *bp)
/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
foff = noff;
}
+ vfs_busy_pages_release(bp);
VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
}
@@ -4559,28 +4560,25 @@ vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off, vm_page_t m)
}
/*
- * Ensure that all buffer pages are not exclusive busied. If any page is
- * exclusive busy, drain it.
+ * Acquire a shared busy on all pages in the buf.
*/
void
-vfs_drain_busy_pages(struct buf *bp)
+vfs_busy_pages_acquire(struct buf *bp)
{
- vm_page_t m;
- int i, last_busied;
+ int i;
VM_OBJECT_ASSERT_WLOCKED(bp->b_bufobj->bo_object);
- last_busied = 0;
- for (i = 0; i < bp->b_npages; i++) {
- m = bp->b_pages[i];
- if (vm_page_xbusied(m)) {
- for (; last_busied < i; last_busied++)
- vm_page_sbusy(bp->b_pages[last_busied]);
- while (vm_page_xbusied(m)) {
- vm_page_sleep_if_xbusy(m, "vbpage");
- }
- }
- }
- for (i = 0; i < last_busied; i++)
+ for (i = 0; i < bp->b_npages; i++)
+ vm_page_busy_acquire(bp->b_pages[i], VM_ALLOC_SBUSY);
+}
+
+void
+vfs_busy_pages_release(struct buf *bp)
+{
+ int i;
+
+ VM_OBJECT_ASSERT_WLOCKED(bp->b_bufobj->bo_object);
+ for (i = 0; i < bp->b_npages; i++)
vm_page_sunbusy(bp->b_pages[i]);
}
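The renamed helper and its new counterpart give buf-layer code a bracketed critical section over every page of a buf, as vfs_clean_pages_dirty_buf() now uses above. The usage pattern, with the object write lock held across the pair:

    VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
    vfs_busy_pages_acquire(bp);     /* shared-busy every page of the buf */
    /* ... walk bp->b_pages[] adjusting valid/dirty state ... */
    vfs_busy_pages_release(bp);
    VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);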
@@ -4613,17 +4611,17 @@ vfs_busy_pages(struct buf *bp, int clear_modify)
KASSERT(bp->b_offset != NOOFFSET,
("vfs_busy_pages: no buffer offset"));
VM_OBJECT_WLOCK(obj);
- vfs_drain_busy_pages(bp);
+ if ((bp->b_flags & B_CLUSTER) == 0) {
+ vm_object_pip_add(obj, bp->b_npages);
+ vfs_busy_pages_acquire(bp);
+ }
if (bp->b_bufsize != 0)
vfs_setdirty_locked_object(bp);
bogus = false;
for (i = 0; i < bp->b_npages; i++) {
m = bp->b_pages[i];
+ vm_page_assert_sbusied(m);
- if ((bp->b_flags & B_CLUSTER) == 0) {
- vm_object_pip_add(obj, 1);
- vm_page_sbusy(m);
- }
/*
* When readying a buffer for a read ( i.e
* clear_modify == 0 ), it is important to do
diff --git a/sys/kern/vfs_cluster.c b/sys/kern/vfs_cluster.c
index 21efe900eea0..6a87dd28d57b 100644
--- a/sys/kern/vfs_cluster.c
+++ b/sys/kern/vfs_cluster.c
@@ -418,11 +418,9 @@ cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
if (i == 0) {
VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
- vfs_drain_busy_pages(tbp);
vm_object_pip_add(tbp->b_bufobj->bo_object,
tbp->b_npages);
- for (k = 0; k < tbp->b_npages; k++)
- vm_page_sbusy(tbp->b_pages[k]);
+ vfs_busy_pages_acquire(tbp);
VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);
} else {
if ((bp->b_npages * PAGE_SIZE) +
@@ -470,10 +468,9 @@ cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
if ((tbp->b_pages[j]->valid &
vm_page_bits(toff, tinc)) != 0)
break;
- if (vm_page_xbusied(tbp->b_pages[j]))
+ if (vm_page_trysbusy(tbp->b_pages[j]) == 0)
break;
vm_object_pip_add(tbp->b_bufobj->bo_object, 1);
- vm_page_sbusy(tbp->b_pages[j]);
off += tinc;
tsize -= tinc;
}
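The cluster_rbuild() change above also swaps a check for an atomic try-acquire. Once the busy field stands alone as a lock, testing vm_page_xbusied() and then calling vm_page_sbusy() leaves a window in which another thread can busy the page; vm_page_trysbusy() closes it by attempting the shared acquire in one step and returning 0 on failure:

    /* Old: test, then acquire -- a window exists between the two. */
    if (vm_page_xbusied(m))
            break;
    vm_page_sbusy(m);

    /* New: one atomic attempt that simply fails instead of racing. */
    if (vm_page_trysbusy(m) == 0)
            break;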
@@ -991,11 +988,14 @@ cluster_wbuild(struct vnode *vp, long size, daddr_t start_lbn, int len,
VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
if (i == 0) {
- vfs_drain_busy_pages(tbp);
+ vfs_busy_pages_acquire(tbp);
} else { /* if not first buffer */
for (j = 0; j < tbp->b_npages; j += 1) {
m = tbp->b_pages[j];
- if (vm_page_xbusied(m)) {
+ if (vm_page_trysbusy(m) == 0) {
+ for (j--; j >= 0; j--)
+ vm_page_sunbusy(
+ tbp->b_pages[j]);
VM_OBJECT_WUNLOCK(
tbp->b_bufobj->bo_object);
bqrelse(tbp);
@@ -1003,10 +1003,10 @@ cluster_wbuild(struct vnode *vp, long size, daddr_t start_lbn, int len,
}
}
}
+ vm_object_pip_add(tbp->b_bufobj->bo_object,
+ tbp->b_npages);
for (j = 0; j < tbp->b_npages; j += 1) {
m = tbp->b_pages[j];
- vm_page_sbusy(m);
- vm_object_pip_add(m->object, 1);
if ((bp->b_npages == 0) ||
(bp->b_pages[bp->b_npages - 1] != m)) {
bp->b_pages[bp->b_npages] = m;
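The cluster_wbuild() hunks show the other half of the try-acquire discipline: a failed vm_page_trysbusy() now obliges the caller to release the pages it already shared-busied in this pass before bailing out, which is what the new backwards unwind loop does. Reduced to its skeleton (the bailout label is illustrative, standing in for the bqrelse()/finishcluster path):

    for (j = 0; j < tbp->b_npages; j++) {
            if (vm_page_trysbusy(tbp->b_pages[j]) == 0) {
                    /* Unwind: sunbusy everything acquired so far. */
                    while (--j >= 0)
                            vm_page_sunbusy(tbp->b_pages[j]);
                    goto bailout;
            }
    }

Note also that the vm_object_pip_add() calls are hoisted out of the per-page loop into a single call adding b_npages, since the paging-in-progress counter is per-object, not per-page.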