author		Mark Johnston <markj@FreeBSD.org>	2019-07-29 22:01:28 +0000
committer	Mark Johnston <markj@FreeBSD.org>	2019-07-29 22:01:28 +0000
commit		98549e2dc6fb0c38fef2a5357b10c4eb99674d9d (patch)
tree		936bb59b20e13c4792fdc1b407ad6aee087773c8 /sys/kern/vfs_bio.c
parent		724450761630cc0b3d8991ec2de00a8ceb507384 (diff)
Centralize the logic in vfs_vmio_unwire() and sendfile_free_page().
Both of these functions atomically unwire a page, optionally attempt to
free the page, and enqueue or requeue the page.  Add functions
vm_page_release() and vm_page_release_locked() to perform the same task.
The latter must be called with the page's object lock held.

As a side effect of this refactoring, the buffer cache will no longer
attempt to free mapped pages when completing direct I/O.  This is
consistent with the handling of pages by sendfile(SF_NOCACHE).

Reviewed by:	alc, kib
MFC after:	2 weeks
Sponsored by:	Netflix
Differential Revision:	https://reviews.freebsd.org/D20986
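For context, the policy the new helpers implement can be read off the
vfs_vmio_unwire() logic removed below.  What follows is a minimal sketch of
vm_page_release_locked(), assuming it preserves that logic; the mapped-page
check encodes the behavior change noted above, and the real implementation
in sys/vm/vm_page.c may differ in detail.

	/*
	 * Sketch only: atomically unwire a page, optionally try to free it
	 * (VPR_TRYFREE), and otherwise enqueue or requeue it.  The caller
	 * must hold the page's object write lock.
	 */
	void
	vm_page_release_locked(vm_page_t m, int flags)
	{

		VM_OBJECT_ASSERT_WLOCKED(m->object);

		vm_page_lock(m);
		if (vm_page_unwire_noq(m)) {
			/*
			 * Only unmapped pages are candidates for freeing,
			 * consistent with sendfile(SF_NOCACHE).
			 */
			if ((flags & VPR_TRYFREE) == 0 ||
			    pmap_page_is_mapped(m) ||
			    !vm_page_try_to_free(m)) {
				/*
				 * Racy check of the valid bits: accelerate
				 * reclamation of invalid or no-reuse pages,
				 * otherwise maintain LRU ordering.
				 */
				if (m->valid == 0 ||
				    (flags & VPR_NOREUSE) != 0)
					vm_page_deactivate_noreuse(m);
				else if (vm_page_active(m))
					vm_page_reference(m);
				else
					vm_page_deactivate(m);
			}
		}
		vm_page_unlock(m);
	}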
Notes:
	svn path=/head/; revision=350431
Diffstat (limited to 'sys/kern/vfs_bio.c')
-rw-r--r--	sys/kern/vfs_bio.c |  62
1 file changed, 15 insertions(+), 47 deletions(-)
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index e6cb88447061..9dda13677a30 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -2895,47 +2895,6 @@ vfs_vmio_iodone(struct buf *bp)
}
/*
- * Unwire a page held by a buf and either free it or update the page queues to
- * reflect its recent use.
- */
-static void
-vfs_vmio_unwire(struct buf *bp, vm_page_t m)
-{
- bool freed;
-
- vm_page_lock(m);
- if (vm_page_unwire_noq(m)) {
- if ((bp->b_flags & B_DIRECT) != 0)
- freed = vm_page_try_to_free(m);
- else
- freed = false;
- if (!freed) {
- /*
- * Use a racy check of the valid bits to determine
- * whether we can accelerate reclamation of the page.
- * The valid bits will be stable unless the page is
- * being mapped or is referenced by multiple buffers,
- * and in those cases we expect races to be rare. At
- * worst we will either accelerate reclamation of a
- * valid page and violate LRU, or unnecessarily defer
- * reclamation of an invalid page.
- *
- * The B_NOREUSE flag marks data that is not expected to
- * be reused, so accelerate reclamation in that case
- * too. Otherwise, maintain LRU.
- */
- if (m->valid == 0 || (bp->b_flags & B_NOREUSE) != 0)
- vm_page_deactivate_noreuse(m);
- else if (vm_page_active(m))
- vm_page_reference(m);
- else
- vm_page_deactivate(m);
- }
- }
- vm_page_unlock(m);
-}
-
-/*
* Perform page invalidation when a buffer is released. The fully invalid
* pages will be reclaimed later in vfs_vmio_truncate().
*/
@@ -2944,7 +2903,7 @@ vfs_vmio_invalidate(struct buf *bp)
{
vm_object_t obj;
vm_page_t m;
- int i, resid, poffset, presid;
+ int flags, i, resid, poffset, presid;
if (buf_mapped(bp)) {
BUF_CHECK_MAPPED(bp);
@@ -2963,6 +2922,7 @@ vfs_vmio_invalidate(struct buf *bp)
*
* See man buf(9) for more information
*/
+ flags = (bp->b_flags & B_NOREUSE) != 0 ? VPR_NOREUSE : 0;
obj = bp->b_bufobj->bo_object;
resid = bp->b_bufsize;
poffset = bp->b_offset & PAGE_MASK;
@@ -2984,7 +2944,7 @@ vfs_vmio_invalidate(struct buf *bp)
}
if (pmap_page_wired_mappings(m) == 0)
vm_page_set_invalid(m, poffset, presid);
- vfs_vmio_unwire(bp, m);
+ vm_page_release_locked(m, flags);
resid -= presid;
poffset = 0;
}
@@ -3000,7 +2960,7 @@ vfs_vmio_truncate(struct buf *bp, int desiredpages)
{
vm_object_t obj;
vm_page_t m;
- int i;
+ int flags, i;
if (bp->b_npages == desiredpages)
return;
@@ -3015,14 +2975,22 @@ vfs_vmio_truncate(struct buf *bp, int desiredpages)
/*
* The object lock is needed only if we will attempt to free pages.
*/
- obj = (bp->b_flags & B_DIRECT) != 0 ? bp->b_bufobj->bo_object : NULL;
- if (obj != NULL)
+ flags = (bp->b_flags & B_NOREUSE) != 0 ? VPR_NOREUSE : 0;
+ if ((bp->b_flags & B_DIRECT) != 0) {
+ flags |= VPR_TRYFREE;
+ obj = bp->b_bufobj->bo_object;
VM_OBJECT_WLOCK(obj);
+ } else {
+ obj = NULL;
+ }
for (i = desiredpages; i < bp->b_npages; i++) {
m = bp->b_pages[i];
KASSERT(m != bogus_page, ("allocbuf: bogus page found"));
bp->b_pages[i] = NULL;
- vfs_vmio_unwire(bp, m);
+ if (obj != NULL)
+ vm_page_release_locked(m, flags);
+ else
+ vm_page_release(m, flags);
}
if (obj != NULL)
VM_OBJECT_WUNLOCK(obj);
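As a usage note, the caller-side pattern is the one visible in the hunks
above: translate the buffer's policy flags into VPR_* flags, then call the
variant matching the locking state.  A condensed, hypothetical caller:

	int flags;

	/* B_NOREUSE data should be reclaimed ahead of LRU order. */
	flags = (bp->b_flags & B_NOREUSE) != 0 ? VPR_NOREUSE : 0;
	/* Direct I/O pages are worth trying to free outright. */
	if ((bp->b_flags & B_DIRECT) != 0)
		flags |= VPR_TRYFREE;

	if (obj != NULL)
		vm_page_release_locked(m, flags);	/* object lock held */
	else
		vm_page_release(m, flags);		/* no object lock */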