author     Ka Ho Ng <khng@FreeBSD.org>  2021-08-25 21:30:53 +0000
committer  Ka Ho Ng <khng@FreeBSD.org>  2021-09-03 17:21:10 +0000
commit     b31e93bc8d11b3356c1ad9813a5b382a300670b8 (patch)
tree       0c40cffb97d84c3abc5ed239abcb67af304cd68d
parent     f7fe1ce49e7ae131de831e5ed211e0fb36c66014 (diff)
tmpfs: Move partial page invalidation to a separate helper
The partial page invalidation code is factored out to be a separate
helper from tmpfs_reg_resize().

Sponsored by:	The FreeBSD Foundation
Reviewed by:	kib
Differential Revision:	https://reviews.freebsd.org/D31683

(cherry picked from commit 399be91098adb23aa27ca1228b81a3ad67e8bba2)
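For orientation, here is a hedged sketch of the new helper's contract,
inferred from the hunks below (the commit itself adds no header comment):

/*
 * tmpfs_partial_page_invalidate(object, idx, base, end, ignerr)
 *
 * Zero bytes [base, end) of the page at pindex idx in a tmpfs node's
 * backing VM object.  A resident page is zeroed in place; a swapped-
 * out page is paged in first (the object write lock, which the caller
 * must hold, is dropped and reacquired around the pager I/O); a page
 * that exists nowhere needs no zeroing and is skipped.  Returns 0 on
 * success, or EIO when the page-in fails and ignerr is false.
 */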
-rw-r--r--  sys/fs/tmpfs/tmpfs_subr.c  96
1 file changed, 56 insertions(+), 40 deletions(-)
diff --git a/sys/fs/tmpfs/tmpfs_subr.c b/sys/fs/tmpfs/tmpfs_subr.c
index 387bc741e3cf..e746a7455860 100644
--- a/sys/fs/tmpfs/tmpfs_subr.c
+++ b/sys/fs/tmpfs/tmpfs_subr.c
@@ -361,6 +361,57 @@ tmpfs_pages_check_avail(struct tmpfs_mount *tmp, size_t req_pages)
return (1);
}
+static int
+tmpfs_partial_page_invalidate(vm_object_t object, vm_pindex_t idx, int base,
+ int end, boolean_t ignerr)
+{
+ vm_page_t m;
+ int rv, error;
+
+ VM_OBJECT_ASSERT_WLOCKED(object);
+ KASSERT(base >= 0, ("%s: base %d", __func__, base));
+ KASSERT(end - base <= PAGE_SIZE, ("%s: base %d end %d", __func__, base,
+ end));
+ error = 0;
+
+retry:
+ m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
+ if (m != NULL) {
+ MPASS(vm_page_all_valid(m));
+ } else if (vm_pager_has_page(object, idx, NULL, NULL)) {
+ m = vm_page_alloc(object, idx, VM_ALLOC_NORMAL |
+ VM_ALLOC_WAITFAIL);
+ if (m == NULL)
+ goto retry;
+ vm_object_pip_add(object, 1);
+ VM_OBJECT_WUNLOCK(object);
+ rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
+ VM_OBJECT_WLOCK(object);
+ vm_object_pip_wakeup(object);
+ if (rv == VM_PAGER_OK) {
+ /*
+ * Since the page was not resident, and therefore not
+ * recently accessed, immediately enqueue it for
+ * asynchronous laundering. The current operation is
+ * not regarded as an access.
+ */
+ vm_page_launder(m);
+ } else {
+ vm_page_free(m);
+ m = NULL;
+ if (!ignerr)
+ error = EIO;
+ }
+ }
+ if (m != NULL) {
+ pmap_zero_page_area(m, base, end - base);
+ vm_page_set_dirty(m);
+ vm_page_xunbusy(m);
+ }
+
+ return (error);
+}
+
void
tmpfs_ref_node(struct tmpfs_node *node)
{
@@ -1662,10 +1713,9 @@ tmpfs_reg_resize(struct vnode *vp, off_t newsize, boolean_t ignerr)
struct tmpfs_mount *tmp;
struct tmpfs_node *node;
vm_object_t uobj;
- vm_page_t m;
vm_pindex_t idx, newpages, oldpages;
off_t oldsize;
- int base, rv;
+ int base, error;
MPASS(vp->v_type == VREG);
MPASS(newsize >= 0);
@@ -1702,45 +1752,11 @@ tmpfs_reg_resize(struct vnode *vp, off_t newsize, boolean_t ignerr)
base = newsize & PAGE_MASK;
if (base != 0) {
idx = OFF_TO_IDX(newsize);
-retry:
- m = vm_page_grab(uobj, idx, VM_ALLOC_NOCREAT);
- if (m != NULL) {
- MPASS(vm_page_all_valid(m));
- } else if (vm_pager_has_page(uobj, idx, NULL, NULL)) {
- m = vm_page_alloc(uobj, idx, VM_ALLOC_NORMAL |
- VM_ALLOC_WAITFAIL);
- if (m == NULL)
- goto retry;
- vm_object_pip_add(uobj, 1);
+ error = tmpfs_partial_page_invalidate(uobj, idx, base,
+ PAGE_SIZE, ignerr);
+ if (error != 0) {
VM_OBJECT_WUNLOCK(uobj);
- rv = vm_pager_get_pages(uobj, &m, 1, NULL,
- NULL);
- VM_OBJECT_WLOCK(uobj);
- vm_object_pip_wakeup(uobj);
- if (rv == VM_PAGER_OK) {
- /*
- * Since the page was not resident,
- * and therefore not recently
- * accessed, immediately enqueue it
- * for asynchronous laundering. The
- * current operation is not regarded
- * as an access.
- */
- vm_page_launder(m);
- } else {
- vm_page_free(m);
- if (ignerr)
- m = NULL;
- else {
- VM_OBJECT_WUNLOCK(uobj);
- return (EIO);
- }
- }
- }
- if (m != NULL) {
- pmap_zero_page_area(m, base, PAGE_SIZE - base);
- vm_page_set_dirty(m);
- vm_page_xunbusy(m);
+ return (error);
}
}
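
A note on the refactor's error handling: the old inline code unlocked the
object and returned EIO from deep inside tmpfs_reg_resize() when a page-in
failed with ignerr false. The helper instead frees the page, records EIO,
and returns it, so the single check after the
tmpfs_partial_page_invalidate() call is now the only place that drops the
lock and bails out. The helper also gains assertions the inline version
lacked: the object must be write-locked, base must be non-negative, and
end - base must not exceed PAGE_SIZE.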