aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMateusz Guzik <mjg@FreeBSD.org>2021-05-14 19:01:32 +0000
committerMateusz Guzik <mjg@FreeBSD.org>2021-05-22 18:22:30 +0000
commite9757b0c94c02db1a2a154089af3ec0e4e8b3b1a (patch)
treee5491e17f7f4eb1274f238de4f12c573801652d5
parentdf6533f9b40ab5480aec3990561f4f50c94351d1 (diff)
downloadsrc-e9757b0c94c02db1a2a154089af3ec0e4e8b3b1a.tar.gz
src-e9757b0c94c02db1a2a154089af3ec0e4e8b3b1a.zip
vfs: refactor vdrop
In particular move vunlazy into its own routine. (cherry picked from commit cc6f46ac2fd5d910e632fced3f21d0b0f53030d8)
-rw-r--r--sys/kern/vfs_subr.c85
1 file changed, 40 insertions(+), 45 deletions(-)
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 53c9c12be34c..270dab02bf39 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -3059,6 +3059,31 @@ vlazy(struct vnode *vp)
mtx_unlock(&mp->mnt_listmtx);
}
+static void
+vunlazy(struct vnode *vp)
+{
+ struct mount *mp;
+
+ ASSERT_VI_LOCKED(vp, __func__);
+ VNPASS(!VN_IS_DOOMED(vp), vp);
+
+ mp = vp->v_mount;
+ mtx_lock(&mp->mnt_listmtx);
+ VNPASS(vp->v_mflag & VMP_LAZYLIST, vp);
+ /*
+ * Don't remove the vnode from the lazy list if another thread
+ * has increased the hold count. It may have re-enqueued the
+ * vnode to the lazy list and is now responsible for its
+ * removal.
+ */
+ if (vp->v_holdcnt == 0) {
+ vp->v_mflag &= ~VMP_LAZYLIST;
+ TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist);
+ mp->mnt_lazyvnodelistsize--;
+ }
+ mtx_unlock(&mp->mnt_listmtx);
+}
+
/*
* This routine is only meant to be called from vgonel prior to dooming
* the vnode.
@@ -3514,42 +3539,6 @@ vdbatch_dequeue(struct vnode *vp)
* there is at least one resident non-cached page, the vnode cannot
* leave the active list without the page cleanup done.
*/
-static void
-vdrop_deactivate(struct vnode *vp)
-{
- struct mount *mp;
-
- ASSERT_VI_LOCKED(vp, __func__);
- /*
- * Mark a vnode as free: remove it from its active list
- * and put it up for recycling on the freelist.
- */
- VNASSERT(!VN_IS_DOOMED(vp), vp,
- ("vdrop: returning doomed vnode"));
- VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp,
- ("vnode with VI_OWEINACT set"));
- VNASSERT((vp->v_iflag & VI_DEFINACT) == 0, vp,
- ("vnode with VI_DEFINACT set"));
- if (vp->v_mflag & VMP_LAZYLIST) {
- mp = vp->v_mount;
- mtx_lock(&mp->mnt_listmtx);
- VNASSERT(vp->v_mflag & VMP_LAZYLIST, vp, ("lost VMP_LAZYLIST"));
- /*
- * Don't remove the vnode from the lazy list if another thread
- * has increased the hold count. It may have re-enqueued the
- * vnode to the lazy list and is now responsible for its
- * removal.
- */
- if (vp->v_holdcnt == 0) {
- vp->v_mflag &= ~VMP_LAZYLIST;
- TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist);
- mp->mnt_lazyvnodelistsize--;
- }
- mtx_unlock(&mp->mnt_listmtx);
- }
- vdbatch_enqueue(vp);
-}
-
static void __noinline
vdropl_final(struct vnode *vp)
{
@@ -3599,17 +3588,23 @@ vdropl(struct vnode *vp)
VI_UNLOCK(vp);
return;
}
- if (!VN_IS_DOOMED(vp)) {
- vfs_freevnodes_inc();
- vdrop_deactivate(vp);
- /*
- * Also unlocks the interlock. We can't assert on it as we
- * released our hold and by now the vnode might have been
- * freed.
- */
+ VNPASS((vp->v_iflag & VI_OWEINACT) == 0, vp);
+ VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp);
+ if (VN_IS_DOOMED(vp)) {
+ vdropl_final(vp);
return;
}
- vdropl_final(vp);
+
+ vfs_freevnodes_inc();
+ if (vp->v_mflag & VMP_LAZYLIST) {
+ vunlazy(vp);
+ }
+ /*
+ * Also unlocks the interlock. We can't assert on it as we
+ * released our hold and by now the vnode might have been
+ * freed.
+ */
+ vdbatch_enqueue(vp);
}
/*