path: root/sys/sys/mount.h
author	Mateusz Guzik <mjg@FreeBSD.org>	2020-01-13 02:34:02 +0000
committer	Mateusz Guzik <mjg@FreeBSD.org>	2020-01-13 02:34:02 +0000
commit	57083d257635c2a102f72141bd41c7b8e3d0f31c (patch)
tree	e37f9584fcfbe5d4a2d1f73d9fc3f5c58a47dc1b /sys/sys/mount.h
parent	ac4ec14188d6812590b45f341f1f5a93def44517 (diff)
vfs: add per-mount vnode lazy list and use it for deferred inactive + msync
This obviates the need to scan the entire active list when looking for
vnodes of interest.

msync is handled by adding all vnodes with a write count to the lazy
list. Deferred inactive processing adds vnodes directly as it sets the
VI_DEFINACT flag. Vnodes get dequeued from the list when their hold
count reaches 0.

The newly added MNT_VNODE_FOREACH_LAZY* macros support filtering so
that spurious locking is avoided in the common case.

Reviewed by:	jeff
Tested by:	pho (in a larger patch, previous version)
Differential Revision:	https://reviews.freebsd.org/D22995
Notes: svn path=/head/; revision=356670
Diffstat (limited to 'sys/sys/mount.h')
-rw-r--r--	sys/sys/mount.h	20
1 file changed, 20 insertions(+), 0 deletions(-)
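
The filtering mentioned in the commit message works through the
mnt_lazy_cb_t callback type added in this header: the iterator invokes
the callback for each candidate vnode and skips, without further
locking, any vnode for which it returns zero. As a rough illustration
of that contract (a minimal sketch; the function name and the
OBJ_MIGHTBEDIRTY test are assumptions for illustration, not taken from
this commit), an msync-style filter could look like:

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <vm/vm.h>
#include <vm/vm_object.h>

/*
 * Illustrative mnt_lazy_cb_t: report only vnodes whose backing VM
 * object may hold dirty pages, letting the iterator pass over all
 * other lazy-list entries without taking their interlocks.
 */
static int
example_msync_filter(struct vnode *vp, void *arg __unused)
{

	return (vp->v_object != NULL &&
	    (vp->v_object->flags & OBJ_MIGHTBEDIRTY) != 0);
}
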
diff --git a/sys/sys/mount.h b/sys/sys/mount.h
index 7ec681dbac00..fdee238d1a3e 100644
--- a/sys/sys/mount.h
+++ b/sys/sys/mount.h
@@ -223,6 +223,8 @@ struct mount {
int mnt_activevnodelistsize;/* (l) # of active vnodes */
struct vnodelst mnt_tmpfreevnodelist; /* (l) list of free vnodes */
int mnt_tmpfreevnodelistsize;/* (l) # of free vnodes */
+ struct vnodelst mnt_lazyvnodelist; /* (l) list of lazy vnodes */
+ int mnt_lazyvnodelistsize; /* (l) # of lazy vnodes */
struct lock mnt_explock; /* vfs_export walkers lock */
TAILQ_ENTRY(mount) mnt_upper_link; /* (m) we in the all uppers */
TAILQ_HEAD(, mount) mnt_uppers; /* (m) upper mounts over us*/
@@ -267,6 +269,24 @@ void __mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *);
#define MNT_VNODE_FOREACH_ACTIVE_ABORT(mp, mvp) \
__mnt_vnode_markerfree_active(&(mvp), (mp))
+/*
+ * Definitions for MNT_VNODE_FOREACH_LAZY.
+ */
+typedef int mnt_lazy_cb_t(struct vnode *, void *);
+struct vnode *__mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp,
+ mnt_lazy_cb_t *cb, void *cbarg);
+struct vnode *__mnt_vnode_first_lazy(struct vnode **mvp, struct mount *mp,
+ mnt_lazy_cb_t *cb, void *cbarg);
+void __mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp);
+
+#define MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, cb, cbarg) \
+ for (vp = __mnt_vnode_first_lazy(&(mvp), (mp), (cb), (cbarg)); \
+ (vp) != NULL; \
+ vp = __mnt_vnode_next_lazy(&(mvp), (mp), (cb), (cbarg)))
+
+#define MNT_VNODE_FOREACH_LAZY_ABORT(mp, mvp) \
+ __mnt_vnode_markerfree_lazy(&(mvp), (mp))
+
#define MNT_ILOCK(mp) mtx_lock(&(mp)->mnt_mtx)
#define MNT_ITRYLOCK(mp) mtx_trylock(&(mp)->mnt_mtx)
#define MNT_IUNLOCK(mp) mtx_unlock(&(mp)->mnt_mtx)
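
To show how the pieces fit together, here is a hypothetical consumer of
the new macros. The helper names are invented, and the assumption that
the iterator returns each vnode with its interlock held mirrors the
existing MNT_VNODE_FOREACH_ACTIVE contract rather than anything stated
in this diff:

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/vnode.h>

/*
 * Illustrative filter: match vnodes flagged for deferred inactivation.
 * VI_DEFINACT is the flag named in the commit message.
 */
static int
example_definact_filter(struct vnode *vp, void *arg __unused)
{

	return ((vp->v_iflag & VI_DEFINACT) != 0);
}

/*
 * Walk the lazy list, processing at most "limit" vnodes.  On early
 * exit the iteration marker must be freed via
 * MNT_VNODE_FOREACH_LAZY_ABORT or it would linger on the list.
 */
static void
example_process_deferred(struct mount *mp, int limit)
{
	struct vnode *vp, *mvp;

	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, example_definact_filter, NULL) {
		if (limit-- <= 0) {
			MNT_VNODE_FOREACH_LAZY_ABORT(mp, mvp);
			break;
		}
		/* ... per-vnode work goes here ... */
		VI_UNLOCK(vp);	/* assumed held on return, as with ACTIVE */
	}
}

Because the filter runs inside the iterator, a mount with many lazy
vnodes but few matches is traversed with only the list lock held in
the common case, which is the "spurious locking is avoided" point from
the commit message.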