-rw-r--r--  sys/kern/vfs_cache.c  16
-rw-r--r--  sys/sys/mount.h       25
2 files changed, 27 insertions, 14 deletions
diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c
index 81ad03c9fd84..d457b199c6ce 100644
--- a/sys/kern/vfs_cache.c
+++ b/sys/kern/vfs_cache.c
@@ -3484,29 +3484,29 @@ cache_fplookup_climb_mount(struct cache_fpl *fpl)
 
 	prev_mp = NULL;
 	for (;;) {
-		if (!vfs_op_thread_enter(mp)) {
+		if (!vfs_op_thread_enter_crit(mp)) {
 			if (prev_mp != NULL)
-				vfs_op_thread_exit(prev_mp);
+				vfs_op_thread_exit_crit(prev_mp);
 			return (cache_fpl_partial(fpl));
 		}
 		if (prev_mp != NULL)
-			vfs_op_thread_exit(prev_mp);
+			vfs_op_thread_exit_crit(prev_mp);
 		if (!vn_seqc_consistent(vp, vp_seqc)) {
-			vfs_op_thread_exit(mp);
+			vfs_op_thread_exit_crit(mp);
 			return (cache_fpl_partial(fpl));
 		}
 		if (!cache_fplookup_mp_supported(mp)) {
-			vfs_op_thread_exit(mp);
+			vfs_op_thread_exit_crit(mp);
 			return (cache_fpl_partial(fpl));
 		}
 		vp = atomic_load_ptr(&mp->mnt_rootvnode);
 		if (vp == NULL || VN_IS_DOOMED(vp)) {
-			vfs_op_thread_exit(mp);
+			vfs_op_thread_exit_crit(mp);
 			return (cache_fpl_partial(fpl));
 		}
 		vp_seqc = vn_seqc_read_any(vp);
 		if (seqc_in_modify(vp_seqc)) {
-			vfs_op_thread_exit(mp);
+			vfs_op_thread_exit_crit(mp);
 			return (cache_fpl_partial(fpl));
 		}
 		prev_mp = mp;
@@ -3515,7 +3515,7 @@ cache_fplookup_climb_mount(struct cache_fpl *fpl)
 			break;
 	}
 
-	vfs_op_thread_exit(prev_mp);
+	vfs_op_thread_exit_crit(prev_mp);
 	fpl->tvp = vp;
 	fpl->tvp_seqc = vp_seqc;
 	return (0);
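
The change to cache_fplookup_climb_mount() above swaps every
vfs_op_thread_enter()/vfs_op_thread_exit() pair for the new *_crit variants,
which expect the calling thread to already be inside a critical section (see
the MPASS(curthread->td_critnest > 0) added in sys/sys/mount.h below).  The
loop hands the per-mount marker from one mount to the mount stacked on top of
it, entering the upper mount before exiting the lower one, and any failed
check drops the marker and punts to the slow path via cache_fpl_partial().
Below is a minimal userspace sketch of that handoff pattern, not kernel code:
struct smount, op_enter_crit(), op_exit_crit() and climb() are hypothetical
stand-ins for struct mount, the vfs_op_thread_*_crit() macros and the function
above, and the single-threaded example only illustrates the enter/exit
pairing, not the real concurrency.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct smount {
	bool		in_ops;		/* stand-in for mnt_thread_in_ops_pcpu */
	int		vfs_ops;	/* stand-in for mnt_vfs_ops */
	struct smount	*covering;	/* mount stacked on top, if any */
};

/*
 * Rough analogue of vfs_op_thread_enter_crit(): mark the mount, then back
 * off if a slow-path vfs op is pending.
 */
static bool
op_enter_crit(struct smount *mp)
{

	mp->in_ops = true;
	if (mp->vfs_ops > 0) {
		mp->in_ops = false;
		return (false);
	}
	return (true);
}

/* Rough analogue of vfs_op_thread_exit_crit(). */
static void
op_exit_crit(struct smount *mp)
{

	mp->in_ops = false;
}

/* Climb to the topmost mount, handing the marker from mount to mount. */
static struct smount *
climb(struct smount *mp)
{
	struct smount *prev_mp;

	prev_mp = NULL;
	for (;;) {
		if (!op_enter_crit(mp)) {
			if (prev_mp != NULL)
				op_exit_crit(prev_mp);
			return (NULL);	/* caller would fall back to the slow path */
		}
		if (prev_mp != NULL)
			op_exit_crit(prev_mp);
		prev_mp = mp;
		if (mp->covering == NULL)
			break;
		mp = mp->covering;
	}
	/*
	 * The real code also drops the marker here and relies on vnode seqc
	 * validation afterwards.
	 */
	op_exit_crit(prev_mp);
	return (mp);
}

int
main(void)
{
	struct smount top = { .covering = NULL };
	struct smount bottom = { .covering = &top };

	printf("climbed to top: %s\n", climb(&bottom) == &top ? "yes" : "no");
	return (0);
}
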
diff --git a/sys/sys/mount.h b/sys/sys/mount.h
index a3bc0518a7ea..f2ce078f2f81 100644
--- a/sys/sys/mount.h
+++ b/sys/sys/mount.h
@@ -1023,23 +1023,36 @@ int vfs_mount_fetch_counter(struct mount *, enum mount_counter);
 	*zpcpu_get(mp->mnt_thread_in_ops_pcpu) == 1; \
 })
 
-#define vfs_op_thread_enter(mp) ({ \
-	bool _retval = true; \
-	critical_enter(); \
+#define vfs_op_thread_enter_crit(mp) ({ \
+	bool _retval_crit = true; \
+	MPASS(curthread->td_critnest > 0); \
 	MPASS(!vfs_op_thread_entered(mp)); \
 	zpcpu_set_protected(mp->mnt_thread_in_ops_pcpu, 1); \
 	__compiler_membar(); \
 	if (__predict_false(mp->mnt_vfs_ops > 0)) { \
-		vfs_op_thread_exit(mp); \
-		_retval = false; \
+		vfs_op_thread_exit_crit(mp); \
+		_retval_crit = false; \
 	} \
+	_retval_crit; \
+})
+
+#define vfs_op_thread_enter(mp) ({ \
+	bool _retval; \
+	critical_enter(); \
+	_retval = vfs_op_thread_enter_crit(mp); \
+	if (__predict_false(!_retval)) \
+		critical_exit(); \
 	_retval; \
 })
 
-#define vfs_op_thread_exit(mp) do { \
+#define vfs_op_thread_exit_crit(mp) do { \
 	MPASS(vfs_op_thread_entered(mp)); \
 	__compiler_membar(); \
 	zpcpu_set_protected(mp->mnt_thread_in_ops_pcpu, 0); \
+} while (0)
+
+#define vfs_op_thread_exit(mp) do { \
+	vfs_op_thread_exit_crit(mp); \
 	critical_exit(); \
 } while (0)
 
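
The sys/sys/mount.h hunk splits each primitive into a core that relies on an
already-held critical section (vfs_op_thread_enter_crit() /
vfs_op_thread_exit_crit()) and a wrapper that manages the critical section
itself (vfs_op_thread_enter() / vfs_op_thread_exit()), with the wrapper's
enter dropping the critical section again when the fast path is refused.
Callers that already hold a critical section, such as the lockless lookup
change above (which now asserts one via MPASS(curthread->td_critnest > 0)),
can therefore avoid a redundant critical_enter()/critical_exit() per mount.
The fragment below is a rough, kernel-style usage sketch, not part of the
patch; some_vfs_op() and some_vfs_op_in_crit() are hypothetical callers and
the snippet only shows which enter pairs with which exit.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>

/* Hypothetical caller: the plain wrappers manage the critical section. */
static bool
some_vfs_op(struct mount *mp)
{

	if (!vfs_op_thread_enter(mp))
		return (false);		/* mnt_vfs_ops > 0: take the slow path */
	/* ... per-CPU fast-path work ... */
	vfs_op_thread_exit(mp);
	return (true);
}

/* Hypothetical caller that already holds a critical section. */
static bool
some_vfs_op_in_crit(struct mount *mp)
{
	bool ok;

	critical_enter();
	/* ... other work that wants the same critical section ... */
	ok = vfs_op_thread_enter_crit(mp);	/* asserts td_critnest > 0 */
	if (ok) {
		/* ... per-CPU fast-path work ... */
		vfs_op_thread_exit_crit(mp);	/* leaves the critical section alone */
	}
	critical_exit();
	return (ok);
}
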