path: root/sys/sys/mount.h
author     Mateusz Guzik <mjg@FreeBSD.org>  2020-02-12 11:17:45 +0000
committer  Mateusz Guzik <mjg@FreeBSD.org>  2020-02-12 11:17:45 +0000
commit     123c51973174fd8250f835975b0bb56bdd93e657 (patch)
tree       c3da070daaa29a308ddec4b3ff1afaab652a0f3c /sys/sys/mount.h
parent     00ac9d2632391c51891d45d97c7cf0a761c1ba84 (diff)
vfs: switch to smp_rendezvous_cpus_retry for vfs_op_thread_enter/exit
In particular on amd64 this eliminates an atomic op in the common case, trading it for IPIs in the uncommon case of catching CPUs executing the code while the filesystem is getting suspended or unmounted.
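For orientation, below is a minimal sketch of the slow path this commit trades against. It is not the actual vfs_mount.c code: the helpers suffixed _sketch are hypothetical, and the real implementation delegates the retry loop to smp_rendezvous_cpus_retry() rather than open-coding it. The thread taking mnt_vfs_ops from 0 to > 0 makes the counter visible first and then keeps interrupting the other CPUs until none of them is observed inside the vfs_op_thread_enter/exit section; the IPI itself supplies the ordering the fast path no longer pays an atomic for.

/*
 * Sketch only; hypothetical helper names, not the vfs_mount.c code.
 * The suspend/unmount side bumps mnt_vfs_ops and then interrupts the
 * other CPUs; taking the interrupt acts as a full barrier on each
 * target, so afterwards every CPU either sees mnt_vfs_ops > 0 when
 * entering or has already cleared its per-CPU flag.
 */
static bool
vfs_any_cpu_in_ops_sketch(struct mount *mp)
{
	int cpu;

	CPU_FOREACH(cpu) {
		if (*zpcpu_get_cpu(mp->mnt_thread_in_ops_pcpu, cpu) != 0)
			return (true);
	}
	return (false);
}

static void
vfs_op_enter_sketch(struct mount *mp)
{

	MNT_ILOCK(mp);
	mp->mnt_vfs_ops++;
	MNT_IUNLOCK(mp);

	/*
	 * Re-check after every round of IPIs; the real code lets
	 * smp_rendezvous_cpus_retry() drive this loop.
	 */
	do {
		vfs_ipi_all_other_cpus_sketch();	/* hypothetical stand-in */
	} while (vfs_any_cpu_in_ops_sketch(mp));
}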
Notes:
    svn path=/head/; revision=357810
Diffstat (limited to 'sys/sys/mount.h')
-rw-r--r--  sys/sys/mount.h | 13
1 file changed, 4 insertions(+), 9 deletions(-)
diff --git a/sys/sys/mount.h b/sys/sys/mount.h
index f5e31fb18d84..394c2f2f18cd 100644
--- a/sys/sys/mount.h
+++ b/sys/sys/mount.h
@@ -983,13 +983,8 @@ enum mount_counter { MNT_COUNT_REF, MNT_COUNT_LOCKREF, MNT_COUNT_WRITEOPCOUNT };
int vfs_mount_fetch_counter(struct mount *, enum mount_counter);
/*
- * We mark ourselves as entering the section and post a sequentially consistent
- * fence, meaning the store is completed before we get into the section and
- * mnt_vfs_ops is only read afterwards.
- *
- * Any thread transitioning the ops counter 0->1 does things in the opposite
- * order - first bumps the count, posts a sequentially consistent fence and
- * observes all CPUs not executing within the section.
+ * Code transitioning mnt_vfs_ops to > 0 issues IPIs until it observes
+ * all CPUs not executing code enclosed by mnt_thread_in_ops_pcpu.
*
* This provides an invariant that by the time the last CPU is observed not
* executing, everyone else entering will see the counter > 0 and exit.
@@ -1009,7 +1004,7 @@ int vfs_mount_fetch_counter(struct mount *, enum mount_counter);
critical_enter(); \
MPASS(!vfs_op_thread_entered(mp)); \
zpcpu_set_protected(mp->mnt_thread_in_ops_pcpu, 1); \
- atomic_thread_fence_seq_cst(); \
+ __compiler_membar(); \
if (__predict_false(mp->mnt_vfs_ops > 0)) { \
vfs_op_thread_exit(mp); \
_retval = false; \
@@ -1019,7 +1014,7 @@ int vfs_mount_fetch_counter(struct mount *, enum mount_counter);
#define vfs_op_thread_exit(mp) do { \
MPASS(vfs_op_thread_entered(mp)); \
- atomic_thread_fence_rel(); \
+ __compiler_membar(); \
zpcpu_set_protected(mp->mnt_thread_in_ops_pcpu, 0); \
critical_exit(); \
} while (0)
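For completeness, a sketch of how a consumer is expected to use the pair after this change (illustrative only; the _sketch helpers are hypothetical). The common case costs a critical section, a per-CPU store and a compiler barrier; only when mnt_vfs_ops is already > 0, i.e. a suspend or unmount is in flight, does the caller drop back to the interlock-protected slow path.

/* Illustrative caller, not code from the tree. */
static void
vfs_do_op_sketch(struct mount *mp)
{

	if (vfs_op_thread_enter(mp)) {
		/* Fast path: the IPI rendezvous serializes us against the 0->1 transition. */
		vfs_fast_work_sketch(mp);
		vfs_op_thread_exit(mp);
		return;
	}
	/* Slow path: mnt_vfs_ops > 0, fall back to the mount interlock. */
	MNT_ILOCK(mp);
	vfs_locked_work_sketch(mp);
	MNT_IUNLOCK(mp);
}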