author     Konstantin Belousov <kib@FreeBSD.org>    2021-02-26 23:54:17 +0000
committer  Konstantin Belousov <kib@FreeBSD.org>    2021-03-06 02:06:55 +0000
commit     c10e0a4656bfe650bb3bf46fcf026a8a51a34a69 (patch)
tree       e85f53d07f90f01365016a5d83fdcf2aec1206c0
parent     a552da13fc8f803ffc51755436441f3c14e3c817 (diff)
Use atomic_interrupt_fence() instead of bare __compiler_membar()
(cherry picked from commit b5449c92b489445635c7962875ce73b2c9211bba)
-rw-r--r--  sys/kern/kern_rmlock.c | 32
-rw-r--r--  sys/sys/kpilite.h      |  5
-rw-r--r--  sys/sys/mount.h        |  4
-rw-r--r--  sys/sys/sched.h        |  4
-rw-r--r--  sys/sys/systm.h        |  6
5 files changed, 24 insertions, 27 deletions
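For reference, atomic_interrupt_fence() names the intent of these barriers: ordering the current CPU's accesses against its own interrupt and IPI handlers, not against other CPUs. A minimal sketch of how the two primitives relate, assuming the common case where the interrupt fence reduces to a compiler barrier; the spellings and comments below are illustrative, not copied from the tree (see sys/cdefs.h and the atomic headers for the authoritative definitions):

/*
 * Illustrative definitions only.
 *
 * Compiler barrier: the compiler may not move memory accesses across it.
 */
#define	__compiler_membar()	__asm __volatile(" " : : : "memory")

/*
 * Interrupt fence: order this CPU against its own interrupt/IPI handlers.
 * A handler already observes the interrupted code's accesses in program
 * order, so on typical targets only compiler reordering has to be
 * prevented and the fence can expand to a plain compiler barrier.
 */
#define	atomic_interrupt_fence()	__compiler_membar()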
diff --git a/sys/kern/kern_rmlock.c b/sys/kern/kern_rmlock.c
index 9135709d88cf..f661e209b633 100644
--- a/sys/kern/kern_rmlock.c
+++ b/sys/kern/kern_rmlock.c
@@ -366,7 +366,7 @@ _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
* Check to see if the IPI granted us the lock after all. The load of
* rmp_flags must happen after the tracker is removed from the list.
*/
- __compiler_membar();
+ atomic_interrupt_fence();
if (tracker->rmp_flags) {
/* Just add back tracker - we hold the lock. */
rm_tracker_add(pc, tracker);
@@ -448,7 +448,7 @@ _rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
td->td_critnest++; /* critical_enter(); */
- __compiler_membar();
+ atomic_interrupt_fence();
pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
@@ -456,7 +456,7 @@ _rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
sched_pin();
- __compiler_membar();
+ atomic_interrupt_fence();
td->td_critnest--;
@@ -873,17 +873,15 @@ db_show_rm(const struct lock_object *lock)
* Concurrent writers take turns taking the lock while going off cpu. If this is
* of concern for your usecase, this is not the right primitive.
*
- * Neither rms_rlock nor rms_runlock use fences. Instead compiler barriers are
- * inserted to prevert reordering of generated code. Execution ordering is
- * provided with the use of an IPI handler.
+ * Neither rms_rlock nor rms_runlock use thread fences. Instead interrupt
+ * fences are inserted to ensure ordering with the code executed in the IPI
+ * handler.
*
* No attempt is made to track which CPUs read locked at least once,
* consequently write locking sends IPIs to all of them. This will become a
* problem at some point. The easiest way to lessen it is to provide a bitmap.
*/
-#define rms_int_membar() __compiler_membar()
-
#define RMS_NOOWNER ((void *)0x1)
#define RMS_TRANSIENT ((void *)0x2)
#define RMS_FLAGMASK 0xf
@@ -1030,14 +1028,14 @@ rms_rlock(struct rmslock *rms)
critical_enter();
pcpu = rms_int_pcpu(rms);
rms_int_influx_enter(rms, pcpu);
- rms_int_membar();
+ atomic_interrupt_fence();
if (__predict_false(rms->writers > 0)) {
rms_rlock_fallback(rms);
return;
}
- rms_int_membar();
+ atomic_interrupt_fence();
rms_int_readers_inc(rms, pcpu);
- rms_int_membar();
+ atomic_interrupt_fence();
rms_int_influx_exit(rms, pcpu);
critical_exit();
}
@@ -1052,15 +1050,15 @@ rms_try_rlock(struct rmslock *rms)
critical_enter();
pcpu = rms_int_pcpu(rms);
rms_int_influx_enter(rms, pcpu);
- rms_int_membar();
+ atomic_interrupt_fence();
if (__predict_false(rms->writers > 0)) {
rms_int_influx_exit(rms, pcpu);
critical_exit();
return (0);
}
- rms_int_membar();
+ atomic_interrupt_fence();
rms_int_readers_inc(rms, pcpu);
- rms_int_membar();
+ atomic_interrupt_fence();
rms_int_influx_exit(rms, pcpu);
critical_exit();
return (1);
@@ -1092,14 +1090,14 @@ rms_runlock(struct rmslock *rms)
critical_enter();
pcpu = rms_int_pcpu(rms);
rms_int_influx_enter(rms, pcpu);
- rms_int_membar();
+ atomic_interrupt_fence();
if (__predict_false(rms->writers > 0)) {
rms_runlock_fallback(rms);
return;
}
- rms_int_membar();
+ atomic_interrupt_fence();
rms_int_readers_dec(rms, pcpu);
- rms_int_membar();
+ atomic_interrupt_fence();
rms_int_influx_exit(rms, pcpu);
critical_exit();
}
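The block comment updated above captures the design: the rms read paths never issue real (thread) fences; they publish per-CPU state inside a critical section and use interrupt fences so that the write side's IPI handler, which runs on the same CPU, observes that state in program order. A condensed sketch of the read-lock fast path follows, with hypothetical structure, field, and helper names standing in for the rmslock internals (kernel context assumed):

#include <sys/param.h>
#include <sys/systm.h>		/* critical_enter(), critical_exit() */
#include <machine/atomic.h>	/* atomic_interrupt_fence() */

struct rms_pcpu_sketch {
	int	influx;		/* this CPU is inside rlock/runlock */
	int	readers;	/* per-CPU read-lock count */
};

static void
rms_rlock_sketch(struct rms_pcpu_sketch *pcpu, volatile int *writers,
    void (*fallback)(void))
{
	critical_enter();		/* stay on this CPU, block preemption */
	pcpu->influx = 1;		/* tell the write-side IPI we are in flux */
	atomic_interrupt_fence();	/* influx store before the writers load */
	if (__predict_false(*writers > 0)) {
		fallback();		/* slow path; it clears influx itself */
		return;
	}
	atomic_interrupt_fence();	/* writers load before the readers update */
	pcpu->readers++;
	atomic_interrupt_fence();	/* readers update before clearing influx */
	pcpu->influx = 0;
	critical_exit();
}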
diff --git a/sys/sys/kpilite.h b/sys/sys/kpilite.h
index 8742ef5cfbe8..2a7e9743f799 100644
--- a/sys/sys/kpilite.h
+++ b/sys/sys/kpilite.h
@@ -38,7 +38,7 @@ sched_pin_lite(struct thread_lite *td)
KASSERT((struct thread *)td == curthread, ("sched_pin called on non curthread"));
td->td_pinned++;
- __compiler_membar();
+ atomic_interrupt_fence();
}
static __inline void
@@ -47,9 +47,8 @@ sched_unpin_lite(struct thread_lite *td)
KASSERT((struct thread *)td == curthread, ("sched_unpin called on non curthread"));
KASSERT(td->td_pinned > 0, ("sched_unpin called on non pinned thread"));
- __compiler_membar();
+ atomic_interrupt_fence();
td->td_pinned--;
- __compiler_membar();
}
#endif
#endif
diff --git a/sys/sys/mount.h b/sys/sys/mount.h
index f965dd72d7ba..98d50161bed5 100644
--- a/sys/sys/mount.h
+++ b/sys/sys/mount.h
@@ -1086,7 +1086,7 @@ void resume_all_fs(void);
_mpcpu = vfs_mount_pcpu(mp); \
MPASS(mpcpu->mntp_thread_in_ops == 0); \
_mpcpu->mntp_thread_in_ops = 1; \
- __compiler_membar(); \
+ atomic_interrupt_fence(); \
if (__predict_false(mp->mnt_vfs_ops > 0)) { \
vfs_op_thread_exit_crit(mp, _mpcpu); \
_retval_crit = false; \
@@ -1106,7 +1106,7 @@ void resume_all_fs(void);
#define vfs_op_thread_exit_crit(mp, _mpcpu) do { \
MPASS(_mpcpu == vfs_mount_pcpu(mp)); \
MPASS(_mpcpu->mntp_thread_in_ops == 1); \
- __compiler_membar(); \
+ atomic_interrupt_fence(); \
_mpcpu->mntp_thread_in_ops = 0; \
} while (0)
diff --git a/sys/sys/sched.h b/sys/sys/sched.h
index d734ec61266e..64651ffa9c90 100644
--- a/sys/sys/sched.h
+++ b/sys/sys/sched.h
@@ -173,13 +173,13 @@ static __inline void
sched_pin(void)
{
curthread->td_pinned++;
- __compiler_membar();
+ atomic_interrupt_fence();
}
static __inline void
sched_unpin(void)
{
- __compiler_membar();
+ atomic_interrupt_fence();
curthread->td_pinned--;
}
diff --git a/sys/sys/systm.h b/sys/sys/systm.h
index 5de12e5bc1e5..72a10c401af9 100644
--- a/sys/sys/systm.h
+++ b/sys/sys/systm.h
@@ -284,7 +284,7 @@ critical_enter(void)
td = (struct thread_lite *)curthread;
td->td_critnest++;
- __compiler_membar();
+ atomic_interrupt_fence();
}
static __inline void
@@ -295,9 +295,9 @@ critical_exit(void)
td = (struct thread_lite *)curthread;
KASSERT(td->td_critnest != 0,
("critical_exit: td_critnest == 0"));
- __compiler_membar();
+ atomic_interrupt_fence();
td->td_critnest--;
- __compiler_membar();
+ atomic_interrupt_fence();
if (__predict_false(td->td_owepreempt))
critical_exit_preempt();