about summary refs log tree commit diff
path: root/sys/kern/subr_smr.c
diff options
context:
space:
mode:
authorJeff Roberson <jeff@FreeBSD.org>2020-02-04 02:44:52 +0000
committerJeff Roberson <jeff@FreeBSD.org>2020-02-04 02:44:52 +0000
commitbc6509845d454643cd65f0d640ed4412ecfcee2a (patch)
tree1f59f46d8ad9b8f38bb6936349dcec8496a383cb /sys/kern/subr_smr.c
parentc8ea36e881bbd56cfd2aadb8fd1f4da0dfcd19ed (diff)
downloadsrc-bc6509845d454643cd65f0d640ed4412ecfcee2a.tar.gz
src-bc6509845d454643cd65f0d640ed4412ecfcee2a.zip
Implement a deferred write advancement feature that can be used to further
amortize shared cacheline writes.

Discussed with: rlibby
Differential Revision: https://reviews.freebsd.org/D23462
Notes
Notes: svn path=/head/; revision=357487
Diffstat (limited to 'sys/kern/subr_smr.c')
-rw-r--r--sys/kern/subr_smr.c31
1 file changed, 31 insertions, 0 deletions
diff --git a/sys/kern/subr_smr.c b/sys/kern/subr_smr.c
index c344fdcb7f5f..f475b8516363 100644
--- a/sys/kern/subr_smr.c
+++ b/sys/kern/subr_smr.c
@@ -209,6 +209,26 @@ smr_advance(smr_t smr)
return (goal);
}
+smr_seq_t
+smr_advance_deferred(smr_t smr, int limit)
+{
+ smr_seq_t goal;
+ smr_t csmr;
+
+ critical_enter();
+ csmr = zpcpu_get(smr);
+ if (++csmr->c_deferred >= limit) {
+ goal = SMR_SEQ_INVALID;
+ csmr->c_deferred = 0;
+ } else
+ goal = smr_shared_current(csmr->c_shared) + SMR_SEQ_INCR;
+ critical_exit();
+ if (goal != SMR_SEQ_INVALID)
+ return (goal);
+
+ return (smr_advance(smr));
+}
+
/*
* Poll to determine whether all readers have observed the 'goal' write
* sequence number.
@@ -257,6 +277,17 @@ smr_poll(smr_t smr, smr_seq_t goal, bool wait)
s_wr_seq = atomic_load_acq_int(&s->s_wr_seq);
/*
+ * This may have come from a deferred advance. Consider one
+ * increment past the current wr_seq valid and make sure we
+ * have advanced far enough to succeed. We simply add to avoid
+ * an additional fence.
+ */
+ if (goal == s_wr_seq + SMR_SEQ_INCR) {
+ atomic_add_int(&s->s_wr_seq, SMR_SEQ_INCR);
+ s_wr_seq = goal;
+ }
+
+ /*
* Detect whether the goal is valid and has already been observed.
*
* The goal must be in the range of s_wr_seq >= goal >= s_rd_seq for