diff options
author | Jeff Roberson <jeff@FreeBSD.org> | 2020-02-06 20:51:46 +0000 |
---|---|---|
committer | Jeff Roberson <jeff@FreeBSD.org> | 2020-02-06 20:51:46 +0000 |
commit | a40068e524d30ce358e949f5cf729366d688aee0 (patch) | |
tree | 4fb45c2d5500b8c2ae4f012bc62fcfc5cf380d87 /sys/kern/subr_smr.c | |
parent | cd0be8b2ed2e882ba3486a0b8be25b61c4d958ee (diff) | |
download | src-a40068e524d30ce358e949f5cf729366d688aee0.tar.gz src-a40068e524d30ce358e949f5cf729366d688aee0.zip |
Fix a race in smr_advance() that could result in unnecessary poll calls.
This was relatively harmless but surprising to see in counters. The
race occurred when rd_seq was read after the goal was updated and we
incorrectly calculated the delta between them.
Reviewed by: rlibby
Differential Revision: https://reviews.freebsd.org/D23464
Notes
Notes:
svn path=/head/; revision=357641
Diffstat (limited to 'sys/kern/subr_smr.c')
-rw-r--r-- | sys/kern/subr_smr.c | 14 |
1 file changed, 10 insertions, 4 deletions
diff --git a/sys/kern/subr_smr.c b/sys/kern/subr_smr.c index 7a776289c2c0..bbbc5711c499 100644 --- a/sys/kern/subr_smr.c +++ b/sys/kern/subr_smr.c @@ -160,7 +160,7 @@ static uma_zone_t smr_zone; #define SMR_SEQ_INCR (UINT_MAX / 10000) #define SMR_SEQ_INIT (UINT_MAX - 100000) /* Force extra polls to test the integer overflow detection. */ -#define SMR_SEQ_MAX_DELTA (1000) +#define SMR_SEQ_MAX_DELTA (SMR_SEQ_INCR * 32) #define SMR_SEQ_MAX_ADVANCE SMR_SEQ_MAX_DELTA / 2 #endif @@ -188,7 +188,7 @@ smr_seq_t smr_advance(smr_t smr) { smr_shared_t s; - smr_seq_t goal; + smr_seq_t goal, s_rd_seq; /* * It is illegal to enter while in an smr section. @@ -203,12 +203,18 @@ smr_advance(smr_t smr) atomic_thread_fence_rel(); /* + * Load the current read seq before incrementing the goal so + * we are guaranteed it is always < goal. + */ + s = zpcpu_get(smr)->c_shared; + s_rd_seq = atomic_load_acq_int(&s->s_rd_seq); + + /* * Increment the shared write sequence by 2. Since it is * initialized to 1 this means the only valid values are * odd and an observed value of 0 in a particular CPU means * it is not currently in a read section. */ - s = zpcpu_get(smr)->c_shared; goal = atomic_fetchadd_int(&s->s_wr_seq, SMR_SEQ_INCR) + SMR_SEQ_INCR; counter_u64_add(advance, 1); @@ -217,7 +223,7 @@ smr_advance(smr_t smr) * far ahead of the read sequence number. This keeps the * wrap detecting arithmetic working in pathological cases. */ - if (goal - atomic_load_int(&s->s_rd_seq) >= SMR_SEQ_MAX_DELTA) { + if (SMR_SEQ_DELTA(goal, s_rd_seq) >= SMR_SEQ_MAX_DELTA) { counter_u64_add(advance_wait, 1); smr_wait(smr, goal - SMR_SEQ_MAX_ADVANCE); } |