path: root/src/ck_epoch.c
author     Olivier Houchard <cognet@FreeBSD.org>  2018-05-17 20:57:30 +0000
committer  Olivier Houchard <cognet@FreeBSD.org>  2018-05-17 20:57:30 +0000
commit     a0a6ef6565549755a31b821ba4245c060c921e2f (patch)
tree       e01c12883ef5d8513899b2f2ac7a27c8311e102f /src/ck_epoch.c
parent     e8d27288c2439ee79c81e4684ea90e8ca1aab845 (diff)
download   src-a0a6ef6565549755a31b821ba4245c060c921e2f.tar.gz
           src-a0a6ef6565549755a31b821ba4245c060c921e2f.zip
Import CK as of commit deca119d14bfffd440770eb67cbdbeaf7b57eb7b (tag: vendor/ck/20180517)
This brings us ck_epoch_deferred, which is used by the new facility epoch(9).
Notes:
    svn path=/vendor-sys/ck/dist/; revision=333762
    svn path=/vendor-sys/ck/20180517/; revision=333763; tag=vendor/ck/20180517
Diffstat (limited to 'src/ck_epoch.c')
-rw-r--r--   src/ck_epoch.c   22
1 file changed, 16 insertions(+), 6 deletions(-)
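
The commit message refers to ck_epoch_deferred; in this file the new entry point is ck_epoch_poll_deferred(), which, as the hunks below show, parks dispatchable callbacks on a caller-supplied ck_stack_t instead of running them inline. A minimal caller-side sketch follows; the poll_and_defer() wrapper and its arguments are invented here, and only the ck_epoch_poll_deferred() prototype added by this import is assumed.

#include <stdbool.h>
#include <ck_epoch.h>
#include <ck_stack.h>

/*
 * Hypothetical wrapper: poll the epoch subsystem, but collect any
 * dispatchable callbacks on "deferred" rather than invoking them from
 * inside the poll itself.
 */
static bool
poll_and_defer(ck_epoch_record_t *record, ck_stack_t *deferred)
{

	ck_stack_init(deferred);

	/*
	 * With a non-NULL stack, ready ck_epoch_entry objects are pushed
	 * onto "deferred"; a false return means no progress could be made
	 * because some thread has yet to observe the current epoch.
	 */
	return ck_epoch_poll_deferred(record, deferred);
}
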
diff --git a/src/ck_epoch.c b/src/ck_epoch.c
index a3273b474b63..c0d12e5ec316 100644
--- a/src/ck_epoch.c
+++ b/src/ck_epoch.c
@@ -349,7 +349,7 @@ ck_epoch_scan(struct ck_epoch *global,
}
static void
-ck_epoch_dispatch(struct ck_epoch_record *record, unsigned int e)
+ck_epoch_dispatch(struct ck_epoch_record *record, unsigned int e, ck_stack_t *deferred)
{
unsigned int epoch = e & (CK_EPOCH_LENGTH - 1);
ck_stack_entry_t *head, *next, *cursor;
@@ -362,7 +362,10 @@ ck_epoch_dispatch(struct ck_epoch_record *record, unsigned int e)
ck_epoch_entry_container(cursor);
next = CK_STACK_NEXT(cursor);
- entry->function(entry);
+ if (deferred != NULL)
+ ck_stack_push_spnc(deferred, &entry->stack_entry);
+ else
+ entry->function(entry);
i++;
}
@@ -390,7 +393,7 @@ ck_epoch_reclaim(struct ck_epoch_record *record)
unsigned int epoch;
for (epoch = 0; epoch < CK_EPOCH_LENGTH; epoch++)
- ck_epoch_dispatch(record, epoch);
+ ck_epoch_dispatch(record, epoch, NULL);
return;
}
@@ -551,7 +554,7 @@ ck_epoch_barrier_wait(struct ck_epoch_record *record, ck_epoch_wait_cb_t *cb,
* is far from ideal too.
*/
bool
-ck_epoch_poll(struct ck_epoch_record *record)
+ck_epoch_poll_deferred(struct ck_epoch_record *record, ck_stack_t *deferred)
{
bool active;
unsigned int epoch;
@@ -572,7 +575,7 @@ ck_epoch_poll(struct ck_epoch_record *record)
if (active == false) {
record->epoch = epoch;
for (epoch = 0; epoch < CK_EPOCH_LENGTH; epoch++)
- ck_epoch_dispatch(record, epoch);
+ ck_epoch_dispatch(record, epoch, deferred);
return true;
}
@@ -580,6 +583,13 @@ ck_epoch_poll(struct ck_epoch_record *record)
/* If an active thread exists, rely on epoch observation. */
(void)ck_pr_cas_uint(&global->epoch, epoch, epoch + 1);
- ck_epoch_dispatch(record, epoch + 1);
+ ck_epoch_dispatch(record, epoch + 1, deferred);
return true;
}
+
+bool
+ck_epoch_poll(struct ck_epoch_record *record)
+{
+
+ return ck_epoch_poll_deferred(record, NULL);
+}
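
After a successful poll, the callbacks parked on the deferred stack still have to be run by the caller. The sketch below drains them the same way ck_epoch_dispatch() would have run them inline; drain_deferred() and epoch_entry_container() are names invented here, with the container helper generated through CK's public CK_STACK_CONTAINER macro because the ck_epoch_entry_container() used above is private to ck_epoch.c.

#include <ck_epoch.h>
#include <ck_stack.h>

/*
 * Assumed helper: recover the enclosing ck_epoch_entry from its embedded
 * stack_entry member (the same pattern ck_epoch.c uses internally).
 */
CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry, epoch_entry_container)

/*
 * Sketch: execute every callback that ck_epoch_poll_deferred() parked on
 * the caller-supplied stack and return how many were run.
 */
static unsigned int
drain_deferred(ck_stack_t *deferred)
{
	ck_stack_entry_t *cursor, *next;
	unsigned int n = 0;

	/* Detach the whole batch; the single-consumer variant suffices here. */
	for (cursor = ck_stack_batch_pop_npsc(deferred); cursor != NULL;
	    cursor = next) {
		struct ck_epoch_entry *entry = epoch_entry_container(cursor);

		next = CK_STACK_NEXT(cursor);
		entry->function(entry);
		n++;
	}

	return n;
}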