aboutsummaryrefslogtreecommitdiff
path: root/sys/kern/subr_epoch.c
diff options
context:
space:
mode:
authorMatt Macy <mmacy@FreeBSD.org>2018-05-17 00:45:35 +0000
committerMatt Macy <mmacy@FreeBSD.org>2018-05-17 00:45:35 +0000
commitb8205686b43cb0928c80284ab6e07ca456b97f6e (patch)
tree0051f8def0497b989ce6a407627832a95a1d518d /sys/kern/subr_epoch.c
parent8ab507588bfbf8a161d62f6d265264bf9ed59d15 (diff)
downloadsrc-b8205686b43cb0928c80284ab6e07ca456b97f6e.tar.gz
src-b8205686b43cb0928c80284ab6e07ca456b97f6e.zip
epoch(9): Guarantee forward progress on busy sections
Add an epoch section to struct thread. We can use this to enable the epoch counter to advance even if a section is perpetually occupied by a thread. Approved by: sbruno
Notes
Notes: svn path=/head/; revision=333695
Diffstat (limited to 'sys/kern/subr_epoch.c')
-rw-r--r--sys/kern/subr_epoch.c17
1 file changed, 13 insertions, 4 deletions
diff --git a/sys/kern/subr_epoch.c b/sys/kern/subr_epoch.c
index bf5e12a2b79f..9c0a5e092abd 100644
--- a/sys/kern/subr_epoch.c
+++ b/sys/kern/subr_epoch.c
@@ -54,7 +54,7 @@ __FBSDID("$FreeBSD$");
static MALLOC_DEFINE(M_EPOCH, "epoch", "epoch based reclamation");
/* arbitrary --- needs benchmarking */
-#define MAX_ADAPTIVE_SPIN 5000
+#define MAX_ADAPTIVE_SPIN 1000
#define EPOCH_EXITING 0x1
#ifdef __amd64__
@@ -63,6 +63,7 @@ static MALLOC_DEFINE(M_EPOCH, "epoch", "epoch based reclamation");
#define EPOCH_ALIGN CACHE_LINE_SIZE
#endif
+CTASSERT(sizeof(epoch_section_t) == sizeof(ck_epoch_section_t));
SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW, 0, "epoch information");
SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW, 0, "epoch stats");
@@ -308,8 +309,12 @@ epoch_enter(epoch_t epoch)
KASSERT(found, ("recursing on a second epoch"));
}
#endif
+ if (td->td_epochnest > 1) {
+ critical_exit();
+ return;
+ }
sched_pin();
- ck_epoch_begin(&eps->eps_record.er_record, NULL);
+ ck_epoch_begin(&eps->eps_record.er_record, (ck_epoch_section_t*)&td->td_epoch_section);
critical_exit();
}
@@ -324,11 +329,15 @@ epoch_exit(epoch_t epoch)
MPASS(td->td_epochnest);
critical_enter();
eps = epoch->e_pcpu[curcpu];
- sched_unpin();
- ck_epoch_end(&eps->eps_record.er_record, NULL);
td->td_epochnest--;
if (td->td_epochnest == 0)
TAILQ_REMOVE(&eps->eps_record.er_tdlist, td, td_epochq);
+ else {
+ critical_exit();
+ return;
+ }
+ sched_unpin();
+ ck_epoch_end(&eps->eps_record.er_record, (ck_epoch_section_t*)&td->td_epoch_section);
eps->eps_record.er_gen++;
critical_exit();
}