-rw-r--r--  sys/kern/kern_intr.c  | 93
-rw-r--r--  sys/sys/interrupt.h   |  2
2 files changed, 67 insertions(+), 28 deletions(-)
diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c
index 541e33906d6a..2944c89f5825 100644
--- a/sys/kern/kern_intr.c
+++ b/sys/kern/kern_intr.c
@@ -683,6 +683,45 @@ intr_handler_source(void *cookie)
}
/*
+ * If intr_event_handle() is running in the ISR context at the time of the call,
+ * then wait for it to complete.
+ */
+static void
+intr_event_barrier(struct intr_event *ie)
+{
+ int phase;
+
+ mtx_assert(&ie->ie_lock, MA_OWNED);
+ phase = ie->ie_phase;
+
+ /*
+ * Switch phase to direct future interrupts to the other active counter.
+ * Make sure that any preceding stores are visible before the switch.
+ */
+ KASSERT(ie->ie_active[!phase] == 0, ("idle phase has activity"));
+ atomic_store_rel_int(&ie->ie_phase, !phase);
+
+ /*
+ * This code cooperates with wait-free iteration of ie_handlers
+ * in intr_event_handle.
+ * Make sure that the removal and the phase update are not reordered
+ * with the active count check.
+ * Note that no combination of acquire and release fences can provide
+ * that guarantee as Store->Load sequences can always be reordered.
+ */
+ atomic_thread_fence_seq_cst();
+
+ /*
+ * Now wait on the inactive phase.
+ * The acquire fence is needed so that all post-barrier accesses
+ * are after the check.
+ */
+ while (ie->ie_active[phase] > 0)
+ cpu_spinwait();
+ atomic_thread_fence_acq();
+}
+
+/*
* Sleep until an ithread finishes executing an interrupt handler.
*
* XXX Doesn't currently handle interrupt filters or fast interrupt
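
The function added above implements a two-phase ("phase flip") barrier: the writer flips ie_phase so that new filter invocations are counted on the other slot, then spins until the counter of the old slot drains to zero. A minimal userspace sketch of the same pattern, using C11 atomics and sched_yield() in place of the kernel's atomic(9) primitives and cpu_spinwait() (all names here are invented for the illustration, not taken from the patch):

    /* Userspace sketch of the phase-flip barrier; illustrative only. */
    #include <sched.h>
    #include <stdatomic.h>

    static _Atomic int phase;       /* counter that new readers bump */
    static _Atomic int active[2];   /* readers inside each phase */

    /* Reader side, analogous to the intr_event_handle() changes below. */
    static int
    reader_enter(void)
    {
            int p;

            p = atomic_load_explicit(&phase, memory_order_relaxed);
            atomic_fetch_add_explicit(&active[p], 1, memory_order_relaxed);
            /* Order the increment before any loads of the shared list. */
            atomic_thread_fence(memory_order_seq_cst);
            return (p);
    }

    static void
    reader_exit(int p)
    {
            /* Release: list accesses finish before we are counted out. */
            atomic_fetch_sub_explicit(&active[p], 1, memory_order_release);
    }

    /* Writer side, analogous to intr_event_barrier(). */
    static void
    barrier(void)
    {
            int p;

            p = atomic_load_explicit(&phase, memory_order_relaxed);
            /* Direct future readers to the other counter. */
            atomic_store_explicit(&phase, !p, memory_order_release);
            /* Order the phase store before checking the old counter. */
            atomic_thread_fence(memory_order_seq_cst);
            /* Wait for readers still counted on the old phase to drain. */
            while (atomic_load_explicit(&active[p], memory_order_relaxed) > 0)
                    sched_yield();
            atomic_thread_fence(memory_order_acquire);
    }

Flipping the phase before spinning is what makes the wait terminate: readers that arrive after the flip bump the other counter and will already observe the updated ie_handlers list, so only readers that entered before the flip need to be waited out.
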
@@ -752,44 +791,30 @@ intr_event_remove_handler(void *cookie)
}
/*
- * If there is no ithread, then just remove the handler and return.
- * XXX: Note that an INTR_FAST handler might be running on another
- * CPU!
+ * If there is no ithread, then directly remove the handler. Note that
+ * intr_event_handle() iterates ie_handlers in a lock-less fashion, so
+ * care needs to be taken to keep ie_handlers consistent and to free
+ * the removed handler only when ie_handlers is quiescent.
*/
if (ie->ie_thread == NULL) {
CK_SLIST_REMOVE_PREVPTR(prevptr, ih, ih_next);
+ intr_event_barrier(ie);
+ intr_event_update(ie);
mtx_unlock(&ie->ie_lock);
free(handler, M_ITHREAD);
return (0);
}
/*
- * If the interrupt thread is already running, then just mark this
- * handler as being dead and let the ithread do the actual removal.
- *
- * During a cold boot while cold is set, msleep() does not sleep,
- * so we have to remove the handler here rather than letting the
- * thread do it.
+ * Let the interrupt thread do the job.
+ * The interrupt source is disabled when the interrupt thread is
+ * running, so it does not have to worry about interaction with
+ * intr_event_handle().
*/
- thread_lock(ie->ie_thread->it_thread);
- if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
- handler->ih_flags |= IH_DEAD;
-
- /*
- * Ensure that the thread will process the handler list
- * again and remove this handler if it has already passed
- * it on the list.
- *
- * The release part of the following store ensures
- * that the update of ih_flags is ordered before the
- * it_need setting. See the comment before
- * atomic_cmpset_acq(&ithd->it_need, ...) operation in
- * the ithread_execute_handlers().
- */
- atomic_store_rel_int(&ie->ie_thread->it_need, 1);
- } else
- CK_SLIST_REMOVE_PREVPTR(prevptr, ih, ih_next);
- thread_unlock(ie->ie_thread->it_thread);
+ KASSERT((handler->ih_flags & IH_DEAD) == 0,
+ ("duplicate handle remove"));
+ handler->ih_flags |= IH_DEAD;
+ intr_event_schedule_thread(ie);
while (handler->ih_flags & IH_DEAD)
msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
intr_event_update(ie);
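
With the barrier handling the no-ithread case, the ithread case no longer needs to distinguish an idle thread from a running one: the handler is always marked IH_DEAD, the interrupt thread is scheduled, and the caller sleeps until the flag is cleared. This handshake relies on the interrupt thread unlinking dead handlers and waking the sleeper while it walks ie_handlers; roughly (a paraphrased sketch of that side of the protocol, not the exact kernel code):

    /* In the interrupt thread's handler loop. */
    CK_SLIST_FOREACH_PREVPTR(ih, prevptr, &ie->ie_handlers, ih_next) {
            if (ih->ih_flags & IH_DEAD) {
                    /* Unlink, clear the flag, and wake the remover. */
                    mtx_lock(&ie->ie_lock);
                    CK_SLIST_REMOVE_PREVPTR(prevptr, ih, ih_next);
                    ih->ih_flags &= ~IH_DEAD;
                    wakeup(ih);
                    mtx_unlock(&ie->ie_lock);
                    continue;
            }
            /* ... otherwise run the handler as usual ... */
    }

Because the interrupt source stays masked while the interrupt thread runs, as the new comment notes, this removal cannot race with the wait-free iteration in intr_event_handle().
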
@@ -1154,6 +1179,7 @@ intr_event_handle(struct intr_event *ie, struct trapframe *frame)
struct trapframe *oldframe;
struct thread *td;
int ret, thread;
+ int phase;
td = curthread;
@@ -1178,6 +1204,15 @@ intr_event_handle(struct intr_event *ie, struct trapframe *frame)
oldframe = td->td_intr_frame;
td->td_intr_frame = frame;
+ phase = ie->ie_phase;
+ atomic_add_int(&ie->ie_active[phase], 1);
+
+ /*
+ * This fence is required to ensure that no later loads are
+ * re-ordered before the ie_active store.
+ */
+ atomic_thread_fence_seq_cst();
+
CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
if (ih->ih_filter == NULL) {
thread = 1;
@@ -1215,6 +1250,8 @@ intr_event_handle(struct intr_event *ie, struct trapframe *frame)
thread = 1;
}
}
+ atomic_add_rel_int(&ie->ie_active[phase], -1);
+
td->td_intr_frame = oldframe;
if (thread) {
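
The seq_cst fence added here pairs with the one in intr_event_barrier(): the filter path stores to ie_active[phase] and then loads the handler list, while the removal path updates the list and ie_phase and then loads ie_active[phase]. That is the classic store-buffering shape, and as the comment in intr_event_barrier() notes, no combination of acquire and release fences forbids both sides missing each other's store. A self-contained C11 litmus test of the same shape (variable and function names invented for the illustration) shows what the two fences rule out:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic int removed;     /* stands in for the list/phase update */
    static _Atomic int active;      /* stands in for ie_active[phase] */
    static int r0, r1;

    static void *
    remover(void *arg)
    {
            (void)arg;
            atomic_store_explicit(&removed, 1, memory_order_release);
            atomic_thread_fence(memory_order_seq_cst);  /* intr_event_barrier() */
            r0 = atomic_load_explicit(&active, memory_order_relaxed);
            return (NULL);
    }

    static void *
    filter(void *arg)
    {
            (void)arg;
            atomic_store_explicit(&active, 1, memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst);  /* intr_event_handle() */
            r1 = atomic_load_explicit(&removed, memory_order_relaxed);
            return (NULL);
    }

    int
    main(void)
    {
            pthread_t t0, t1;

            pthread_create(&t0, NULL, remover, NULL);
            pthread_create(&t1, NULL, filter, NULL);
            pthread_join(t0, NULL);
            pthread_join(t1, NULL);
            printf("r0=%d r1=%d\n", r0, r1);
            return (0);
    }

With both fences at seq_cst the outcome r0 == 0 && r1 == 0 is forbidden: if the remover reads the old-phase counter as zero, the concurrent filter is guaranteed to observe the list update, and vice versa. Downgrade either fence to acquire/release and that outcome becomes legal, which is exactly the Store->Load reordering the comments above warn about.
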
diff --git a/sys/sys/interrupt.h b/sys/sys/interrupt.h
index de064bf5d430..d3432c078f51 100644
--- a/sys/sys/interrupt.h
+++ b/sys/sys/interrupt.h
@@ -122,6 +122,8 @@ struct intr_event {
struct timeval ie_warntm;
int ie_irq; /* Physical irq number if !SOFT. */
int ie_cpu; /* CPU this event is bound to. */
+ volatile int ie_phase; /* Switched to establish a barrier. */
+ volatile int ie_active[2]; /* Filters in ISR context. */
};
/* Interrupt event flags kept in ie_flags. */