aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMark Johnston <markj@FreeBSD.org>2021-05-14 14:07:56 +0000
committerMark Johnston <markj@FreeBSD.org>2021-05-14 14:08:14 +0000
commit2cca77ee01343bf080f1b70f0217a84c200fe7c1 (patch)
treeb0eb5a177b94e453776f623a9ada55e2edc7dc34
parent34766aa8cb514472c571f8b0e90e833833acef51 (diff)
downloadsrc-2cca77ee01343bf080f1b70f0217a84c200fe7c1.tar.gz
src-2cca77ee01343bf080f1b70f0217a84c200fe7c1.zip
kqueue timer: Remove detached knotes from the process stop queue
There are some scenarios where a timer event may be detached while it is on the process' kqueue timer stop queue. If kqtimer_proc_continue() is called after that point, it will iterate over the queue and access freed timer structures. It is also possible, at least in a multithreaded program, for a stopped timer event to be scheduled without removing it from the process' stop queue. Ensure that we do not doubly enqueue the event structure in this case.
Reported by:	syzbot+cea0931bb4e34cd728bd@syzkaller.appspotmail.com
Reported by:	syzbot+9e1a2f3734652015998c@syzkaller.appspotmail.com
Reviewed by:	kib
MFC after:	1 week
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D30251
-rw-r--r--sys/kern/kern_event.c15
1 file changed, 14 insertions, 1 deletion
diff --git a/sys/kern/kern_event.c b/sys/kern/kern_event.c
index 1067e7f128b7..e7047e9a7ad9 100644
--- a/sys/kern/kern_event.c
+++ b/sys/kern/kern_event.c
@@ -680,11 +680,14 @@ struct kq_timer_cb_data {
struct proc *p;
struct knote *kn;
int cpuid;
+ int flags;
TAILQ_ENTRY(kq_timer_cb_data) link;
sbintime_t next; /* next timer event fires at */
sbintime_t to; /* precalculated timer period, 0 for abs */
};
+#define KQ_TIMER_CB_ENQUEUED 0x01
+
static void
kqtimer_sched_callout(struct kq_timer_cb_data *kc)
{
@@ -706,6 +709,7 @@ kqtimer_proc_continue(struct proc *p)
TAILQ_FOREACH_SAFE(kc, &p->p_kqtim_stop, link, kc1) {
TAILQ_REMOVE(&p->p_kqtim_stop, kc, link);
+ kc->flags &= ~KQ_TIMER_CB_ENQUEUED;
if (kc->next <= now)
filt_timerexpire_l(kc->kn, true);
else
@@ -753,7 +757,10 @@ filt_timerexpire_l(struct knote *kn, bool proc_locked)
if (!proc_locked)
PROC_LOCK(p);
if (P_SHOULDSTOP(p) || P_KILLED(p)) {
- TAILQ_INSERT_TAIL(&p->p_kqtim_stop, kc, link);
+ if ((kc->flags & KQ_TIMER_CB_ENQUEUED) == 0) {
+ kc->flags |= KQ_TIMER_CB_ENQUEUED;
+ TAILQ_INSERT_TAIL(&p->p_kqtim_stop, kc, link);
+ }
if (!proc_locked)
PROC_UNLOCK(p);
return;
@@ -826,6 +833,7 @@ filt_timerattach(struct knote *kn)
kc->kn = kn;
kc->p = curproc;
kc->cpuid = PCPU_GET(cpuid);
+ kc->flags = 0;
callout_init(&kc->c, 1);
filt_timerstart(kn, to);
@@ -856,6 +864,11 @@ filt_timerdetach(struct knote *kn)
kc = kn->kn_ptr.p_v;
callout_drain(&kc->c);
+ if ((kc->flags & KQ_TIMER_CB_ENQUEUED) != 0) {
+ PROC_LOCK(kc->p);
+ TAILQ_REMOVE(&kc->p->p_kqtim_stop, kc, link);
+ PROC_UNLOCK(kc->p);
+ }
free(kc, M_KQUEUE);
old = atomic_fetchadd_int(&kq_ncallouts, -1);
KASSERT(old > 0, ("Number of callouts cannot become negative"));