author     Konstantin Belousov <kib@FreeBSD.org>    2021-03-05 23:29:08 +0000
committer  Konstantin Belousov <kib@FreeBSD.org>    2021-04-23 11:14:09 +0000
commit     a753730ca0aae7b540ff8fbb24197b4e159b40fa (patch)
tree       1c2a52a7c00529ba36d323a22264bf1d0b13bd85 /sys/kern/kern_event.c
parent     3cb7188a310c36359cf0493a2abf58df5d8bfec6 (diff)
Stop arming kqueue timers on knote owner suspend or terminate
(cherry picked from commit 2fd1ffefaa4d2cd99a19f866a949cb2cd58ef998)
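For context, the timers in question are kqueue EVFILT_TIMER events. Below is a minimal, illustrative userspace sketch (not part of this commit; the identifier and 500 ms period are arbitrary) of the kind of periodic kqueue timer whose kernel callout this change stops re-arming while the owning process is stopped or being killed:

```c
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

#include <err.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	struct kevent kev;
	int kq;

	if ((kq = kqueue()) == -1)
		err(1, "kqueue");

	/* Periodic timer: fires every 500 ms until deleted. */
	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD | EV_ENABLE, 0, 500, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
		err(1, "kevent: register timer");

	for (;;) {
		if (kevent(kq, NULL, 0, &kev, 1, NULL) == -1)
			err(1, "kevent: wait");
		/* kev.data accumulates expirations since the last read. */
		printf("expired %jd time(s)\n", (intmax_t)kev.data);
	}
}
```

With this change, when such a process is stopped, the expired callout is parked on the new per-process p_kqtim_stop list instead of being re-armed, and kqtimer_proc_continue() either fires or re-schedules it once the process resumes.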
Diffstat (limited to 'sys/kern/kern_event.c')
-rw-r--r--  sys/kern/kern_event.c  60
1 file changed, 53 insertions, 7 deletions
diff --git a/sys/kern/kern_event.c b/sys/kern/kern_event.c
index 5e9f1fc35dfe..31b091e20984 100644
--- a/sys/kern/kern_event.c
+++ b/sys/kern/kern_event.c
@@ -676,8 +676,10 @@ timer2sbintime(int64_t data, int flags)
 
 struct kq_timer_cb_data {
         struct callout c;
+        struct proc *p;
         struct knote *kn;
         int cpuid;
+        TAILQ_ENTRY(kq_timer_cb_data) link;
         sbintime_t next;        /* next timer event fires at */
         sbintime_t to;          /* precalculated timer period, 0 for abs */
 };
@@ -689,22 +691,65 @@ kqtimer_sched_callout(struct kq_timer_cb_data *kc)
             kc->cpuid, C_ABSOLUTE);
 }
 
+void
+kqtimer_proc_continue(struct proc *p)
+{
+        struct kq_timer_cb_data *kc, *kc1;
+        struct bintime bt;
+        sbintime_t now;
+
+        PROC_LOCK_ASSERT(p, MA_OWNED);
+
+        getboottimebin(&bt);
+        now = bttosbt(bt);
+
+        TAILQ_FOREACH_SAFE(kc, &p->p_kqtim_stop, link, kc1) {
+                TAILQ_REMOVE(&p->p_kqtim_stop, kc, link);
+                if (kc->next <= now)
+                        filt_timerexpire(kc->kn);
+                else
+                        kqtimer_sched_callout(kc);
+        }
+}
+
 static void
 filt_timerexpire(void *knx)
 {
         struct knote *kn;
         struct kq_timer_cb_data *kc;
+        struct proc *p;
+        sbintime_t now;
 
         kn = knx;
-        kn->kn_data++;
-        KNOTE_ACTIVATE(kn, 0); /* XXX - handle locking */
-
-        if ((kn->kn_flags & EV_ONESHOT) != 0)
-                return;
         kc = kn->kn_ptr.p_v;
-        if (kc->to == 0)
+
+        if ((kn->kn_flags & EV_ONESHOT) != 0 || kc->to == 0) {
+                kn->kn_data++;
+                KNOTE_ACTIVATE(kn, 0);
                 return;
-        kc->next += kc->to;
+        }
+
+        for (now = sbinuptime(); kc->next <= now; kc->next += kc->to)
+                kn->kn_data++;
+        KNOTE_ACTIVATE(kn, 0); /* XXX - handle locking */
+
+        /*
+         * Initial check for stopped kc->p is racy.  It is fine to
+         * miss the set of the stop flags, at worst we would schedule
+         * one more callout.  On the other hand, it is not fine to not
+         * schedule when we we missed clearing of the flags, we
+         * recheck them under the lock and observe consistent state.
+         */
+        p = kc->p;
+        if (P_SHOULDSTOP(p) || P_KILLED(p)) {
+                PROC_LOCK(p);
+                if (P_SHOULDSTOP(p) || P_KILLED(p)) {
+                        TAILQ_INSERT_TAIL(&p->p_kqtim_stop, kc, link);
+                        PROC_UNLOCK(p);
+                        return;
+                }
+                PROC_UNLOCK(p);
+        }
         kqtimer_sched_callout(kc);
 }
 
@@ -762,6 +807,7 @@ filt_timerattach(struct knote *kn)
         kn->kn_status &= ~KN_DETACHED;          /* knlist_add clears it */
         kn->kn_ptr.p_v = kc = malloc(sizeof(*kc), M_KQUEUE, M_WAITOK);
         kc->kn = kn;
+        kc->p = curproc;
         kc->cpuid = PCPU_GET(cpuid);
         callout_init(&kc->c, 1);
         filt_timerstart(kn, to);
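The comment added to filt_timerexpire() describes a double-checked pattern: an unlocked peek at P_SHOULDSTOP()/P_KILLED(), re-validated under the process lock before the callout is parked on p_kqtim_stop. The standalone sketch below restates that pattern in userland terms, with a pthread mutex standing in for the process lock; every name in it is illustrative and none of it comes from the kernel sources:

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool stopped = false;	/* stands in for P_SHOULDSTOP() || P_KILLED() */

/*
 * Called on each timer expiration.  Returns true when re-arming was
 * deferred (the timer was "parked"), false when it was re-armed.
 */
static bool
expire_and_maybe_park(void (*park)(void), void (*rearm)(void))
{
	/*
	 * Unlocked peek: missing a concurrent stop costs at most one
	 * extra re-arm, which is harmless.  A positive peek is never
	 * acted on directly; it is re-validated under the lock, so a
	 * stale "stopped" reading cannot park a running timer.
	 */
	if (stopped) {
		pthread_mutex_lock(&state_lock);
		if (stopped) {
			park();		/* queue for later resume */
			pthread_mutex_unlock(&state_lock);
			return (true);
		}
		pthread_mutex_unlock(&state_lock);
	}
	rearm();
	return (false);
}
```

Keeping the first check unlocked keeps the common not-stopped path lock-free; the worst a stale negative reading can cause is one extra re-arm, while a positive reading is always confirmed under the lock before anything is parked.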