author    Alexander Motin <mav@FreeBSD.org>  2010-09-13 07:25:35 +0000
committer Alexander Motin <mav@FreeBSD.org>  2010-09-13 07:25:35 +0000
commit    a157e42516dcee534177e5e0dc59815c3334d647 (patch)
tree      b0666da99693a46e1663a3a882abfdee5e324830 /sys/kern
parent    3c7f49dcf28dcb78fd219f420504e50518ccea48 (diff)
Refactor the timer management code, giving priority to one-shot operation mode.
The main goal of this change is to generate timer interrupts only when there is actual work to do. When a CPU is busy, interrupts are generated at the full rate of hz + stathz to satisfy the scheduler and timekeeping requirements. When a CPU is idle, only the minimal set of interrupts needed to handle scheduled callouts is executed (now down to 8 interrupts per second per CPU). This significantly increases idle CPU sleep time and therefore the benefit of static power-saving technologies. It should also reduce host CPU load on virtualized systems while the guest system is idle.

There is a set of tunables, also available as writable sysctls, for controlling the behavior of the event timer subsystem:

kern.eventtimer.timer - selects the event timer hardware to use. On x86 there are up to 4 different kinds of timers. Depending on whether the chosen timer is per-CPU, the behavior of the other options differs slightly.

kern.eventtimer.periodic - selects between periodic and one-shot operation mode. In periodic mode, the current timer hardware is taken as the only source of time for time events. This mode is quite similar to the previous kernel behavior. One-shot mode instead uses the currently selected time counter hardware to schedule all needed events one by one, and programs the timer to generate an interrupt at exactly the specified time. The default value depends on the capabilities of the chosen timer, but one-shot mode is preferred unless another mode is forced by the user or the hardware.

kern.eventtimer.singlemul - in periodic mode, specifies how many times higher the timer frequency should be, so that hardclock() and statclock() events do not strictly alias each other. The default values are 2 and 4, but they can be reduced to 1 if the extra interrupts are unwanted.

kern.eventtimer.idletick - makes each CPU receive every timer interrupt regardless of whether it is busy or not. This option is disabled by default. If the chosen timer is per-CPU and runs in periodic mode, this option has no effect - all interrupts are generated anyway.

Since this patch modifies cpu_idle() on some platforms, I have also refactored the x86 one. It now makes use of the MONITOR/MWAIT instructions (if supported) under high sleep/wakeup rates, as a fast alternative to the other methods. This allows the SMP scheduler to wake up sleeping CPUs much faster without using an IPI, significantly increasing performance on some highly task-switching loads.

Tested by:	many (on i386, amd64, sparc64 and powerpc)
H/W donated by:	Gheorghe Ardelean
Sponsored by:	iXsystems, Inc.
Notes:
    svn path=/head/; revision=212541
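
The tunables listed above are ordinary sysctl nodes, so they can be read (and the writable ones changed) through the usual sysctl(3)/sysctl(8) interfaces. As a minimal sketch, not part of this commit, the following userland C program queries them via sysctlbyname(3); it assumes a FreeBSD system running a kernel with this patch applied:

	/* Illustrative only: read the kern.eventtimer.* tunables described above. */
	#include <sys/types.h>
	#include <sys/sysctl.h>
	#include <stdio.h>

	int
	main(void)
	{
		char timer[32];
		int periodic, singlemul, idletick;
		size_t len;

		/* kern.eventtimer.timer is a string: the selected timer hardware. */
		len = sizeof(timer);
		if (sysctlbyname("kern.eventtimer.timer", timer, &len, NULL, 0) == 0)
			printf("timer:     %s\n", timer);

		/* The remaining tunables are plain integers. */
		len = sizeof(periodic);
		if (sysctlbyname("kern.eventtimer.periodic", &periodic, &len, NULL, 0) == 0)
			printf("periodic:  %d\n", periodic);
		len = sizeof(singlemul);
		if (sysctlbyname("kern.eventtimer.singlemul", &singlemul, &len, NULL, 0) == 0)
			printf("singlemul: %d\n", singlemul);
		len = sizeof(idletick);
		if (sysctlbyname("kern.eventtimer.idletick", &idletick, &len, NULL, 0) == 0)
			printf("idletick:  %d\n", idletick);
		return (0);
	}

Changing a value at runtime (for example, forcing periodic operation) works the same way by passing a new value through the newp/newlen arguments of sysctlbyname(), or simply with "sysctl kern.eventtimer.periodic=1".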
Diffstat (limited to 'sys/kern')
 -rw-r--r--  sys/kern/kern_clock.c        140
 -rw-r--r--  sys/kern/kern_clocksource.c  863
 -rw-r--r--  sys/kern/kern_et.c             2
 -rw-r--r--  sys/kern/kern_tc.c            18
 -rw-r--r--  sys/kern/kern_timeout.c       39
 -rw-r--r--  sys/kern/sched_4bsd.c          2
 -rw-r--r--  sys/kern/sched_ule.c           8
 7 files changed, 744 insertions(+), 328 deletions(-)
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index c283b6b1e6a7..ff5747e104c7 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -373,11 +373,9 @@ int profprocs;
int ticks;
int psratio;
-int timer1hz;
-int timer2hz;
-static DPCPU_DEFINE(u_int, hard_cnt);
-static DPCPU_DEFINE(u_int, stat_cnt);
-static DPCPU_DEFINE(u_int, prof_cnt);
+static DPCPU_DEFINE(int, pcputicks); /* Per-CPU version of ticks. */
+static struct mtx global_hardclock_mtx;
+MTX_SYSINIT(global_hardclock_mtx, &global_hardclock_mtx, "ghc_mtx", MTX_SPIN);
/*
* Initialize clock frequencies and start both clocks running.
@@ -408,52 +406,6 @@ initclocks(dummy)
#endif
}
-void
-timer1clock(int usermode, uintfptr_t pc)
-{
- u_int *cnt;
-
- cnt = DPCPU_PTR(hard_cnt);
- *cnt += hz;
- if (*cnt >= timer1hz) {
- *cnt -= timer1hz;
- if (*cnt >= timer1hz)
- *cnt = 0;
- if (PCPU_GET(cpuid) == 0)
- hardclock(usermode, pc);
- else
- hardclock_cpu(usermode);
- }
- if (timer2hz == 0)
- timer2clock(usermode, pc);
-}
-
-void
-timer2clock(int usermode, uintfptr_t pc)
-{
- u_int *cnt;
- int t2hz = timer2hz ? timer2hz : timer1hz;
-
- cnt = DPCPU_PTR(stat_cnt);
- *cnt += stathz;
- if (*cnt >= t2hz) {
- *cnt -= t2hz;
- if (*cnt >= t2hz)
- *cnt = 0;
- statclock(usermode);
- }
- if (profprocs == 0)
- return;
- cnt = DPCPU_PTR(prof_cnt);
- *cnt += profhz;
- if (*cnt >= t2hz) {
- *cnt -= t2hz;
- if (*cnt >= t2hz)
- *cnt = 0;
- profclock(usermode, pc);
- }
-}
-
/*
* Each time the real-time timer fires, this function is called on all CPUs.
* Note that hardclock() calls hardclock_cpu() for the boot CPU, so only
@@ -486,7 +438,7 @@ hardclock_cpu(int usermode)
PROC_SUNLOCK(p);
}
thread_lock(td);
- sched_tick();
+ sched_tick(1);
td->td_flags |= flags;
thread_unlock(td);
@@ -507,6 +459,7 @@ hardclock(int usermode, uintfptr_t pc)
atomic_add_int((volatile int *)&ticks, 1);
hardclock_cpu(usermode);
tc_ticktock();
+ cpu_tick_calibration();
/*
* If no separate statistics clock is available, run it from here.
*
@@ -525,6 +478,89 @@ hardclock(int usermode, uintfptr_t pc)
#endif /* SW_WATCHDOG */
}
+void
+hardclock_anycpu(int cnt, int usermode)
+{
+ struct pstats *pstats;
+ struct thread *td = curthread;
+ struct proc *p = td->td_proc;
+ int *t = DPCPU_PTR(pcputicks);
+ int flags;
+ int global, newticks;
+
+ /*
+ * Update per-CPU and possibly global ticks values.
+ */
+ *t += cnt;
+ do {
+ global = ticks;
+ newticks = *t - global;
+ if (newticks <= 0) {
+ if (newticks < -1)
+ *t = global - 1;
+ newticks = 0;
+ break;
+ }
+ } while (!atomic_cmpset_int(&ticks, global, *t));
+
+ /*
+ * Run current process's virtual and profile time, as needed.
+ */
+ pstats = p->p_stats;
+ flags = 0;
+ if (usermode &&
+ timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value)) {
+ PROC_SLOCK(p);
+ if (itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL],
+ tick * cnt) == 0)
+ flags |= TDF_ALRMPEND | TDF_ASTPENDING;
+ PROC_SUNLOCK(p);
+ }
+ if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value)) {
+ PROC_SLOCK(p);
+ if (itimerdecr(&pstats->p_timer[ITIMER_PROF],
+ tick * cnt) == 0)
+ flags |= TDF_PROFPEND | TDF_ASTPENDING;
+ PROC_SUNLOCK(p);
+ }
+ thread_lock(td);
+ sched_tick(cnt);
+ td->td_flags |= flags;
+ thread_unlock(td);
+
+#ifdef HWPMC_HOOKS
+ if (PMC_CPU_HAS_SAMPLES(PCPU_GET(cpuid)))
+ PMC_CALL_HOOK_UNLOCKED(curthread, PMC_FN_DO_SAMPLES, NULL);
+#endif
+ callout_tick();
+ /* We are in charge to handle this tick duty. */
+ if (newticks > 0) {
+ mtx_lock_spin(&global_hardclock_mtx);
+ tc_ticktock();
+#ifdef DEVICE_POLLING
+ hardclock_device_poll(); /* This is very short and quick. */
+#endif /* DEVICE_POLLING */
+#ifdef SW_WATCHDOG
+ if (watchdog_enabled > 0) {
+ watchdog_ticks -= newticks;
+ if (watchdog_ticks <= 0)
+ watchdog_fire();
+ }
+#endif /* SW_WATCHDOG */
+ mtx_unlock_spin(&global_hardclock_mtx);
+ }
+ if (curcpu == CPU_FIRST())
+ cpu_tick_calibration();
+}
+
+void
+hardclock_sync(int cpu)
+{
+ int *t = DPCPU_ID_PTR(cpu, pcputicks);
+
+ *t = ticks;
+}
+
/*
* Compute number of ticks in the specified amount of time.
*/
diff --git a/sys/kern/kern_clocksource.c b/sys/kern/kern_clocksource.c
index 6b005de8c5e9..29304a496cb8 100644
--- a/sys/kern/kern_clocksource.c
+++ b/sys/kern/kern_clocksource.c
@@ -41,6 +41,7 @@ __FBSDID("$FreeBSD$");
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/kdb.h>
+#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kernel.h>
@@ -59,28 +60,79 @@ __FBSDID("$FreeBSD$");
cyclic_clock_func_t cyclic_clock_func[MAXCPU];
#endif
-static void cpu_restartclocks(void);
-static void timercheck(void);
-inline static int doconfigtimer(int i);
-static void configtimer(int i);
+int cpu_disable_deep_sleep = 0; /* Timer dies in C3. */
-static struct eventtimer *timer[2] = { NULL, NULL };
-static int timertest = 0;
-static int timerticks[2] = { 0, 0 };
-static int profiling_on = 0;
-static struct bintime timerperiod[2];
+static void setuptimer(void);
+static void loadtimer(struct bintime *now, int first);
+static int doconfigtimer(void);
+static void configtimer(int start);
+static int round_freq(struct eventtimer *et, int freq);
-static char timername[2][32];
-TUNABLE_STR("kern.eventtimer.timer1", timername[0], sizeof(*timername));
-TUNABLE_STR("kern.eventtimer.timer2", timername[1], sizeof(*timername));
+static void getnextcpuevent(struct bintime *event, int idle);
+static void getnextevent(struct bintime *event);
+static int handleevents(struct bintime *now, int fake);
+#ifdef SMP
+static void cpu_new_callout(int cpu, int ticks);
+#endif
+
+static struct mtx et_hw_mtx;
+
+#define ET_HW_LOCK(state) \
+ { \
+ if (timer->et_flags & ET_FLAGS_PERCPU) \
+ mtx_lock_spin(&(state)->et_hw_mtx); \
+ else \
+ mtx_lock_spin(&et_hw_mtx); \
+ }
+
+#define ET_HW_UNLOCK(state) \
+ { \
+ if (timer->et_flags & ET_FLAGS_PERCPU) \
+ mtx_unlock_spin(&(state)->et_hw_mtx); \
+ else \
+ mtx_unlock_spin(&et_hw_mtx); \
+ }
+
+static struct eventtimer *timer = NULL;
+static struct bintime timerperiod; /* Timer period for periodic mode. */
+static struct bintime hardperiod; /* hardclock() events period. */
+static struct bintime statperiod; /* statclock() events period. */
+static struct bintime profperiod; /* profclock() events period. */
+static struct bintime nexttick; /* Next global timer tick time. */
+static u_int busy = 0; /* Reconfiguration is in progress. */
+static int profiling = 0; /* Profiling events enabled. */
+
+static char timername[32]; /* Wanted timer. */
+TUNABLE_STR("kern.eventtimer.timer", timername, sizeof(timername));
-static u_int singlemul = 0;
+static u_int singlemul = 0; /* Multiplier for periodic mode. */
TUNABLE_INT("kern.eventtimer.singlemul", &singlemul);
SYSCTL_INT(_kern_eventtimer, OID_AUTO, singlemul, CTLFLAG_RW, &singlemul,
- 0, "Multiplier, used in single timer mode");
-
-typedef u_int tc[2];
-static DPCPU_DEFINE(tc, configtimer);
+ 0, "Multiplier for periodic mode");
+
+static u_int idletick = 0; /* Idle mode allowed. */
+TUNABLE_INT("kern.eventtimer.idletick", &idletick);
+SYSCTL_INT(_kern_eventtimer, OID_AUTO, idletick, CTLFLAG_RW, &idletick,
+ 0, "Run periodic events when idle");
+
+static int periodic = 0; /* Periodic or one-shot mode. */
+TUNABLE_INT("kern.eventtimer.periodic", &periodic);
+
+struct pcpu_state {
+ struct mtx et_hw_mtx; /* Per-CPU timer mutex. */
+ u_int action; /* Reconfiguration requests. */
+ u_int handle; /* Immediate handle requests. */
+ struct bintime now; /* Last tick time. */
+ struct bintime nextevent; /* Next scheduled event on this CPU. */
+ struct bintime nexttick; /* Next timer tick time. */
+ struct bintime nexthard; /* Next hardclock() event. */
+ struct bintime nextstat; /* Next statclock() event. */
+ struct bintime nextprof; /* Next profclock() event. */
+ int ipi; /* This CPU needs IPI. */
+ int idle; /* This CPU is in idle mode. */
+};
+
+static DPCPU_DEFINE(struct pcpu_state, timerstate);
#define FREQ2BT(freq, bt) \
{ \
@@ -91,159 +143,325 @@ static DPCPU_DEFINE(tc, configtimer);
(((uint64_t)0x8000000000000000 + ((bt)->frac >> 2)) / \
((bt)->frac >> 1))
-/* Per-CPU timer1 handler. */
-static int
-hardclockhandler(struct trapframe *frame)
+/*
+ * Timer broadcast IPI handler.
+ */
+int
+hardclockintr(void)
{
+ struct bintime now;
+ struct pcpu_state *state;
+ int done;
+
+ if (doconfigtimer() || busy)
+ return (FILTER_HANDLED);
+ state = DPCPU_PTR(timerstate);
+ now = state->now;
+ CTR4(KTR_SPARE2, "ipi at %d: now %d.%08x%08x",
+ curcpu, now.sec, (unsigned int)(now.frac >> 32),
+ (unsigned int)(now.frac & 0xffffffff));
+ done = handleevents(&now, 0);
+ return (done ? FILTER_HANDLED : FILTER_STRAY);
+}
+/*
+ * Handle all events for specified time on this CPU
+ */
+static int
+handleevents(struct bintime *now, int fake)
+{
+ struct bintime t;
+ struct trapframe *frame;
+ struct pcpu_state *state;
+ uintfptr_t pc;
+ int usermode;
+ int done, runs;
+
+ CTR4(KTR_SPARE2, "handle at %d: now %d.%08x%08x",
+ curcpu, now->sec, (unsigned int)(now->frac >> 32),
+ (unsigned int)(now->frac & 0xffffffff));
+ done = 0;
+ if (fake) {
+ frame = NULL;
+ usermode = 0;
+ pc = 0;
+ } else {
+ frame = curthread->td_intr_frame;
+ usermode = TRAPF_USERMODE(frame);
+ pc = TRAPF_PC(frame);
+ }
#ifdef KDTRACE_HOOKS
/*
* If the DTrace hooks are configured and a callback function
* has been registered, then call it to process the high speed
* timers.
*/
- int cpu = curcpu;
- if (cyclic_clock_func[cpu] != NULL)
- (*cyclic_clock_func[cpu])(frame);
+ if (!fake && cyclic_clock_func[curcpu] != NULL)
+ (*cyclic_clock_func[curcpu])(frame);
#endif
-
- timer1clock(TRAPF_USERMODE(frame), TRAPF_PC(frame));
- return (FILTER_HANDLED);
-}
-
-/* Per-CPU timer2 handler. */
-static int
-statclockhandler(struct trapframe *frame)
-{
-
- timer2clock(TRAPF_USERMODE(frame), TRAPF_PC(frame));
- return (FILTER_HANDLED);
-}
-
-/* timer1 broadcast IPI handler. */
-int
-hardclockintr(struct trapframe *frame)
-{
-
- if (doconfigtimer(0))
- return (FILTER_HANDLED);
- return (hardclockhandler(frame));
+ runs = 0;
+ state = DPCPU_PTR(timerstate);
+ while (bintime_cmp(now, &state->nexthard, >=)) {
+ bintime_add(&state->nexthard, &hardperiod);
+ runs++;
+ }
+ if (runs) {
+ hardclock_anycpu(runs, usermode);
+ done = 1;
+ }
+ while (bintime_cmp(now, &state->nextstat, >=)) {
+ statclock(usermode);
+ bintime_add(&state->nextstat, &statperiod);
+ done = 1;
+ }
+ if (profiling) {
+ while (bintime_cmp(now, &state->nextprof, >=)) {
+ if (!fake)
+ profclock(usermode, pc);
+ bintime_add(&state->nextprof, &profperiod);
+ done = 1;
+ }
+ } else
+ state->nextprof = state->nextstat;
+ getnextcpuevent(&t, 0);
+ ET_HW_LOCK(state);
+ if (!busy) {
+ state->idle = 0;
+ state->nextevent = t;
+ loadtimer(now, 0);
+ }
+ ET_HW_UNLOCK(state);
+ return (done);
}
-/* timer2 broadcast IPI handler. */
-int
-statclockintr(struct trapframe *frame)
+/*
+ * Schedule binuptime of the next event on current CPU.
+ */
+static void
+getnextcpuevent(struct bintime *event, int idle)
{
-
- if (doconfigtimer(1))
- return (FILTER_HANDLED);
- return (statclockhandler(frame));
+ struct bintime tmp;
+ struct pcpu_state *state;
+ int skip;
+
+ state = DPCPU_PTR(timerstate);
+ *event = state->nexthard;
+ if (idle) { /* If CPU is idle - ask callouts for how long. */
+ skip = callout_tickstofirst() - 1;
+ CTR2(KTR_SPARE2, "skip at %d: %d", curcpu, skip);
+ tmp = hardperiod;
+ bintime_mul(&tmp, skip);
+ bintime_add(event, &tmp);
+ } else { /* If CPU is active - handle all types of events. */
+ if (bintime_cmp(event, &state->nextstat, >))
+ *event = state->nextstat;
+ if (profiling &&
+ bintime_cmp(event, &state->nextprof, >))
+ *event = state->nextprof;
+ }
}
-/* timer1 callback. */
+/*
+ * Schedule binuptime of the next event on all CPUs.
+ */
static void
-timer1cb(struct eventtimer *et, void *arg)
+getnextevent(struct bintime *event)
{
-
+ struct pcpu_state *state;
#ifdef SMP
- /* Broadcast interrupt to other CPUs for non-per-CPU timers */
- if (smp_started && (et->et_flags & ET_FLAGS_PERCPU) == 0)
- ipi_all_but_self(IPI_HARDCLOCK);
+ int cpu;
#endif
- if (timertest) {
- if ((et->et_flags & ET_FLAGS_PERCPU) == 0 || curcpu == 0) {
- timerticks[0]++;
- if (timerticks[0] >= timer1hz) {
- ET_LOCK();
- timercheck();
- ET_UNLOCK();
+ int c;
+
+ state = DPCPU_PTR(timerstate);
+ *event = state->nextevent;
+ c = curcpu;
+#ifdef SMP
+ if ((timer->et_flags & ET_FLAGS_PERCPU) == 0) {
+ CPU_FOREACH(cpu) {
+ if (curcpu == cpu)
+ continue;
+ state = DPCPU_ID_PTR(cpu, timerstate);
+ if (bintime_cmp(event, &state->nextevent, >)) {
+ *event = state->nextevent;
+ c = cpu;
}
}
}
- hardclockhandler(curthread->td_intr_frame);
+#endif
+ CTR5(KTR_SPARE2, "next at %d: next %d.%08x%08x by %d",
+ curcpu, event->sec, (unsigned int)(event->frac >> 32),
+ (unsigned int)(event->frac & 0xffffffff), c);
}
-/* timer2 callback. */
+/* Hardware timer callback function. */
static void
-timer2cb(struct eventtimer *et, void *arg)
+timercb(struct eventtimer *et, void *arg)
{
+ struct bintime now;
+ struct bintime *next;
+ struct pcpu_state *state;
+#ifdef SMP
+ int cpu, bcast;
+#endif
+
+ /* Do not touch anything if somebody reconfiguring timers. */
+ if (busy)
+ return;
+ /* Update present and next tick times. */
+ state = DPCPU_PTR(timerstate);
+ if (et->et_flags & ET_FLAGS_PERCPU) {
+ next = &state->nexttick;
+ } else
+ next = &nexttick;
+ if (periodic) {
+ now = *next; /* Ex-next tick time becomes present time. */
+ bintime_add(next, &timerperiod); /* Next tick in 1 period. */
+ } else {
+ binuptime(&now); /* Get present time from hardware. */
+ next->sec = -1; /* Next tick is not scheduled yet. */
+ }
+ state->now = now;
+ CTR4(KTR_SPARE2, "intr at %d: now %d.%08x%08x",
+ curcpu, now.sec, (unsigned int)(now.frac >> 32),
+ (unsigned int)(now.frac & 0xffffffff));
#ifdef SMP
- /* Broadcast interrupt to other CPUs for non-per-CPU timers */
- if (smp_started && (et->et_flags & ET_FLAGS_PERCPU) == 0)
- ipi_all_but_self(IPI_STATCLOCK);
+ /* Prepare broadcasting to other CPUs for non-per-CPU timers. */
+ bcast = 0;
+ if ((et->et_flags & ET_FLAGS_PERCPU) == 0 && smp_started) {
+ CPU_FOREACH(cpu) {
+ if (curcpu == cpu)
+ continue;
+ state = DPCPU_ID_PTR(cpu, timerstate);
+ ET_HW_LOCK(state);
+ state->now = now;
+ if (bintime_cmp(&now, &state->nextevent, >=)) {
+ state->nextevent.sec++;
+ state->ipi = 1;
+ bcast = 1;
+ }
+ ET_HW_UNLOCK(state);
+ }
+ }
#endif
- if (timertest) {
- if ((et->et_flags & ET_FLAGS_PERCPU) == 0 || curcpu == 0) {
- timerticks[1]++;
- if (timerticks[1] >= timer2hz * 2) {
- ET_LOCK();
- timercheck();
- ET_UNLOCK();
+
+ /* Handle events for this time on this CPU. */
+ handleevents(&now, 0);
+
+#ifdef SMP
+ /* Broadcast interrupt to other CPUs for non-per-CPU timers. */
+ if (bcast) {
+ CPU_FOREACH(cpu) {
+ if (curcpu == cpu)
+ continue;
+ state = DPCPU_ID_PTR(cpu, timerstate);
+ if (state->ipi) {
+ state->ipi = 0;
+ ipi_cpu(cpu, IPI_HARDCLOCK);
}
}
}
- statclockhandler(curthread->td_intr_frame);
+#endif
}
/*
- * Check that both timers are running with at least 1/4 of configured rate.
- * If not - replace the broken one.
+ * Load new value into hardware timer.
*/
static void
-timercheck(void)
+loadtimer(struct bintime *now, int start)
{
-
- if (!timertest)
- return;
- timertest = 0;
- if (timerticks[0] * 4 < timer1hz) {
- printf("Event timer \"%s\" is dead.\n", timer[0]->et_name);
- timer1hz = 0;
- configtimer(0);
- et_ban(timer[0]);
- et_free(timer[0]);
- timer[0] = et_find(NULL, ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
- if (timer[0] == NULL) {
- timer2hz = 0;
- configtimer(1);
- et_free(timer[1]);
- timer[1] = NULL;
- timer[0] = timer[1];
+ struct pcpu_state *state;
+ struct bintime new;
+ struct bintime *next;
+ uint64_t tmp;
+ int eq;
+
+ if (periodic) {
+ if (start) {
+ /*
+ * Try to start all periodic timers aligned
+ * to period to make events synchronous.
+ */
+ tmp = ((uint64_t)now->sec << 36) + (now->frac >> 28);
+ tmp = (tmp % (timerperiod.frac >> 28)) << 28;
+ tmp = timerperiod.frac - tmp;
+ new = timerperiod;
+ bintime_addx(&new, tmp);
+ CTR5(KTR_SPARE2, "load p at %d: now %d.%08x first in %d.%08x",
+ curcpu, now->sec, (unsigned int)(now->frac >> 32),
+ new.sec, (unsigned int)(new.frac >> 32));
+ et_start(timer, &new, &timerperiod);
+ }
+ } else {
+ if (timer->et_flags & ET_FLAGS_PERCPU) {
+ state = DPCPU_PTR(timerstate);
+ next = &state->nexttick;
+ } else
+ next = &nexttick;
+ getnextevent(&new);
+ eq = bintime_cmp(&new, next, ==);
+ CTR5(KTR_SPARE2, "load at %d: next %d.%08x%08x eq %d",
+ curcpu, new.sec, (unsigned int)(new.frac >> 32),
+ (unsigned int)(new.frac & 0xffffffff),
+ eq);
+ if (!eq) {
+ *next = new;
+ bintime_sub(&new, now);
+ et_start(timer, &new, NULL);
}
- et_init(timer[0], timer1cb, NULL, NULL);
- cpu_restartclocks();
- return;
- }
- if (timerticks[1] * 4 < timer2hz) {
- printf("Event timer \"%s\" is dead.\n", timer[1]->et_name);
- timer2hz = 0;
- configtimer(1);
- et_ban(timer[1]);
- et_free(timer[1]);
- timer[1] = et_find(NULL, ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
- if (timer[1] != NULL)
- et_init(timer[1], timer2cb, NULL, NULL);
- cpu_restartclocks();
- return;
}
}
/*
- * Reconfigure specified per-CPU timer on other CPU. Called from IPI handler.
+ * Prepare event timer parameters after configuration changes.
*/
-inline static int
-doconfigtimer(int i)
+static void
+setuptimer(void)
{
- tc *conf;
+ int freq;
+
+ if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
+ periodic = 0;
+ else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
+ periodic = 1;
+ freq = hz * singlemul;
+ while (freq < (profiling ? profhz : stathz))
+ freq += hz;
+ freq = round_freq(timer, freq);
+ FREQ2BT(freq, &timerperiod);
+}
- conf = DPCPU_PTR(configtimer);
- if (atomic_load_acq_int(*conf + i)) {
- if (i == 0 ? timer1hz : timer2hz)
- et_start(timer[i], NULL, &timerperiod[i]);
- else
- et_stop(timer[i]);
- atomic_store_rel_int(*conf + i, 0);
+/*
+ * Reconfigure specified per-CPU timer on other CPU. Called from IPI handler.
+ */
+static int
+doconfigtimer(void)
+{
+ struct bintime now;
+ struct pcpu_state *state;
+
+ state = DPCPU_PTR(timerstate);
+ switch (atomic_load_acq_int(&state->action)) {
+ case 1:
+ binuptime(&now);
+ ET_HW_LOCK(state);
+ loadtimer(&now, 1);
+ ET_HW_UNLOCK(state);
+ state->handle = 0;
+ atomic_store_rel_int(&state->action, 0);
+ return (1);
+ case 2:
+ ET_HW_LOCK(state);
+ et_stop(timer);
+ ET_HW_UNLOCK(state);
+ state->handle = 0;
+ atomic_store_rel_int(&state->action, 0);
+ return (1);
+ }
+ if (atomic_readandclear_int(&state->handle) && !busy) {
+ binuptime(&now);
+ handleevents(&now, 0);
return (1);
}
return (0);
@@ -254,45 +472,79 @@ doconfigtimer(int i)
* For per-CPU timers use IPI to make other CPUs to reconfigure.
*/
static void
-configtimer(int i)
+configtimer(int start)
{
-#ifdef SMP
- tc *conf;
+ struct bintime now, next;
+ struct pcpu_state *state;
int cpu;
+ if (start) {
+ setuptimer();
+ binuptime(&now);
+ }
critical_enter();
-#endif
- /* Start/stop global timer or per-CPU timer of this CPU. */
- if (i == 0 ? timer1hz : timer2hz)
- et_start(timer[i], NULL, &timerperiod[i]);
- else
- et_stop(timer[i]);
+ ET_HW_LOCK(DPCPU_PTR(timerstate));
+ if (start) {
+ /* Initialize time machine parameters. */
+ next = now;
+ bintime_add(&next, &timerperiod);
+ if (periodic)
+ nexttick = next;
+ else
+ nexttick.sec = -1;
+ CPU_FOREACH(cpu) {
+ state = DPCPU_ID_PTR(cpu, timerstate);
+ state->now = now;
+ state->nextevent = next;
+ if (periodic)
+ state->nexttick = next;
+ else
+ state->nexttick.sec = -1;
+ state->nexthard = next;
+ state->nextstat = next;
+ state->nextprof = next;
+ hardclock_sync(cpu);
+ }
+ busy = 0;
+ /* Start global timer or per-CPU timer of this CPU. */
+ loadtimer(&now, 1);
+ } else {
+ busy = 1;
+ /* Stop global timer or per-CPU timer of this CPU. */
+ et_stop(timer);
+ }
+ ET_HW_UNLOCK(DPCPU_PTR(timerstate));
#ifdef SMP
- if ((timer[i]->et_flags & ET_FLAGS_PERCPU) == 0 || !smp_started) {
+ /* If timer is global or there is no other CPUs yet - we are done. */
+ if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 || !smp_started) {
critical_exit();
return;
}
/* Set reconfigure flags for other CPUs. */
CPU_FOREACH(cpu) {
- conf = DPCPU_ID_PTR(cpu, configtimer);
- atomic_store_rel_int(*conf + i, (cpu == curcpu) ? 0 : 1);
+ state = DPCPU_ID_PTR(cpu, timerstate);
+ atomic_store_rel_int(&state->action,
+ (cpu == curcpu) ? 0 : ( start ? 1 : 2));
}
- /* Send reconfigure IPI. */
- ipi_all_but_self(i == 0 ? IPI_HARDCLOCK : IPI_STATCLOCK);
+ /* Broadcast reconfigure IPI. */
+ ipi_all_but_self(IPI_HARDCLOCK);
/* Wait for reconfiguration completed. */
restart:
cpu_spinwait();
CPU_FOREACH(cpu) {
if (cpu == curcpu)
continue;
- conf = DPCPU_ID_PTR(cpu, configtimer);
- if (atomic_load_acq_int(*conf + i))
+ state = DPCPU_ID_PTR(cpu, timerstate);
+ if (atomic_load_acq_int(&state->action))
goto restart;
}
- critical_exit();
#endif
+ critical_exit();
}
+/*
+ * Calculate nearest frequency supported by hardware timer.
+ */
static int
round_freq(struct eventtimer *et, int freq)
{
@@ -314,23 +566,49 @@ round_freq(struct eventtimer *et, int freq)
}
/*
- * Configure and start event timers.
+ * Configure and start event timers (BSP part).
*/
void
cpu_initclocks_bsp(void)
{
- int base, div;
+ struct pcpu_state *state;
+ int base, div, cpu;
- timer[0] = et_find(timername[0], ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
- if (timer[0] == NULL)
- timer[0] = et_find(NULL, ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
- if (timer[0] == NULL)
+ mtx_init(&et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
+ CPU_FOREACH(cpu) {
+ state = DPCPU_ID_PTR(cpu, timerstate);
+ mtx_init(&state->et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
+ }
+#ifdef SMP
+ callout_new_inserted = cpu_new_callout;
+#endif
+ /* Grab requested timer or the best of present. */
+ if (timername[0])
+ timer = et_find(timername, 0, 0);
+ if (timer == NULL && periodic) {
+ timer = et_find(NULL,
+ ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
+ }
+ if (timer == NULL) {
+ timer = et_find(NULL,
+ ET_FLAGS_ONESHOT, ET_FLAGS_ONESHOT);
+ }
+ if (timer == NULL && !periodic) {
+ timer = et_find(NULL,
+ ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
+ }
+ if (timer == NULL)
panic("No usable event timer found!");
- et_init(timer[0], timer1cb, NULL, NULL);
- timer[1] = et_find(timername[1][0] ? timername[1] : NULL,
- ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
- if (timer[1])
- et_init(timer[1], timer2cb, NULL, NULL);
+ et_init(timer, timercb, NULL, NULL);
+
+ /* Adapt to timer capabilities. */
+ if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
+ periodic = 0;
+ else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
+ periodic = 1;
+ if (timer->et_flags & ET_FLAGS_C3STOP)
+ cpu_disable_deep_sleep++;
+
/*
* We honor the requested 'hz' value.
* We want to run stathz in the neighborhood of 128hz.
@@ -344,8 +622,8 @@ cpu_initclocks_bsp(void)
else
singlemul = 4;
}
- if (timer[1] == NULL) {
- base = round_freq(timer[0], hz * singlemul);
+ if (periodic) {
+ base = round_freq(timer, hz * singlemul);
singlemul = max((base + hz / 2) / hz, 1);
hz = (base + singlemul / 2) / singlemul;
if (base <= 128)
@@ -359,175 +637,236 @@ cpu_initclocks_bsp(void)
profhz = stathz;
while ((profhz + stathz) <= 128 * 64)
profhz += stathz;
- profhz = round_freq(timer[0], profhz);
+ profhz = round_freq(timer, profhz);
} else {
- hz = round_freq(timer[0], hz);
- stathz = round_freq(timer[1], 127);
- profhz = round_freq(timer[1], stathz * 64);
+ hz = round_freq(timer, hz);
+ stathz = round_freq(timer, 127);
+ profhz = round_freq(timer, stathz * 64);
}
tick = 1000000 / hz;
+ FREQ2BT(hz, &hardperiod);
+ FREQ2BT(stathz, &statperiod);
+ FREQ2BT(profhz, &profperiod);
ET_LOCK();
- cpu_restartclocks();
+ configtimer(1);
ET_UNLOCK();
}
-/* Start per-CPU event timers on APs. */
+/*
+ * Start per-CPU event timers on APs.
+ */
void
cpu_initclocks_ap(void)
{
+ struct bintime now;
+ struct pcpu_state *state;
+
+ if (timer->et_flags & ET_FLAGS_PERCPU) {
+ state = DPCPU_PTR(timerstate);
+ binuptime(&now);
+ ET_HW_LOCK(state);
+ loadtimer(&now, 1);
+ ET_HW_UNLOCK(state);
+ }
+}
+
+/*
+ * Switch to profiling clock rates.
+ */
+void
+cpu_startprofclock(void)
+{
ET_LOCK();
- if (timer[0]->et_flags & ET_FLAGS_PERCPU)
- et_start(timer[0], NULL, &timerperiod[0]);
- if (timer[1] && timer[1]->et_flags & ET_FLAGS_PERCPU)
- et_start(timer[1], NULL, &timerperiod[1]);
+ if (periodic) {
+ configtimer(0);
+ profiling = 1;
+ configtimer(1);
+ } else
+ profiling = 1;
ET_UNLOCK();
}
-/* Reconfigure and restart event timers after configuration changes. */
-static void
-cpu_restartclocks(void)
+/*
+ * Switch to regular clock rates.
+ */
+void
+cpu_stopprofclock(void)
{
- /* Stop all event timers. */
- timertest = 0;
- if (timer1hz) {
- timer1hz = 0;
+ ET_LOCK();
+ if (periodic) {
configtimer(0);
- }
- if (timer[1] && timer2hz) {
- timer2hz = 0;
+ profiling = 0;
configtimer(1);
- }
- /* Calculate new event timers parameters. */
- if (timer[1] == NULL) {
- timer1hz = hz * singlemul;
- while (timer1hz < (profiling_on ? profhz : stathz))
- timer1hz += hz;
- timer2hz = 0;
- } else {
- timer1hz = hz;
- timer2hz = profiling_on ? profhz : stathz;
- timer2hz = round_freq(timer[1], timer2hz);
- }
- timer1hz = round_freq(timer[0], timer1hz);
- printf("Starting kernel event timers: %s @ %dHz, %s @ %dHz\n",
- timer[0]->et_name, timer1hz,
- timer[1] ? timer[1]->et_name : "NONE", timer2hz);
- /* Restart event timers. */
- FREQ2BT(timer1hz, &timerperiod[0]);
- configtimer(0);
- if (timer[1]) {
- timerticks[0] = 0;
- timerticks[1] = 0;
- FREQ2BT(timer2hz, &timerperiod[1]);
- configtimer(1);
- timertest = 1;
- }
+ } else
+ profiling = 0;
+ ET_UNLOCK();
}
-/* Switch to profiling clock rates. */
+/*
+ * Switch to idle mode (all ticks handled).
+ */
void
-cpu_startprofclock(void)
+cpu_idleclock(void)
{
+ struct bintime now, t;
+ struct pcpu_state *state;
- ET_LOCK();
- profiling_on = 1;
- cpu_restartclocks();
- ET_UNLOCK();
+ if (idletick || busy ||
+ (periodic && (timer->et_flags & ET_FLAGS_PERCPU)))
+ return;
+ state = DPCPU_PTR(timerstate);
+ if (periodic)
+ now = state->now;
+ else
+ binuptime(&now);
+ CTR4(KTR_SPARE2, "idle at %d: now %d.%08x%08x",
+ curcpu, now.sec, (unsigned int)(now.frac >> 32),
+ (unsigned int)(now.frac & 0xffffffff));
+ getnextcpuevent(&t, 1);
+ ET_HW_LOCK(state);
+ state->idle = 1;
+ state->nextevent = t;
+ if (!periodic)
+ loadtimer(&now, 0);
+ ET_HW_UNLOCK(state);
}
-/* Switch to regular clock rates. */
+/*
+ * Switch to active mode (skip empty ticks).
+ */
void
-cpu_stopprofclock(void)
+cpu_activeclock(void)
{
+ struct bintime now;
+ struct pcpu_state *state;
+ struct thread *td;
- ET_LOCK();
- profiling_on = 0;
- cpu_restartclocks();
- ET_UNLOCK();
+ state = DPCPU_PTR(timerstate);
+ if (state->idle == 0 || busy)
+ return;
+ if (periodic)
+ now = state->now;
+ else
+ binuptime(&now);
+ CTR4(KTR_SPARE2, "active at %d: now %d.%08x%08x",
+ curcpu, now.sec, (unsigned int)(now.frac >> 32),
+ (unsigned int)(now.frac & 0xffffffff));
+ spinlock_enter();
+ td = curthread;
+ td->td_intr_nesting_level++;
+ handleevents(&now, 1);
+ td->td_intr_nesting_level--;
+ spinlock_exit();
}
-/* Report or change the active event timers hardware. */
+#ifdef SMP
+static void
+cpu_new_callout(int cpu, int ticks)
+{
+ struct bintime tmp;
+ struct pcpu_state *state;
+
+ CTR3(KTR_SPARE2, "new co at %d: on %d in %d",
+ curcpu, cpu, ticks);
+ state = DPCPU_ID_PTR(cpu, timerstate);
+ ET_HW_LOCK(state);
+ if (state->idle == 0 || busy) {
+ ET_HW_UNLOCK(state);
+ return;
+ }
+ /*
+ * If timer is periodic - just update next event time for target CPU.
+ */
+ if (periodic) {
+ state->nextevent = state->nexthard;
+ tmp = hardperiod;
+ bintime_mul(&tmp, ticks - 1);
+ bintime_add(&state->nextevent, &tmp);
+ ET_HW_UNLOCK(state);
+ return;
+ }
+ /*
+ * Otherwise we have to wake that CPU up, as we can't get present
+ * bintime to reprogram global timer from here. If timer is per-CPU,
+ * we by definition can't do it from here.
+ */
+ ET_HW_UNLOCK(state);
+ if (timer->et_flags & ET_FLAGS_PERCPU) {
+ state->handle = 1;
+ ipi_cpu(cpu, IPI_HARDCLOCK);
+ } else {
+ if (!cpu_idle_wakeup(cpu))
+ ipi_cpu(cpu, IPI_AST);
+ }
+}
+#endif
+
+/*
+ * Report or change the active event timers hardware.
+ */
static int
-sysctl_kern_eventtimer_timer1(SYSCTL_HANDLER_ARGS)
+sysctl_kern_eventtimer_timer(SYSCTL_HANDLER_ARGS)
{
char buf[32];
struct eventtimer *et;
int error;
ET_LOCK();
- et = timer[0];
+ et = timer;
snprintf(buf, sizeof(buf), "%s", et->et_name);
ET_UNLOCK();
error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
ET_LOCK();
- et = timer[0];
+ et = timer;
if (error != 0 || req->newptr == NULL ||
- strcmp(buf, et->et_name) == 0) {
+ strcasecmp(buf, et->et_name) == 0) {
ET_UNLOCK();
return (error);
}
- et = et_find(buf, ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
+ et = et_find(buf, 0, 0);
if (et == NULL) {
ET_UNLOCK();
return (ENOENT);
}
- timer1hz = 0;
configtimer(0);
- et_free(timer[0]);
- timer[0] = et;
- et_init(timer[0], timer1cb, NULL, NULL);
- cpu_restartclocks();
+ et_free(timer);
+ if (et->et_flags & ET_FLAGS_C3STOP)
+ cpu_disable_deep_sleep++;
+ if (timer->et_flags & ET_FLAGS_C3STOP)
+ cpu_disable_deep_sleep--;
+ timer = et;
+ et_init(timer, timercb, NULL, NULL);
+ configtimer(1);
ET_UNLOCK();
return (error);
}
-SYSCTL_PROC(_kern_eventtimer, OID_AUTO, timer1,
+SYSCTL_PROC(_kern_eventtimer, OID_AUTO, timer,
CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
- 0, 0, sysctl_kern_eventtimer_timer1, "A", "Primary event timer");
+ 0, 0, sysctl_kern_eventtimer_timer, "A", "Kernel event timer");
+/*
+ * Report or change the active event timer periodicity.
+ */
static int
-sysctl_kern_eventtimer_timer2(SYSCTL_HANDLER_ARGS)
+sysctl_kern_eventtimer_periodic(SYSCTL_HANDLER_ARGS)
{
- char buf[32];
- struct eventtimer *et;
- int error;
+ int error, val;
- ET_LOCK();
- et = timer[1];
- if (et == NULL)
- snprintf(buf, sizeof(buf), "NONE");
- else
- snprintf(buf, sizeof(buf), "%s", et->et_name);
- ET_UNLOCK();
- error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
- ET_LOCK();
- et = timer[1];
- if (error != 0 || req->newptr == NULL ||
- strcmp(buf, et ? et->et_name : "NONE") == 0) {
- ET_UNLOCK();
+ val = periodic;
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error != 0 || req->newptr == NULL)
return (error);
- }
- et = et_find(buf, ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
- if (et == NULL && strcasecmp(buf, "NONE") != 0) {
- ET_UNLOCK();
- return (ENOENT);
- }
- if (timer[1] != NULL) {
- timer2hz = 0;
- configtimer(1);
- et_free(timer[1]);
- }
- timer[1] = et;
- if (timer[1] != NULL)
- et_init(timer[1], timer2cb, NULL, NULL);
- cpu_restartclocks();
+ ET_LOCK();
+ configtimer(0);
+ periodic = val;
+ configtimer(1);
ET_UNLOCK();
return (error);
}
-SYSCTL_PROC(_kern_eventtimer, OID_AUTO, timer2,
- CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
- 0, 0, sysctl_kern_eventtimer_timer2, "A", "Secondary event timer");
+SYSCTL_PROC(_kern_eventtimer, OID_AUTO, periodic,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
+ 0, 0, sysctl_kern_eventtimer_periodic, "I", "Kernel event timer periodic");
#endif
-
diff --git a/sys/kern/kern_et.c b/sys/kern/kern_et.c
index 17b9c6764226..8c375561c42b 100644
--- a/sys/kern/kern_et.c
+++ b/sys/kern/kern_et.c
@@ -38,7 +38,7 @@ SLIST_HEAD(et_eventtimers_list, eventtimer);
static struct et_eventtimers_list eventtimers = SLIST_HEAD_INITIALIZER(et_eventtimers);
struct mtx et_eventtimers_mtx;
-MTX_SYSINIT(et_eventtimers_init, &et_eventtimers_mtx, "et_mtx", MTX_SPIN);
+MTX_SYSINIT(et_eventtimers_init, &et_eventtimers_mtx, "et_mtx", MTX_DEF);
SYSCTL_NODE(_kern, OID_AUTO, eventtimer, CTLFLAG_RW, 0, "Event timers");
SYSCTL_NODE(_kern_eventtimer, OID_AUTO, et, CTLFLAG_RW, 0, "");
diff --git a/sys/kern/kern_tc.c b/sys/kern/kern_tc.c
index d97334849ced..811b24f30e53 100644
--- a/sys/kern/kern_tc.c
+++ b/sys/kern/kern_tc.c
@@ -770,16 +770,11 @@ void
tc_ticktock(void)
{
static int count;
- static time_t last_calib;
if (++count < tc_tick)
return;
count = 0;
tc_windup();
- if (time_uptime != last_calib && !(time_uptime & 0xf)) {
- cpu_tick_calibrate(0);
- last_calib = time_uptime;
- }
}
static void
@@ -830,9 +825,20 @@ tc_cpu_ticks(void)
return (u + base);
}
+void
+cpu_tick_calibration(void)
+{
+ static time_t last_calib;
+
+ if (time_uptime != last_calib && !(time_uptime & 0xf)) {
+ cpu_tick_calibrate(0);
+ last_calib = time_uptime;
+ }
+}
+
/*
* This function gets called every 16 seconds on only one designated
- * CPU in the system from hardclock() via tc_ticktock().
+ * CPU in the system from hardclock() via cpu_tick_calibration().
*
* Whenever the real time clock is stepped we get called with reset=1
* to make sure we handle suspend/resume and similar events correctly.
diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
index 32d5691812d9..569779235481 100644
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -111,6 +111,7 @@ struct callout_cpu {
int cc_softticks;
int cc_cancel;
int cc_waiting;
+ int cc_firsttick;
};
#ifdef SMP
@@ -126,6 +127,7 @@ struct callout_cpu cc_cpu;
#define CC_UNLOCK(cc) mtx_unlock_spin(&(cc)->cc_lock)
static int timeout_cpu;
+void (*callout_new_inserted)(int cpu, int ticks) = NULL;
MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");
@@ -260,7 +262,7 @@ callout_tick(void)
need_softclock = 0;
cc = CC_SELF();
mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
- cc->cc_ticks++;
+ cc->cc_firsttick = cc->cc_ticks = ticks;
for (; (cc->cc_softticks - cc->cc_ticks) <= 0; cc->cc_softticks++) {
bucket = cc->cc_softticks & callwheelmask;
if (!TAILQ_EMPTY(&cc->cc_callwheel[bucket])) {
@@ -277,6 +279,34 @@ callout_tick(void)
swi_sched(cc->cc_cookie, 0);
}
+int
+callout_tickstofirst(void)
+{
+ struct callout_cpu *cc;
+ struct callout *c;
+ struct callout_tailq *sc;
+ int curticks;
+ int skip = 1;
+
+ cc = CC_SELF();
+ mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
+ curticks = cc->cc_ticks;
+ while( skip < ncallout && skip < hz/8 ) {
+ sc = &cc->cc_callwheel[ (curticks+skip) & callwheelmask ];
+ /* search scanning ticks */
+ TAILQ_FOREACH( c, sc, c_links.tqe ){
+ if (c && (c->c_time <= curticks + ncallout)
+ && (c->c_time > 0))
+ goto out;
+ }
+ skip++;
+ }
+out:
+ cc->cc_firsttick = curticks + skip;
+ mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
+ return (skip);
+}
+
static struct callout_cpu *
callout_lock(struct callout *c)
{
@@ -639,9 +669,14 @@ retry:
c->c_arg = arg;
c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
c->c_func = ftn;
- c->c_time = cc->cc_ticks + to_ticks;
+ c->c_time = ticks + to_ticks;
TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask],
c, c_links.tqe);
+ if ((c->c_time - cc->cc_firsttick) < 0) {
+ cc->cc_firsttick = c->c_time;
+ (*callout_new_inserted)(cpu,
+ to_ticks + (ticks - cc->cc_ticks));
+ }
CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
CC_UNLOCK(cc);
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 780dc6d1760b..9face648de93 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -1547,7 +1547,7 @@ sched_pctcpu(struct thread *td)
}
void
-sched_tick(void)
+sched_tick(int cnt)
{
}
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index bb2d34a37cac..e1cc172592ff 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -196,7 +196,7 @@ static int preempt_thresh = 0;
#endif
static int static_boost = PRI_MIN_TIMESHARE;
static int sched_idlespins = 10000;
-static int sched_idlespinthresh = 64;
+static int sched_idlespinthresh = 16;
/*
* tdq - per processor runqs and statistics. All fields are protected by the
@@ -2163,7 +2163,7 @@ sched_clock(struct thread *td)
* is easier than trying to scale based on stathz.
*/
void
-sched_tick(void)
+sched_tick(int cnt)
{
struct td_sched *ts;
@@ -2175,7 +2175,7 @@ sched_tick(void)
if (ts->ts_incrtick == ticks)
return;
/* Adjust ticks for pctcpu */
- ts->ts_ticks += 1 << SCHED_TICK_SHIFT;
+ ts->ts_ticks += cnt << SCHED_TICK_SHIFT;
ts->ts_ltick = ticks;
ts->ts_incrtick = ticks;
/*
@@ -2549,7 +2549,7 @@ sched_idletd(void *dummy)
if (tdq->tdq_load == 0) {
tdq->tdq_cpu_idle = 1;
if (tdq->tdq_load == 0) {
- cpu_idle(switchcnt > sched_idlespinthresh);
+ cpu_idle(switchcnt > sched_idlespinthresh * 4);
tdq->tdq_switchcnt++;
}
tdq->tdq_cpu_idle = 0;