diff options
author | Ian Lepore <ian@FreeBSD.org> | 2017-07-31 01:18:21 +0000 |
---|---|---|
committer | Ian Lepore <ian@FreeBSD.org> | 2017-07-31 01:18:21 +0000 |
commit | cd9d9e5417583b89e1d0a505182ebc33e4d1fad3 (patch) | |
tree | 677913015155f19ac1a9c052f1c80d714ea10f89 /sys | |
parent | d0f68f913b2768369f266c0ff4b13b09586fd56b (diff) | |
download | src-cd9d9e5417583b89e1d0a505182ebc33e4d1fad3.tar.gz src-cd9d9e5417583b89e1d0a505182ebc33e4d1fad3.zip |
Add clock_schedule(), a feature that allows realtime clock drivers to
request that their clock_settime() methods be called at a given offset
from top-of-second. This adds a timeout_task to the rtc_instance so that
each clock can be separately added to taskqueue_thread with the scheduling
it prefers, instead of looping through all the clocks at once with a
single task on taskqueue_thread. If a driver doesn't call clock_schedule()
the default is the old behavior: clock_settime() is queued immediately.
The motivation behind this is that I was on the path of adding identical
code to a third RTC driver to figure out a delta to top-of-second and
sleep for that amount of time because writing the RTC registers resets
the hardware's concept of top-of-second. (Sometimes it's not top-of-second;
some RTC clocks tick over a half second after you set their time registers.)
Worst-case would be to sleep for almost a full second, which is a rude thing
to do on a shared task queue thread.
Notes
Notes:
svn path=/head/; revision=321745
Diffstat (limited to 'sys')
-rw-r--r-- | sys/kern/subr_rtc.c | 77 | ||||
-rw-r--r-- | sys/sys/clock.h | 14 |
2 files changed, 69 insertions, 22 deletions
diff --git a/sys/kern/subr_rtc.c b/sys/kern/subr_rtc.c index a0884852ab33..29b088bcc78d 100644 --- a/sys/kern/subr_rtc.c +++ b/sys/kern/subr_rtc.c @@ -95,7 +95,10 @@ struct rtc_instance { device_t clockdev; int resolution; int flags; + u_int schedns; struct timespec resadj; + struct timeout_task + stask; LIST_ENTRY(rtc_instance) rtc_entries; }; @@ -104,7 +107,6 @@ struct rtc_instance { * Clocks are updated using a task running on taskqueue_thread. */ static void settime_task_func(void *arg, int pending); -static struct task settime_task = TASK_INITIALIZER(0, settime_task_func, NULL); /* * Registered clocks are kept in a list which is sorted by resolution; the more @@ -116,9 +118,9 @@ static struct sx rtc_list_lock; SX_SYSINIT(rtc_list_lock_init, &rtc_list_lock, "rtc list"); /* - * On the task thread, invoke the clock_settime() method of each registered - * clock. Do so holding only an sxlock, so that clock drivers are free to do - * whatever kind of locking or sleeping they need to. + * On the task thread, invoke the clock_settime() method of the clock. Do so + * holding no locks, so that clock drivers are free to do whatever kind of + * locking or sleeping they need to. 
*/ static void settime_task_func(void *arg, int pending) @@ -126,21 +128,18 @@ settime_task_func(void *arg, int pending) struct timespec ts; struct rtc_instance *rtc; - sx_xlock(&rtc_list_lock); - LIST_FOREACH(rtc, &rtc_list, rtc_entries) { - if (!(rtc->flags & CLOCKF_SETTIME_NO_TS)) { - getnanotime(&ts); - if (!(rtc->flags & CLOCKF_SETTIME_NO_ADJ)) { - ts.tv_sec -= utc_offset(); - timespecadd(&ts, &rtc->resadj); - } - } else { - ts.tv_sec = 0; - ts.tv_nsec = 0; + rtc = arg; + if (!(rtc->flags & CLOCKF_SETTIME_NO_TS)) { + getnanotime(&ts); + if (!(rtc->flags & CLOCKF_SETTIME_NO_ADJ)) { + ts.tv_sec -= utc_offset(); + timespecadd(&ts, &rtc->resadj); } - CLOCK_SETTIME(rtc->clockdev, &ts); + } else { + ts.tv_sec = 0; + ts.tv_nsec = 0; } - sx_xunlock(&rtc_list_lock); + CLOCK_SETTIME(rtc->clockdev, &ts); } void @@ -152,8 +151,11 @@ clock_register_flags(device_t clockdev, long resolution, int flags) newrtc->clockdev = clockdev; newrtc->resolution = (int)resolution; newrtc->flags = flags; + newrtc->schedns = 0; newrtc->resadj.tv_sec = newrtc->resolution / 2 / 1000000; newrtc->resadj.tv_nsec = newrtc->resolution / 2 % 1000000 * 1000; + TIMEOUT_TASK_INIT(taskqueue_thread, &newrtc->stask, 0, + settime_task_func, newrtc); sx_xlock(&rtc_list_lock); if (LIST_EMPTY(&rtc_list)) { @@ -192,7 +194,27 @@ clock_unregister(device_t clockdev) LIST_FOREACH_SAFE(rtc, &rtc_list, rtc_entries, tmp) { if (rtc->clockdev == clockdev) { LIST_REMOVE(rtc, rtc_entries); - free(rtc, M_DEVBUF); + break; + } + } + sx_xunlock(&rtc_list_lock); + if (rtc != NULL) { + taskqueue_cancel_timeout(taskqueue_thread, &rtc->stask, NULL); + taskqueue_drain_timeout(taskqueue_thread, &rtc->stask); + free(rtc, M_DEVBUF); + } +} + +void +clock_schedule(device_t clockdev, u_int offsetns) +{ + struct rtc_instance *rtc; + + sx_xlock(&rtc_list_lock); + LIST_FOREACH(rtc, &rtc_list, rtc_entries) { + if (rtc->clockdev == clockdev) { + rtc->schedns = offsetns; + break; } } sx_xunlock(&rtc_list_lock); @@ -275,9 +297,26 @@ 
inittodr(time_t base) void resettodr(void) { + struct timespec now; + struct rtc_instance *rtc; + sbintime_t sbt; + long waitns; if (disable_rtc_set) return; - taskqueue_enqueue(taskqueue_thread, &settime_task); + sx_xlock(&rtc_list_lock); + LIST_FOREACH(rtc, &rtc_list, rtc_entries) { + if (rtc->schedns != 0) { + getnanotime(&now); + waitns = rtc->schedns - now.tv_nsec; + if (waitns < 0) + waitns += 1000000000; + sbt = nstosbt(waitns); + } else + sbt = 0; + taskqueue_enqueue_timeout_sbt(taskqueue_thread, + &rtc->stask, -sbt, 0, C_PREL(31)); + } + sx_xunlock(&rtc_list_lock); } diff --git a/sys/sys/clock.h b/sys/sys/clock.h index 696ff55eee14..a5e625a81422 100644 --- a/sys/sys/clock.h +++ b/sys/sys/clock.h @@ -77,9 +77,16 @@ int clock_ct_to_ts(struct clocktime *, struct timespec *); void clock_ts_to_ct(struct timespec *, struct clocktime *); /* - * Time-of-day clock register/unregister functions, and associated flags. These - * functions can sleep. Upon return from unregister, the clock's methods are - * not running and will not be called again. + * Time-of-day clock functions and flags. These functions might sleep. + * + * clock_register and clock_unregister() do what they say. Upon return from + * unregister, the clock's methods are not running and will not be called again. + * + * clock_schedule() requests that a registered clock's clock_settime() calls + * happen at the given offset into the second. The default is 0, meaning no + * specific scheduling. To schedule the call as soon after top-of-second as + * possible, specify 1. Each clock has its own schedule, but taskqueue_thread + * is shared by many tasks; the timing of the call is not guaranteed. 
* * Flags: * @@ -102,6 +109,7 @@ void clock_ts_to_ct(struct timespec *, struct clocktime *); void clock_register(device_t _clockdev, long _resolution_us); void clock_register_flags(device_t _clockdev, long _resolution_us, int _flags); +void clock_schedule(device_t clockdev, u_int _offsetns); void clock_unregister(device_t _clockdev); /* |