source: rtems/cpukit/score/src/kern_tc.c @ 0daa8ab

5
Last change on this file since 0daa8ab was 4cd52cc4, checked in by Sebastian Huber <sebastian.huber@…>, on 07/05/17 at 06:08:24

score: Avoid clash with <strings.h> provided fls()

  • Property mode set to 100644
File size: 57.4 KB
Line 
1/*-
2 * ----------------------------------------------------------------------------
3 * "THE BEER-WARE LICENSE" (Revision 42):
4 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
5 * can do whatever you want with this stuff. If we meet some day, and you think
6 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
7 * ----------------------------------------------------------------------------
8 *
9 * Copyright (c) 2011 The FreeBSD Foundation
10 * All rights reserved.
11 *
12 * Portions of this software were developed by Julien Ridoux at the University
13 * of Melbourne under sponsorship from the FreeBSD Foundation.
14 */
15
16#ifdef __rtems__
17#include <sys/lock.h>
18#define _KERNEL
19#define binuptime(_bt) _Timecounter_Binuptime(_bt)
20#define nanouptime(_tsp) _Timecounter_Nanouptime(_tsp)
21#define microuptime(_tvp) _Timecounter_Microuptime(_tvp)
22#define bintime(_bt) _Timecounter_Bintime(_bt)
23#define nanotime(_tsp) _Timecounter_Nanotime(_tsp)
24#define microtime(_tvp) _Timecounter_Microtime(_tvp)
25#define getbinuptime(_bt) _Timecounter_Getbinuptime(_bt)
26#define getnanouptime(_tsp) _Timecounter_Getnanouptime(_tsp)
27#define getmicrouptime(_tvp) _Timecounter_Getmicrouptime(_tvp)
28#define getbintime(_bt) _Timecounter_Getbintime(_bt)
29#define getnanotime(_tsp) _Timecounter_Getnanotime(_tsp)
30#define getmicrotime(_tvp) _Timecounter_Getmicrotime(_tvp)
31#define tc_init _Timecounter_Install
32#define timecounter _Timecounter
33#define time_second _Timecounter_Time_second
34#define time_uptime _Timecounter_Time_uptime
35#define boottimebin _Timecounter_Boottimebin
36#include <rtems/score/timecounterimpl.h>
37#include <rtems/score/smp.h>
38#include <rtems/score/todimpl.h>
39#include <rtems/score/watchdogimpl.h>
40#endif /* __rtems__ */
41#include <sys/cdefs.h>
42__FBSDID("$FreeBSD r284178 2015-06-09T11:49:56Z$");
43
44#include "opt_compat.h"
45#include "opt_ntp.h"
46#include "opt_ffclock.h"
47
48#include <sys/param.h>
49#ifndef __rtems__
50#include <sys/kernel.h>
51#include <sys/limits.h>
52#include <sys/lock.h>
53#include <sys/mutex.h>
54#include <sys/sbuf.h>
55#include <sys/sysctl.h>
56#include <sys/syslog.h>
57#include <sys/systm.h>
58#endif /* __rtems__ */
59#include <sys/timeffc.h>
60#include <sys/timepps.h>
61#include <sys/timetc.h>
62#include <sys/timex.h>
63#ifndef __rtems__
64#include <sys/vdso.h>
65#include <machine/atomic.h>
66#endif /* __rtems__ */
67#ifdef __rtems__
68#include <limits.h>
69#include <rtems.h>
70ISR_LOCK_DEFINE(, _Timecounter_Lock, "Timecounter")
71#define _Timecounter_Release(lock_context) \
72  _ISR_lock_Release_and_ISR_enable(&_Timecounter_Lock, lock_context)
73#define hz rtems_clock_get_ticks_per_second()
74#define printf(...)
/*
 * Map BSD bcopy() onto memcpy() (argument order swapped).  Note: no
 * trailing semicolon in the expansion — a semicolon here would expand
 * `if (c) bcopy(a, b, n); else ...` into two statements and break the
 * else-binding.
 */
#define bcopy(x, y, z) memcpy(y, x, z)
76#define log(...)
/*
 * Find-last-set replacement: index (1-based) of the most significant
 * set bit of x, or 0 when x is zero.  Named builtin_fls to avoid the
 * clash with the fls() prototype in <strings.h>.
 */
static inline int
builtin_fls(int x)
{
	if (x == 0)
		return 0;
	return (int)(sizeof(x) * 8) - __builtin_clz(x);
}
82#define fls(x) builtin_fls(x)
83/* FIXME: https://devel.rtems.org/ticket/2348 */
84#define ntp_update_second(a, b) do { (void) a; (void) b; } while (0)
85#endif /* __rtems__ */
86
87/*
88 * A large step happens on boot.  This constant detects such steps.
89 * It is relatively small so that ntp_update_second gets called enough
90 * in the typical 'missed a couple of seconds' case, but doesn't loop
91 * forever when the time step is large.
92 */
93#define LARGE_STEP      200
94
95/*
96 * Implement a dummy timecounter which we can use until we get a real one
97 * in the air.  This allows the console and other early stuff to use
98 * time services.
99 */
100
/*
 * Timecount method of the dummy timecounter.  On FreeBSD it returns a
 * software counter incremented on every call; on RTEMS it always reads
 * zero, since the dummy is only used until a real counter is installed.
 */
static uint32_t
dummy_get_timecount(struct timecounter *tc)
{
#ifndef __rtems__
	static uint32_t now;

	return (++now);
#else /* __rtems__ */
	return 0;
#endif /* __rtems__ */
}
112
/*
 * Dummy timecounter: nominal 1 MHz, full 32-bit mask, and the lowest
 * possible quality (-1000000) so that any real counter supersedes it.
 */
static struct timecounter dummy_timecounter = {
	dummy_get_timecount, 0, ~0u, 1000000, "dummy", -1000000
};

/*
 * One generation of timekeeping state.  Readers loop on th_generation
 * (load before and after reading the fields) to detect a concurrent
 * update by tc_windup().
 */
struct timehands {
	/* These fields must be initialized by the driver. */
	struct timecounter	*th_counter;		/* hardware counter in use */
	int64_t			th_adjustment;		/* NTP frequency adjustment */
	uint64_t		th_scale;		/* 64.64 fixpoint: seconds per counter step */
	uint32_t		th_offset_count;	/* counter value at th_offset */
	struct bintime		th_offset;		/* uptime at last windup */
	struct timeval		th_microtime;		/* cached wall time (microseconds) */
	struct timespec		th_nanotime;		/* cached wall time (nanoseconds) */
	/* Fields not to be copied in tc_windup start with th_generation. */
#ifndef __rtems__
	u_int			th_generation;
#else /* __rtems__ */
	Atomic_Ulong		th_generation;
#endif /* __rtems__ */
	struct timehands	*th_next;		/* next slot in the ring */
};
134
/*
 * The ring of timehands.  SMP configurations use ten generations so
 * that lock-free readers on other processors can finish before a slot
 * is reused; a uniprocessor build needs only th0 pointing at itself.
 */
#if defined(RTEMS_SMP)
static struct timehands th0;
static struct timehands th9 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th0};
static struct timehands th8 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th9};
static struct timehands th7 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th8};
static struct timehands th6 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th7};
static struct timehands th5 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th6};
static struct timehands th4 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th5};
static struct timehands th3 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th4};
static struct timehands th2 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th3};
static struct timehands th1 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th2};
#endif
/* Initial generation: dummy counter, 1 us scale, generation 1. */
static struct timehands th0 = {
	&dummy_timecounter,
	0,
	(uint64_t)-1 / 1000000,
	0,
	{1, 0},
#ifndef __rtems__
	{0, 0},
	{0, 0},
#else /* __rtems__ */
	/* RTEMS epoch starts at 1988-01-01, not 1970. */
	{TOD_SECONDS_1970_THROUGH_1988, 0},
	{TOD_SECONDS_1970_THROUGH_1988, 0},
#endif /* __rtems__ */
	1,
#if defined(RTEMS_SMP)
	&th1
#else
	&th0
#endif
};

/* Currently active generation; swapped atomically by tc_windup(). */
static struct timehands *volatile timehands = &th0;
struct timecounter *timecounter = &dummy_timecounter;
/* Head of the list of all registered timecounters. */
static struct timecounter *timecounters = &dummy_timecounter;

#ifndef __rtems__
int tc_min_ticktock_freq = 1;
#endif /* __rtems__ */

/* Coarse wall-clock seconds and uptime seconds, updated by windup. */
#ifndef __rtems__
volatile time_t time_second = 1;
#else /* __rtems__ */
volatile time_t time_second = TOD_SECONDS_1970_THROUGH_1988;
#endif /* __rtems__ */
volatile time_t time_uptime = 1;

/* Boot time in bintime format; added to uptime to get wall time. */
#ifndef __rtems__
struct bintime boottimebin;
#else /* __rtems__ */
struct bintime boottimebin = {
  .sec = TOD_SECONDS_1970_THROUGH_1988 - 1
};
#endif /* __rtems__ */
#ifndef __rtems__
/* Boot time exported through the kern.boottime sysctl. */
struct timeval boottime;
static int sysctl_kern_boottime(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime, CTLTYPE_STRUCT|CTLFLAG_RD,
    NULL, 0, sysctl_kern_boottime, "S,timeval", "System boottime");

SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, "");
static SYSCTL_NODE(_kern_timecounter, OID_AUTO, tc, CTLFLAG_RW, 0, "");

/* When non-zero, log a message whenever the clock is stepped. */
static int timestepwarnings;
SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RW,
    &timestepwarnings, 0, "Log time steps");

/*
 * Precision thresholds, recomputed from tc_timepercentage by the
 * alloweddeviation sysctl handler below.
 */
struct bintime bt_timethreshold;
struct bintime bt_tickthreshold;
sbintime_t sbt_timethreshold;
sbintime_t sbt_tickthreshold;
struct bintime tc_tick_bt;
sbintime_t tc_tick_sbt;
int tc_precexp;
int tc_timepercentage = TC_DEFAULTPERC;
static int sysctl_kern_timecounter_adjprecision(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern_timecounter, OID_AUTO, alloweddeviation,
    CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, 0, 0,
    sysctl_kern_timecounter_adjprecision, "I",
    "Allowed time interval deviation in percents");
#endif /* __rtems__ */

/* Forward declarations for the windup machinery defined below. */
static void tc_windup(void);
#ifndef __rtems__
static void cpu_tick_calibrate(int);
#else /* __rtems__ */
static void _Timecounter_Windup(ISR_lock_Context *lock_context);
#endif /* __rtems__ */

void dtrace_getnanotime(struct timespec *tsp);
226
227#ifndef __rtems__
/*
 * sysctl(kern.boottime) handler.  For 32-bit compatibility requests
 * (except on MIPS) the boot time is exported as two 32-bit words,
 * otherwise as a native struct timeval.
 */
static int
sysctl_kern_boottime(SYSCTL_HANDLER_ARGS)
{
#ifndef __mips__
#ifdef SCTL_MASK32
	int tv[2];

	if (req->flags & SCTL_MASK32) {
		tv[0] = boottime.tv_sec;
		tv[1] = boottime.tv_usec;
		return SYSCTL_OUT(req, tv, sizeof(tv));
	} else
#endif
#endif
		return SYSCTL_OUT(req, &boottime, sizeof(boottime));
}
244
245static int
246sysctl_kern_timecounter_get(SYSCTL_HANDLER_ARGS)
247{
248        uint32_t ncount;
249        struct timecounter *tc = arg1;
250
251        ncount = tc->tc_get_timecount(tc);
252        return sysctl_handle_int(oidp, &ncount, 0, req);
253}
254
255static int
256sysctl_kern_timecounter_freq(SYSCTL_HANDLER_ARGS)
257{
258        uint64_t freq;
259        struct timecounter *tc = arg1;
260
261        freq = tc->tc_frequency;
262        return sysctl_handle_64(oidp, &freq, 0, req);
263}
264#endif /* __rtems__ */
265
266/*
267 * Return the difference between the timehands' counter value now and what
268 * was when we copied it to the timehands' offset_count.
269 */
270static __inline uint32_t
271tc_delta(struct timehands *th)
272{
273        struct timecounter *tc;
274
275        tc = th->th_counter;
276        return ((tc->tc_get_timecount(tc) - th->th_offset_count) &
277            tc->tc_counter_mask);
278}
279
/*
 * Load the timehands generation with acquire semantics so that the
 * subsequent reads of the timehands fields cannot be reordered before
 * the generation load.  A generation of 0 means an update is in flight.
 */
static u_int
tc_getgen(struct timehands *th)
{

#ifndef __rtems__
#ifdef SMP
	return (atomic_load_acq_int(&th->th_generation));
#else
	u_int gen;

	gen = th->th_generation;
	/* Compiler barrier: keep field reads after the generation load. */
	__compiler_membar();
	return (gen);
#endif
#else /* __rtems__ */
	return (_Atomic_Load_ulong(&th->th_generation, ATOMIC_ORDER_ACQUIRE));
#endif /* __rtems__ */
}
298
/*
 * Store the timehands generation with release semantics so that all
 * preceding writes to the timehands fields become visible before the
 * new generation does.
 */
static void
tc_setgen(struct timehands *th, u_int newgen)
{

#ifndef __rtems__
#ifdef SMP
	atomic_store_rel_int(&th->th_generation, newgen);
#else
	/* Compiler barrier: flush field writes before publishing gen. */
	__compiler_membar();
	th->th_generation = newgen;
#endif
#else /* __rtems__ */
	_Atomic_Store_ulong(&th->th_generation, newgen, ATOMIC_ORDER_RELEASE);
#endif /* __rtems__ */
}
314
315/*
316 * Functions for reading the time.  We have to loop until we are sure that
317 * the timehands that we operated on was not updated under our feet.  See
318 * the comment in <sys/time.h> for a description of these 12 functions.
319 */
320
321#ifdef FFCLOCK
322void
323fbclock_binuptime(struct bintime *bt)
324{
325        struct timehands *th;
326        unsigned int gen;
327
328        do {
329                th = timehands;
330                gen = tc_getgen(th);
331                *bt = th->th_offset;
332                bintime_addx(bt, th->th_scale * tc_delta(th));
333        } while (gen == 0 || gen != tc_getgen(th));
334}
335
336void
337fbclock_nanouptime(struct timespec *tsp)
338{
339        struct bintime bt;
340
341        fbclock_binuptime(&bt);
342        bintime2timespec(&bt, tsp);
343}
344
345void
346fbclock_microuptime(struct timeval *tvp)
347{
348        struct bintime bt;
349
350        fbclock_binuptime(&bt);
351        bintime2timeval(&bt, tvp);
352}
353
354void
355fbclock_bintime(struct bintime *bt)
356{
357
358        fbclock_binuptime(bt);
359        bintime_add(bt, &boottimebin);
360}
361
362void
363fbclock_nanotime(struct timespec *tsp)
364{
365        struct bintime bt;
366
367        fbclock_bintime(&bt);
368        bintime2timespec(&bt, tsp);
369}
370
371void
372fbclock_microtime(struct timeval *tvp)
373{
374        struct bintime bt;
375
376        fbclock_bintime(&bt);
377        bintime2timeval(&bt, tvp);
378}
379
380void
381fbclock_getbinuptime(struct bintime *bt)
382{
383        struct timehands *th;
384        unsigned int gen;
385
386        do {
387                th = timehands;
388                gen = tc_getgen(th);
389                *bt = th->th_offset;
390        } while (gen == 0 || gen != tc_getgen(th));
391}
392
393void
394fbclock_getnanouptime(struct timespec *tsp)
395{
396        struct timehands *th;
397        unsigned int gen;
398
399        do {
400                th = timehands;
401                gen = tc_getgen(th);
402                bintime2timespec(&th->th_offset, tsp);
403        } while (gen == 0 || gen != tc_getgen(th));
404}
405
406void
407fbclock_getmicrouptime(struct timeval *tvp)
408{
409        struct timehands *th;
410        unsigned int gen;
411
412        do {
413                th = timehands;
414                gen = tc_getgen(th);
415                bintime2timeval(&th->th_offset, tvp);
416        } while (gen == 0 || gen != tc_getgen(th));
417}
418
419void
420fbclock_getbintime(struct bintime *bt)
421{
422        struct timehands *th;
423        unsigned int gen;
424
425        do {
426                th = timehands;
427                gen = tc_getgen(th);
428                *bt = th->th_offset;
429        } while (gen == 0 || gen != tc_getgen(th));
430        bintime_add(bt, &boottimebin);
431}
432
433void
434fbclock_getnanotime(struct timespec *tsp)
435{
436        struct timehands *th;
437        unsigned int gen;
438
439        do {
440                th = timehands;
441                gen = tc_getgen(th);
442                *tsp = th->th_nanotime;
443        } while (gen == 0 || gen != tc_getgen(th));
444}
445
446void
447fbclock_getmicrotime(struct timeval *tvp)
448{
449        struct timehands *th;
450        unsigned int gen;
451
452        do {
453                th = timehands;
454                gen = tc_getgen(th);
455                *tvp = th->th_microtime;
456        } while (gen == 0 || gen != tc_getgen(th));
457}
458#else /* !FFCLOCK */
459void
460binuptime(struct bintime *bt)
461{
462        struct timehands *th;
463        uint32_t gen;
464
465        do {
466                th = timehands;
467                gen = tc_getgen(th);
468                *bt = th->th_offset;
469                bintime_addx(bt, th->th_scale * tc_delta(th));
470        } while (gen == 0 || gen != tc_getgen(th));
471}
472
473void
474nanouptime(struct timespec *tsp)
475{
476        struct bintime bt;
477
478        binuptime(&bt);
479        bintime2timespec(&bt, tsp);
480}
481
482void
483microuptime(struct timeval *tvp)
484{
485        struct bintime bt;
486
487        binuptime(&bt);
488        bintime2timeval(&bt, tvp);
489}
490
491void
492bintime(struct bintime *bt)
493{
494
495        binuptime(bt);
496        bintime_add(bt, &boottimebin);
497}
498
499void
500nanotime(struct timespec *tsp)
501{
502        struct bintime bt;
503
504        bintime(&bt);
505        bintime2timespec(&bt, tsp);
506}
507
508void
509microtime(struct timeval *tvp)
510{
511        struct bintime bt;
512
513        bintime(&bt);
514        bintime2timeval(&bt, tvp);
515}
516
517void
518getbinuptime(struct bintime *bt)
519{
520        struct timehands *th;
521        uint32_t gen;
522
523        do {
524                th = timehands;
525                gen = tc_getgen(th);
526                *bt = th->th_offset;
527        } while (gen == 0 || gen != tc_getgen(th));
528}
529
530void
531getnanouptime(struct timespec *tsp)
532{
533        struct timehands *th;
534        uint32_t gen;
535
536        do {
537                th = timehands;
538                gen = tc_getgen(th);
539                bintime2timespec(&th->th_offset, tsp);
540        } while (gen == 0 || gen != tc_getgen(th));
541}
542
543void
544getmicrouptime(struct timeval *tvp)
545{
546        struct timehands *th;
547        uint32_t gen;
548
549        do {
550                th = timehands;
551                gen = tc_getgen(th);
552                bintime2timeval(&th->th_offset, tvp);
553        } while (gen == 0 || gen != tc_getgen(th));
554}
555
556void
557getbintime(struct bintime *bt)
558{
559        struct timehands *th;
560        uint32_t gen;
561
562        do {
563                th = timehands;
564                gen = tc_getgen(th);
565                *bt = th->th_offset;
566        } while (gen == 0 || gen != tc_getgen(th));
567        bintime_add(bt, &boottimebin);
568}
569
570void
571getnanotime(struct timespec *tsp)
572{
573        struct timehands *th;
574        uint32_t gen;
575
576        do {
577                th = timehands;
578                gen = tc_getgen(th);
579                *tsp = th->th_nanotime;
580        } while (gen == 0 || gen != tc_getgen(th));
581}
582
583void
584getmicrotime(struct timeval *tvp)
585{
586        struct timehands *th;
587        uint32_t gen;
588
589        do {
590                th = timehands;
591                gen = tc_getgen(th);
592                *tvp = th->th_microtime;
593        } while (gen == 0 || gen != tc_getgen(th));
594}
595#endif /* FFCLOCK */
596
597#ifdef FFCLOCK
598/*
599 * Support for feed-forward synchronization algorithms. This is heavily inspired
600 * by the timehands mechanism but kept independent from it. *_windup() functions
601 * have some connection to avoid accessing the timecounter hardware more than
602 * necessary.
603 */
604
/* Feed-forward clock estimates kept updated by the synchronization daemon. */
struct ffclock_estimate ffclock_estimate;
struct bintime ffclock_boottime;	/* Feed-forward boot time estimate. */
uint32_t ffclock_status;		/* Feed-forward clock status. */
int8_t ffclock_updated;			/* New estimates are available. */
struct mtx ffclock_mtx;			/* Mutex on ffclock_estimate. */

/*
 * One generation of feed-forward clock state, mirroring the timehands
 * read protocol: readers loop on gen to detect concurrent updates.
 */
struct fftimehands {
	struct ffclock_estimate	cest;		/* daemon's estimates at this tick */
	struct bintime		tick_time;	/* absolute time at this tick */
	struct bintime		tick_time_lerp;	/* monotonic (interpolated) time */
	ffcounter		tick_ffcount;	/* ffcounter value at this tick */
	uint64_t		period_lerp;	/* interpolation period */
	volatile uint8_t	gen;		/* 0 while being updated */
	struct fftimehands	*next;		/* next slot in the ring */
};

#define NUM_ELEMENTS(x) (sizeof(x) / sizeof(*x))

/* Ring of ten generations; fftimehands points at the current one. */
static struct fftimehands ffth[10];
static struct fftimehands *volatile fftimehands = ffth;
626
627static void
628ffclock_init(void)
629{
630        struct fftimehands *cur;
631        struct fftimehands *last;
632
633        memset(ffth, 0, sizeof(ffth));
634
635        last = ffth + NUM_ELEMENTS(ffth) - 1;
636        for (cur = ffth; cur < last; cur++)
637                cur->next = cur + 1;
638        last->next = ffth;
639
640        ffclock_updated = 0;
641        ffclock_status = FFCLOCK_STA_UNSYNC;
642        mtx_init(&ffclock_mtx, "ffclock lock", NULL, MTX_DEF);
643}
644
645/*
646 * Reset the feed-forward clock estimates. Called from inittodr() to get things
647 * kick started and uses the timecounter nominal frequency as a first period
648 * estimate. Note: this function may be called several time just after boot.
649 * Note: this is the only function that sets the value of boot time for the
650 * monotonic (i.e. uptime) version of the feed-forward clock.
651 */
/*
 * Reset the feed-forward clock estimates from the given timespec: zero
 * all error bounds, seed the period from the current timecounter's
 * nominal frequency, and publish the new estimate under ffclock_mtx.
 * Setting ffclock_updated to INT8_MAX tells ffclock_windup() this was
 * a reset, not a regular daemon update.
 */
void
ffclock_reset_clock(struct timespec *ts)
{
	struct timecounter *tc;
	struct ffclock_estimate cest;

	tc = timehands->th_counter;
	memset(&cest, 0, sizeof(struct ffclock_estimate));

	timespec2bintime(ts, &ffclock_boottime);
	timespec2bintime(ts, &(cest.update_time));
	ffclock_read_counter(&cest.update_ffcount);
	cest.leapsec_next = 0;
	/* Period = 2^64 / frequency, computed without 64-bit overflow. */
	cest.period = ((1ULL << 63) / tc->tc_frequency) << 1;
	cest.errb_abs = 0;
	cest.errb_rate = 0;
	cest.status = FFCLOCK_STA_UNSYNC;
	cest.leapsec_total = 0;
	cest.leapsec = 0;

	mtx_lock(&ffclock_mtx);
	bcopy(&cest, &ffclock_estimate, sizeof(struct ffclock_estimate));
	ffclock_updated = INT8_MAX;
	mtx_unlock(&ffclock_mtx);

	printf("ffclock reset: %s (%llu Hz), time = %ld.%09lu\n", tc->tc_name,
	    (unsigned long long)tc->tc_frequency, (long)ts->tv_sec,
	    (unsigned long)ts->tv_nsec);
}
681
682/*
683 * Sub-routine to convert a time interval measured in RAW counter units to time
684 * in seconds stored in bintime format.
685 * NOTE: bintime_mul requires u_int, but the value of the ffcounter may be
686 * larger than the max value of u_int (on 32 bit architecture). Loop to consume
687 * extra cycles.
688 */
689static void
690ffclock_convert_delta(ffcounter ffdelta, uint64_t period, struct bintime *bt)
691{
692        struct bintime bt2;
693        ffcounter delta, delta_max;
694
695        delta_max = (1ULL << (8 * sizeof(unsigned int))) - 1;
696        bintime_clear(bt);
697        do {
698                if (ffdelta > delta_max)
699                        delta = delta_max;
700                else
701                        delta = ffdelta;
702                bt2.sec = 0;
703                bt2.frac = period;
704                bintime_mul(&bt2, (unsigned int)delta);
705                bintime_add(bt, &bt2);
706                ffdelta -= delta;
707        } while (ffdelta > 0);
708}
709
710/*
711 * Update the fftimehands.
712 * Push the tick ffcount and time(s) forward based on current clock estimate.
713 * The conversion from ffcounter to bintime relies on the difference clock
714 * principle, whose accuracy relies on computing small time intervals. If a new
715 * clock estimate has been passed by the synchronisation daemon, make it
716 * current, and compute the linear interpolation for monotonic time if needed.
717 */
/*
 * Advance the feed-forward clock by 'delta' raw counter units: pick the
 * next fftimehands generation, roll the tick time(s) and counter
 * forward, fold in any pending estimate from the synchronization
 * daemon, and publish the new generation.  Runs from tc_windup().
 */
static void
ffclock_windup(unsigned int delta)
{
	struct ffclock_estimate *cest;
	struct fftimehands *ffth;
	struct bintime bt, gap_lerp;
	ffcounter ffdelta;
	uint64_t frac;
	unsigned int polling;
	uint8_t forward_jump, ogen;

	/*
	 * Pick the next timehand, copy current ffclock estimates and move tick
	 * times and counter forward.
	 */
	forward_jump = 0;
	ffth = fftimehands->next;
	ogen = ffth->gen;
	ffth->gen = 0;	/* gen 0 = "update in progress" for readers */
	cest = &ffth->cest;
	bcopy(&fftimehands->cest, cest, sizeof(struct ffclock_estimate));
	ffdelta = (ffcounter)delta;
	ffth->period_lerp = fftimehands->period_lerp;

	ffth->tick_time = fftimehands->tick_time;
	ffclock_convert_delta(ffdelta, cest->period, &bt);
	bintime_add(&ffth->tick_time, &bt);

	ffth->tick_time_lerp = fftimehands->tick_time_lerp;
	ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt);
	bintime_add(&ffth->tick_time_lerp, &bt);

	ffth->tick_ffcount = fftimehands->tick_ffcount + ffdelta;

	/*
	 * Assess the status of the clock, if the last update is too old, it is
	 * likely the synchronisation daemon is dead and the clock is free
	 * running.
	 */
	if (ffclock_updated == 0) {
		ffdelta = ffth->tick_ffcount - cest->update_ffcount;
		ffclock_convert_delta(ffdelta, cest->period, &bt);
		if (bt.sec > 2 * FFCLOCK_SKM_SCALE)
			ffclock_status |= FFCLOCK_STA_UNSYNC;
	}

	/*
	 * If available, grab updated clock estimates and make them current.
	 * Recompute time at this tick using the updated estimates. The clock
	 * estimates passed the feed-forward synchronisation daemon may result
	 * in time conversion that is not monotonically increasing (just after
	 * the update). time_lerp is a particular linear interpolation over the
	 * synchronisation algo polling period that ensures monotonicity for the
	 * clock ids requesting it.
	 */
	if (ffclock_updated > 0) {
		bcopy(&ffclock_estimate, cest, sizeof(struct ffclock_estimate));
		ffdelta = ffth->tick_ffcount - cest->update_ffcount;
		ffth->tick_time = cest->update_time;
		ffclock_convert_delta(ffdelta, cest->period, &bt);
		bintime_add(&ffth->tick_time, &bt);

		/* ffclock_reset sets ffclock_updated to INT8_MAX */
		if (ffclock_updated == INT8_MAX)
			ffth->tick_time_lerp = ffth->tick_time;

		if (bintime_cmp(&ffth->tick_time, &ffth->tick_time_lerp, >))
			forward_jump = 1;
		else
			forward_jump = 0;

		/* gap_lerp = |tick_time - tick_time_lerp| */
		bintime_clear(&gap_lerp);
		if (forward_jump) {
			gap_lerp = ffth->tick_time;
			bintime_sub(&gap_lerp, &ffth->tick_time_lerp);
		} else {
			gap_lerp = ffth->tick_time_lerp;
			bintime_sub(&gap_lerp, &ffth->tick_time);
		}

		/*
		 * The reset from the RTC clock may be far from accurate, and
		 * reducing the gap between real time and interpolated time
		 * could take a very long time if the interpolated clock insists
		 * on strict monotonicity. The clock is reset under very strict
		 * conditions (kernel time is known to be wrong and
		 * synchronization daemon has been restarted recently.
		 * ffclock_boottime absorbs the jump to ensure boot time is
		 * correct and uptime functions stay consistent.
		 */
		if (((ffclock_status & FFCLOCK_STA_UNSYNC) == FFCLOCK_STA_UNSYNC) &&
		    ((cest->status & FFCLOCK_STA_UNSYNC) == 0) &&
		    ((cest->status & FFCLOCK_STA_WARMUP) == FFCLOCK_STA_WARMUP)) {
			if (forward_jump)
				bintime_add(&ffclock_boottime, &gap_lerp);
			else
				bintime_sub(&ffclock_boottime, &gap_lerp);
			ffth->tick_time_lerp = ffth->tick_time;
			bintime_clear(&gap_lerp);
		}

		ffclock_status = cest->status;
		ffth->period_lerp = cest->period;

		/*
		 * Compute corrected period used for the linear interpolation of
		 * time. The rate of linear interpolation is capped to 5000PPM
		 * (5ms/s).
		 */
		if (bintime_isset(&gap_lerp)) {
			ffdelta = cest->update_ffcount;
			ffdelta -= fftimehands->cest.update_ffcount;
			ffclock_convert_delta(ffdelta, cest->period, &bt);
			polling = bt.sec;
			/* 5000 PPM = 5000000 ns/s; 18446744073 = 2^64 / 10^9. */
			bt.sec = 0;
			bt.frac = 5000000 * (uint64_t)18446744073LL;
			bintime_mul(&bt, polling);
			if (bintime_cmp(&gap_lerp, &bt, >))
				gap_lerp = bt;

			/* Approximate 1 sec by 1-(1/2^64) to ease arithmetic */
			frac = 0;
			if (gap_lerp.sec > 0) {
				frac -= 1;
				frac /= ffdelta / gap_lerp.sec;
			}
			frac += gap_lerp.frac / ffdelta;

			if (forward_jump)
				ffth->period_lerp += frac;
			else
				ffth->period_lerp -= frac;
		}

		ffclock_updated = 0;
	}
	/* Publish the new generation; skip 0, which means "in progress". */
	if (++ogen == 0)
		ogen = 1;
	ffth->gen = ogen;
	fftimehands = ffth;
}
859
860/*
861 * Adjust the fftimehands when the timecounter is changed. Stating the obvious,
862 * the old and new hardware counter cannot be read simultaneously. tc_windup()
863 * does read the two counters 'back to back', but a few cycles are effectively
864 * lost, and not accumulated in tick_ffcount. This is a fairly radical
865 * operation for a feed-forward synchronization daemon, and it is its job to not
866 * pushing irrelevant data to the kernel. Because there is no locking here,
867 * simply force to ignore pending or next update to give daemon a chance to
868 * realize the counter has changed.
869 */
/*
 * Switch the feed-forward clock to a new timecounter: reseed the period
 * from the new counter's nominal frequency, zero the error bounds, and
 * mark the clock unsynchronized.  Decrementing ffclock_updated (instead
 * of locking) makes windup ignore the daemon's next, now-stale update.
 */
static void
ffclock_change_tc(struct timehands *th)
{
	struct fftimehands *ffth;
	struct ffclock_estimate *cest;
	struct timecounter *tc;
	uint8_t ogen;

	tc = th->th_counter;
	ffth = fftimehands->next;
	ogen = ffth->gen;
	ffth->gen = 0;	/* gen 0 = "update in progress" for readers */

	cest = &ffth->cest;
	bcopy(&(fftimehands->cest), cest, sizeof(struct ffclock_estimate));
	/* Period = 2^64 / frequency, computed without 64-bit overflow. */
	cest->period = ((1ULL << 63) / tc->tc_frequency ) << 1;
	cest->errb_abs = 0;
	cest->errb_rate = 0;
	cest->status |= FFCLOCK_STA_UNSYNC;

	ffth->tick_ffcount = fftimehands->tick_ffcount;
	ffth->tick_time_lerp = fftimehands->tick_time_lerp;
	ffth->tick_time = fftimehands->tick_time;
	ffth->period_lerp = cest->period;

	/* Do not lock but ignore next update from synchronization daemon. */
	ffclock_updated--;

	if (++ogen == 0)
		ogen = 1;
	ffth->gen = ogen;
	fftimehands = ffth;
}
903
904/*
905 * Retrieve feed-forward counter and time of last kernel tick.
906 */
907void
908ffclock_last_tick(ffcounter *ffcount, struct bintime *bt, uint32_t flags)
909{
910        struct fftimehands *ffth;
911        uint8_t gen;
912
913        /*
914         * No locking but check generation has not changed. Also need to make
915         * sure ffdelta is positive, i.e. ffcount > tick_ffcount.
916         */
917        do {
918                ffth = fftimehands;
919                gen = ffth->gen;
920                if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP)
921                        *bt = ffth->tick_time_lerp;
922                else
923                        *bt = ffth->tick_time;
924                *ffcount = ffth->tick_ffcount;
925        } while (gen == 0 || gen != ffth->gen);
926}
927
928/*
929 * Absolute clock conversion. Low level function to convert ffcounter to
930 * bintime. The ffcounter is converted using the current ffclock period estimate
931 * or the "interpolated period" to ensure monotonicity.
932 * NOTE: this conversion may have been deferred, and the clock updated since the
933 * hardware counter has been read.
934 */
935void
936ffclock_convert_abs(ffcounter ffcount, struct bintime *bt, uint32_t flags)
937{
938        struct fftimehands *ffth;
939        struct bintime bt2;
940        ffcounter ffdelta;
941        uint8_t gen;
942
943        /*
944         * No locking but check generation has not changed. Also need to make
945         * sure ffdelta is positive, i.e. ffcount > tick_ffcount.
946         */
947        do {
948                ffth = fftimehands;
949                gen = ffth->gen;
950                if (ffcount > ffth->tick_ffcount)
951                        ffdelta = ffcount - ffth->tick_ffcount;
952                else
953                        ffdelta = ffth->tick_ffcount - ffcount;
954
955                if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP) {
956                        *bt = ffth->tick_time_lerp;
957                        ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt2);
958                } else {
959                        *bt = ffth->tick_time;
960                        ffclock_convert_delta(ffdelta, ffth->cest.period, &bt2);
961                }
962
963                if (ffcount > ffth->tick_ffcount)
964                        bintime_add(bt, &bt2);
965                else
966                        bintime_sub(bt, &bt2);
967        } while (gen == 0 || gen != ffth->gen);
968}
969
970/*
971 * Difference clock conversion.
972 * Low level function to Convert a time interval measured in RAW counter units
973 * into bintime. The difference clock allows measuring small intervals much more
974 * reliably than the absolute clock.
975 */
976void
977ffclock_convert_diff(ffcounter ffdelta, struct bintime *bt)
978{
979        struct fftimehands *ffth;
980        uint8_t gen;
981
982        /* No locking but check generation has not changed. */
983        do {
984                ffth = fftimehands;
985                gen = ffth->gen;
986                ffclock_convert_delta(ffdelta, ffth->cest.period, bt);
987        } while (gen == 0 || gen != ffth->gen);
988}
989
990/*
991 * Access to current ffcounter value.
992 */
993void
994ffclock_read_counter(ffcounter *ffcount)
995{
996        struct timehands *th;
997        struct fftimehands *ffth;
998        unsigned int gen, delta;
999
1000        /*
1001         * ffclock_windup() called from tc_windup(), safe to rely on
1002         * th->th_generation only, for correct delta and ffcounter.
1003         */
1004        do {
1005                th = timehands;
1006                gen = tc_getgen(th);
1007                ffth = fftimehands;
1008                delta = tc_delta(th);
1009                *ffcount = ffth->tick_ffcount;
1010        } while (gen == 0 || gen != tc_getgen(th));
1011
1012        *ffcount += delta;
1013}
1014
/* Return the current uptime as a bintime, from the active system clock. */
void
binuptime(struct bintime *bt)
{

	binuptime_fromclock(bt, sysclock_active);
}
1021
/* Return the current uptime as a timespec, from the active system clock. */
void
nanouptime(struct timespec *tsp)
{

	nanouptime_fromclock(tsp, sysclock_active);
}
1028
/* Return the current uptime as a timeval, from the active system clock. */
void
microuptime(struct timeval *tvp)
{

	microuptime_fromclock(tvp, sysclock_active);
}
1035
/* Return the current wall-clock time as a bintime (full precision). */
void
bintime(struct bintime *bt)
{

	bintime_fromclock(bt, sysclock_active);
}
1042
/* Return the current wall-clock time as a timespec (full precision). */
void
nanotime(struct timespec *tsp)
{

	nanotime_fromclock(tsp, sysclock_active);
}
1049
/* Return the current wall-clock time as a timeval (full precision). */
void
microtime(struct timeval *tvp)
{

	microtime_fromclock(tvp, sysclock_active);
}
1056
/* Return the cached (tick-granular) uptime as a bintime. */
void
getbinuptime(struct bintime *bt)
{

	getbinuptime_fromclock(bt, sysclock_active);
}
1063
/* Return the cached (tick-granular) uptime as a timespec. */
void
getnanouptime(struct timespec *tsp)
{

	getnanouptime_fromclock(tsp, sysclock_active);
}
1070
/* Return the cached (tick-granular) uptime as a timeval. */
void
getmicrouptime(struct timeval *tvp)
{

	getmicrouptime_fromclock(tvp, sysclock_active);
}
1077
/* Return the cached (tick-granular) wall-clock time as a bintime. */
void
getbintime(struct bintime *bt)
{

	getbintime_fromclock(bt, sysclock_active);
}
1084
/* Return the cached (tick-granular) wall-clock time as a timespec. */
void
getnanotime(struct timespec *tsp)
{

	getnanotime_fromclock(tsp, sysclock_active);
}
1091
1092void
1093getmicrotime(struct timeval *tvp)
1094{
1095
1096        getmicrouptime_fromclock(tvp, sysclock_active);
1097}
1098
1099#endif /* FFCLOCK */
1100
1101#ifndef __rtems__
1102/*
1103 * This is a clone of getnanotime and used for walltimestamps.
1104 * The dtrace_ prefix prevents fbt from creating probes for
1105 * it so walltimestamp can be safely used in all fbt probes.
1106 */
void
dtrace_getnanotime(struct timespec *tsp)
{
	struct timehands *th;
	uint32_t gen;

	/*
	 * Lock-free read of the cached wall-clock timestamp: retry if the
	 * timehands generation changed mid-copy or an update was in
	 * progress (gen == 0).
	 */
	do {
		th = timehands;
		gen = tc_getgen(th);
		*tsp = th->th_nanotime;
	} while (gen == 0 || gen != tc_getgen(th));
}
1119#endif /* __rtems__ */
1120
#ifdef FFCLOCK
/*
 * System clock currently providing time to the system. Modifiable via sysctl
 * when the FFCLOCK option is defined. Defaults to the feedback clock.
 */
int sysclock_active = SYSCLOCK_FBCK;
#endif

/* Internal NTP status and error estimates. */
/* NOTE(review): presumably defined in the NTP code (kern_ntptime.c) — verify. */
extern int time_status;
extern long time_esterror;
1132
1133#ifndef __rtems__
1134/*
1135 * Take a snapshot of sysclock data which can be used to compare system clocks
1136 * and generate timestamps after the fact.
1137 */
1138void
1139sysclock_getsnapshot(struct sysclock_snap *clock_snap, int fast)
1140{
1141        struct fbclock_info *fbi;
1142        struct timehands *th;
1143        struct bintime bt;
1144        unsigned int delta, gen;
1145#ifdef FFCLOCK
1146        ffcounter ffcount;
1147        struct fftimehands *ffth;
1148        struct ffclock_info *ffi;
1149        struct ffclock_estimate cest;
1150
1151        ffi = &clock_snap->ff_info;
1152#endif
1153
1154        fbi = &clock_snap->fb_info;
1155        delta = 0;
1156
1157        do {
1158                th = timehands;
1159                gen = tc_getgen(th);
1160                fbi->th_scale = th->th_scale;
1161                fbi->tick_time = th->th_offset;
1162#ifdef FFCLOCK
1163                ffth = fftimehands;
1164                ffi->tick_time = ffth->tick_time_lerp;
1165                ffi->tick_time_lerp = ffth->tick_time_lerp;
1166                ffi->period = ffth->cest.period;
1167                ffi->period_lerp = ffth->period_lerp;
1168                clock_snap->ffcount = ffth->tick_ffcount;
1169                cest = ffth->cest;
1170#endif
1171                if (!fast)
1172                        delta = tc_delta(th);
1173        } while (gen == 0 || gen != tc_getgen(th));
1174
1175        clock_snap->delta = delta;
1176#ifdef FFCLOCK
1177        clock_snap->sysclock_active = sysclock_active;
1178#endif
1179
1180        /* Record feedback clock status and error. */
1181        clock_snap->fb_info.status = time_status;
1182        /* XXX: Very crude estimate of feedback clock error. */
1183        bt.sec = time_esterror / 1000000;
1184        bt.frac = ((time_esterror - bt.sec) * 1000000) *
1185            (uint64_t)18446744073709ULL;
1186        clock_snap->fb_info.error = bt;
1187
1188#ifdef FFCLOCK
1189        if (!fast)
1190                clock_snap->ffcount += delta;
1191
1192        /* Record feed-forward clock leap second adjustment. */
1193        ffi->leapsec_adjustment = cest.leapsec_total;
1194        if (clock_snap->ffcount > cest.leapsec_next)
1195                ffi->leapsec_adjustment -= cest.leapsec;
1196
1197        /* Record feed-forward clock status and error. */
1198        clock_snap->ff_info.status = cest.status;
1199        ffcount = clock_snap->ffcount - cest.update_ffcount;
1200        ffclock_convert_delta(ffcount, cest.period, &bt);
1201        /* 18446744073709 = int(2^64/1e12), err_bound_rate in [ps/s]. */
1202        bintime_mul(&bt, cest.errb_rate * (uint64_t)18446744073709ULL);
1203        /* 18446744073 = int(2^64 / 1e9), since err_abs in [ns]. */
1204        bintime_addx(&bt, cest.errb_abs * (uint64_t)18446744073ULL);
1205        clock_snap->ff_info.error = bt;
1206#endif
1207}
1208
1209/*
1210 * Convert a sysclock snapshot into a struct bintime based on the specified
1211 * clock source and flags.
1212 */
1213int
1214sysclock_snap2bintime(struct sysclock_snap *cs, struct bintime *bt,
1215    int whichclock, uint32_t flags)
1216{
1217#ifdef FFCLOCK
1218        struct bintime bt2;
1219        uint64_t period;
1220#endif
1221
1222        switch (whichclock) {
1223        case SYSCLOCK_FBCK:
1224                *bt = cs->fb_info.tick_time;
1225
1226                /* If snapshot was created with !fast, delta will be >0. */
1227                if (cs->delta > 0)
1228                        bintime_addx(bt, cs->fb_info.th_scale * cs->delta);
1229
1230                if ((flags & FBCLOCK_UPTIME) == 0)
1231                        bintime_add(bt, &boottimebin);
1232                break;
1233#ifdef FFCLOCK
1234        case SYSCLOCK_FFWD:
1235                if (flags & FFCLOCK_LERP) {
1236                        *bt = cs->ff_info.tick_time_lerp;
1237                        period = cs->ff_info.period_lerp;
1238                } else {
1239                        *bt = cs->ff_info.tick_time;
1240                        period = cs->ff_info.period;
1241                }
1242
1243                /* If snapshot was created with !fast, delta will be >0. */
1244                if (cs->delta > 0) {
1245                        ffclock_convert_delta(cs->delta, period, &bt2);
1246                        bintime_add(bt, &bt2);
1247                }
1248
1249                /* Leap second adjustment. */
1250                if (flags & FFCLOCK_LEAPSEC)
1251                        bt->sec -= cs->ff_info.leapsec_adjustment;
1252
1253                /* Boot time adjustment, for uptime/monotonic clocks. */
1254                if (flags & FFCLOCK_UPTIME)
1255                        bintime_sub(bt, &ffclock_boottime);
1256                break;
1257#endif
1258        default:
1259                return (EINVAL);
1260                break;
1261        }
1262
1263        return (0);
1264}
1265#endif /* __rtems__ */
1266
1267/*
1268 * Initialize a new timecounter and possibly use it.
1269 */
1270void
1271tc_init(struct timecounter *tc)
1272{
1273#ifndef __rtems__
1274        uint32_t u;
1275        struct sysctl_oid *tc_root;
1276
1277        u = tc->tc_frequency / tc->tc_counter_mask;
1278        /* XXX: We need some margin here, 10% is a guess */
1279        u *= 11;
1280        u /= 10;
1281        if (u > hz && tc->tc_quality >= 0) {
1282                tc->tc_quality = -2000;
1283                if (bootverbose) {
1284                        printf("Timecounter \"%s\" frequency %ju Hz",
1285                            tc->tc_name, (uintmax_t)tc->tc_frequency);
1286                        printf(" -- Insufficient hz, needs at least %u\n", u);
1287                }
1288        } else if (tc->tc_quality >= 0 || bootverbose) {
1289                printf("Timecounter \"%s\" frequency %ju Hz quality %d\n",
1290                    tc->tc_name, (uintmax_t)tc->tc_frequency,
1291                    tc->tc_quality);
1292        }
1293#endif /* __rtems__ */
1294
1295        tc->tc_next = timecounters;
1296        timecounters = tc;
1297#ifndef __rtems__
1298        /*
1299         * Set up sysctl tree for this counter.
1300         */
1301        tc_root = SYSCTL_ADD_NODE(NULL,
1302            SYSCTL_STATIC_CHILDREN(_kern_timecounter_tc), OID_AUTO, tc->tc_name,
1303            CTLFLAG_RW, 0, "timecounter description");
1304        SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
1305            "mask", CTLFLAG_RD, &(tc->tc_counter_mask), 0,
1306            "mask for implemented bits");
1307        SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
1308            "counter", CTLTYPE_UINT | CTLFLAG_RD, tc, sizeof(*tc),
1309            sysctl_kern_timecounter_get, "IU", "current timecounter value");
1310        SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
1311            "frequency", CTLTYPE_U64 | CTLFLAG_RD, tc, sizeof(*tc),
1312             sysctl_kern_timecounter_freq, "QU", "timecounter frequency");
1313        SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
1314            "quality", CTLFLAG_RD, &(tc->tc_quality), 0,
1315            "goodness of time counter");
1316        /*
1317         * Never automatically use a timecounter with negative quality.
1318         * Even though we run on the dummy counter, switching here may be
1319         * worse since this timecounter may not be monotonous.
1320         */
1321        if (tc->tc_quality < 0)
1322                return;
1323        if (tc->tc_quality < timecounter->tc_quality)
1324                return;
1325        if (tc->tc_quality == timecounter->tc_quality &&
1326            tc->tc_frequency < timecounter->tc_frequency)
1327                return;
1328#endif /* __rtems__ */
1329        (void)tc->tc_get_timecount(tc);
1330        (void)tc->tc_get_timecount(tc);
1331        timecounter = tc;
1332#ifdef __rtems__
1333        tc_windup();
1334#endif /* __rtems__ */
1335}
1336
1337#ifndef __rtems__
/* Report the frequency (Hz) of the timecounter currently in use. */
uint64_t
tc_getfrequency(void)
{

	return (timehands->th_counter->tc_frequency);
}
1345#endif /* __rtems__ */
1346
1347/*
1348 * Step our concept of UTC.  This is done by modifying our estimate of
1349 * when we booted.
1350 * XXX: not locked.
1351 */
1352void
1353#ifndef __rtems__
1354tc_setclock(struct timespec *ts)
1355#else /* __rtems__ */
1356_Timecounter_Set_clock(const struct bintime *_bt,
1357    ISR_lock_Context *lock_context)
1358#endif /* __rtems__ */
1359{
1360#ifndef __rtems__
1361        struct timespec tbef, taft;
1362#endif /* __rtems__ */
1363        struct bintime bt, bt2;
1364
1365#ifndef __rtems__
1366        cpu_tick_calibrate(1);
1367        nanotime(&tbef);
1368        timespec2bintime(ts, &bt);
1369#else /* __rtems__ */
1370        bt = *_bt;
1371#endif /* __rtems__ */
1372        binuptime(&bt2);
1373        bintime_sub(&bt, &bt2);
1374        bintime_add(&bt2, &boottimebin);
1375        boottimebin = bt;
1376#ifndef __rtems__
1377        bintime2timeval(&bt, &boottime);
1378
1379        /* XXX fiddle all the little crinkly bits around the fiords... */
1380        tc_windup();
1381        nanotime(&taft);
1382        if (timestepwarnings) {
1383                log(LOG_INFO,
1384                    "Time stepped from %jd.%09ld to %jd.%09ld (%jd.%09ld)\n",
1385                    (intmax_t)tbef.tv_sec, tbef.tv_nsec,
1386                    (intmax_t)taft.tv_sec, taft.tv_nsec,
1387                    (intmax_t)ts->tv_sec, ts->tv_nsec);
1388        }
1389        cpu_tick_calibrate(1);
1390#else /* __rtems__ */
1391        _Timecounter_Windup(lock_context);
1392#endif /* __rtems__ */
1393}
1394
1395/*
1396 * Initialize the next struct timehands in the ring and make
1397 * it the active timehands.  Along the way we might switch to a different
1398 * timecounter and/or do seconds processing in NTP.  Slightly magic.
1399 */
1400static void
1401tc_windup(void)
1402#ifdef __rtems__
1403{
1404        ISR_lock_Context lock_context;
1405
1406        _Timecounter_Acquire(&lock_context);
1407        _Timecounter_Windup(&lock_context);
1408}
1409
1410static void
1411_Timecounter_Windup(ISR_lock_Context *lock_context)
1412#endif /* __rtems__ */
1413{
1414        struct bintime bt;
1415        struct timehands *th, *tho;
1416        uint64_t scale;
1417        uint32_t delta, ncount, ogen;
1418        int i;
1419        time_t t;
1420
1421        /*
1422         * Make the next timehands a copy of the current one, but do not
1423         * overwrite the generation or next pointer.  While we update
1424         * the contents, the generation must be zero.
1425         */
1426        tho = timehands;
1427#if defined(RTEMS_SMP)
1428        th = tho->th_next;
1429#else
1430        th = tho;
1431#endif
1432        ogen = th->th_generation;
1433        tc_setgen(th, 0);
1434#if defined(RTEMS_SMP)
1435        bcopy(tho, th, offsetof(struct timehands, th_generation));
1436#endif
1437
1438        /*
1439         * Capture a timecounter delta on the current timecounter and if
1440         * changing timecounters, a counter value from the new timecounter.
1441         * Update the offset fields accordingly.
1442         */
1443        delta = tc_delta(th);
1444        if (th->th_counter != timecounter)
1445                ncount = timecounter->tc_get_timecount(timecounter);
1446        else
1447                ncount = 0;
1448#ifdef FFCLOCK
1449        ffclock_windup(delta);
1450#endif
1451        th->th_offset_count += delta;
1452        th->th_offset_count &= th->th_counter->tc_counter_mask;
1453        while (delta > th->th_counter->tc_frequency) {
1454                /* Eat complete unadjusted seconds. */
1455                delta -= th->th_counter->tc_frequency;
1456                th->th_offset.sec++;
1457        }
1458        if ((delta > th->th_counter->tc_frequency / 2) &&
1459            (th->th_scale * delta < ((uint64_t)1 << 63))) {
1460                /* The product th_scale * delta just barely overflows. */
1461                th->th_offset.sec++;
1462        }
1463        bintime_addx(&th->th_offset, th->th_scale * delta);
1464
1465        /*
1466         * Hardware latching timecounters may not generate interrupts on
1467         * PPS events, so instead we poll them.  There is a finite risk that
1468         * the hardware might capture a count which is later than the one we
1469         * got above, and therefore possibly in the next NTP second which might
1470         * have a different rate than the current NTP second.  It doesn't
1471         * matter in practice.
1472         */
1473        if (tho->th_counter->tc_poll_pps)
1474                tho->th_counter->tc_poll_pps(tho->th_counter);
1475
1476        /*
1477         * Deal with NTP second processing.  The for loop normally
1478         * iterates at most once, but in extreme situations it might
1479         * keep NTP sane if timeouts are not run for several seconds.
1480         * At boot, the time step can be large when the TOD hardware
1481         * has been read, so on really large steps, we call
1482         * ntp_update_second only twice.  We need to call it twice in
1483         * case we missed a leap second.
1484         */
1485        bt = th->th_offset;
1486        bintime_add(&bt, &boottimebin);
1487        i = bt.sec - tho->th_microtime.tv_sec;
1488        if (i > LARGE_STEP)
1489                i = 2;
1490        for (; i > 0; i--) {
1491                t = bt.sec;
1492                ntp_update_second(&th->th_adjustment, &bt.sec);
1493                if (bt.sec != t)
1494                        boottimebin.sec += bt.sec - t;
1495        }
1496        /* Update the UTC timestamps used by the get*() functions. */
1497        /* XXX shouldn't do this here.  Should force non-`get' versions. */
1498        bintime2timeval(&bt, &th->th_microtime);
1499        bintime2timespec(&bt, &th->th_nanotime);
1500
1501        /* Now is a good time to change timecounters. */
1502        if (th->th_counter != timecounter) {
1503#ifndef __rtems__
1504#ifndef __arm__
1505                if ((timecounter->tc_flags & TC_FLAGS_C2STOP) != 0)
1506                        cpu_disable_c2_sleep++;
1507                if ((th->th_counter->tc_flags & TC_FLAGS_C2STOP) != 0)
1508                        cpu_disable_c2_sleep--;
1509#endif
1510#endif /* __rtems__ */
1511                th->th_counter = timecounter;
1512                th->th_offset_count = ncount;
1513#ifndef __rtems__
1514                tc_min_ticktock_freq = max(1, timecounter->tc_frequency /
1515                    (((uint64_t)timecounter->tc_counter_mask + 1) / 3));
1516#endif /* __rtems__ */
1517#ifdef FFCLOCK
1518                ffclock_change_tc(th);
1519#endif
1520        }
1521
1522        /*-
1523         * Recalculate the scaling factor.  We want the number of 1/2^64
1524         * fractions of a second per period of the hardware counter, taking
1525         * into account the th_adjustment factor which the NTP PLL/adjtime(2)
1526         * processing provides us with.
1527         *
1528         * The th_adjustment is nanoseconds per second with 32 bit binary
1529         * fraction and we want 64 bit binary fraction of second:
1530         *
1531         *       x = a * 2^32 / 10^9 = a * 4.294967296
1532         *
1533         * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
1534         * we can only multiply by about 850 without overflowing, that
1535         * leaves no suitably precise fractions for multiply before divide.
1536         *
1537         * Divide before multiply with a fraction of 2199/512 results in a
1538         * systematic undercompensation of 10PPM of th_adjustment.  On a
1539         * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
1540         *
1541         * We happily sacrifice the lowest of the 64 bits of our result
1542         * to the goddess of code clarity.
1543         *
1544         */
1545        scale = (uint64_t)1 << 63;
1546        scale += (th->th_adjustment / 1024) * 2199;
1547        scale /= th->th_counter->tc_frequency;
1548        th->th_scale = scale * 2;
1549
1550        /*
1551         * Now that the struct timehands is again consistent, set the new
1552         * generation number, making sure to not make it zero.
1553         */
1554        if (++ogen == 0)
1555                ogen = 1;
1556        tc_setgen(th, ogen);
1557
1558        /* Go live with the new struct timehands. */
1559#ifdef FFCLOCK
1560        switch (sysclock_active) {
1561        case SYSCLOCK_FBCK:
1562#endif
1563                time_second = th->th_microtime.tv_sec;
1564                time_uptime = th->th_offset.sec;
1565#ifdef FFCLOCK
1566                break;
1567        case SYSCLOCK_FFWD:
1568                time_second = fftimehands->tick_time_lerp.sec;
1569                time_uptime = fftimehands->tick_time_lerp.sec - ffclock_boottime.sec;
1570                break;
1571        }
1572#endif
1573
1574#if defined(RTEMS_SMP)
1575        timehands = th;
1576#endif
1577#ifndef __rtems__
1578        timekeep_push_vdso();
1579#endif /* __rtems__ */
1580#ifdef __rtems__
1581        _Timecounter_Release(lock_context);
1582#endif /* __rtems__ */
1583}
1584
1585#ifndef __rtems__
/*
 * Report or change the active timecounter hardware via the
 * kern.timecounter.hardware sysctl (name-based selection).
 */
static int
sysctl_kern_timecounter_hardware(SYSCTL_HANDLER_ARGS)
{
	char newname[32];
	struct timecounter *newtc, *tc;
	int error;

	tc = timecounter;
	strlcpy(newname, tc->tc_name, sizeof(newname));

	error = sysctl_handle_string(oidp, &newname[0], sizeof(newname), req);
	/* Read-only access, error, or no actual change: nothing to do. */
	if (error != 0 || req->newptr == NULL ||
	    strcmp(newname, tc->tc_name) == 0)
		return (error);
	for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) {
		if (strcmp(newname, newtc->tc_name) != 0)
			continue;

		/* Warm up new timecounter. */
		(void)newtc->tc_get_timecount(newtc);
		(void)newtc->tc_get_timecount(newtc);

		timecounter = newtc;

		/*
		 * The vdso timehands update is deferred until the next
		 * 'tc_windup()'.
		 *
		 * This is prudent given that 'timekeep_push_vdso()' does not
		 * use any locking and that it can be called in hard interrupt
		 * context via 'tc_windup()'.
		 */
		return (0);
	}
	/* No registered timecounter matched the requested name. */
	return (EINVAL);
}

SYSCTL_PROC(_kern_timecounter, OID_AUTO, hardware, CTLTYPE_STRING | CTLFLAG_RW,
    0, 0, sysctl_kern_timecounter_hardware, "A",
    "Timecounter hardware selected");
1627
1628
/*
 * Report the available timecounter hardware as a space-separated list of
 * "name(quality)" entries (read-only sysctl kern.timecounter.choice).
 */
static int
sysctl_kern_timecounter_choice(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sb;
	struct timecounter *tc;
	int error;

	sbuf_new_for_sysctl(&sb, NULL, 0, req);
	for (tc = timecounters; tc != NULL; tc = tc->tc_next) {
		/* Separate entries with a space (none before the first). */
		if (tc != timecounters)
			sbuf_putc(&sb, ' ');
		sbuf_printf(&sb, "%s(%d)", tc->tc_name, tc->tc_quality);
	}
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);
	return (error);
}

SYSCTL_PROC(_kern_timecounter, OID_AUTO, choice, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, sysctl_kern_timecounter_choice, "A", "Timecounter hardware detected");
1650#endif /* __rtems__ */
1651
1652#ifndef __rtems__
1653/*
1654 * RFC 2783 PPS-API implementation.
1655 */
1656
1657/*
1658 *  Return true if the driver is aware of the abi version extensions in the
1659 *  pps_state structure, and it supports at least the given abi version number.
1660 */
1661static inline int
1662abi_aware(struct pps_state *pps, int vers)
1663{
1664
1665        return ((pps->kcmode & KCMODE_ABIFLAG) && pps->driver_abi >= vers);
1666}
1667
/*
 * Fetch the most recent PPS event info, optionally blocking until a new
 * assert or clear event is captured.  Returns 0 on success or an errno
 * (EINVAL for an unsupported timestamp format, or a sleep error).
 */
static int
pps_fetch(struct pps_fetch_args *fapi, struct pps_state *pps)
{
	int err, timo;
	pps_seq_t aseq, cseq;
	struct timeval tv;

	if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
		return (EINVAL);

	/*
	 * If no timeout is requested, immediately return whatever values were
	 * most recently captured.  If timeout seconds is -1, that's a request
	 * to block without a timeout.  WITNESS won't let us sleep forever
	 * without a lock (we really don't need a lock), so just repeatedly
	 * sleep a long time.
	 */
	if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec) {
		if (fapi->timeout.tv_sec == -1)
			timo = 0x7fffffff;
		else {
			tv.tv_sec = fapi->timeout.tv_sec;
			tv.tv_usec = fapi->timeout.tv_nsec / 1000;
			timo = tvtohz(&tv);
		}
		/* Wait until either sequence number advances. */
		aseq = pps->ppsinfo.assert_sequence;
		cseq = pps->ppsinfo.clear_sequence;
		while (aseq == pps->ppsinfo.assert_sequence &&
		    cseq == pps->ppsinfo.clear_sequence) {
			/* Sleep on the driver's mutex when it provides one. */
			if (abi_aware(pps, 1) && pps->driver_mtx != NULL) {
				if (pps->flags & PPSFLAG_MTX_SPIN) {
					err = msleep_spin(pps, pps->driver_mtx,
					    "ppsfch", timo);
				} else {
					err = msleep(pps, pps->driver_mtx, PCATCH,
					    "ppsfch", timo);
				}
			} else {
				err = tsleep(pps, PCATCH, "ppsfch", timo);
			}
			/* "Forever" waits retry on timeout; real errors bail. */
			if (err == EWOULDBLOCK && fapi->timeout.tv_sec == -1) {
				continue;
			} else if (err != 0) {
				return (err);
			}
		}
	}

	pps->ppsinfo.current_mode = pps->ppsparam.mode;
	fapi->pps_info_buf = pps->ppsinfo;

	return (0);
}
1721
/*
 * RFC 2783 PPS-API ioctl dispatcher.  'data' is interpreted according to
 * 'cmd'; returns 0 on success, an errno on failure, or ENOIOCTL for
 * commands this layer does not implement.
 */
int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	struct pps_fetch_args *fapi;
#ifdef FFCLOCK
	struct pps_fetch_ffc_args *fapi_ffc;
#endif
#ifdef PPS_SYNC
	struct pps_kcbind_args *kapi;
#endif

	KASSERT(pps != NULL, ("NULL pps pointer in pps_ioctl"));
	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		/* Reject modes the driver did not advertise. */
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
#ifdef FFCLOCK
		/* Ensure only a single clock is selected for ffc timestamp. */
		if ((app->mode & PPS_TSCLK_MASK) == PPS_TSCLK_MASK)
			return (EINVAL);
#endif
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		fapi = (struct pps_fetch_args *)data;
		return (pps_fetch(fapi, pps));
#ifdef FFCLOCK
	case PPS_IOC_FETCH_FFCOUNTER:
		fapi_ffc = (struct pps_fetch_ffc_args *)data;
		if (fapi_ffc->tsformat && fapi_ffc->tsformat !=
		    PPS_TSFMT_TSPEC)
			return (EINVAL);
		/* Blocking fetch is not supported for the ffcounter path. */
		if (fapi_ffc->timeout.tv_sec || fapi_ffc->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo_ffc.current_mode = pps->ppsparam.mode;
		fapi_ffc->pps_info_buf_ffc = pps->ppsinfo_ffc;
		/* Overwrite timestamps if feedback clock selected. */
		switch (pps->ppsparam.mode & PPS_TSCLK_MASK) {
		case PPS_TSCLK_FBCK:
			fapi_ffc->pps_info_buf_ffc.assert_timestamp =
			    pps->ppsinfo.assert_timestamp;
			fapi_ffc->pps_info_buf_ffc.clear_timestamp =
			    pps->ppsinfo.clear_timestamp;
			break;
		case PPS_TSCLK_FFWD:
			break;
		default:
			break;
		}
		return (0);
#endif /* FFCLOCK */
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		kapi = (struct pps_kcbind_args *)data;
		/* XXX Only root should be able to do this */
		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
			return (EINVAL);
		if (kapi->edge & ~pps->ppscap)
			return (EINVAL);
		/* Keep the ABI flag; replace only the edge selection. */
		pps->kcmode = (kapi->edge & KCMODE_EDGEMASK) |
		    (pps->kcmode & KCMODE_ABIFLAG);
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (ENOIOCTL);
	}
}
1807
1808void
1809pps_init(struct pps_state *pps)
1810{
1811        pps->ppscap |= PPS_TSFMT_TSPEC | PPS_CANWAIT;
1812        if (pps->ppscap & PPS_CAPTUREASSERT)
1813                pps->ppscap |= PPS_OFFSETASSERT;
1814        if (pps->ppscap & PPS_CAPTURECLEAR)
1815                pps->ppscap |= PPS_OFFSETCLEAR;
1816#ifdef FFCLOCK
1817        pps->ppscap |= PPS_TSCLK_MASK;
1818#endif
1819        pps->kcmode &= ~KCMODE_ABIFLAG;
1820}
1821
1822void
1823pps_init_abi(struct pps_state *pps)
1824{
1825
1826        pps_init(pps);
1827        if (pps->driver_abi > 0) {
1828                pps->kcmode |= KCMODE_ABIFLAG;
1829                pps->kernel_abi = PPS_ABI_VERSION;
1830        }
1831}
1832
/*
 * Capture the raw material for a PPS timestamp: the current timehands,
 * its generation, and a hardware counter read.  Called from (fast)
 * interrupt context; pps_event() later converts this to a timestamp.
 */
void
pps_capture(struct pps_state *pps)
{
	struct timehands *th;

	KASSERT(pps != NULL, ("NULL pps pointer in pps_capture"));
	th = timehands;
	pps->capgen = tc_getgen(th);
	pps->capth = th;
#ifdef FFCLOCK
	pps->capffth = fftimehands;
#endif
	pps->capcount = th->th_counter->tc_get_timecount(th->th_counter);
	/* If a windup raced us, mark the capture invalid (capgen == 0). */
	if (pps->capgen != tc_getgen(th))
		pps->capgen = 0;
}
1849
/*
 * Second half of a PPS event: turn the snapshot taken by pps_capture()
 * into a timestamp for the captured edge (PPS_CAPTUREASSERT or
 * PPS_CAPTURECLEAR), apply the configured user offset, update the
 * feed-forward clock info if compiled in, optionally feed the NTP
 * kernel PLL/FLL (PPS_SYNC) and wake up pps_fetch() sleepers.
 * Bails out silently whenever the timehands generation shows the
 * capture raced with tc_windup().
 */
void
pps_event(struct pps_state *pps, int event)
{
	struct bintime bt;
	struct timespec ts, *tsp, *osp;
	uint32_t tcount, *pcount;
	int foff, fhard;
	pps_seq_t *pseq;
#ifdef FFCLOCK
	struct timespec *tsp_ffc;
	pps_seq_t *pseq_ffc;
	ffcounter *ffcount;
#endif

	KASSERT(pps != NULL, ("NULL pps pointer in pps_event"));
	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen == 0 || pps->capgen != tc_getgen(pps->capth))
		return;

	/* Things would be easier with arrays. */
	/* Select the assert- or clear-edge bookkeeping fields. */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
#ifdef FFCLOCK
		ffcount = &pps->ppsinfo_ffc.assert_ffcount;
		tsp_ffc = &pps->ppsinfo_ffc.assert_timestamp;
		pseq_ffc = &pps->ppsinfo_ffc.assert_sequence;
#endif
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
#ifdef FFCLOCK
		ffcount = &pps->ppsinfo_ffc.clear_ffcount;
		tsp_ffc = &pps->ppsinfo_ffc.clear_timestamp;
		pseq_ffc = &pps->ppsinfo_ffc.clear_sequence;
#endif
	}

	/*
	 * If the timecounter changed, we cannot compare the count values, so
	 * we have to drop the rest of the PPS-stuff until the next event.
	 */
	if (pps->ppstc != pps->capth->th_counter) {
		pps->ppstc = pps->capth->th_counter;
		*pcount = pps->capcount;
		pps->ppscount[2] = pps->capcount;
		return;
	}

	/* Convert the count to a timespec. */
	tcount = pps->capcount - pps->capth->th_offset_count;
	tcount &= pps->capth->th_counter->tc_counter_mask;
	bt = pps->capth->th_offset;
	bintime_addx(&bt, pps->capth->th_scale * tcount);
	bintime_add(&bt, &boottimebin);
	bintime2timespec(&bt, &ts);

	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen != tc_getgen(pps->capth))
		return;

	/* Publish the raw count, bump the sequence, store the timestamp. */
	*pcount = pps->capcount;
	(*pseq)++;
	*tsp = ts;

	/* Apply the user-configured offset and renormalize tv_nsec. */
	if (foff) {
		timespecadd(tsp, osp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}

#ifdef FFCLOCK
	*ffcount = pps->capffth->tick_ffcount + tcount;
	/*
	 * NOTE(review): presumably ffclock_convert_delta() overwrites bt
	 * with the converted delta, which would make the preceding
	 * assignment from tick_time redundant (tick_time is added again
	 * below) — verify against ffclock_convert_delta()'s definition.
	 */
	bt = pps->capffth->tick_time;
	ffclock_convert_delta(tcount, pps->capffth->cest.period, &bt);
	bintime_add(&bt, &pps->capffth->tick_time);
	bintime2timespec(&bt, &ts);
	(*pseq_ffc)++;
	*tsp_ffc = ts;
#endif

#ifdef PPS_SYNC
	if (fhard) {
		uint64_t scale;

		/*
		 * Feed the NTP PLL/FLL.
		 * The FLL wants to know how many (hardware) nanoseconds
		 * elapsed since the previous event.
		 */
		tcount = pps->capcount - pps->ppscount[2];
		pps->ppscount[2] = pps->capcount;
		tcount &= pps->capth->th_counter->tc_counter_mask;
		/* scale = 2^64 / tc_frequency, built as 2 * (2^63 / freq). */
		scale = (uint64_t)1 << 63;
		scale /= pps->capth->th_counter->tc_frequency;
		scale *= 2;
		bt.sec = 0;
		bt.frac = 0;
		bintime_addx(&bt, scale * tcount);
		bintime2timespec(&bt, &ts);
		hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec);
	}
#endif

	/* Wakeup anyone sleeping in pps_fetch().  */
	wakeup(pps);
}
1967#else /* __rtems__ */
1968/* FIXME: https://devel.rtems.org/ticket/2349 */
1969#endif /* __rtems__ */
1970
1971/*
1972 * Timecounters need to be updated every so often to prevent the hardware
1973 * counter from overflowing.  Updating also recalculates the cached values
1974 * used by the get*() family of functions, so their precision depends on
1975 * the update frequency.
1976 */
1977
#ifndef __rtems__
/*
 * Number of hardclock ticks between two tc_windup() calls; computed in
 * inittimecounter() to give roughly one update per millisecond and
 * exported read-only as kern.timecounter.tick.
 */
static int tc_tick;
SYSCTL_INT(_kern_timecounter, OID_AUTO, tick, CTLFLAG_RD, &tc_tick, 0,
    "Approximate number of hardclock ticks in a millisecond");
#endif /* __rtems__ */
1983
1984#ifndef __rtems__
1985void
1986tc_ticktock(int cnt)
1987{
1988        static int count;
1989
1990        count += cnt;
1991        if (count < tc_tick)
1992                return;
1993        count = 0;
1994        tc_windup();
1995}
1996#else /* __rtems__ */
1997void
1998_Timecounter_Tick(void)
1999{
2000        Per_CPU_Control *cpu_self = _Per_CPU_Get();
2001
2002        if (_Per_CPU_Is_boot_processor(cpu_self)) {
2003                tc_windup();
2004        }
2005
2006        _Watchdog_Tick(cpu_self);
2007}
2008
/*
 * Simplified RTEMS tick for drivers with a plain periodic counter:
 * advance the current timehands in place by a fixed amount, release the
 * timecounter lock held by the caller, then run the watchdog tick.
 *
 * delta:        counter ticks elapsed since the previous update.
 * offset:       new hardware counter reference (stored in
 *               th_offset_count).
 * lock_context: the caller's acquired timecounter lock, released via
 *               _Timecounter_Release() before the watchdog runs.
 */
void
_Timecounter_Tick_simple(uint32_t delta, uint32_t offset,
    ISR_lock_Context *lock_context)
{
	struct bintime bt;
	struct timehands *th;
	uint32_t ogen;

	th = timehands;
	ogen = th->th_generation;
	th->th_offset_count = offset;
	/* Uptime advances by delta ticks scaled to a bintime fraction. */
	bintime_addx(&th->th_offset, th->th_scale * delta);

	/* Wall-clock time = uptime + boot time. */
	bt = th->th_offset;
	bintime_add(&bt, &boottimebin);

	/* Update the UTC timestamps used by the get*() functions. */
	/* XXX shouldn't do this here.  Should force non-`get' versions. */
	bintime2timeval(&bt, &th->th_microtime);
	bintime2timespec(&bt, &th->th_nanotime);

	/*
	 * Now that the struct timehands is again consistent, set the new
	 * generation number, making sure to not make it zero.
	 */
	if (++ogen == 0)
		ogen = 1;
	th->th_generation = ogen;

	/* Go live with the new struct timehands. */
	time_second = th->th_microtime.tv_sec;
	time_uptime = th->th_offset.sec;

	_Timecounter_Release(lock_context);

	_Watchdog_Tick(_Per_CPU_Get_snapshot());
}
2046#endif /* __rtems__ */
2047
2048#ifndef __rtems__
2049static void __inline
2050tc_adjprecision(void)
2051{
2052        int t;
2053
2054        if (tc_timepercentage > 0) {
2055                t = (99 + tc_timepercentage) / tc_timepercentage;
2056                tc_precexp = fls(t + (t >> 1)) - 1;
2057                FREQ2BT(hz / tc_tick, &bt_timethreshold);
2058                FREQ2BT(hz, &bt_tickthreshold);
2059                bintime_shift(&bt_timethreshold, tc_precexp);
2060                bintime_shift(&bt_tickthreshold, tc_precexp);
2061        } else {
2062                tc_precexp = 31;
2063                bt_timethreshold.sec = INT_MAX;
2064                bt_timethreshold.frac = ~(uint64_t)0;
2065                bt_tickthreshold = bt_timethreshold;
2066        }
2067        sbt_timethreshold = bttosbt(bt_timethreshold);
2068        sbt_tickthreshold = bttosbt(bt_tickthreshold);
2069}
2070#endif /* __rtems__ */
2071
2072#ifndef __rtems__
2073static int
2074sysctl_kern_timecounter_adjprecision(SYSCTL_HANDLER_ARGS)
2075{
2076        int error, val;
2077
2078        val = tc_timepercentage;
2079        error = sysctl_handle_int(oidp, &val, 0, req);
2080        if (error != 0 || req->newptr == NULL)
2081                return (error);
2082        tc_timepercentage = val;
2083        if (cold)
2084                goto done;
2085        tc_adjprecision();
2086done:
2087        return (0);
2088}
2089
2090static void
2091inittimecounter(void *dummy)
2092{
2093        u_int p;
2094        int tick_rate;
2095
2096        /*
2097         * Set the initial timeout to
2098         * max(1, <approx. number of hardclock ticks in a millisecond>).
2099         * People should probably not use the sysctl to set the timeout
2100         * to smaller than its inital value, since that value is the
2101         * smallest reasonable one.  If they want better timestamps they
2102         * should use the non-"get"* functions.
2103         */
2104        if (hz > 1000)
2105                tc_tick = (hz + 500) / 1000;
2106        else
2107                tc_tick = 1;
2108        tc_adjprecision();
2109        FREQ2BT(hz, &tick_bt);
2110        tick_sbt = bttosbt(tick_bt);
2111        tick_rate = hz / tc_tick;
2112        FREQ2BT(tick_rate, &tc_tick_bt);
2113        tc_tick_sbt = bttosbt(tc_tick_bt);
2114        p = (tc_tick * 1000000) / hz;
2115        printf("Timecounters tick every %d.%03u msec\n", p / 1000, p % 1000);
2116
2117#ifdef FFCLOCK
2118        ffclock_init();
2119#endif
2120        /* warm up new timecounter (again) and get rolling. */
2121        (void)timecounter->tc_get_timecount(timecounter);
2122        (void)timecounter->tc_get_timecount(timecounter);
2123        tc_windup();
2124}
2125
/* Run inittimecounter() during the clocks stage of system startup. */
SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL);
2127
/* Cpu tick handling -------------------------------------------------*/

/* Non-zero if the cpu tick rate may vary (enables cpu_tick_calibrate()). */
static int cpu_tick_variable;
/* Highest cpu tick frequency observed so far, in Hz. */
static uint64_t cpu_tick_frequency;
2132
2133static uint64_t
2134tc_cpu_ticks(void)
2135{
2136        static uint64_t base;
2137        static unsigned last;
2138        unsigned u;
2139        struct timecounter *tc;
2140
2141        tc = timehands->th_counter;
2142        u = tc->tc_get_timecount(tc) & tc->tc_counter_mask;
2143        if (u < last)
2144                base += (uint64_t)tc->tc_counter_mask + 1;
2145        last = u;
2146        return (u + base);
2147}
2148
2149void
2150cpu_tick_calibration(void)
2151{
2152        static time_t last_calib;
2153
2154        if (time_uptime != last_calib && !(time_uptime & 0xf)) {
2155                cpu_tick_calibrate(0);
2156                last_calib = time_uptime;
2157        }
2158}
2159
2160/*
2161 * This function gets called every 16 seconds on only one designated
2162 * CPU in the system from hardclock() via cpu_tick_calibration()().
2163 *
2164 * Whenever the real time clock is stepped we get called with reset=1
2165 * to make sure we handle suspend/resume and similar events correctly.
2166 */
2167
/*
 * Measure the variable cpu tick rate against timecounter-derived uptime
 * and keep the maximum frequency seen so far in cpu_tick_frequency.
 * Called with reset=1 when the real time clock is stepped, which only
 * clears the reference point so the next call restarts the baseline.
 */
static void
cpu_tick_calibrate(int reset)
{
	static uint64_t c_last;
	uint64_t c_this, c_delta;
	static struct bintime  t_last;
	struct bintime t_this, t_delta;
	uint32_t divi;

	if (reset) {
		/* The clock was stepped, abort & reset */
		t_last.sec = 0;
		return;
	}

	/* we don't calibrate fixed rate cputicks */
	if (!cpu_tick_variable)
		return;

	getbinuptime(&t_this);
	c_this = cpu_ticks();
	/* t_last.sec == 0 means no valid reference point yet. */
	if (t_last.sec != 0) {
		c_delta = c_this - c_last;
		t_delta = t_this;
		bintime_sub(&t_delta, &t_last);
		/*
		 * Headroom:
		 *      2^(64-20) / 16[s] =
		 *      2^(44) / 16[s] =
		 *      17.592.186.044.416 / 16 =
		 *      1.099.511.627.776 [Hz]
		 */
		/*
		 * Frequency in Hz as a 44.20 fixed-point division:
		 * divi holds the elapsed time in 2^-20 second units.
		 * NOTE(review): divi would be zero for an interval below
		 * ~1 microsecond — presumably impossible at the 16 s call
		 * cadence from cpu_tick_calibration(); confirm.
		 */
		divi = t_delta.sec << 20;
		divi |= t_delta.frac >> (64 - 20);
		c_delta <<= 20;
		c_delta /= divi;
		/* Only ever ratchet the recorded frequency upwards. */
		if (c_delta > cpu_tick_frequency) {
			if (0 && bootverbose)
				printf("cpu_tick increased to %ju Hz\n",
				    c_delta);
			cpu_tick_frequency = c_delta;
		}
	}
	c_last = c_this;
	t_last = t_this;
}
2214
2215void
2216set_cputicker(cpu_tick_f *func, uint64_t freq, unsigned var)
2217{
2218
2219        if (func == NULL) {
2220                cpu_ticks = tc_cpu_ticks;
2221        } else {
2222                cpu_tick_frequency = freq;
2223                cpu_tick_variable = var;
2224                cpu_ticks = func;
2225        }
2226}
2227
2228uint64_t
2229cpu_tickrate(void)
2230{
2231
2232        if (cpu_ticks == tc_cpu_ticks)
2233                return (tc_getfrequency());
2234        return (cpu_tick_frequency);
2235}
2236
2237/*
2238 * We need to be slightly careful converting cputicks to microseconds.
2239 * There is plenty of margin in 64 bits of microseconds (half a million
2240 * years) and in 64 bits at 4 GHz (146 years), but if we do a multiply
2241 * before divide conversion (to retain precision) we find that the
2242 * margin shrinks to 1.5 hours (one millionth of 146y).
2243 * With a three prong approach we never lose significant bits, no
2244 * matter what the cputick rate and length of timeinterval is.
2245 */
2246
/*
 * Convert cputicks to microseconds without losing significant bits:
 * use the largest pre-multiplication that cannot overflow 64 bits for
 * the given tick magnitude (see the three-prong rationale above).
 */
uint64_t
cputick2usec(uint64_t tick)
{

	if (tick <= 18446744073709LL)		/* floor(2^64 / 1000000) */
		return ((tick * 1000000LL) / cpu_tickrate());
	if (tick <= 18446744073709551LL)	/* floor(2^64 / 1000) */
		return ((tick * 1000LL) / (cpu_tickrate() / 1000LL));
	return (tick / (cpu_tickrate() / 1000000LL));
}
2258
/* Active cpu tick source; replaced via set_cputicker() by MD code. */
cpu_tick_f	*cpu_ticks = tc_cpu_ticks;
2260#endif /* __rtems__ */
2261
2262#ifndef __rtems__
2263static int vdso_th_enable = 1;
2264static int
2265sysctl_fast_gettime(SYSCTL_HANDLER_ARGS)
2266{
2267        int old_vdso_th_enable, error;
2268
2269        old_vdso_th_enable = vdso_th_enable;
2270        error = sysctl_handle_int(oidp, &old_vdso_th_enable, 0, req);
2271        if (error != 0)
2272                return (error);
2273        vdso_th_enable = old_vdso_th_enable;
2274        return (0);
2275}
/* Expose the switch above as kern.timecounter.fast_gettime. */
SYSCTL_PROC(_kern_timecounter, OID_AUTO, fast_gettime,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    NULL, 0, sysctl_fast_gettime, "I", "Enable fast time of day");
2279
2280uint32_t
2281tc_fill_vdso_timehands(struct vdso_timehands *vdso_th)
2282{
2283        struct timehands *th;
2284        uint32_t enabled;
2285
2286        th = timehands;
2287        vdso_th->th_algo = VDSO_TH_ALGO_1;
2288        vdso_th->th_scale = th->th_scale;
2289        vdso_th->th_offset_count = th->th_offset_count;
2290        vdso_th->th_counter_mask = th->th_counter->tc_counter_mask;
2291        vdso_th->th_offset = th->th_offset;
2292        vdso_th->th_boottime = boottimebin;
2293        enabled = cpu_fill_vdso_timehands(vdso_th, th->th_counter);
2294        if (!vdso_th_enable)
2295                enabled = 0;
2296        return (enabled);
2297}
2298#endif /* __rtems__ */
2299
2300#ifdef COMPAT_FREEBSD32
/*
 * 32-bit compat variant of tc_fill_vdso_timehands(): copy the current
 * timehands into the 32-bit vdso layout, storing each 64-bit field
 * through the two-word representation of struct vdso_timehands32.
 */
uint32_t
tc_fill_vdso_timehands32(struct vdso_timehands32 *vdso_th32)
{
	struct timehands *th;
	uint32_t enabled;

	th = timehands;
	vdso_th32->th_algo = VDSO_TH_ALGO_1;
	/*
	 * NOTE(review): these casts write 64-bit values through what
	 * appear to be 32-bit array members; this relies on suitable
	 * alignment and on the build not applying strict-aliasing
	 * optimizations — confirm against the struct definition and
	 * build flags.
	 */
	*(uint64_t *)&vdso_th32->th_scale[0] = th->th_scale;
	vdso_th32->th_offset_count = th->th_offset_count;
	vdso_th32->th_counter_mask = th->th_counter->tc_counter_mask;
	vdso_th32->th_offset.sec = th->th_offset.sec;
	*(uint64_t *)&vdso_th32->th_offset.frac[0] = th->th_offset.frac;
	vdso_th32->th_boottime.sec = boottimebin.sec;
	*(uint64_t *)&vdso_th32->th_boottime.frac[0] = boottimebin.frac;
	enabled = cpu_fill_vdso_timehands32(vdso_th32, th->th_counter);
	/* The sysctl switch overrides the machine-dependent verdict. */
	if (!vdso_th_enable)
		enabled = 0;
	return (enabled);
}
2321#endif
Note: See TracBrowser for help on using the repository browser.