source: rtems/cpukit/score/src/kern_tc.c @ 07b76fd

Last change on this file since 07b76fd was 07b76fd, checked in by Sebastian Huber <sebastian.huber@…>, on Dec 15, 2015 at 10:25:47 AM

score: Avoid dependency on bcopy()

  • Property mode set to 100644
File size: 56.5 KB
Line 
1/*-
2 * ----------------------------------------------------------------------------
3 * "THE BEER-WARE LICENSE" (Revision 42):
4 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
5 * can do whatever you want with this stuff. If we meet some day, and you think
6 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
7 * ----------------------------------------------------------------------------
8 *
9 * Copyright (c) 2011 The FreeBSD Foundation
10 * All rights reserved.
11 *
12 * Portions of this software were developed by Julien Ridoux at the University
13 * of Melbourne under sponsorship from the FreeBSD Foundation.
14 */
15
16#ifdef __rtems__
17#define _KERNEL
18#define bintime _Timecounter_Bintime
19#define binuptime _Timecounter_Binuptime
20#define boottimebin _Timecounter_Boottimebin
21#define getbintime _Timecounter_Getbintime
22#define getbinuptime _Timecounter_Getbinuptime
23#define getmicrotime _Timecounter_Getmicrotime
24#define getmicrouptime _Timecounter_Getmicrouptime
25#define getnanotime _Timecounter_Getnanotime
26#define getnanouptime _Timecounter_Getnanouptime
27#define microtime _Timecounter_Microtime
28#define microuptime _Timecounter_Microuptime
29#define nanotime _Timecounter_Nanotime
30#define nanouptime _Timecounter_Nanouptime
31#define tc_init _Timecounter_Install
32#define timecounter _Timecounter
33#define time_second _Timecounter_Time_second
34#define time_uptime _Timecounter_Time_uptime
35#include <rtems/score/timecounterimpl.h>
36#include <rtems/score/watchdogimpl.h>
37#endif /* __rtems__ */
38#include <sys/cdefs.h>
39__FBSDID("$FreeBSD r284178 2015-06-09T11:49:56Z$");
40
41#include "opt_compat.h"
42#include "opt_ntp.h"
43#include "opt_ffclock.h"
44
45#include <sys/param.h>
46#ifndef __rtems__
47#include <sys/kernel.h>
48#include <sys/limits.h>
49#include <sys/lock.h>
50#include <sys/mutex.h>
51#include <sys/sbuf.h>
52#include <sys/sysctl.h>
53#include <sys/syslog.h>
54#include <sys/systm.h>
55#endif /* __rtems__ */
56#include <sys/timeffc.h>
57#include <sys/timepps.h>
58#include <sys/timetc.h>
59#include <sys/timex.h>
60#ifndef __rtems__
61#include <sys/vdso.h>
62#include <machine/atomic.h>
63#endif /* __rtems__ */
64#ifdef __rtems__
65#include <limits.h>
66#include <rtems.h>
67ISR_LOCK_DEFINE(static, _Timecounter_Lock, "Timecounter");
/*
 * Compatibility shims for the FreeBSD kernel environment.  The tick rate
 * comes from the RTEMS clock driver; kernel diagnostics (printf, log) are
 * compiled out.
 */
#define hz rtems_clock_get_ticks_per_second()
#define printf(...)
/*
 * Map BSD bcopy() onto memcpy() (note the swapped src/dst argument
 * order).  The macro must expand to an expression without a trailing
 * semicolon so that "bcopy(a, b, n);" forms exactly one statement and
 * remains safe in unbraced if/else bodies; the arguments are
 * parenthesized to protect against operator-precedence surprises.
 */
#define bcopy(x, y, z) memcpy((y), (x), (z))
#define log(...)
/*
 * Find last (most significant) set bit, numbering bits from 1; by
 * convention fls(0) == 0.  Replacement for the FreeBSD libkern helper.
 */
static inline int
fls(int x)
{
	if (x == 0)
		return 0;

	return (int) (sizeof(x) * CHAR_BIT) - __builtin_clz(x);
}
77/* FIXME: https://devel.rtems.org/ticket/2348 */
78#define ntp_update_second(a, b) do { (void) a; (void) b; } while (0)
79#endif /* __rtems__ */
80
81/*
82 * A large step happens on boot.  This constant detects such steps.
83 * It is relatively small so that ntp_update_second gets called enough
84 * in the typical 'missed a couple of seconds' case, but doesn't loop
85 * forever when the time step is large.
86 */
87#define LARGE_STEP      200
88
89/*
90 * Implement a dummy timecounter which we can use until we get a real one
91 * in the air.  This allows the console and other early stuff to use
92 * time services.
93 */
94
/*
 * Counter-read routine of the dummy timecounter: a software counter that
 * simply advances by one on every read.  On RTEMS the dummy is never used
 * for real timekeeping, so it just returns zero there.
 */
static uint32_t
dummy_get_timecount(struct timecounter *tc)
{
#ifndef __rtems__
	static uint32_t count;

	count++;
	return (count);
#else /* __rtems__ */
	return 0;
#endif /* __rtems__ */
}
106
/*
 * Boot-time placeholder timecounter: a 1 MHz pseudo counter with the full
 * 32-bit mask and a strongly negative quality, so that any real hardware
 * timecounter installed later is preferred over it.
 */
static struct timecounter dummy_timecounter = {
        dummy_get_timecount, 0, ~0u, 1000000, "dummy", -1000000
};
110
/*
 * One generation of timekeeping state.  Readers run lock-free against the
 * currently published element and use th_generation to detect concurrent
 * updates.  Field order matters: tc_windup() copies everything up to (but
 * not including) th_generation from the previous element.
 */
struct timehands {
        /* These fields must be initialized by the driver. */
        struct timecounter      *th_counter;     /* Hardware counter in use. */
        int64_t                 th_adjustment;   /* Rate adjustment (NTP) — set by windup code. */
        uint64_t                th_scale;        /* Counter ticks -> 64-bit bintime fraction. */
        uint32_t                th_offset_count; /* Raw counter value at the last windup. */
        struct bintime          th_offset;       /* Uptime as of the last windup. */
        struct timeval          th_microtime;    /* Wall-clock snapshot (microseconds). */
        struct timespec         th_nanotime;     /* Wall-clock snapshot (nanoseconds). */
        /* Fields not to be copied in tc_windup start with th_generation. */
#ifndef __rtems__
        u_int                   th_generation;   /* 0 while the element is being updated. */
#else /* __rtems__ */
        Atomic_Ulong            th_generation;   /* Same role, with C11-style atomics. */
#endif /* __rtems__ */
        struct timehands        *th_next;        /* Next element of the ring. */
};
128
/*
 * The timehands elements form a singly linked ring; updates are published
 * into the next element so lock-free readers can finish against the
 * previous one.  On SMP ten elements give readers more time before a slot
 * is recycled; on uniprocessor a single self-linked element suffices.
 * Only th0 starts valid (generation 1), based on the dummy timecounter.
 */
#if defined(RTEMS_SMP)
static struct timehands th0;
static struct timehands th9 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th0};
static struct timehands th8 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th9};
static struct timehands th7 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th8};
static struct timehands th6 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th7};
static struct timehands th5 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th6};
static struct timehands th4 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th5};
static struct timehands th3 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th4};
static struct timehands th2 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th3};
static struct timehands th1 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th2};
#endif
static struct timehands th0 = {
        &dummy_timecounter,             /* th_counter */
        0,                              /* th_adjustment */
        (uint64_t)-1 / 1000000,         /* th_scale: 2^64 / 1 MHz dummy frequency */
        0,                              /* th_offset_count */
        {1, 0},                         /* th_offset: uptime starts at one second */
        {0, 0},                         /* th_microtime */
        {0, 0},                         /* th_nanotime */
        1,                              /* th_generation: valid from the start */
#if defined(RTEMS_SMP)
        &th1
#else
        &th0
#endif
};
156
/* Currently published timehands element; readers load this pointer once per attempt. */
static struct timehands *volatile timehands = &th0;
/* Active timecounter (RTEMS: _Timecounter) and head of the list of registered ones. */
struct timecounter *timecounter = &dummy_timecounter;
static struct timecounter *timecounters = &dummy_timecounter;

#ifndef __rtems__
int tc_min_ticktock_freq = 1;
#endif /* __rtems__ */

/* Coarse, second-resolution wall-clock and uptime counters. */
volatile time_t time_second = 1;
volatile time_t time_uptime = 1;
167
/* Boot time in bintime format; added to uptime to obtain wall-clock time. */
struct bintime boottimebin;
#ifndef __rtems__
struct timeval boottime;
static int sysctl_kern_boottime(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime, CTLTYPE_STRUCT|CTLFLAG_RD,
    NULL, 0, sysctl_kern_boottime, "S,timeval", "System boottime");

SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, "");
static SYSCTL_NODE(_kern_timecounter, OID_AUTO, tc, CTLFLAG_RW, 0, "");

/* When non-zero, log a message each time the clock is stepped. */
static int timestepwarnings;
SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RW,
    &timestepwarnings, 0, "Log time steps");

/* Precision/threshold state for sleep-time rounding, tuned via sysctl below. */
struct bintime bt_timethreshold;
struct bintime bt_tickthreshold;
sbintime_t sbt_timethreshold;
sbintime_t sbt_tickthreshold;
struct bintime tc_tick_bt;
sbintime_t tc_tick_sbt;
int tc_precexp;
int tc_timepercentage = TC_DEFAULTPERC;
static int sysctl_kern_timecounter_adjprecision(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern_timecounter, OID_AUTO, alloweddeviation,
    CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, 0, 0,
    sysctl_kern_timecounter_adjprecision, "I",
    "Allowed time interval deviation in percents");
#endif /* __rtems__ */
196
197static void tc_windup(void);
198#ifndef __rtems__
199static void cpu_tick_calibrate(int);
200#endif /* __rtems__ */
201
202void dtrace_getnanotime(struct timespec *tsp);
203
204#ifndef __rtems__
/*
 * sysctl(2) handler for kern.boottime: export the boot time, truncated to
 * a pair of 32-bit values when the request comes from a 32-bit ABI
 * (SCTL_MASK32), except on MIPS where that path is skipped.
 */
static int
sysctl_kern_boottime(SYSCTL_HANDLER_ARGS)
{
#ifndef __mips__
#ifdef SCTL_MASK32
        int tv[2];

        if (req->flags & SCTL_MASK32) {
                /* compat32: narrow tv_sec/tv_usec to 32-bit ints. */
                tv[0] = boottime.tv_sec;
                tv[1] = boottime.tv_usec;
                return SYSCTL_OUT(req, tv, sizeof(tv));
        } else
#endif
#endif
                return SYSCTL_OUT(req, &boottime, sizeof(boottime));
}
221
/*
 * sysctl(2) handler: read and export the current raw counter value of the
 * timecounter passed as arg1.
 */
static int
sysctl_kern_timecounter_get(SYSCTL_HANDLER_ARGS)
{
        uint32_t ncount;
        struct timecounter *tc = arg1;

        ncount = tc->tc_get_timecount(tc);
        return sysctl_handle_int(oidp, &ncount, 0, req);
}
231
/*
 * sysctl(2) handler: export the nominal frequency of the timecounter
 * passed as arg1.
 */
static int
sysctl_kern_timecounter_freq(SYSCTL_HANDLER_ARGS)
{
        uint64_t freq;
        struct timecounter *tc = arg1;

        freq = tc->tc_frequency;
        return sysctl_handle_64(oidp, &freq, 0, req);
}
241#endif /* __rtems__ */
242
243/*
244 * Return the difference between the timehands' counter value now and what
245 * was when we copied it to the timehands' offset_count.
246 */
247static __inline uint32_t
248tc_delta(struct timehands *th)
249{
250        struct timecounter *tc;
251
252        tc = th->th_counter;
253        return ((tc->tc_get_timecount(tc) - th->th_offset_count) &
254            tc->tc_counter_mask);
255}
256
/*
 * Load a timehands generation with acquire semantics, so that subsequent
 * reads of the timehands fields cannot be reordered before it.  Readers
 * treat a generation of zero as "update in progress" and retry.
 */
static u_int
tc_getgen(struct timehands *th)
{

#ifndef __rtems__
#ifdef SMP
        return (atomic_load_acq_int(&th->th_generation));
#else
        /* Uniprocessor: a compiler barrier provides the required ordering. */
        u_int gen;

        gen = th->th_generation;
        __compiler_membar();
        return (gen);
#endif
#else /* __rtems__ */
        return (_Atomic_Load_ulong(&th->th_generation, ATOMIC_ORDER_ACQUIRE));
#endif /* __rtems__ */
}
275
/*
 * Store a timehands generation with release semantics, so that all prior
 * writes to the timehands fields are visible before the new generation.
 * The windup code stores zero first, rewrites the fields, then publishes
 * the incremented generation.
 */
static void
tc_setgen(struct timehands *th, u_int newgen)
{

#ifndef __rtems__
#ifdef SMP
        atomic_store_rel_int(&th->th_generation, newgen);
#else
        /* Uniprocessor: a compiler barrier provides the required ordering. */
        __compiler_membar();
        th->th_generation = newgen;
#endif
#else /* __rtems__ */
        _Atomic_Store_ulong(&th->th_generation, newgen, ATOMIC_ORDER_RELEASE);
#endif /* __rtems__ */
}
291
292/*
293 * Functions for reading the time.  We have to loop until we are sure that
294 * the timehands that we operated on was not updated under our feet.  See
295 * the comment in <sys/time.h> for a description of these 12 functions.
296 */
297
298#ifdef FFCLOCK
/*
 * Feed-back clock (fbclock) readers.  Each one snapshots the current
 * timehands, copies what it needs, and retries if the generation changed
 * underneath it (zero marks an update in progress).
 */
void
fbclock_binuptime(struct bintime *bt)
{
        struct timehands *th;
        unsigned int gen;

        do {
                th = timehands;
                gen = tc_getgen(th);
                *bt = th->th_offset;
                /* Add counter ticks elapsed since the last windup, scaled. */
                bintime_addx(bt, th->th_scale * tc_delta(th));
        } while (gen == 0 || gen != tc_getgen(th));
}

/* Full-resolution uptime as a timespec. */
void
fbclock_nanouptime(struct timespec *tsp)
{
        struct bintime bt;

        fbclock_binuptime(&bt);
        bintime2timespec(&bt, tsp);
}

/* Full-resolution uptime as a timeval. */
void
fbclock_microuptime(struct timeval *tvp)
{
        struct bintime bt;

        fbclock_binuptime(&bt);
        bintime2timeval(&bt, tvp);
}

/* Full-resolution wall-clock time: uptime plus boot time. */
void
fbclock_bintime(struct bintime *bt)
{

        fbclock_binuptime(bt);
        bintime_add(bt, &boottimebin);
}

/* Full-resolution wall-clock time as a timespec. */
void
fbclock_nanotime(struct timespec *tsp)
{
        struct bintime bt;

        fbclock_bintime(&bt);
        bintime2timespec(&bt, tsp);
}

/* Full-resolution wall-clock time as a timeval. */
void
fbclock_microtime(struct timeval *tvp)
{
        struct bintime bt;

        fbclock_bintime(&bt);
        bintime2timeval(&bt, tvp);
}

/*
 * The get*() variants below are cheap: they return values captured at the
 * last windup and never read the hardware counter, so their resolution is
 * one clock tick.
 */
void
fbclock_getbinuptime(struct bintime *bt)
{
        struct timehands *th;
        unsigned int gen;

        do {
                th = timehands;
                gen = tc_getgen(th);
                *bt = th->th_offset;
        } while (gen == 0 || gen != tc_getgen(th));
}

void
fbclock_getnanouptime(struct timespec *tsp)
{
        struct timehands *th;
        unsigned int gen;

        do {
                th = timehands;
                gen = tc_getgen(th);
                bintime2timespec(&th->th_offset, tsp);
        } while (gen == 0 || gen != tc_getgen(th));
}

void
fbclock_getmicrouptime(struct timeval *tvp)
{
        struct timehands *th;
        unsigned int gen;

        do {
                th = timehands;
                gen = tc_getgen(th);
                bintime2timeval(&th->th_offset, tvp);
        } while (gen == 0 || gen != tc_getgen(th));
}

void
fbclock_getbintime(struct bintime *bt)
{
        struct timehands *th;
        unsigned int gen;

        do {
                th = timehands;
                gen = tc_getgen(th);
                *bt = th->th_offset;
        } while (gen == 0 || gen != tc_getgen(th));
        bintime_add(bt, &boottimebin);
}

void
fbclock_getnanotime(struct timespec *tsp)
{
        struct timehands *th;
        unsigned int gen;

        do {
                th = timehands;
                gen = tc_getgen(th);
                *tsp = th->th_nanotime;
        } while (gen == 0 || gen != tc_getgen(th));
}

void
fbclock_getmicrotime(struct timeval *tvp)
{
        struct timehands *th;
        unsigned int gen;

        do {
                th = timehands;
                gen = tc_getgen(th);
                *tvp = th->th_microtime;
        } while (gen == 0 || gen != tc_getgen(th));
}
435#else /* !FFCLOCK */
/*
 * Default (non-FFCLOCK) time readers.  Each one snapshots the current
 * timehands, copies what it needs, and retries if the generation changed
 * underneath it (zero marks an update in progress).
 */
void
binuptime(struct bintime *bt)
{
        struct timehands *th;
        uint32_t gen;

        do {
                th = timehands;
                gen = tc_getgen(th);
                *bt = th->th_offset;
                /* Add counter ticks elapsed since the last windup, scaled. */
                bintime_addx(bt, th->th_scale * tc_delta(th));
        } while (gen == 0 || gen != tc_getgen(th));
}

/* Full-resolution uptime as a timespec. */
void
nanouptime(struct timespec *tsp)
{
        struct bintime bt;

        binuptime(&bt);
        bintime2timespec(&bt, tsp);
}

/* Full-resolution uptime as a timeval. */
void
microuptime(struct timeval *tvp)
{
        struct bintime bt;

        binuptime(&bt);
        bintime2timeval(&bt, tvp);
}

/* Full-resolution wall-clock time: uptime plus boot time. */
void
bintime(struct bintime *bt)
{

        binuptime(bt);
        bintime_add(bt, &boottimebin);
}

/* Full-resolution wall-clock time as a timespec. */
void
nanotime(struct timespec *tsp)
{
        struct bintime bt;

        bintime(&bt);
        bintime2timespec(&bt, tsp);
}

/* Full-resolution wall-clock time as a timeval. */
void
microtime(struct timeval *tvp)
{
        struct bintime bt;

        bintime(&bt);
        bintime2timeval(&bt, tvp);
}

/*
 * The get*() variants below are cheap: they return values captured at the
 * last windup and never read the hardware counter, so their resolution is
 * one clock tick.
 */
void
getbinuptime(struct bintime *bt)
{
        struct timehands *th;
        uint32_t gen;

        do {
                th = timehands;
                gen = tc_getgen(th);
                *bt = th->th_offset;
        } while (gen == 0 || gen != tc_getgen(th));
}

void
getnanouptime(struct timespec *tsp)
{
        struct timehands *th;
        uint32_t gen;

        do {
                th = timehands;
                gen = tc_getgen(th);
                bintime2timespec(&th->th_offset, tsp);
        } while (gen == 0 || gen != tc_getgen(th));
}

void
getmicrouptime(struct timeval *tvp)
{
        struct timehands *th;
        uint32_t gen;

        do {
                th = timehands;
                gen = tc_getgen(th);
                bintime2timeval(&th->th_offset, tvp);
        } while (gen == 0 || gen != tc_getgen(th));
}

void
getbintime(struct bintime *bt)
{
        struct timehands *th;
        uint32_t gen;

        do {
                th = timehands;
                gen = tc_getgen(th);
                *bt = th->th_offset;
        } while (gen == 0 || gen != tc_getgen(th));
        bintime_add(bt, &boottimebin);
}

void
getnanotime(struct timespec *tsp)
{
        struct timehands *th;
        uint32_t gen;

        do {
                th = timehands;
                gen = tc_getgen(th);
                *tsp = th->th_nanotime;
        } while (gen == 0 || gen != tc_getgen(th));
}

void
getmicrotime(struct timeval *tvp)
{
        struct timehands *th;
        uint32_t gen;

        do {
                th = timehands;
                gen = tc_getgen(th);
                *tvp = th->th_microtime;
        } while (gen == 0 || gen != tc_getgen(th));
}
572#endif /* FFCLOCK */
573
574#ifdef FFCLOCK
575/*
576 * Support for feed-forward synchronization algorithms. This is heavily inspired
577 * by the timehands mechanism but kept independent from it. *_windup() functions
578 * have some connection to avoid accessing the timecounter hardware more than
579 * necessary.
580 */
581
/* Feed-forward clock estimates kept updated by the synchronization daemon. */
struct ffclock_estimate ffclock_estimate;
struct bintime ffclock_boottime;        /* Feed-forward boot time estimate. */
uint32_t ffclock_status;                /* Feed-forward clock status. */
int8_t ffclock_updated;                 /* New estimates are available. */
struct mtx ffclock_mtx;                 /* Mutex on ffclock_estimate. */

/*
 * Per-tick feed-forward state, organized as a ring like the timehands;
 * gen == 0 marks an element that is being rewritten.
 */
struct fftimehands {
        struct ffclock_estimate cest;           /* Estimates active this tick. */
        struct bintime          tick_time;      /* Absolute time at this tick. */
        struct bintime          tick_time_lerp; /* Monotonic (interpolated) variant. */
        ffcounter               tick_ffcount;   /* ffcounter value at this tick. */
        uint64_t                period_lerp;    /* Period used for interpolation. */
        volatile uint8_t        gen;
        struct fftimehands      *next;
};

#define NUM_ELEMENTS(x) (sizeof(x) / sizeof(*x))

static struct fftimehands ffth[10];
static struct fftimehands *volatile fftimehands = ffth;
603
/* Initialize the feed-forward timehands ring and global ffclock state. */
static void
ffclock_init(void)
{
        struct fftimehands *cur;
        struct fftimehands *last;

        memset(ffth, 0, sizeof(ffth));

        /* Link the static array into a circular list. */
        last = ffth + NUM_ELEMENTS(ffth) - 1;
        for (cur = ffth; cur < last; cur++)
                cur->next = cur + 1;
        last->next = ffth;

        ffclock_updated = 0;
        ffclock_status = FFCLOCK_STA_UNSYNC;
        mtx_init(&ffclock_mtx, "ffclock lock", NULL, MTX_DEF);
}
621
622/*
623 * Reset the feed-forward clock estimates. Called from inittodr() to get things
624 * kick started and uses the timecounter nominal frequency as a first period
625 * estimate. Note: this function may be called several time just after boot.
626 * Note: this is the only function that sets the value of boot time for the
627 * monotonic (i.e. uptime) version of the feed-forward clock.
628 */
void
ffclock_reset_clock(struct timespec *ts)
{
        struct timecounter *tc;
        struct ffclock_estimate cest;

        tc = timehands->th_counter;
        memset(&cest, 0, sizeof(struct ffclock_estimate));

        timespec2bintime(ts, &ffclock_boottime);
        timespec2bintime(ts, &(cest.update_time));
        ffclock_read_counter(&cest.update_ffcount);
        cest.leapsec_next = 0;
        /*
         * Nominal period in bintime fraction units: 2^64 / frequency,
         * computed as ((2^63 / freq) << 1) to stay within 64 bits.
         */
        cest.period = ((1ULL << 63) / tc->tc_frequency) << 1;
        cest.errb_abs = 0;
        cest.errb_rate = 0;
        cest.status = FFCLOCK_STA_UNSYNC;
        cest.leapsec_total = 0;
        cest.leapsec = 0;

        /* Publish under the mutex; INT8_MAX tells ffclock_windup "reset". */
        mtx_lock(&ffclock_mtx);
        bcopy(&cest, &ffclock_estimate, sizeof(struct ffclock_estimate));
        ffclock_updated = INT8_MAX;
        mtx_unlock(&ffclock_mtx);

        printf("ffclock reset: %s (%llu Hz), time = %ld.%09lu\n", tc->tc_name,
            (unsigned long long)tc->tc_frequency, (long)ts->tv_sec,
            (unsigned long)ts->tv_nsec);
}
658
659/*
660 * Sub-routine to convert a time interval measured in RAW counter units to time
661 * in seconds stored in bintime format.
662 * NOTE: bintime_mul requires u_int, but the value of the ffcounter may be
663 * larger than the max value of u_int (on 32 bit architecture). Loop to consume
664 * extra cycles.
665 */
static void
ffclock_convert_delta(ffcounter ffdelta, uint64_t period, struct bintime *bt)
{
        struct bintime bt2;
        ffcounter delta, delta_max;

        /* Largest chunk that fits in bintime_mul's unsigned int argument. */
        delta_max = (1ULL << (8 * sizeof(unsigned int))) - 1;
        bintime_clear(bt);
        /* Consume ffdelta in chunks of at most delta_max counter units. */
        do {
                if (ffdelta > delta_max)
                        delta = delta_max;
                else
                        delta = ffdelta;
                bt2.sec = 0;
                bt2.frac = period;
                bintime_mul(&bt2, (unsigned int)delta);
                bintime_add(bt, &bt2);
                ffdelta -= delta;
        } while (ffdelta > 0);
}
686
687/*
688 * Update the fftimehands.
689 * Push the tick ffcount and time(s) forward based on current clock estimate.
690 * The conversion from ffcounter to bintime relies on the difference clock
691 * principle, whose accuracy relies on computing small time intervals. If a new
692 * clock estimate has been passed by the synchronisation daemon, make it
693 * current, and compute the linear interpolation for monotonic time if needed.
694 */
static void
ffclock_windup(unsigned int delta)
{
        struct ffclock_estimate *cest;
        struct fftimehands *ffth;
        struct bintime bt, gap_lerp;
        ffcounter ffdelta;
        uint64_t frac;
        unsigned int polling;
        uint8_t forward_jump, ogen;

        /*
         * Pick the next timehand, copy current ffclock estimates and move tick
         * times and counter forward.
         */
        forward_jump = 0;
        ffth = fftimehands->next;
        ogen = ffth->gen;
        ffth->gen = 0;          /* Mark the slot invalid while it is rewritten. */
        cest = &ffth->cest;
        bcopy(&fftimehands->cest, cest, sizeof(struct ffclock_estimate));
        ffdelta = (ffcounter)delta;
        ffth->period_lerp = fftimehands->period_lerp;

        ffth->tick_time = fftimehands->tick_time;
        ffclock_convert_delta(ffdelta, cest->period, &bt);
        bintime_add(&ffth->tick_time, &bt);

        ffth->tick_time_lerp = fftimehands->tick_time_lerp;
        ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt);
        bintime_add(&ffth->tick_time_lerp, &bt);

        ffth->tick_ffcount = fftimehands->tick_ffcount + ffdelta;

        /*
         * Assess the status of the clock, if the last update is too old, it is
         * likely the synchronisation daemon is dead and the clock is free
         * running.
         */
        if (ffclock_updated == 0) {
                ffdelta = ffth->tick_ffcount - cest->update_ffcount;
                ffclock_convert_delta(ffdelta, cest->period, &bt);
                if (bt.sec > 2 * FFCLOCK_SKM_SCALE)
                        ffclock_status |= FFCLOCK_STA_UNSYNC;
        }

        /*
         * If available, grab updated clock estimates and make them current.
         * Recompute time at this tick using the updated estimates. The clock
         * estimates passed the feed-forward synchronisation daemon may result
         * in time conversion that is not monotonically increasing (just after
         * the update). time_lerp is a particular linear interpolation over the
         * synchronisation algo polling period that ensures monotonicity for the
         * clock ids requesting it.
         */
        if (ffclock_updated > 0) {
                bcopy(&ffclock_estimate, cest, sizeof(struct ffclock_estimate));
                ffdelta = ffth->tick_ffcount - cest->update_ffcount;
                ffth->tick_time = cest->update_time;
                ffclock_convert_delta(ffdelta, cest->period, &bt);
                bintime_add(&ffth->tick_time, &bt);

                /* ffclock_reset sets ffclock_updated to INT8_MAX */
                if (ffclock_updated == INT8_MAX)
                        ffth->tick_time_lerp = ffth->tick_time;

                if (bintime_cmp(&ffth->tick_time, &ffth->tick_time_lerp, >))
                        forward_jump = 1;
                else
                        forward_jump = 0;

                /* gap_lerp = |tick_time - tick_time_lerp|. */
                bintime_clear(&gap_lerp);
                if (forward_jump) {
                        gap_lerp = ffth->tick_time;
                        bintime_sub(&gap_lerp, &ffth->tick_time_lerp);
                } else {
                        gap_lerp = ffth->tick_time_lerp;
                        bintime_sub(&gap_lerp, &ffth->tick_time);
                }

                /*
                 * The reset from the RTC clock may be far from accurate, and
                 * reducing the gap between real time and interpolated time
                 * could take a very long time if the interpolated clock insists
                 * on strict monotonicity. The clock is reset under very strict
                 * conditions (kernel time is known to be wrong and
                 * synchronization daemon has been restarted recently).
                 * ffclock_boottime absorbs the jump to ensure boot time is
                 * correct and uptime functions stay consistent.
                 */
                if (((ffclock_status & FFCLOCK_STA_UNSYNC) == FFCLOCK_STA_UNSYNC) &&
                    ((cest->status & FFCLOCK_STA_UNSYNC) == 0) &&
                    ((cest->status & FFCLOCK_STA_WARMUP) == FFCLOCK_STA_WARMUP)) {
                        if (forward_jump)
                                bintime_add(&ffclock_boottime, &gap_lerp);
                        else
                                bintime_sub(&ffclock_boottime, &gap_lerp);
                        ffth->tick_time_lerp = ffth->tick_time;
                        bintime_clear(&gap_lerp);
                }

                ffclock_status = cest->status;
                ffth->period_lerp = cest->period;

                /*
                 * Compute corrected period used for the linear interpolation of
                 * time. The rate of linear interpolation is capped to 5000PPM
                 * (5ms/s).
                 */
                if (bintime_isset(&gap_lerp)) {
                        ffdelta = cest->update_ffcount;
                        ffdelta -= fftimehands->cest.update_ffcount;
                        ffclock_convert_delta(ffdelta, cest->period, &bt);
                        polling = bt.sec;
                        bt.sec = 0;
                        /* 18446744073 ~= 2^64 / 10^9: one nanosecond in frac units. */
                        bt.frac = 5000000 * (uint64_t)18446744073LL;
                        bintime_mul(&bt, polling);
                        if (bintime_cmp(&gap_lerp, &bt, >))
                                gap_lerp = bt;

                        /* Approximate 1 sec by 1-(1/2^64) to ease arithmetic */
                        frac = 0;
                        if (gap_lerp.sec > 0) {
                                frac -= 1;      /* Wraps to 2^64 - 1, i.e. ~1 second. */
                                frac /= ffdelta / gap_lerp.sec;
                        }
                        frac += gap_lerp.frac / ffdelta;

                        if (forward_jump)
                                ffth->period_lerp += frac;
                        else
                                ffth->period_lerp -= frac;
                }

                ffclock_updated = 0;
        }
        /* Publish: generation 0 is reserved for "being updated", skip it. */
        if (++ogen == 0)
                ogen = 1;
        ffth->gen = ogen;
        fftimehands = ffth;
}
836
837/*
838 * Adjust the fftimehands when the timecounter is changed. Stating the obvious,
839 * the old and new hardware counter cannot be read simultaneously. tc_windup()
840 * does read the two counters 'back to back', but a few cycles are effectively
841 * lost, and not accumulated in tick_ffcount. This is a fairly radical
842 * operation for a feed-forward synchronization daemon, and it is its job to not
843 * pushing irrelevant data to the kernel. Because there is no locking here,
844 * simply force to ignore pending or next update to give daemon a chance to
845 * realize the counter has changed.
846 */
static void
ffclock_change_tc(struct timehands *th)
{
        struct fftimehands *ffth;
        struct ffclock_estimate *cest;
        struct timecounter *tc;
        uint8_t ogen;

        tc = th->th_counter;
        ffth = fftimehands->next;
        ogen = ffth->gen;
        ffth->gen = 0;          /* Mark the slot invalid while it is rewritten. */

        /* Restart from the new counter's nominal period; estimates are stale. */
        cest = &ffth->cest;
        bcopy(&(fftimehands->cest), cest, sizeof(struct ffclock_estimate));
        cest->period = ((1ULL << 63) / tc->tc_frequency ) << 1;
        cest->errb_abs = 0;
        cest->errb_rate = 0;
        cest->status |= FFCLOCK_STA_UNSYNC;

        ffth->tick_ffcount = fftimehands->tick_ffcount;
        ffth->tick_time_lerp = fftimehands->tick_time_lerp;
        ffth->tick_time = fftimehands->tick_time;
        ffth->period_lerp = cest->period;

        /* Do not lock but ignore next update from synchronization daemon. */
        ffclock_updated--;

        if (++ogen == 0)
                ogen = 1;
        ffth->gen = ogen;
        fftimehands = ffth;
}
880
881/*
882 * Retrieve feed-forward counter and time of last kernel tick.
883 */
void
ffclock_last_tick(ffcounter *ffcount, struct bintime *bt, uint32_t flags)
{
        struct fftimehands *ffth;
        uint8_t gen;

        /*
         * No locking but check generation has not changed while the values
         * were copied; FFCLOCK_LERP selects the monotonic interpolated time.
         */
        do {
                ffth = fftimehands;
                gen = ffth->gen;
                if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP)
                        *bt = ffth->tick_time_lerp;
                else
                        *bt = ffth->tick_time;
                *ffcount = ffth->tick_ffcount;
        } while (gen == 0 || gen != ffth->gen);
}
904
905/*
906 * Absolute clock conversion. Low level function to convert ffcounter to
907 * bintime. The ffcounter is converted using the current ffclock period estimate
908 * or the "interpolated period" to ensure monotonicity.
909 * NOTE: this conversion may have been deferred, and the clock updated since the
910 * hardware counter has been read.
911 */
void
ffclock_convert_abs(ffcounter ffcount, struct bintime *bt, uint32_t flags)
{
        struct fftimehands *ffth;
        struct bintime bt2;
        ffcounter ffdelta;
        uint8_t gen;

        /*
         * No locking but check generation has not changed. ffdelta is kept
         * positive by taking the absolute difference to tick_ffcount; the
         * sign is reapplied below when adding/subtracting bt2.
         */
        do {
                ffth = fftimehands;
                gen = ffth->gen;
                if (ffcount > ffth->tick_ffcount)
                        ffdelta = ffcount - ffth->tick_ffcount;
                else
                        ffdelta = ffth->tick_ffcount - ffcount;

                if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP) {
                        *bt = ffth->tick_time_lerp;
                        ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt2);
                } else {
                        *bt = ffth->tick_time;
                        ffclock_convert_delta(ffdelta, ffth->cest.period, &bt2);
                }

                if (ffcount > ffth->tick_ffcount)
                        bintime_add(bt, &bt2);
                else
                        bintime_sub(bt, &bt2);
        } while (gen == 0 || gen != ffth->gen);
}
946
947/*
948 * Difference clock conversion.
949 * Low level function to Convert a time interval measured in RAW counter units
950 * into bintime. The difference clock allows measuring small intervals much more
951 * reliably than the absolute clock.
952 */
void
ffclock_convert_diff(ffcounter ffdelta, struct bintime *bt)
{
        struct fftimehands *ffth;
        uint8_t gen;

        /* No locking but check generation has not changed. */
        do {
                ffth = fftimehands;
                gen = ffth->gen;
                /* Convert using the current (non-interpolated) period estimate. */
                ffclock_convert_delta(ffdelta, ffth->cest.period, bt);
        } while (gen == 0 || gen != ffth->gen);
}
966
967/*
968 * Access to current ffcounter value.
969 */
void
ffclock_read_counter(ffcounter *ffcount)
{
	struct timehands *th;
	struct fftimehands *ffth;
	unsigned int gen, delta;

	/*
	 * ffclock_windup() called from tc_windup(), safe to rely on
	 * th->th_generation only, for correct delta and ffcounter.
	 */
	do {
		th = timehands;
		gen = tc_getgen(th);
		ffth = fftimehands;
		/* Hardware counter ticks elapsed since the last windup ... */
		delta = tc_delta(th);
		/* ... on top of the ffcounter recorded at that windup. */
		*ffcount = ffth->tick_ffcount;
	} while (gen == 0 || gen != tc_getgen(th));

	*ffcount += delta;
}
991
/* Uptime (time since boot) as a bintime, from the active system clock. */
void
binuptime(struct bintime *bt)
{

	binuptime_fromclock(bt, sysclock_active);
}
998
/* Uptime (time since boot) as a timespec, from the active system clock. */
void
nanouptime(struct timespec *tsp)
{

	nanouptime_fromclock(tsp, sysclock_active);
}
1005
/* Uptime (time since boot) as a timeval, from the active system clock. */
void
microuptime(struct timeval *tvp)
{

	microuptime_fromclock(tvp, sysclock_active);
}
1012
/* Wall-clock time as a bintime, from the active system clock. */
void
bintime(struct bintime *bt)
{

	bintime_fromclock(bt, sysclock_active);
}
1019
/* Wall-clock time as a timespec, from the active system clock. */
void
nanotime(struct timespec *tsp)
{

	nanotime_fromclock(tsp, sysclock_active);
}
1026
/* Wall-clock time as a timeval, from the active system clock. */
void
microtime(struct timeval *tvp)
{

	microtime_fromclock(tvp, sysclock_active);
}
1033
/* Cheap uptime as a bintime: cached value from the last tick, no HW read. */
void
getbinuptime(struct bintime *bt)
{

	getbinuptime_fromclock(bt, sysclock_active);
}
1040
/* Cheap uptime as a timespec: cached value from the last tick, no HW read. */
void
getnanouptime(struct timespec *tsp)
{

	getnanouptime_fromclock(tsp, sysclock_active);
}
1047
/* Cheap uptime as a timeval: cached value from the last tick, no HW read. */
void
getmicrouptime(struct timeval *tvp)
{

	getmicrouptime_fromclock(tvp, sysclock_active);
}
1054
/* Cheap wall-clock time as a bintime: cached value from the last tick. */
void
getbintime(struct bintime *bt)
{

	getbintime_fromclock(bt, sysclock_active);
}
1061
/* Cheap wall-clock time as a timespec: cached value from the last tick. */
void
getnanotime(struct timespec *tsp)
{

	getnanotime_fromclock(tsp, sysclock_active);
}
1068
1069void
1070getmicrotime(struct timeval *tvp)
1071{
1072
1073        getmicrouptime_fromclock(tvp, sysclock_active);
1074}
1075
1076#endif /* FFCLOCK */
1077
1078#ifndef __rtems__
1079/*
1080 * This is a clone of getnanotime and used for walltimestamps.
1081 * The dtrace_ prefix prevents fbt from creating probes for
1082 * it so walltimestamp can be safely used in all fbt probes.
1083 */
void
dtrace_getnanotime(struct timespec *tsp)
{
	struct timehands *th;
	uint32_t gen;

	/*
	 * Lockless read of the cached wall-clock timestamp (th_nanotime),
	 * retried until a stable, non-zero generation is observed.
	 */
	do {
		th = timehands;
		gen = tc_getgen(th);
		*tsp = th->th_nanotime;
	} while (gen == 0 || gen != tc_getgen(th));
}
1096#endif /* __rtems__ */
1097
#ifdef FFCLOCK
/*
 * System clock currently providing time to the system. Modifiable via sysctl
 * when the FFCLOCK option is defined. Defaults to the feedback clock.
 */
int sysclock_active = SYSCLOCK_FBCK;
#endif

/* Internal NTP status and error estimates (time_esterror is in microseconds). */
extern int time_status;
extern long time_esterror;
1109
1110#ifndef __rtems__
1111/*
1112 * Take a snapshot of sysclock data which can be used to compare system clocks
1113 * and generate timestamps after the fact.
1114 */
1115void
1116sysclock_getsnapshot(struct sysclock_snap *clock_snap, int fast)
1117{
1118        struct fbclock_info *fbi;
1119        struct timehands *th;
1120        struct bintime bt;
1121        unsigned int delta, gen;
1122#ifdef FFCLOCK
1123        ffcounter ffcount;
1124        struct fftimehands *ffth;
1125        struct ffclock_info *ffi;
1126        struct ffclock_estimate cest;
1127
1128        ffi = &clock_snap->ff_info;
1129#endif
1130
1131        fbi = &clock_snap->fb_info;
1132        delta = 0;
1133
1134        do {
1135                th = timehands;
1136                gen = tc_getgen(th);
1137                fbi->th_scale = th->th_scale;
1138                fbi->tick_time = th->th_offset;
1139#ifdef FFCLOCK
1140                ffth = fftimehands;
1141                ffi->tick_time = ffth->tick_time_lerp;
1142                ffi->tick_time_lerp = ffth->tick_time_lerp;
1143                ffi->period = ffth->cest.period;
1144                ffi->period_lerp = ffth->period_lerp;
1145                clock_snap->ffcount = ffth->tick_ffcount;
1146                cest = ffth->cest;
1147#endif
1148                if (!fast)
1149                        delta = tc_delta(th);
1150        } while (gen == 0 || gen != tc_getgen(th));
1151
1152        clock_snap->delta = delta;
1153#ifdef FFCLOCK
1154        clock_snap->sysclock_active = sysclock_active;
1155#endif
1156
1157        /* Record feedback clock status and error. */
1158        clock_snap->fb_info.status = time_status;
1159        /* XXX: Very crude estimate of feedback clock error. */
1160        bt.sec = time_esterror / 1000000;
1161        bt.frac = ((time_esterror - bt.sec) * 1000000) *
1162            (uint64_t)18446744073709ULL;
1163        clock_snap->fb_info.error = bt;
1164
1165#ifdef FFCLOCK
1166        if (!fast)
1167                clock_snap->ffcount += delta;
1168
1169        /* Record feed-forward clock leap second adjustment. */
1170        ffi->leapsec_adjustment = cest.leapsec_total;
1171        if (clock_snap->ffcount > cest.leapsec_next)
1172                ffi->leapsec_adjustment -= cest.leapsec;
1173
1174        /* Record feed-forward clock status and error. */
1175        clock_snap->ff_info.status = cest.status;
1176        ffcount = clock_snap->ffcount - cest.update_ffcount;
1177        ffclock_convert_delta(ffcount, cest.period, &bt);
1178        /* 18446744073709 = int(2^64/1e12), err_bound_rate in [ps/s]. */
1179        bintime_mul(&bt, cest.errb_rate * (uint64_t)18446744073709ULL);
1180        /* 18446744073 = int(2^64 / 1e9), since err_abs in [ns]. */
1181        bintime_addx(&bt, cest.errb_abs * (uint64_t)18446744073ULL);
1182        clock_snap->ff_info.error = bt;
1183#endif
1184}
1185
1186/*
1187 * Convert a sysclock snapshot into a struct bintime based on the specified
1188 * clock source and flags.
1189 */
int
sysclock_snap2bintime(struct sysclock_snap *cs, struct bintime *bt,
    int whichclock, uint32_t flags)
{
#ifdef FFCLOCK
	struct bintime bt2;
	uint64_t period;
#endif

	switch (whichclock) {
	case SYSCLOCK_FBCK:
		/* Feedback clock: tick time plus scaled counter delta. */
		*bt = cs->fb_info.tick_time;

		/* If snapshot was created with !fast, delta will be >0. */
		if (cs->delta > 0)
			bintime_addx(bt, cs->fb_info.th_scale * cs->delta);

		/* Default is wall-clock time; add boot time unless uptime. */
		if ((flags & FBCLOCK_UPTIME) == 0)
			bintime_add(bt, &boottimebin);
		break;
#ifdef FFCLOCK
	case SYSCLOCK_FFWD:
		/* FFCLOCK_LERP selects the monotonic interpolated clock. */
		if (flags & FFCLOCK_LERP) {
			*bt = cs->ff_info.tick_time_lerp;
			period = cs->ff_info.period_lerp;
		} else {
			*bt = cs->ff_info.tick_time;
			period = cs->ff_info.period;
		}

		/* If snapshot was created with !fast, delta will be >0. */
		if (cs->delta > 0) {
			ffclock_convert_delta(cs->delta, period, &bt2);
			bintime_add(bt, &bt2);
		}

		/* Leap second adjustment. */
		if (flags & FFCLOCK_LEAPSEC)
			bt->sec -= cs->ff_info.leapsec_adjustment;

		/* Boot time adjustment, for uptime/monotonic clocks. */
		if (flags & FFCLOCK_UPTIME)
			bintime_sub(bt, &ffclock_boottime);
		break;
#endif
	default:
		return (EINVAL);
		break;	/* NOTREACHED */
	}

	return (0);
}
1242#endif /* __rtems__ */
1243
1244/*
1245 * Initialize a new timecounter and possibly use it.
1246 */
void
tc_init(struct timecounter *tc)
{
#ifndef __rtems__
	uint32_t u;
	struct sysctl_oid *tc_root;

	/*
	 * Reject (by quality) counters that would wrap between two
	 * tc_windup() calls at the configured hz.
	 */
	u = tc->tc_frequency / tc->tc_counter_mask;
	/* XXX: We need some margin here, 10% is a guess */
	u *= 11;
	u /= 10;
	if (u > hz && tc->tc_quality >= 0) {
		tc->tc_quality = -2000;
		if (bootverbose) {
			printf("Timecounter \"%s\" frequency %ju Hz",
			    tc->tc_name, (uintmax_t)tc->tc_frequency);
			printf(" -- Insufficient hz, needs at least %u\n", u);
		}
	} else if (tc->tc_quality >= 0 || bootverbose) {
		printf("Timecounter \"%s\" frequency %ju Hz quality %d\n",
		    tc->tc_name, (uintmax_t)tc->tc_frequency,
		    tc->tc_quality);
	}
#endif /* __rtems__ */

	/* Link the counter into the global list. */
	tc->tc_next = timecounters;
	timecounters = tc;
#ifndef __rtems__
	/*
	 * Set up sysctl tree for this counter.
	 */
	tc_root = SYSCTL_ADD_NODE(NULL,
	    SYSCTL_STATIC_CHILDREN(_kern_timecounter_tc), OID_AUTO, tc->tc_name,
	    CTLFLAG_RW, 0, "timecounter description");
	SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
	    "mask", CTLFLAG_RD, &(tc->tc_counter_mask), 0,
	    "mask for implemented bits");
	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
	    "counter", CTLTYPE_UINT | CTLFLAG_RD, tc, sizeof(*tc),
	    sysctl_kern_timecounter_get, "IU", "current timecounter value");
	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
	    "frequency", CTLTYPE_U64 | CTLFLAG_RD, tc, sizeof(*tc),
	     sysctl_kern_timecounter_freq, "QU", "timecounter frequency");
	SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
	    "quality", CTLFLAG_RD, &(tc->tc_quality), 0,
	    "goodness of time counter");
	/*
	 * Never automatically use a timecounter with negative quality.
	 * Even though we run on the dummy counter, switching here may be
	 * worse since this timecounter may not be monotonous.
	 */
	if (tc->tc_quality < 0)
		return;
	if (tc->tc_quality < timecounter->tc_quality)
		return;
	if (tc->tc_quality == timecounter->tc_quality &&
	    tc->tc_frequency < timecounter->tc_frequency)
		return;
#endif /* __rtems__ */
	/* Warm up new timecounter with two reads before going live. */
	(void)tc->tc_get_timecount(tc);
	(void)tc->tc_get_timecount(tc);
	timecounter = tc;
#ifdef __rtems__
	/* RTEMS has no periodic tc_windup() caller yet; switch immediately. */
	tc_windup();
#endif /* __rtems__ */
}
1313
1314#ifndef __rtems__
1315/* Report the frequency of the current timecounter. */
uint64_t
tc_getfrequency(void)
{

	/* Frequency in Hz of the counter backing the live timehands. */
	return (timehands->th_counter->tc_frequency);
}
1322#endif /* __rtems__ */
1323
1324/*
1325 * Step our concept of UTC.  This is done by modifying our estimate of
1326 * when we booted.
1327 * XXX: not locked.
1328 */
#ifndef __rtems__
void
tc_setclock(struct timespec *ts)
#else /* __rtems__ */
void
_Timecounter_Set_clock(const struct timespec *ts)
#endif /* __rtems__ */
{
#ifndef __rtems__
	struct timespec tbef, taft;
#endif /* __rtems__ */
	struct bintime bt, bt2;

#ifndef __rtems__
	cpu_tick_calibrate(1);
	nanotime(&tbef);
#endif /* __rtems__ */
	/* New boot time = requested UTC time - current uptime. */
	timespec2bintime(ts, &bt);
	binuptime(&bt2);
	bintime_sub(&bt, &bt2);
	/* NOTE(review): bt2 (old UTC time) is not used after this point. */
	bintime_add(&bt2, &boottimebin);
	boottimebin = bt;
#ifndef __rtems__
	bintime2timeval(&bt, &boottime);
#endif /* __rtems__ */

	/* XXX fiddle all the little crinkly bits around the fiords... */
	tc_windup();
#ifndef __rtems__
	nanotime(&taft);
	if (timestepwarnings) {
		log(LOG_INFO,
		    "Time stepped from %jd.%09ld to %jd.%09ld (%jd.%09ld)\n",
		    (intmax_t)tbef.tv_sec, tbef.tv_nsec,
		    (intmax_t)taft.tv_sec, taft.tv_nsec,
		    (intmax_t)ts->tv_sec, ts->tv_nsec);
	}
	cpu_tick_calibrate(1);
#endif /* __rtems__ */
}
1369
1370/*
1371 * Initialize the next struct timehands in the ring and make
1372 * it the active timehands.  Along the way we might switch to a different
1373 * timecounter and/or do seconds processing in NTP.  Slightly magic.
1374 */
static void
tc_windup(void)
{
	struct bintime bt;
	struct timehands *th, *tho;
	uint64_t scale;
	uint32_t delta, ncount, ogen;
	int i;
	time_t t;
#ifdef __rtems__
	ISR_lock_Context lock_context;

	/* On RTEMS the windup is serialized by the timecounter ISR lock. */
	_ISR_lock_ISR_disable_and_acquire(&_Timecounter_Lock, &lock_context);
#endif /* __rtems__ */

	/*
	 * Make the next timehands a copy of the current one, but do not
	 * overwrite the generation or next pointer.  While we update
	 * the contents, the generation must be zero.
	 */
	tho = timehands;
	th = tho->th_next;
	ogen = th->th_generation;
	tc_setgen(th, 0);
	bcopy(tho, th, offsetof(struct timehands, th_generation));

	/*
	 * Capture a timecounter delta on the current timecounter and if
	 * changing timecounters, a counter value from the new timecounter.
	 * Update the offset fields accordingly.
	 */
	delta = tc_delta(th);
	if (th->th_counter != timecounter)
		ncount = timecounter->tc_get_timecount(timecounter);
	else
		ncount = 0;
#ifdef FFCLOCK
	ffclock_windup(delta);
#endif
	th->th_offset_count += delta;
	th->th_offset_count &= th->th_counter->tc_counter_mask;
	while (delta > th->th_counter->tc_frequency) {
		/* Eat complete unadjusted seconds. */
		delta -= th->th_counter->tc_frequency;
		th->th_offset.sec++;
	}
	if ((delta > th->th_counter->tc_frequency / 2) &&
	    (th->th_scale * delta < ((uint64_t)1 << 63))) {
		/* The product th_scale * delta just barely overflows. */
		th->th_offset.sec++;
	}
	bintime_addx(&th->th_offset, th->th_scale * delta);

	/*
	 * Hardware latching timecounters may not generate interrupts on
	 * PPS events, so instead we poll them.  There is a finite risk that
	 * the hardware might capture a count which is later than the one we
	 * got above, and therefore possibly in the next NTP second which might
	 * have a different rate than the current NTP second.  It doesn't
	 * matter in practice.
	 */
	if (tho->th_counter->tc_poll_pps)
		tho->th_counter->tc_poll_pps(tho->th_counter);

	/*
	 * Deal with NTP second processing.  The for loop normally
	 * iterates at most once, but in extreme situations it might
	 * keep NTP sane if timeouts are not run for several seconds.
	 * At boot, the time step can be large when the TOD hardware
	 * has been read, so on really large steps, we call
	 * ntp_update_second only twice.  We need to call it twice in
	 * case we missed a leap second.
	 */
	bt = th->th_offset;
	bintime_add(&bt, &boottimebin);
	i = bt.sec - tho->th_microtime.tv_sec;
	if (i > LARGE_STEP)
		i = 2;
	for (; i > 0; i--) {
		t = bt.sec;
		ntp_update_second(&th->th_adjustment, &bt.sec);
		/* ntp_update_second() may step bt.sec (e.g. leap second). */
		if (bt.sec != t)
			boottimebin.sec += bt.sec - t;
	}
	/* Update the UTC timestamps used by the get*() functions. */
	/* XXX shouldn't do this here.  Should force non-`get' versions. */
	bintime2timeval(&bt, &th->th_microtime);
	bintime2timespec(&bt, &th->th_nanotime);

	/* Now is a good time to change timecounters. */
	if (th->th_counter != timecounter) {
#ifndef __rtems__
#ifndef __arm__
		/* Track counters that stop in C2+ sleep states. */
		if ((timecounter->tc_flags & TC_FLAGS_C2STOP) != 0)
			cpu_disable_c2_sleep++;
		if ((th->th_counter->tc_flags & TC_FLAGS_C2STOP) != 0)
			cpu_disable_c2_sleep--;
#endif
#endif /* __rtems__ */
		th->th_counter = timecounter;
		th->th_offset_count = ncount;
#ifndef __rtems__
		tc_min_ticktock_freq = max(1, timecounter->tc_frequency /
		    (((uint64_t)timecounter->tc_counter_mask + 1) / 3));
#endif /* __rtems__ */
#ifdef FFCLOCK
		ffclock_change_tc(th);
#endif
	}

	/*-
	 * Recalculate the scaling factor.  We want the number of 1/2^64
	 * fractions of a second per period of the hardware counter, taking
	 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
	 * processing provides us with.
	 *
	 * The th_adjustment is nanoseconds per second with 32 bit binary
	 * fraction and we want 64 bit binary fraction of second:
	 *
	 *	 x = a * 2^32 / 10^9 = a * 4.294967296
	 *
	 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
	 * we can only multiply by about 850 without overflowing, that
	 * leaves no suitably precise fractions for multiply before divide.
	 *
	 * Divide before multiply with a fraction of 2199/512 results in a
	 * systematic undercompensation of 10PPM of th_adjustment.  On a
	 * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
	 *
	 * We happily sacrifice the lowest of the 64 bits of our result
	 * to the goddess of code clarity.
	 *
	 */
	scale = (uint64_t)1 << 63;
	scale += (th->th_adjustment / 1024) * 2199;
	scale /= th->th_counter->tc_frequency;
	th->th_scale = scale * 2;

	/*
	 * Now that the struct timehands is again consistent, set the new
	 * generation number, making sure to not make it zero.
	 */
	if (++ogen == 0)
		ogen = 1;
	tc_setgen(th, ogen);

	/* Go live with the new struct timehands. */
#ifdef FFCLOCK
	switch (sysclock_active) {
	case SYSCLOCK_FBCK:
#endif
		/* Export second-resolution clocks for the active sysclock. */
		time_second = th->th_microtime.tv_sec;
		time_uptime = th->th_offset.sec;
#ifdef FFCLOCK
		break;
	case SYSCLOCK_FFWD:
		time_second = fftimehands->tick_time_lerp.sec;
		time_uptime = fftimehands->tick_time_lerp.sec - ffclock_boottime.sec;
		break;
	}
#endif

	timehands = th;
#ifndef __rtems__
	timekeep_push_vdso();
#endif /* __rtems__ */
#ifdef __rtems__
	_ISR_lock_Release_and_ISR_enable(&_Timecounter_Lock, &lock_context);
#endif /* __rtems__ */
}
1545
1546#ifndef __rtems__
1547/* Report or change the active timecounter hardware. */
static int
sysctl_kern_timecounter_hardware(SYSCTL_HANDLER_ARGS)
{
	char newname[32];
	struct timecounter *newtc, *tc;
	int error;

	tc = timecounter;
	strlcpy(newname, tc->tc_name, sizeof(newname));

	/* Report the current name; bail out early if nothing changed. */
	error = sysctl_handle_string(oidp, &newname[0], sizeof(newname), req);
	if (error != 0 || req->newptr == NULL ||
	    strcmp(newname, tc->tc_name) == 0)
		return (error);
	/* Look up the requested counter by name in the registered list. */
	for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) {
		if (strcmp(newname, newtc->tc_name) != 0)
			continue;

		/* Warm up new timecounter. */
		(void)newtc->tc_get_timecount(newtc);
		(void)newtc->tc_get_timecount(newtc);

		timecounter = newtc;

		/*
		 * The vdso timehands update is deferred until the next
		 * 'tc_windup()'.
		 *
		 * This is prudent given that 'timekeep_push_vdso()' does not
		 * use any locking and that it can be called in hard interrupt
		 * context via 'tc_windup()'.
		 */
		return (0);
	}
	/* No counter with the requested name exists. */
	return (EINVAL);
}
1584
/* Sysctl kern.timecounter.hardware: select the active timecounter by name. */
SYSCTL_PROC(_kern_timecounter, OID_AUTO, hardware, CTLTYPE_STRING | CTLFLAG_RW,
    0, 0, sysctl_kern_timecounter_hardware, "A",
    "Timecounter hardware selected");
1588
1589
1590/* Report or change the active timecounter hardware. */
1591static int
1592sysctl_kern_timecounter_choice(SYSCTL_HANDLER_ARGS)
1593{
1594        struct sbuf sb;
1595        struct timecounter *tc;
1596        int error;
1597
1598        sbuf_new_for_sysctl(&sb, NULL, 0, req);
1599        for (tc = timecounters; tc != NULL; tc = tc->tc_next) {
1600                if (tc != timecounters)
1601                        sbuf_putc(&sb, ' ');
1602                sbuf_printf(&sb, "%s(%d)", tc->tc_name, tc->tc_quality);
1603        }
1604        error = sbuf_finish(&sb);
1605        sbuf_delete(&sb);
1606        return (error);
1607}
1608
/* Sysctl kern.timecounter.choice: read-only list of available counters. */
SYSCTL_PROC(_kern_timecounter, OID_AUTO, choice, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, sysctl_kern_timecounter_choice, "A", "Timecounter hardware detected");
1611#endif /* __rtems__ */
1612
1613#ifndef __rtems__
1614/*
1615 * RFC 2783 PPS-API implementation.
1616 */
1617
1618/*
1619 *  Return true if the driver is aware of the abi version extensions in the
1620 *  pps_state structure, and it supports at least the given abi version number.
1621 */
1622static inline int
1623abi_aware(struct pps_state *pps, int vers)
1624{
1625
1626        return ((pps->kcmode & KCMODE_ABIFLAG) && pps->driver_abi >= vers);
1627}
1628
static int
pps_fetch(struct pps_fetch_args *fapi, struct pps_state *pps)
{
	int err, timo;
	pps_seq_t aseq, cseq;
	struct timeval tv;

	/* Only the timespec timestamp format is supported. */
	if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
		return (EINVAL);

	/*
	 * If no timeout is requested, immediately return whatever values were
	 * most recently captured.  If timeout seconds is -1, that's a request
	 * to block without a timeout.  WITNESS won't let us sleep forever
	 * without a lock (we really don't need a lock), so just repeatedly
	 * sleep a long time.
	 */
	if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec) {
		if (fapi->timeout.tv_sec == -1)
			timo = 0x7fffffff;
		else {
			tv.tv_sec = fapi->timeout.tv_sec;
			tv.tv_usec = fapi->timeout.tv_nsec / 1000;
			timo = tvtohz(&tv);
		}
		/* Wait until either edge's sequence number advances. */
		aseq = pps->ppsinfo.assert_sequence;
		cseq = pps->ppsinfo.clear_sequence;
		while (aseq == pps->ppsinfo.assert_sequence &&
		    cseq == pps->ppsinfo.clear_sequence) {
			/* Sleep on the driver's mutex when the ABI exposes one. */
			if (abi_aware(pps, 1) && pps->driver_mtx != NULL) {
				if (pps->flags & PPSFLAG_MTX_SPIN) {
					err = msleep_spin(pps, pps->driver_mtx,
					    "ppsfch", timo);
				} else {
					err = msleep(pps, pps->driver_mtx, PCATCH,
					    "ppsfch", timo);
				}
			} else {
				err = tsleep(pps, PCATCH, "ppsfch", timo);
			}
			/* With tv_sec == -1, keep re-arming the long sleep. */
			if (err == EWOULDBLOCK && fapi->timeout.tv_sec == -1) {
				continue;
			} else if (err != 0) {
				return (err);
			}
		}
	}

	/* Copy out the most recent capture data. */
	pps->ppsinfo.current_mode = pps->ppsparam.mode;
	fapi->pps_info_buf = pps->ppsinfo;

	return (0);
}
1682
int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	struct pps_fetch_args *fapi;
#ifdef FFCLOCK
	struct pps_fetch_ffc_args *fapi_ffc;
#endif
#ifdef PPS_SYNC
	struct pps_kcbind_args *kapi;
#endif

	KASSERT(pps != NULL, ("NULL pps pointer in pps_ioctl"));
	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		/* Reject modes not advertised in ppscap. */
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
#ifdef FFCLOCK
		/* Ensure only a single clock is selected for ffc timestamp. */
		if ((app->mode & PPS_TSCLK_MASK) == PPS_TSCLK_MASK)
			return (EINVAL);
#endif
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		fapi = (struct pps_fetch_args *)data;
		return (pps_fetch(fapi, pps));
#ifdef FFCLOCK
	case PPS_IOC_FETCH_FFCOUNTER:
		/* Feed-forward fetch: timespec format only, no blocking. */
		fapi_ffc = (struct pps_fetch_ffc_args *)data;
		if (fapi_ffc->tsformat && fapi_ffc->tsformat !=
		    PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (fapi_ffc->timeout.tv_sec || fapi_ffc->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo_ffc.current_mode = pps->ppsparam.mode;
		fapi_ffc->pps_info_buf_ffc = pps->ppsinfo_ffc;
		/* Overwrite timestamps if feedback clock selected. */
		switch (pps->ppsparam.mode & PPS_TSCLK_MASK) {
		case PPS_TSCLK_FBCK:
			fapi_ffc->pps_info_buf_ffc.assert_timestamp =
			    pps->ppsinfo.assert_timestamp;
			fapi_ffc->pps_info_buf_ffc.clear_timestamp =
			    pps->ppsinfo.clear_timestamp;
			break;
		case PPS_TSCLK_FFWD:
			break;
		default:
			break;
		}
		return (0);
#endif /* FFCLOCK */
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		/* Bind a capture edge to kernel PPS discipline (hardpps). */
		kapi = (struct pps_kcbind_args *)data;
		/* XXX Only root should be able to do this */
		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
			return (EINVAL);
		if (kapi->edge & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = (kapi->edge & KCMODE_EDGEMASK) |
		    (pps->kcmode & KCMODE_ABIFLAG);
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (ENOIOCTL);
	}
}
1768
1769void
1770pps_init(struct pps_state *pps)
1771{
1772        pps->ppscap |= PPS_TSFMT_TSPEC | PPS_CANWAIT;
1773        if (pps->ppscap & PPS_CAPTUREASSERT)
1774                pps->ppscap |= PPS_OFFSETASSERT;
1775        if (pps->ppscap & PPS_CAPTURECLEAR)
1776                pps->ppscap |= PPS_OFFSETCLEAR;
1777#ifdef FFCLOCK
1778        pps->ppscap |= PPS_TSCLK_MASK;
1779#endif
1780        pps->kcmode &= ~KCMODE_ABIFLAG;
1781}
1782
1783void
1784pps_init_abi(struct pps_state *pps)
1785{
1786
1787        pps_init(pps);
1788        if (pps->driver_abi > 0) {
1789                pps->kcmode |= KCMODE_ABIFLAG;
1790                pps->kernel_abi = PPS_ABI_VERSION;
1791        }
1792}
1793
void
pps_capture(struct pps_state *pps)
{
	struct timehands *th;

	KASSERT(pps != NULL, ("NULL pps pointer in pps_capture"));
	/* Record the current timehands, its generation, and a counter read. */
	th = timehands;
	pps->capgen = tc_getgen(th);
	pps->capth = th;
#ifdef FFCLOCK
	pps->capffth = fftimehands;
#endif
	pps->capcount = th->th_counter->tc_get_timecount(th->th_counter);
	/* Invalidate the capture (capgen = 0) if a windup raced with us. */
	if (pps->capgen != tc_getgen(th))
		pps->capgen = 0;
}
1810
/*
 * Timestamp a PPS (pulse-per-second) edge previously latched by
 * pps_capture().  Converts the captured raw counter value into a wall-clock
 * timestamp (and, with FFCLOCK, a feed-forward clock timestamp), applies the
 * configured assert/clear offset, and optionally feeds the interval since
 * the previous pulse into the NTP kernel PLL/FLL via hardpps().
 *
 * pps   - per-source PPS state, previously filled in by pps_capture()
 * event - PPS_CAPTUREASSERT or PPS_CAPTURECLEAR; selects which edge's
 *         timestamp, offset and sequence number are updated
 *
 * Lock-free read protocol: the generation saved at capture time is compared
 * against the current timehands generation; if the timehands were recycled
 * in between, the captured values are stale and the event is dropped.
 */
void
pps_event(struct pps_state *pps, int event)
{
	struct bintime bt;
	struct timespec ts, *tsp, *osp;
	uint32_t tcount, *pcount;
	int foff, fhard;
	pps_seq_t *pseq;
#ifdef FFCLOCK
	struct timespec *tsp_ffc;
	pps_seq_t *pseq_ffc;
	ffcounter *ffcount;
#endif

	KASSERT(pps != NULL, ("NULL pps pointer in pps_event"));
	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen == 0 || pps->capgen != tc_getgen(pps->capth))
		return;

	/* Things would be easier with arrays. */
	/* Select the assert- or clear-edge bookkeeping to update. */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
#ifdef FFCLOCK
		ffcount = &pps->ppsinfo_ffc.assert_ffcount;
		tsp_ffc = &pps->ppsinfo_ffc.assert_timestamp;
		pseq_ffc = &pps->ppsinfo_ffc.assert_sequence;
#endif
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
#ifdef FFCLOCK
		ffcount = &pps->ppsinfo_ffc.clear_ffcount;
		tsp_ffc = &pps->ppsinfo_ffc.clear_timestamp;
		pseq_ffc = &pps->ppsinfo_ffc.clear_sequence;
#endif
	}

	/*
	 * If the timecounter changed, we cannot compare the count values, so
	 * we have to drop the rest of the PPS-stuff until the next event.
	 */
	if (pps->ppstc != pps->capth->th_counter) {
		pps->ppstc = pps->capth->th_counter;
		*pcount = pps->capcount;
		pps->ppscount[2] = pps->capcount;
		return;
	}

	/* Convert the count to a timespec. */
	/* Raw delta since the last windup, scaled onto uptime + boot time. */
	tcount = pps->capcount - pps->capth->th_offset_count;
	tcount &= pps->capth->th_counter->tc_counter_mask;
	bt = pps->capth->th_offset;
	bintime_addx(&bt, pps->capth->th_scale * tcount);
	bintime_add(&bt, &boottimebin);
	bintime2timespec(&bt, &ts);

	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen != tc_getgen(pps->capth))
		return;

	/* Publish: raw count, sequence number, and timestamp for this edge. */
	*pcount = pps->capcount;
	(*pseq)++;
	*tsp = ts;

	/* Apply the user-configured offset, renormalizing tv_nsec. */
	if (foff) {
		timespecadd(tsp, osp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}

#ifdef FFCLOCK
	/* Same event, timestamped with the feed-forward clock. */
	*ffcount = pps->capffth->tick_ffcount + tcount;
	bt = pps->capffth->tick_time;
	ffclock_convert_delta(tcount, pps->capffth->cest.period, &bt);
	bintime_add(&bt, &pps->capffth->tick_time);
	bintime2timespec(&bt, &ts);
	(*pseq_ffc)++;
	*tsp_ffc = ts;
#endif

#ifdef PPS_SYNC
	if (fhard) {
		uint64_t scale;

		/*
		 * Feed the NTP PLL/FLL.
		 * The FLL wants to know how many (hardware) nanoseconds
		 * elapsed since the previous event.
		 */
		tcount = pps->capcount - pps->ppscount[2];
		pps->ppscount[2] = pps->capcount;
		tcount &= pps->capth->th_counter->tc_counter_mask;
		/* scale = 2^64 / freq, built as 2 * (2^63 / freq) to avoid
		 * overflowing the 64-bit intermediate. */
		scale = (uint64_t)1 << 63;
		scale /= pps->capth->th_counter->tc_frequency;
		scale *= 2;
		bt.sec = 0;
		bt.frac = 0;
		bintime_addx(&bt, scale * tcount);
		bintime2timespec(&bt, &ts);
		hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec);
	}
#endif

	/* Wakeup anyone sleeping in pps_fetch().  */
	wakeup(pps);
}
1928#else /* __rtems__ */
1929/* FIXME: https://devel.rtems.org/ticket/2349 */
1930#endif /* __rtems__ */
1931
1932/*
1933 * Timecounters need to be updated every so often to prevent the hardware
1934 * counter from overflowing.  Updating also recalculates the cached values
1935 * used by the get*() family of functions, so their precision depends on
1936 * the update frequency.
1937 */
1938
#ifndef __rtems__
/* Hardclock ticks between timecounter windups (>= 1, roughly 1 ms worth). */
static int tc_tick;
SYSCTL_INT(_kern_timecounter, OID_AUTO, tick, CTLFLAG_RD, &tc_tick, 0,
    "Approximate number of hardclock ticks in a millisecond");
#endif /* __rtems__ */

#ifndef __rtems__
/*
 * Called from hardclock() with the number of elapsed ticks; winds up the
 * timecounter once every tc_tick ticks (about once per millisecond).
 */
void
tc_ticktock(int cnt)
{
	static int count;	/* ticks accumulated since the last windup */

	count += cnt;
	if (count < tc_tick)
		return;
	count = 0;
#else /* __rtems__ */
/*
 * RTEMS clock tick service: wind up the timecounter on every tick, then
 * process watchdog timeouts.  (Shares the windup tail with tc_ticktock()
 * via the preprocessor splice above.)
 */
void
_Timecounter_Tick(void)
{
#endif /* __rtems__ */
	tc_windup();
#ifdef __rtems__
	_Watchdog_Tick();
#endif /* __rtems__ */
}
1965#ifdef __rtems__
/*
 * Simplified RTEMS-only tick processing: advance the current timehands by a
 * fixed counter delta under the timecounter lock instead of performing a
 * full tc_windup().
 *
 * delta  - timecounter ticks elapsed since the previous call
 * offset - new raw timecounter reference value (stored in th_offset_count)
 *
 * Refreshes the cached timestamps used by the get*() functions, bumps the
 * timehands generation so lock-free readers can detect the update, and
 * finally runs the watchdog tick.
 */
void
_Timecounter_Tick_simple(uint32_t delta, uint32_t offset)
{
	struct bintime bt;
	struct timehands *th;
	uint32_t ogen;
	ISR_lock_Context lock_context;

	_ISR_lock_ISR_disable_and_acquire(&_Timecounter_Lock, &lock_context);

	/* Advance uptime by delta counter ticks at the current scale. */
	th = timehands;
	ogen = th->th_generation;
	th->th_offset_count = offset;
	bintime_addx(&th->th_offset, th->th_scale * delta);

	/* Wall-clock time = uptime + boot time. */
	bt = th->th_offset;
	bintime_add(&bt, &boottimebin);

	/* Update the UTC timestamps used by the get*() functions. */
	/* XXX shouldn't do this here.  Should force non-`get' versions. */
	bintime2timeval(&bt, &th->th_microtime);
	bintime2timespec(&bt, &th->th_nanotime);

	/*
	 * Now that the struct timehands is again consistent, set the new
	 * generation number, making sure to not make it zero.
	 */
	if (++ogen == 0)
		ogen = 1;
	th->th_generation = ogen;

	/* Go live with the new struct timehands. */
	time_second = th->th_microtime.tv_sec;
	time_uptime = th->th_offset.sec;

	_ISR_lock_Release_and_ISR_enable(&_Timecounter_Lock, &lock_context);

	_Watchdog_Tick();
}
2005#endif /* __rtems__ */
2006
2007#ifndef __rtems__
2008static void __inline
2009tc_adjprecision(void)
2010{
2011        int t;
2012
2013        if (tc_timepercentage > 0) {
2014                t = (99 + tc_timepercentage) / tc_timepercentage;
2015                tc_precexp = fls(t + (t >> 1)) - 1;
2016                FREQ2BT(hz / tc_tick, &bt_timethreshold);
2017                FREQ2BT(hz, &bt_tickthreshold);
2018                bintime_shift(&bt_timethreshold, tc_precexp);
2019                bintime_shift(&bt_tickthreshold, tc_precexp);
2020        } else {
2021                tc_precexp = 31;
2022                bt_timethreshold.sec = INT_MAX;
2023                bt_timethreshold.frac = ~(uint64_t)0;
2024                bt_tickthreshold = bt_timethreshold;
2025        }
2026        sbt_timethreshold = bttosbt(bt_timethreshold);
2027        sbt_tickthreshold = bttosbt(bt_tickthreshold);
2028}
2029#endif /* __rtems__ */
2030
2031#ifndef __rtems__
2032static int
2033sysctl_kern_timecounter_adjprecision(SYSCTL_HANDLER_ARGS)
2034{
2035        int error, val;
2036
2037        val = tc_timepercentage;
2038        error = sysctl_handle_int(oidp, &val, 0, req);
2039        if (error != 0 || req->newptr == NULL)
2040                return (error);
2041        tc_timepercentage = val;
2042        if (cold)
2043                goto done;
2044        tc_adjprecision();
2045done:
2046        return (0);
2047}
2048#endif /* __rtems__ */
2049
#ifndef __rtems__
/*
 * One-time timecounter subsystem initialization (SYSINIT on FreeBSD,
 * _Timecounter_Initialize() on RTEMS): derive the windup interval and
 * precision thresholds from hz, then warm up the active timecounter and
 * perform the first windup.
 */
static void
inittimecounter(void *dummy)
#else /* __rtems__ */
void
_Timecounter_Initialize(void)
#endif /* __rtems__ */
{
#ifndef __rtems__
	u_int p;
	int tick_rate;

	/*
	 * Set the initial timeout to
	 * max(1, <approx. number of hardclock ticks in a millisecond>).
	 * People should probably not use the sysctl to set the timeout
	 * to smaller than its inital value, since that value is the
	 * smallest reasonable one.  If they want better timestamps they
	 * should use the non-"get"* functions.
	 */
	if (hz > 1000)
		tc_tick = (hz + 500) / 1000;
	else
		tc_tick = 1;
	tc_adjprecision();
	/* Cache per-tick and per-windup intervals in bintime/sbintime form. */
	FREQ2BT(hz, &tick_bt);
	tick_sbt = bttosbt(tick_bt);
	tick_rate = hz / tc_tick;
	FREQ2BT(tick_rate, &tc_tick_bt);
	tc_tick_sbt = bttosbt(tc_tick_bt);
	p = (tc_tick * 1000000) / hz;
	printf("Timecounters tick every %d.%03u msec\n", p / 1000, p % 1000);
#endif /* __rtems__ */

#ifdef FFCLOCK
	ffclock_init();
#endif
	/* warm up new timecounter (again) and get rolling. */
	(void)timecounter->tc_get_timecount(timecounter);
	(void)timecounter->tc_get_timecount(timecounter);
	tc_windup();
}
2092
2093#ifndef __rtems__
2094SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL);
2095#endif /* __rtems__ */
2096
2097#ifndef __rtems__
2098/* Cpu tick handling -------------------------------------------------*/
2099
/* Nonzero if the CPU tick source's frequency may vary (needs calibration). */
static int cpu_tick_variable;
/* Highest observed (or configured) CPU tick frequency, in Hz. */
static uint64_t cpu_tick_frequency;

/*
 * Fallback cpu_ticks implementation built on the current timecounter.
 * Widens the (possibly narrow) hardware counter to 64 bits by adding one
 * full counter period to `base` each time a wrap is observed.
 *
 * NOTE(review): `base`/`last` are unsynchronized statics, so this presumably
 * relies on serialized callers; also, wraps are only detected if this is
 * called at least once per counter period — confirm against callers.
 */
static uint64_t
tc_cpu_ticks(void)
{
	static uint64_t base;	/* accumulated span of completed wraps */
	static unsigned last;	/* previous raw reading, for wrap detection */
	unsigned u;
	struct timecounter *tc;

	tc = timehands->th_counter;
	u = tc->tc_get_timecount(tc) & tc->tc_counter_mask;
	if (u < last)
		base += (uint64_t)tc->tc_counter_mask + 1;
	last = u;
	return (u + base);
}
2118
2119void
2120cpu_tick_calibration(void)
2121{
2122        static time_t last_calib;
2123
2124        if (time_uptime != last_calib && !(time_uptime & 0xf)) {
2125                cpu_tick_calibrate(0);
2126                last_calib = time_uptime;
2127        }
2128}
2129
2130/*
2131 * This function gets called every 16 seconds on only one designated
2132 * CPU in the system from hardclock() via cpu_tick_calibration()().
2133 *
2134 * Whenever the real time clock is stepped we get called with reset=1
2135 * to make sure we handle suspend/resume and similar events correctly.
2136 */
2137
static void
cpu_tick_calibrate(int reset)
{
	static uint64_t c_last;		/* cpu_ticks() reading at last call */
	uint64_t c_this, c_delta;
	static struct bintime  t_last;	/* uptime at last call; sec==0 marks "no sample" */
	struct bintime t_this, t_delta;
	uint32_t divi;

	if (reset) {
		/* The clock was stepped, abort & reset */
		t_last.sec = 0;
		return;
	}

	/* we don't calibrate fixed rate cputicks */
	if (!cpu_tick_variable)
		return;

	/* Take a paired (uptime, cpu_ticks) sample. */
	getbinuptime(&t_this);
	c_this = cpu_ticks();
	if (t_last.sec != 0) {
		c_delta = c_this - c_last;
		t_delta = t_this;
		bintime_sub(&t_delta, &t_last);
		/*
		 * Headroom:
		 *      2^(64-20) / 16[s] =
		 *      2^(44) / 16[s] =
		 *      17.592.186.044.416 / 16 =
		 *      1.099.511.627.776 [Hz]
		 */
		/* Interval as 12.20 fixed point; frequency = ticks/interval. */
		divi = t_delta.sec << 20;
		divi |= t_delta.frac >> (64 - 20);
		c_delta <<= 20;
		c_delta /= divi;
		/* Ratchet upward only: keep the highest frequency seen. */
		if (c_delta > cpu_tick_frequency) {
			if (0 && bootverbose)	/* debug print intentionally disabled */
				printf("cpu_tick increased to %ju Hz\n",
				    c_delta);
			cpu_tick_frequency = c_delta;
		}
	}
	c_last = c_this;
	t_last = t_this;
}
2184
2185void
2186set_cputicker(cpu_tick_f *func, uint64_t freq, unsigned var)
2187{
2188
2189        if (func == NULL) {
2190                cpu_ticks = tc_cpu_ticks;
2191        } else {
2192                cpu_tick_frequency = freq;
2193                cpu_tick_variable = var;
2194                cpu_ticks = func;
2195        }
2196}
2197
2198uint64_t
2199cpu_tickrate(void)
2200{
2201
2202        if (cpu_ticks == tc_cpu_ticks) 
2203                return (tc_getfrequency());
2204        return (cpu_tick_frequency);
2205}
2206
2207/*
2208 * We need to be slightly careful converting cputicks to microseconds.
2209 * There is plenty of margin in 64 bits of microseconds (half a million
2210 * years) and in 64 bits at 4 GHz (146 years), but if we do a multiply
2211 * before divide conversion (to retain precision) we find that the
2212 * margin shrinks to 1.5 hours (one millionth of 146y).
2213 * With a three prong approach we never lose significant bits, no
2214 * matter what the cputick rate and length of timeinterval is.
2215 */
2216
2217uint64_t
2218cputick2usec(uint64_t tick)
2219{
2220
2221        if (tick > 18446744073709551LL)         /* floor(2^64 / 1000) */
2222                return (tick / (cpu_tickrate() / 1000000LL));
2223        else if (tick > 18446744073709LL)       /* floor(2^64 / 1000000) */
2224                return ((tick * 1000LL) / (cpu_tickrate() / 1000LL));
2225        else
2226                return ((tick * 1000000LL) / cpu_tickrate());
2227}
2228
/* Active CPU tick source; defaults to the timecounter-based fallback. */
cpu_tick_f	*cpu_ticks = tc_cpu_ticks;
2230#endif /* __rtems__ */
2231
2232#ifndef __rtems__
/* Nonzero enables the userland fast-gettime path via the shared page. */
static int vdso_th_enable = 1;
/*
 * Sysctl handler for kern.timecounter.fast_gettime: reads or updates
 * vdso_th_enable.  (sysctl_handle_int leaves the value unchanged when no
 * new value is supplied, so the write-back below is then a no-op.)
 */
static int
sysctl_fast_gettime(SYSCTL_HANDLER_ARGS)
{
	int old_vdso_th_enable, error;

	old_vdso_th_enable = vdso_th_enable;
	error = sysctl_handle_int(oidp, &old_vdso_th_enable, 0, req);
	if (error != 0)
		return (error);
	vdso_th_enable = old_vdso_th_enable;
	return (0);
}
SYSCTL_PROC(_kern_timecounter, OID_AUTO, fast_gettime,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    NULL, 0, sysctl_fast_gettime, "I", "Enable fast time of day");
2249
/*
 * Fill the shared-page timehands snapshot used by the userland fast
 * gettime path: copy scale, offset count, counter mask, uptime offset and
 * boot time from the current timehands, then let the MD code decide
 * whether the underlying counter is readable from userland.
 *
 * Returns nonzero iff userland may use the snapshot (MD support present
 * and kern.timecounter.fast_gettime not disabled).
 *
 * NOTE(review): no generation check is performed here; presumably the
 * caller serializes against tc_windup() — confirm.
 */
uint32_t
tc_fill_vdso_timehands(struct vdso_timehands *vdso_th)
{
	struct timehands *th;
	uint32_t enabled;

	th = timehands;
	vdso_th->th_algo = VDSO_TH_ALGO_1;
	vdso_th->th_scale = th->th_scale;
	vdso_th->th_offset_count = th->th_offset_count;
	vdso_th->th_counter_mask = th->th_counter->tc_counter_mask;
	vdso_th->th_offset = th->th_offset;
	vdso_th->th_boottime = boottimebin;
	enabled = cpu_fill_vdso_timehands(vdso_th, th->th_counter);
	if (!vdso_th_enable)
		enabled = 0;
	return (enabled);
}
2268#endif /* __rtems__ */
2269
2270#ifdef COMPAT_FREEBSD32
/*
 * 32-bit compat variant of tc_fill_vdso_timehands(): fills the compat
 * shared-page layout, where 64-bit fields are declared as arrays of two
 * 32-bit words and written here through a uint64_t pointer pun.
 * NOTE(review): the pun assumes matching size/alignment of those word
 * pairs and is strict-aliasing-safe only by kernel build convention —
 * confirm against the vdso_timehands32 definition.
 */
uint32_t
tc_fill_vdso_timehands32(struct vdso_timehands32 *vdso_th32)
{
	struct timehands *th;
	uint32_t enabled;

	th = timehands;
	vdso_th32->th_algo = VDSO_TH_ALGO_1;
	*(uint64_t *)&vdso_th32->th_scale[0] = th->th_scale;
	vdso_th32->th_offset_count = th->th_offset_count;
	vdso_th32->th_counter_mask = th->th_counter->tc_counter_mask;
	vdso_th32->th_offset.sec = th->th_offset.sec;
	*(uint64_t *)&vdso_th32->th_offset.frac[0] = th->th_offset.frac;
	vdso_th32->th_boottime.sec = boottimebin.sec;
	*(uint64_t *)&vdso_th32->th_boottime.frac[0] = boottimebin.frac;
	enabled = cpu_fill_vdso_timehands32(vdso_th32, th->th_counter);
	if (!vdso_th_enable)
		enabled = 0;
	return (enabled);
}
2291#endif