source: rtems/cpukit/score/src/kern_tc.c @ 4cd742e

Last change: 4cd742e, checked in by Ngie Cooper <ngie@…> on 11/02/15 at 03:14:37

timecounter: Merge FreeBSD change r290257

Define fhard in pps_event(..) only when PPS_SYNC is defined to mute an -Wunused-but-set-variable warning

Reported by: FreeBSD_HEAD_amd64_gcc4.9 jenkins job
Sponsored by: EMC / Isilon Storage Division

Update #3175.

1/*-
2 * ----------------------------------------------------------------------------
3 * "THE BEER-WARE LICENSE" (Revision 42):
4 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
5 * can do whatever you want with this stuff. If we meet some day, and you think
6 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
7 * ----------------------------------------------------------------------------
8 *
9 * Copyright (c) 2011 The FreeBSD Foundation
10 * All rights reserved.
11 *
12 * Portions of this software were developed by Julien Ridoux at the University
13 * of Melbourne under sponsorship from the FreeBSD Foundation.
14 */
15
16#ifdef __rtems__
17#include <sys/lock.h>
18#define _KERNEL
19#define binuptime(_bt) _Timecounter_Binuptime(_bt)
20#define nanouptime(_tsp) _Timecounter_Nanouptime(_tsp)
21#define microuptime(_tvp) _Timecounter_Microuptime(_tvp)
22#define bintime(_bt) _Timecounter_Bintime(_bt)
23#define nanotime(_tsp) _Timecounter_Nanotime(_tsp)
24#define microtime(_tvp) _Timecounter_Microtime(_tvp)
25#define getbinuptime(_bt) _Timecounter_Getbinuptime(_bt)
26#define getnanouptime(_tsp) _Timecounter_Getnanouptime(_tsp)
27#define getmicrouptime(_tvp) _Timecounter_Getmicrouptime(_tvp)
28#define getbintime(_bt) _Timecounter_Getbintime(_bt)
29#define getnanotime(_tsp) _Timecounter_Getnanotime(_tsp)
30#define getmicrotime(_tvp) _Timecounter_Getmicrotime(_tvp)
31#define tc_init _Timecounter_Install
32#define timecounter _Timecounter
33#define time_second _Timecounter_Time_second
34#define time_uptime _Timecounter_Time_uptime
35#define boottimebin _Timecounter_Boottimebin
36#include <rtems/score/timecounterimpl.h>
37#include <rtems/score/atomic.h>
38#include <rtems/score/smp.h>
39#include <rtems/score/todimpl.h>
40#include <rtems/score/watchdogimpl.h>
41#endif /* __rtems__ */
42#include <sys/cdefs.h>
43__FBSDID("$FreeBSD r284178 2015-06-09T11:49:56Z$");
44
45#include "opt_compat.h"
46#include "opt_ntp.h"
47#include "opt_ffclock.h"
48
49#include <sys/param.h>
50#ifndef __rtems__
51#include <sys/kernel.h>
52#include <sys/limits.h>
53#include <sys/lock.h>
54#include <sys/mutex.h>
55#include <sys/sbuf.h>
56#include <sys/sysctl.h>
57#include <sys/syslog.h>
58#include <sys/systm.h>
59#endif /* __rtems__ */
60#include <sys/timeffc.h>
61#include <sys/timepps.h>
62#include <sys/timetc.h>
63#include <sys/timex.h>
64#ifndef __rtems__
65#include <sys/vdso.h>
66#endif /* __rtems__ */
67#ifdef __rtems__
68#include <limits.h>
69#include <string.h>
70#include <rtems.h>
71ISR_LOCK_DEFINE(, _Timecounter_Lock, "Timecounter")
72#define _Timecounter_Release(lock_context) \
73  _ISR_lock_Release_and_ISR_enable(&_Timecounter_Lock, lock_context)
74#define hz rtems_clock_get_ticks_per_second()
75#define printf(...)
76#define bcopy(x, y, z) memcpy(y, x, z)
77#define log(...)
78static inline int
79builtin_fls(int x)
80{
81        return x ? sizeof(x) * 8 - __builtin_clz(x) : 0;
82}
83#define fls(x) builtin_fls(x)
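/*
 * Example (illustrative): fls() returns the 1-based index of the most
 * significant set bit, so fls(0) == 0, fls(1) == 1, fls(8) == 4 and
 * fls(0x80) == 8.
 */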
84/* FIXME: https://devel.rtems.org/ticket/2348 */
85#define ntp_update_second(a, b) do { (void) a; (void) b; } while (0)
86
87static inline void
88atomic_thread_fence_acq(void)
89{
90
91        _Atomic_Fence(ATOMIC_ORDER_ACQUIRE);
92}
93
94static inline void
95atomic_thread_fence_rel(void)
96{
97
98        _Atomic_Fence(ATOMIC_ORDER_RELEASE);
99}
100
101static inline u_int
102atomic_load_acq_int(Atomic_Uint *i)
103{
104
105        return (_Atomic_Load_uint(i, ATOMIC_ORDER_ACQUIRE));
106}
107
108static inline void
109atomic_store_rel_int(Atomic_Uint *i, u_int val)
110{
111
112        _Atomic_Store_uint(i, val, ATOMIC_ORDER_RELEASE);
113}
114#endif /* __rtems__ */
115
116/*
117 * A large step happens on boot.  This constant detects such steps.
118 * It is relatively small so that ntp_update_second gets called enough
119 * in the typical 'missed a couple of seconds' case, but doesn't loop
120 * forever when the time step is large.
121 */
122#define LARGE_STEP      200
123
124/*
125 * Implement a dummy timecounter which we can use until we get a real one
126 * in the air.  This allows the console and other early stuff to use
127 * time services.
128 */
129
130static uint32_t
131dummy_get_timecount(struct timecounter *tc)
132{
133#ifndef __rtems__
134        static uint32_t now;
135
136        return (++now);
137#else /* __rtems__ */
138        return 0;
139#endif /* __rtems__ */
140}
141
142static struct timecounter dummy_timecounter = {
143        dummy_get_timecount, 0, ~0u, 1000000, "dummy", -1000000
144};
145
146struct timehands {
147        /* These fields must be initialized by the driver. */
148        struct timecounter      *th_counter;
149        int64_t                 th_adjustment;
150        uint64_t                th_scale;
151        uint32_t                th_offset_count;
152        struct bintime          th_offset;
153        struct timeval          th_microtime;
154        struct timespec         th_nanotime;
155        /* Fields not to be copied in tc_windup start with th_generation. */
156#ifndef __rtems__
157        u_int                   th_generation;
158#else /* __rtems__ */
159        Atomic_Uint             th_generation;
160#endif /* __rtems__ */
161        struct timehands        *th_next;
162};
163
164#if defined(RTEMS_SMP)
165static struct timehands th0;
166static struct timehands th9 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th0};
167static struct timehands th8 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th9};
168static struct timehands th7 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th8};
169static struct timehands th6 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th7};
170static struct timehands th5 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th6};
171static struct timehands th4 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th5};
172static struct timehands th3 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th4};
173static struct timehands th2 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th3};
174static struct timehands th1 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th2};
175#endif
176static struct timehands th0 = {
177        &dummy_timecounter,
178        0,
179        (uint64_t)-1 / 1000000,
180        0,
181        {1, 0},
182#ifndef __rtems__
183        {0, 0},
184        {0, 0},
185#else /* __rtems__ */
186        {TOD_SECONDS_1970_THROUGH_1988, 0},
187        {TOD_SECONDS_1970_THROUGH_1988, 0},
188#endif /* __rtems__ */
189        1,
190#if defined(RTEMS_SMP)
191        &th1
192#else
193        &th0
194#endif
195};
196
197static struct timehands *volatile timehands = &th0;
198struct timecounter *timecounter = &dummy_timecounter;
199static struct timecounter *timecounters = &dummy_timecounter;
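/*
 * On RTEMS SMP configurations the ten timehands above form a ring, so a
 * reader can still complete a consistent snapshot from an old generation
 * while tc_windup() publishes the next one; uniprocessor configurations
 * use a single timehands whose th_next points back to itself.
 */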
200
201#ifndef __rtems__
202int tc_min_ticktock_freq = 1;
203#endif /* __rtems__ */
204
205#ifndef __rtems__
206volatile time_t time_second = 1;
207#else /* __rtems__ */
208volatile time_t time_second = TOD_SECONDS_1970_THROUGH_1988;
209#endif /* __rtems__ */
210volatile time_t time_uptime = 1;
211
212#ifndef __rtems__
213struct bintime boottimebin;
214#else /* __rtems__ */
215struct bintime boottimebin = {
216  .sec = TOD_SECONDS_1970_THROUGH_1988 - 1
217};
218#endif /* __rtems__ */
219#ifndef __rtems__
220struct timeval boottime;
221static int sysctl_kern_boottime(SYSCTL_HANDLER_ARGS);
222SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime, CTLTYPE_STRUCT|CTLFLAG_RD,
223    NULL, 0, sysctl_kern_boottime, "S,timeval", "System boottime");
224
225SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, "");
226static SYSCTL_NODE(_kern_timecounter, OID_AUTO, tc, CTLFLAG_RW, 0, "");
227
228static int timestepwarnings;
229SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RW,
230    &timestepwarnings, 0, "Log time steps");
231
232struct bintime bt_timethreshold;
233struct bintime bt_tickthreshold;
234sbintime_t sbt_timethreshold;
235sbintime_t sbt_tickthreshold;
236struct bintime tc_tick_bt;
237sbintime_t tc_tick_sbt;
238int tc_precexp;
239int tc_timepercentage = TC_DEFAULTPERC;
240static int sysctl_kern_timecounter_adjprecision(SYSCTL_HANDLER_ARGS);
241SYSCTL_PROC(_kern_timecounter, OID_AUTO, alloweddeviation,
242    CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, 0, 0,
243    sysctl_kern_timecounter_adjprecision, "I",
244    "Allowed time interval deviation in percents");
245
246static int tc_chosen;   /* Non-zero if a specific tc was chosen via sysctl. */
247#endif /* __rtems__ */
248
249static void tc_windup(void);
250#ifndef __rtems__
251static void cpu_tick_calibrate(int);
252#else /* __rtems__ */
253static void _Timecounter_Windup(ISR_lock_Context *lock_context);
254#endif /* __rtems__ */
255
256void dtrace_getnanotime(struct timespec *tsp);
257
258#ifndef __rtems__
259static int
260sysctl_kern_boottime(SYSCTL_HANDLER_ARGS)
261{
262#ifndef __mips__
263#ifdef SCTL_MASK32
264        int tv[2];
265
266        if (req->flags & SCTL_MASK32) {
267                tv[0] = boottime.tv_sec;
268                tv[1] = boottime.tv_usec;
269                return SYSCTL_OUT(req, tv, sizeof(tv));
270        } else
271#endif
272#endif
273                return SYSCTL_OUT(req, &boottime, sizeof(boottime));
274}
275
276static int
277sysctl_kern_timecounter_get(SYSCTL_HANDLER_ARGS)
278{
279        uint32_t ncount;
280        struct timecounter *tc = arg1;
281
282        ncount = tc->tc_get_timecount(tc);
283        return sysctl_handle_int(oidp, &ncount, 0, req);
284}
285
286static int
287sysctl_kern_timecounter_freq(SYSCTL_HANDLER_ARGS)
288{
289        uint64_t freq;
290        struct timecounter *tc = arg1;
291
292        freq = tc->tc_frequency;
293        return sysctl_handle_64(oidp, &freq, 0, req);
294}
295#endif /* __rtems__ */
296
297/*
298 * Return the difference between the timehands' counter value now and what
299 * was when we copied it to the timehands' offset_count.
300 */
301static __inline uint32_t
302tc_delta(struct timehands *th)
303{
304        struct timecounter *tc;
305
306        tc = th->th_counter;
307        return ((tc->tc_get_timecount(tc) - th->th_offset_count) &
308            tc->tc_counter_mask);
309}
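/*
 * Example (illustrative): the mask makes the subtraction wrap correctly
 * on counters narrower than 32 bits.  With a 24 bit counter
 * (tc_counter_mask == 0xffffff), a read of 0x000010 after an
 * offset_count of 0xfffff0 yields
 * (0x000010 - 0xfffff0) & 0xffffff == 0x20 ticks.
 */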
310
311/*
312 * Functions for reading the time.  We have to loop until we are sure that
313 * the timehands that we operated on was not updated under our feet.  See
314 * the comment in <sys/time.h> for a description of these 12 functions.
315 */
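/*
 * All readers follow the same lock-free pattern (sketch only; the real
 * functions below differ in which fields they copy):
 *
 *	do {
 *		th = timehands;
 *		gen = atomic_load_acq_int(&th->th_generation);
 *		...copy the fields of interest from *th...
 *		atomic_thread_fence_acq();
 *	} while (gen == 0 || gen != th->th_generation);
 *
 * A generation of zero marks a timehands that tc_windup() is currently
 * rewriting, so the copy is retried until a consistent snapshot is seen.
 */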
316
317#ifdef FFCLOCK
318void
319fbclock_binuptime(struct bintime *bt)
320{
321        struct timehands *th;
322        unsigned int gen;
323
324        do {
325                th = timehands;
326                gen = atomic_load_acq_int(&th->th_generation);
327                *bt = th->th_offset;
328                bintime_addx(bt, th->th_scale * tc_delta(th));
329                atomic_thread_fence_acq();
330        } while (gen == 0 || gen != th->th_generation);
331}
332
333void
334fbclock_nanouptime(struct timespec *tsp)
335{
336        struct bintime bt;
337
338        fbclock_binuptime(&bt);
339        bintime2timespec(&bt, tsp);
340}
341
342void
343fbclock_microuptime(struct timeval *tvp)
344{
345        struct bintime bt;
346
347        fbclock_binuptime(&bt);
348        bintime2timeval(&bt, tvp);
349}
350
351void
352fbclock_bintime(struct bintime *bt)
353{
354
355        fbclock_binuptime(bt);
356        bintime_add(bt, &boottimebin);
357}
358
359void
360fbclock_nanotime(struct timespec *tsp)
361{
362        struct bintime bt;
363
364        fbclock_bintime(&bt);
365        bintime2timespec(&bt, tsp);
366}
367
368void
369fbclock_microtime(struct timeval *tvp)
370{
371        struct bintime bt;
372
373        fbclock_bintime(&bt);
374        bintime2timeval(&bt, tvp);
375}
376
377void
378fbclock_getbinuptime(struct bintime *bt)
379{
380        struct timehands *th;
381        unsigned int gen;
382
383        do {
384                th = timehands;
385                gen = atomic_load_acq_int(&th->th_generation);
386                *bt = th->th_offset;
387                atomic_thread_fence_acq();
388        } while (gen == 0 || gen != th->th_generation);
389}
390
391void
392fbclock_getnanouptime(struct timespec *tsp)
393{
394        struct timehands *th;
395        unsigned int gen;
396
397        do {
398                th = timehands;
399                gen = atomic_load_acq_int(&th->th_generation);
400                bintime2timespec(&th->th_offset, tsp);
401                atomic_thread_fence_acq();
402        } while (gen == 0 || gen != th->th_generation);
403}
404
405void
406fbclock_getmicrouptime(struct timeval *tvp)
407{
408        struct timehands *th;
409        unsigned int gen;
410
411        do {
412                th = timehands;
413                gen = atomic_load_acq_int(&th->th_generation);
414                bintime2timeval(&th->th_offset, tvp);
415                atomic_thread_fence_acq();
416        } while (gen == 0 || gen != th->th_generation);
417}
418
419void
420fbclock_getbintime(struct bintime *bt)
421{
422        struct timehands *th;
423        unsigned int gen;
424
425        do {
426                th = timehands;
427                gen = atomic_load_acq_int(&th->th_generation);
428                *bt = th->th_offset;
429                atomic_thread_fence_acq();
430        } while (gen == 0 || gen != th->th_generation);
431        bintime_add(bt, &boottimebin);
432}
433
434void
435fbclock_getnanotime(struct timespec *tsp)
436{
437        struct timehands *th;
438        unsigned int gen;
439
440        do {
441                th = timehands;
442                gen = atomic_load_acq_int(&th->th_generation);
443                *tsp = th->th_nanotime;
444                atomic_thread_fence_acq();
445        } while (gen == 0 || gen != th->th_generation);
446}
447
448void
449fbclock_getmicrotime(struct timeval *tvp)
450{
451        struct timehands *th;
452        unsigned int gen;
453
454        do {
455                th = timehands;
456                gen = atomic_load_acq_int(&th->th_generation);
457                *tvp = th->th_microtime;
458                atomic_thread_fence_acq();
459        } while (gen == 0 || gen != th->th_generation);
460}
461#else /* !FFCLOCK */
462void
463binuptime(struct bintime *bt)
464{
465        struct timehands *th;
466        uint32_t gen;
467
468        do {
469                th = timehands;
470                gen = atomic_load_acq_int(&th->th_generation);
471                *bt = th->th_offset;
472                bintime_addx(bt, th->th_scale * tc_delta(th));
473                atomic_thread_fence_acq();
474        } while (gen == 0 || gen != th->th_generation);
475}
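/*
 * Example (illustrative, do_work() is a placeholder): measuring an
 * interval with the precise uptime clock.
 *
 *	struct bintime start, end;
 *
 *	binuptime(&start);
 *	do_work();
 *	binuptime(&end);
 *	bintime_sub(&end, &start);
 *
 * Afterwards "end" holds the elapsed time as seconds plus a 64 bit
 * binary fraction of a second.
 */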
476#ifdef __rtems__
477sbintime_t
478_Timecounter_Sbinuptime(void)
479{
480        struct timehands *th;
481        uint32_t gen;
482        sbintime_t sbt;
483
484        do {
485                th = timehands;
486                gen = atomic_load_acq_int(&th->th_generation);
487                sbt = bttosbt(th->th_offset);
488                sbt += (th->th_scale * tc_delta(th)) >> 32;
489                atomic_thread_fence_acq();
490        } while (gen == 0 || gen != th->th_generation);
491
492        return (sbt);
493}
494#endif /* __rtems__ */
495
496void
497nanouptime(struct timespec *tsp)
498{
499        struct bintime bt;
500
501        binuptime(&bt);
502        bintime2timespec(&bt, tsp);
503}
504
505void
506microuptime(struct timeval *tvp)
507{
508        struct bintime bt;
509
510        binuptime(&bt);
511        bintime2timeval(&bt, tvp);
512}
513
514void
515bintime(struct bintime *bt)
516{
517
518        binuptime(bt);
519        bintime_add(bt, &boottimebin);
520}
521
522void
523nanotime(struct timespec *tsp)
524{
525        struct bintime bt;
526
527        bintime(&bt);
528        bintime2timespec(&bt, tsp);
529}
530
531void
532microtime(struct timeval *tvp)
533{
534        struct bintime bt;
535
536        bintime(&bt);
537        bintime2timeval(&bt, tvp);
538}
539
540void
541getbinuptime(struct bintime *bt)
542{
543        struct timehands *th;
544        uint32_t gen;
545
546        do {
547                th = timehands;
548                gen = atomic_load_acq_int(&th->th_generation);
549                *bt = th->th_offset;
550                atomic_thread_fence_acq();
551        } while (gen == 0 || gen != th->th_generation);
552}
553
554void
555getnanouptime(struct timespec *tsp)
556{
557        struct timehands *th;
558        uint32_t gen;
559
560        do {
561                th = timehands;
562                gen = atomic_load_acq_int(&th->th_generation);
563                bintime2timespec(&th->th_offset, tsp);
564                atomic_thread_fence_acq();
565        } while (gen == 0 || gen != th->th_generation);
566}
567
568void
569getmicrouptime(struct timeval *tvp)
570{
571        struct timehands *th;
572        uint32_t gen;
573
574        do {
575                th = timehands;
576                gen = atomic_load_acq_int(&th->th_generation);
577                bintime2timeval(&th->th_offset, tvp);
578                atomic_thread_fence_acq();
579        } while (gen == 0 || gen != th->th_generation);
580}
581
582void
583getbintime(struct bintime *bt)
584{
585        struct timehands *th;
586        uint32_t gen;
587
588        do {
589                th = timehands;
590                gen = atomic_load_acq_int(&th->th_generation);
591                *bt = th->th_offset;
592                atomic_thread_fence_acq();
593        } while (gen == 0 || gen != th->th_generation);
594        bintime_add(bt, &boottimebin);
595}
596
597void
598getnanotime(struct timespec *tsp)
599{
600        struct timehands *th;
601        uint32_t gen;
602
603        do {
604                th = timehands;
605                gen = atomic_load_acq_int(&th->th_generation);
606                *tsp = th->th_nanotime;
607                atomic_thread_fence_acq();
608        } while (gen == 0 || gen != th->th_generation);
609}
610
611void
612getmicrotime(struct timeval *tvp)
613{
614        struct timehands *th;
615        uint32_t gen;
616
617        do {
618                th = timehands;
619                gen = atomic_load_acq_int(&th->th_generation);
620                *tvp = th->th_microtime;
621                atomic_thread_fence_acq();
622        } while (gen == 0 || gen != th->th_generation);
623}
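/*
 * The get*() variants above return the timestamp computed at the last
 * tc_windup() call instead of reading the hardware counter, trading up
 * to one clock tick of accuracy for a much cheaper call.  Prefer
 * nanotime() and friends when precision matters and getnanotime() and
 * friends when a tick-resolution timestamp suffices.
 */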
624#endif /* FFCLOCK */
625
626#ifdef FFCLOCK
627/*
628 * Support for feed-forward synchronization algorithms. This is heavily inspired
629 * by the timehands mechanism but kept independent from it. *_windup() functions
630 * have some connection to avoid accessing the timecounter hardware more than
631 * necessary.
632 */
633
634/* Feed-forward clock estimates kept updated by the synchronization daemon. */
635struct ffclock_estimate ffclock_estimate;
636struct bintime ffclock_boottime;        /* Feed-forward boot time estimate. */
637uint32_t ffclock_status;                /* Feed-forward clock status. */
638int8_t ffclock_updated;                 /* New estimates are available. */
639struct mtx ffclock_mtx;                 /* Mutex on ffclock_estimate. */
640
641struct fftimehands {
642        struct ffclock_estimate cest;
643        struct bintime          tick_time;
644        struct bintime          tick_time_lerp;
645        ffcounter               tick_ffcount;
646        uint64_t                period_lerp;
647        volatile uint8_t        gen;
648        struct fftimehands      *next;
649};
650
651#define NUM_ELEMENTS(x) (sizeof(x) / sizeof(*x))
652
653static struct fftimehands ffth[10];
654static struct fftimehands *volatile fftimehands = ffth;
655
656static void
657ffclock_init(void)
658{
659        struct fftimehands *cur;
660        struct fftimehands *last;
661
662        memset(ffth, 0, sizeof(ffth));
663
664        last = ffth + NUM_ELEMENTS(ffth) - 1;
665        for (cur = ffth; cur < last; cur++)
666                cur->next = cur + 1;
667        last->next = ffth;
668
669        ffclock_updated = 0;
670        ffclock_status = FFCLOCK_STA_UNSYNC;
671        mtx_init(&ffclock_mtx, "ffclock lock", NULL, MTX_DEF);
672}
673
674/*
675 * Reset the feed-forward clock estimates. Called from inittodr() to get things
676 * kick started and uses the timecounter nominal frequency as a first period
677 * estimate. Note: this function may be called several times just after boot.
678 * Note: this is the only function that sets the value of boot time for the
679 * monotonic (i.e. uptime) version of the feed-forward clock.
680 */
681void
682ffclock_reset_clock(struct timespec *ts)
683{
684        struct timecounter *tc;
685        struct ffclock_estimate cest;
686
687        tc = timehands->th_counter;
688        memset(&cest, 0, sizeof(struct ffclock_estimate));
689
690        timespec2bintime(ts, &ffclock_boottime);
691        timespec2bintime(ts, &(cest.update_time));
692        ffclock_read_counter(&cest.update_ffcount);
693        cest.leapsec_next = 0;
694        cest.period = ((1ULL << 63) / tc->tc_frequency) << 1;
695        cest.errb_abs = 0;
696        cest.errb_rate = 0;
697        cest.status = FFCLOCK_STA_UNSYNC;
698        cest.leapsec_total = 0;
699        cest.leapsec = 0;
700
701        mtx_lock(&ffclock_mtx);
702        bcopy(&cest, &ffclock_estimate, sizeof(struct ffclock_estimate));
703        ffclock_updated = INT8_MAX;
704        mtx_unlock(&ffclock_mtx);
705
706        printf("ffclock reset: %s (%llu Hz), time = %ld.%09lu\n", tc->tc_name,
707            (unsigned long long)tc->tc_frequency, (long)ts->tv_sec,
708            (unsigned long)ts->tv_nsec);
709}
710
711/*
712 * Sub-routine to convert a time interval measured in RAW counter units to time
713 * in seconds stored in bintime format.
714 * NOTE: bintime_mul requires u_int, but the value of the ffcounter may be
715 * larger than the max value of u_int (on 32 bit architecture). Loop to consume
716 * extra cycles.
717 */
718static void
719ffclock_convert_delta(ffcounter ffdelta, uint64_t period, struct bintime *bt)
720{
721        struct bintime bt2;
722        ffcounter delta, delta_max;
723
724        delta_max = (1ULL << (8 * sizeof(unsigned int))) - 1;
725        bintime_clear(bt);
726        do {
727                if (ffdelta > delta_max)
728                        delta = delta_max;
729                else
730                        delta = ffdelta;
731                bt2.sec = 0;
732                bt2.frac = period;
733                bintime_mul(&bt2, (unsigned int)delta);
734                bintime_add(bt, &bt2);
735                ffdelta -= delta;
736        } while (ffdelta > 0);
737}
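/*
 * Example (illustrative): with a 1 MHz counter, period is about
 * 2^64 / 1000000, i.e. one microsecond as a 64 bit binary fraction, so
 * an ffdelta of 3000000 ticks accumulates to bt = 3.0 seconds over the
 * loop iterations.
 */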
738
739/*
740 * Update the fftimehands.
741 * Push the tick ffcount and time(s) forward based on current clock estimate.
742 * The conversion from ffcounter to bintime relies on the difference clock
743 * principle, whose accuracy relies on computing small time intervals. If a new
744 * clock estimate has been passed by the synchronisation daemon, make it
745 * current, and compute the linear interpolation for monotonic time if needed.
746 */
747static void
748ffclock_windup(unsigned int delta)
749{
750        struct ffclock_estimate *cest;
751        struct fftimehands *ffth;
752        struct bintime bt, gap_lerp;
753        ffcounter ffdelta;
754        uint64_t frac;
755        unsigned int polling;
756        uint8_t forward_jump, ogen;
757
758        /*
759         * Pick the next timehand, copy current ffclock estimates and move tick
760         * times and counter forward.
761         */
762        forward_jump = 0;
763        ffth = fftimehands->next;
764        ogen = ffth->gen;
765        ffth->gen = 0;
766        cest = &ffth->cest;
767        bcopy(&fftimehands->cest, cest, sizeof(struct ffclock_estimate));
768        ffdelta = (ffcounter)delta;
769        ffth->period_lerp = fftimehands->period_lerp;
770
771        ffth->tick_time = fftimehands->tick_time;
772        ffclock_convert_delta(ffdelta, cest->period, &bt);
773        bintime_add(&ffth->tick_time, &bt);
774
775        ffth->tick_time_lerp = fftimehands->tick_time_lerp;
776        ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt);
777        bintime_add(&ffth->tick_time_lerp, &bt);
778
779        ffth->tick_ffcount = fftimehands->tick_ffcount + ffdelta;
780
781        /*
782         * Assess the status of the clock; if the last update is too old, it is
783         * likely the synchronisation daemon is dead and the clock is free
784         * running.
785         */
786        if (ffclock_updated == 0) {
787                ffdelta = ffth->tick_ffcount - cest->update_ffcount;
788                ffclock_convert_delta(ffdelta, cest->period, &bt);
789                if (bt.sec > 2 * FFCLOCK_SKM_SCALE)
790                        ffclock_status |= FFCLOCK_STA_UNSYNC;
791        }
792
793        /*
794         * If available, grab updated clock estimates and make them current.
795         * Recompute time at this tick using the updated estimates. The clock
796         * estimates passed in by the feed-forward synchronisation daemon may result
797         * in time conversion that is not monotonically increasing (just after
798         * the update). time_lerp is a particular linear interpolation over the
799         * synchronisation algo polling period that ensures monotonicity for the
800         * clock ids requesting it.
801         */
802        if (ffclock_updated > 0) {
803                bcopy(&ffclock_estimate, cest, sizeof(struct ffclock_estimate));
804                ffdelta = ffth->tick_ffcount - cest->update_ffcount;
805                ffth->tick_time = cest->update_time;
806                ffclock_convert_delta(ffdelta, cest->period, &bt);
807                bintime_add(&ffth->tick_time, &bt);
808
809                /* ffclock_reset sets ffclock_updated to INT8_MAX */
810                if (ffclock_updated == INT8_MAX)
811                        ffth->tick_time_lerp = ffth->tick_time;
812
813                if (bintime_cmp(&ffth->tick_time, &ffth->tick_time_lerp, >))
814                        forward_jump = 1;
815                else
816                        forward_jump = 0;
817
818                bintime_clear(&gap_lerp);
819                if (forward_jump) {
820                        gap_lerp = ffth->tick_time;
821                        bintime_sub(&gap_lerp, &ffth->tick_time_lerp);
822                } else {
823                        gap_lerp = ffth->tick_time_lerp;
824                        bintime_sub(&gap_lerp, &ffth->tick_time);
825                }
826
827                /*
828                 * The reset from the RTC clock may be far from accurate, and
829                 * reducing the gap between real time and interpolated time
830                 * could take a very long time if the interpolated clock insists
831                 * on strict monotonicity. The clock is reset under very strict
832                 * conditions (kernel time is known to be wrong and
833         * synchronization daemon has been restarted recently).
834                 * ffclock_boottime absorbs the jump to ensure boot time is
835                 * correct and uptime functions stay consistent.
836                 */
837                if (((ffclock_status & FFCLOCK_STA_UNSYNC) == FFCLOCK_STA_UNSYNC) &&
838                    ((cest->status & FFCLOCK_STA_UNSYNC) == 0) &&
839                    ((cest->status & FFCLOCK_STA_WARMUP) == FFCLOCK_STA_WARMUP)) {
840                        if (forward_jump)
841                                bintime_add(&ffclock_boottime, &gap_lerp);
842                        else
843                                bintime_sub(&ffclock_boottime, &gap_lerp);
844                        ffth->tick_time_lerp = ffth->tick_time;
845                        bintime_clear(&gap_lerp);
846                }
847
848                ffclock_status = cest->status;
849                ffth->period_lerp = cest->period;
850
851                /*
852                 * Compute corrected period used for the linear interpolation of
853                 * time. The rate of linear interpolation is capped to 5000PPM
854                 * (5ms/s).
855                 */
856                if (bintime_isset(&gap_lerp)) {
857                        ffdelta = cest->update_ffcount;
858                        ffdelta -= fftimehands->cest.update_ffcount;
859                        ffclock_convert_delta(ffdelta, cest->period, &bt);
860                        polling = bt.sec;
861                        bt.sec = 0;
862                        bt.frac = 5000000 * (uint64_t)18446744073LL;
863                        bintime_mul(&bt, polling);
864                        if (bintime_cmp(&gap_lerp, &bt, >))
865                                gap_lerp = bt;
866
867                        /* Approximate 1 sec by 1-(1/2^64) to ease arithmetic */
868                        frac = 0;
869                        if (gap_lerp.sec > 0) {
870                                frac -= 1;
871                                frac /= ffdelta / gap_lerp.sec;
872                        }
873                        frac += gap_lerp.frac / ffdelta;
874
875                        if (forward_jump)
876                                ffth->period_lerp += frac;
877                        else
878                                ffth->period_lerp -= frac;
879                }
880
881                ffclock_updated = 0;
882        }
883        if (++ogen == 0)
884                ogen = 1;
885        ffth->gen = ogen;
886        fftimehands = ffth;
887}
888
889/*
890 * Adjust the fftimehands when the timecounter is changed. Stating the obvious,
891 * the old and new hardware counter cannot be read simultaneously. tc_windup()
892 * does read the two counters 'back to back', but a few cycles are effectively
893 * lost, and not accumulated in tick_ffcount. This is a fairly radical
894 * operation for a feed-forward synchronization daemon, and it is its job not
895 * to push irrelevant data to the kernel. Because there is no locking here,
896 * simply force the pending or next update to be ignored, giving the daemon
897 * a chance to realize the counter has changed.
898 */
899static void
900ffclock_change_tc(struct timehands *th)
901{
902        struct fftimehands *ffth;
903        struct ffclock_estimate *cest;
904        struct timecounter *tc;
905        uint8_t ogen;
906
907        tc = th->th_counter;
908        ffth = fftimehands->next;
909        ogen = ffth->gen;
910        ffth->gen = 0;
911
912        cest = &ffth->cest;
913        bcopy(&(fftimehands->cest), cest, sizeof(struct ffclock_estimate));
914        cest->period = ((1ULL << 63) / tc->tc_frequency ) << 1;
915        cest->errb_abs = 0;
916        cest->errb_rate = 0;
917        cest->status |= FFCLOCK_STA_UNSYNC;
918
919        ffth->tick_ffcount = fftimehands->tick_ffcount;
920        ffth->tick_time_lerp = fftimehands->tick_time_lerp;
921        ffth->tick_time = fftimehands->tick_time;
922        ffth->period_lerp = cest->period;
923
924        /* Do not lock but ignore next update from synchronization daemon. */
925        ffclock_updated--;
926
927        if (++ogen == 0)
928                ogen = 1;
929        ffth->gen = ogen;
930        fftimehands = ffth;
931}
932
933/*
934 * Retrieve feed-forward counter and time of last kernel tick.
935 */
936void
937ffclock_last_tick(ffcounter *ffcount, struct bintime *bt, uint32_t flags)
938{
939        struct fftimehands *ffth;
940        uint8_t gen;
941
942        /*
943         * No locking, but check that the generation has not changed while
944         * the tick values were being copied.
945         */
946        do {
947                ffth = fftimehands;
948                gen = ffth->gen;
949                if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP)
950                        *bt = ffth->tick_time_lerp;
951                else
952                        *bt = ffth->tick_time;
953                *ffcount = ffth->tick_ffcount;
954        } while (gen == 0 || gen != ffth->gen);
955}
956
957/*
958 * Absolute clock conversion. Low level function to convert ffcounter to
959 * bintime. The ffcounter is converted using the current ffclock period estimate
960 * or the "interpolated period" to ensure monotonicity.
961 * NOTE: this conversion may have been deferred, and the clock updated since the
962 * hardware counter has been read.
963 */
964void
965ffclock_convert_abs(ffcounter ffcount, struct bintime *bt, uint32_t flags)
966{
967        struct fftimehands *ffth;
968        struct bintime bt2;
969        ffcounter ffdelta;
970        uint8_t gen;
971
972        /*
973         * No locking but check generation has not changed. Also need to make
974         * sure ffdelta is positive, i.e. ffcount > tick_ffcount.
975         */
976        do {
977                ffth = fftimehands;
978                gen = ffth->gen;
979                if (ffcount > ffth->tick_ffcount)
980                        ffdelta = ffcount - ffth->tick_ffcount;
981                else
982                        ffdelta = ffth->tick_ffcount - ffcount;
983
984                if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP) {
985                        *bt = ffth->tick_time_lerp;
986                        ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt2);
987                } else {
988                        *bt = ffth->tick_time;
989                        ffclock_convert_delta(ffdelta, ffth->cest.period, &bt2);
990                }
991
992                if (ffcount > ffth->tick_ffcount)
993                        bintime_add(bt, &bt2);
994                else
995                        bintime_sub(bt, &bt2);
996        } while (gen == 0 || gen != ffth->gen);
997}
998
999/*
1000 * Difference clock conversion.
1001 * Low level function to convert a time interval measured in RAW counter units
1002 * into bintime. The difference clock allows measuring small intervals much more
1003 * reliably than the absolute clock.
1004 */
1005void
1006ffclock_convert_diff(ffcounter ffdelta, struct bintime *bt)
1007{
1008        struct fftimehands *ffth;
1009        uint8_t gen;
1010
1011        /* No locking but check generation has not changed. */
1012        do {
1013                ffth = fftimehands;
1014                gen = ffth->gen;
1015                ffclock_convert_delta(ffdelta, ffth->cest.period, bt);
1016        } while (gen == 0 || gen != ffth->gen);
1017}
1018
1019/*
1020 * Access to current ffcounter value.
1021 */
1022void
1023ffclock_read_counter(ffcounter *ffcount)
1024{
1025        struct timehands *th;
1026        struct fftimehands *ffth;
1027        unsigned int gen, delta;
1028
1029        /*
1030         * ffclock_windup() called from tc_windup(), safe to rely on
1031         * th->th_generation only, for correct delta and ffcounter.
1032         */
1033        do {
1034                th = timehands;
1035                gen = atomic_load_acq_int(&th->th_generation);
1036                ffth = fftimehands;
1037                delta = tc_delta(th);
1038                *ffcount = ffth->tick_ffcount;
1039                atomic_thread_fence_acq();
1040        } while (gen == 0 || gen != th->th_generation);
1041
1042        *ffcount += delta;
1043}
1044
1045void
1046binuptime(struct bintime *bt)
1047{
1048
1049        binuptime_fromclock(bt, sysclock_active);
1050}
1051
1052void
1053nanouptime(struct timespec *tsp)
1054{
1055
1056        nanouptime_fromclock(tsp, sysclock_active);
1057}
1058
1059void
1060microuptime(struct timeval *tvp)
1061{
1062
1063        microuptime_fromclock(tvp, sysclock_active);
1064}
1065
1066void
1067bintime(struct bintime *bt)
1068{
1069
1070        bintime_fromclock(bt, sysclock_active);
1071}
1072
1073void
1074nanotime(struct timespec *tsp)
1075{
1076
1077        nanotime_fromclock(tsp, sysclock_active);
1078}
1079
1080void
1081microtime(struct timeval *tvp)
1082{
1083
1084        microtime_fromclock(tvp, sysclock_active);
1085}
1086
1087void
1088getbinuptime(struct bintime *bt)
1089{
1090
1091        getbinuptime_fromclock(bt, sysclock_active);
1092}
1093
1094void
1095getnanouptime(struct timespec *tsp)
1096{
1097
1098        getnanouptime_fromclock(tsp, sysclock_active);
1099}
1100
1101void
1102getmicrouptime(struct timeval *tvp)
1103{
1104
1105        getmicrouptime_fromclock(tvp, sysclock_active);
1106}
1107
1108void
1109getbintime(struct bintime *bt)
1110{
1111
1112        getbintime_fromclock(bt, sysclock_active);
1113}
1114
1115void
1116getnanotime(struct timespec *tsp)
1117{
1118
1119        getnanotime_fromclock(tsp, sysclock_active);
1120}
1121
1122void
1123getmicrotime(struct timeval *tvp)
1124{
1125
1126        getmicrotime_fromclock(tvp, sysclock_active);
1127}
1128
1129#endif /* FFCLOCK */
1130
1131#ifndef __rtems__
1132/*
1133 * This is a clone of getnanotime and used for walltimestamps.
1134 * The dtrace_ prefix prevents fbt from creating probes for
1135 * it so walltimestamp can be safely used in all fbt probes.
1136 */
1137void
1138dtrace_getnanotime(struct timespec *tsp)
1139{
1140        struct timehands *th;
1141        uint32_t gen;
1142
1143        do {
1144                th = timehands;
1145                gen = atomic_load_acq_int(&th->th_generation);
1146                *tsp = th->th_nanotime;
1147                atomic_thread_fence_acq();
1148        } while (gen == 0 || gen != th->th_generation);
1149}
1150#endif /* __rtems__ */
1151
1152#ifdef FFCLOCK
1153/*
1154 * System clock currently providing time to the system. Modifiable via sysctl
1155 * when the FFCLOCK option is defined.
1156 */
1157int sysclock_active = SYSCLOCK_FBCK;
1158#endif
1159
1160/* Internal NTP status and error estimates. */
1161extern int time_status;
1162extern long time_esterror;
1163
1164#ifndef __rtems__
1165/*
1166 * Take a snapshot of sysclock data which can be used to compare system clocks
1167 * and generate timestamps after the fact.
1168 */
1169void
1170sysclock_getsnapshot(struct sysclock_snap *clock_snap, int fast)
1171{
1172        struct fbclock_info *fbi;
1173        struct timehands *th;
1174        struct bintime bt;
1175        unsigned int delta, gen;
1176#ifdef FFCLOCK
1177        ffcounter ffcount;
1178        struct fftimehands *ffth;
1179        struct ffclock_info *ffi;
1180        struct ffclock_estimate cest;
1181
1182        ffi = &clock_snap->ff_info;
1183#endif
1184
1185        fbi = &clock_snap->fb_info;
1186        delta = 0;
1187
1188        do {
1189                th = timehands;
1190                gen = atomic_load_acq_int(&th->th_generation);
1191                fbi->th_scale = th->th_scale;
1192                fbi->tick_time = th->th_offset;
1193#ifdef FFCLOCK
1194                ffth = fftimehands;
1195                ffi->tick_time = ffth->tick_time_lerp;
1196                ffi->tick_time_lerp = ffth->tick_time_lerp;
1197                ffi->period = ffth->cest.period;
1198                ffi->period_lerp = ffth->period_lerp;
1199                clock_snap->ffcount = ffth->tick_ffcount;
1200                cest = ffth->cest;
1201#endif
1202                if (!fast)
1203                        delta = tc_delta(th);
1204                atomic_thread_fence_acq();
1205        } while (gen == 0 || gen != th->th_generation);
1206
1207        clock_snap->delta = delta;
1208#ifdef FFCLOCK
1209        clock_snap->sysclock_active = sysclock_active;
1210#endif
1211
1212        /* Record feedback clock status and error. */
1213        clock_snap->fb_info.status = time_status;
1214        /* XXX: Very crude estimate of feedback clock error. */
1215        bt.sec = time_esterror / 1000000;
1216        bt.frac = (time_esterror - bt.sec * 1000000) *
1217            (uint64_t)18446744073709ULL;
1218        clock_snap->fb_info.error = bt;
1219
1220#ifdef FFCLOCK
1221        if (!fast)
1222                clock_snap->ffcount += delta;
1223
1224        /* Record feed-forward clock leap second adjustment. */
1225        ffi->leapsec_adjustment = cest.leapsec_total;
1226        if (clock_snap->ffcount > cest.leapsec_next)
1227                ffi->leapsec_adjustment -= cest.leapsec;
1228
1229        /* Record feed-forward clock status and error. */
1230        clock_snap->ff_info.status = cest.status;
1231        ffcount = clock_snap->ffcount - cest.update_ffcount;
1232        ffclock_convert_delta(ffcount, cest.period, &bt);
1233        /* 18446744073709 = int(2^64/1e12), err_bound_rate in [ps/s]. */
1234        bintime_mul(&bt, cest.errb_rate * (uint64_t)18446744073709ULL);
1235        /* 18446744073 = int(2^64 / 1e9), since err_abs in [ns]. */
1236        bintime_addx(&bt, cest.errb_abs * (uint64_t)18446744073ULL);
1237        clock_snap->ff_info.error = bt;
1238#endif
1239}
1240
1241/*
1242 * Convert a sysclock snapshot into a struct bintime based on the specified
1243 * clock source and flags.
1244 */
1245int
1246sysclock_snap2bintime(struct sysclock_snap *cs, struct bintime *bt,
1247    int whichclock, uint32_t flags)
1248{
1249#ifdef FFCLOCK
1250        struct bintime bt2;
1251        uint64_t period;
1252#endif
1253
1254        switch (whichclock) {
1255        case SYSCLOCK_FBCK:
1256                *bt = cs->fb_info.tick_time;
1257
1258                /* If snapshot was created with !fast, delta will be >0. */
1259                if (cs->delta > 0)
1260                        bintime_addx(bt, cs->fb_info.th_scale * cs->delta);
1261
1262                if ((flags & FBCLOCK_UPTIME) == 0)
1263                        bintime_add(bt, &boottimebin);
1264                break;
1265#ifdef FFCLOCK
1266        case SYSCLOCK_FFWD:
1267                if (flags & FFCLOCK_LERP) {
1268                        *bt = cs->ff_info.tick_time_lerp;
1269                        period = cs->ff_info.period_lerp;
1270                } else {
1271                        *bt = cs->ff_info.tick_time;
1272                        period = cs->ff_info.period;
1273                }
1274
1275                /* If snapshot was created with !fast, delta will be >0. */
1276                if (cs->delta > 0) {
1277                        ffclock_convert_delta(cs->delta, period, &bt2);
1278                        bintime_add(bt, &bt2);
1279                }
1280
1281                /* Leap second adjustment. */
1282                if (flags & FFCLOCK_LEAPSEC)
1283                        bt->sec -= cs->ff_info.leapsec_adjustment;
1284
1285                /* Boot time adjustment, for uptime/monotonic clocks. */
1286                if (flags & FFCLOCK_UPTIME)
1287                        bintime_sub(bt, &ffclock_boottime);
1288                break;
1289#endif
1290        default:
1291                return (EINVAL);
1292                break;
1293        }
1294
1295        return (0);
1296}
1297#endif /* __rtems__ */
1298
1299/*
1300 * Initialize a new timecounter and possibly use it.
1301 */
1302void
1303tc_init(struct timecounter *tc)
1304{
1305#ifndef __rtems__
1306        uint32_t u;
1307        struct sysctl_oid *tc_root;
1308
1309        u = tc->tc_frequency / tc->tc_counter_mask;
1310        /* XXX: We need some margin here, 10% is a guess */
1311        u *= 11;
1312        u /= 10;
1313        if (u > hz && tc->tc_quality >= 0) {
1314                tc->tc_quality = -2000;
1315                if (bootverbose) {
1316                        printf("Timecounter \"%s\" frequency %ju Hz",
1317                            tc->tc_name, (uintmax_t)tc->tc_frequency);
1318                        printf(" -- Insufficient hz, needs at least %u\n", u);
1319                }
1320        } else if (tc->tc_quality >= 0 || bootverbose) {
1321                printf("Timecounter \"%s\" frequency %ju Hz quality %d\n",
1322                    tc->tc_name, (uintmax_t)tc->tc_frequency,
1323                    tc->tc_quality);
1324        }
1325#endif /* __rtems__ */
1326
1327        tc->tc_next = timecounters;
1328        timecounters = tc;
1329#ifndef __rtems__
1330        /*
1331         * Set up sysctl tree for this counter.
1332         */
1333        tc_root = SYSCTL_ADD_NODE(NULL,
1334            SYSCTL_STATIC_CHILDREN(_kern_timecounter_tc), OID_AUTO, tc->tc_name,
1335            CTLFLAG_RW, 0, "timecounter description");
1336        SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
1337            "mask", CTLFLAG_RD, &(tc->tc_counter_mask), 0,
1338            "mask for implemented bits");
1339        SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
1340            "counter", CTLTYPE_UINT | CTLFLAG_RD, tc, sizeof(*tc),
1341            sysctl_kern_timecounter_get, "IU", "current timecounter value");
1342        SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
1343            "frequency", CTLTYPE_U64 | CTLFLAG_RD, tc, sizeof(*tc),
1344             sysctl_kern_timecounter_freq, "QU", "timecounter frequency");
1345        SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
1346            "quality", CTLFLAG_RD, &(tc->tc_quality), 0,
1347            "goodness of time counter");
1348        /*
1349         * Do not automatically switch if the current tc was specifically
1350         * chosen.  Never automatically use a timecounter with negative quality.
1351         * Even though we run on the dummy counter, switching here may be
1352         * worse since this timecounter may not be monotonic.
1353         */
1354        if (tc_chosen)
1355                return;
1356        if (tc->tc_quality < 0)
1357                return;
1358        if (tc->tc_quality < timecounter->tc_quality)
1359                return;
1360        if (tc->tc_quality == timecounter->tc_quality &&
1361            tc->tc_frequency < timecounter->tc_frequency)
1362                return;
1363#endif /* __rtems__ */
1364        (void)tc->tc_get_timecount(tc);
1365        (void)tc->tc_get_timecount(tc);
1366        timecounter = tc;
1367#ifdef __rtems__
1368        tc_windup();
1369#endif /* __rtems__ */
1370}
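/*
 * Example (hypothetical driver; EXAMPLE_TIMER and the 24 MHz frequency
 * are invented for illustration): a clock driver fills in a struct
 * timecounter and installs it once during initialization.
 *
 *	static uint32_t
 *	example_get_timecount(struct timecounter *tc)
 *	{
 *		return (EXAMPLE_TIMER->counter);
 *	}
 *
 *	static struct timecounter example_tc;
 *
 *	void
 *	example_install(void)
 *	{
 *		example_tc.tc_get_timecount = example_get_timecount;
 *		example_tc.tc_counter_mask = 0xffffffff;
 *		example_tc.tc_frequency = 24000000;
 *		example_tc.tc_name = "example";
 *		example_tc.tc_quality = 100;
 *		tc_init(&example_tc);
 *	}
 *
 * On RTEMS, tc_init() is _Timecounter_Install() via the defines at the
 * top of this file.
 */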
1371
1372#ifndef __rtems__
1373/* Report the frequency of the current timecounter. */
1374uint64_t
1375tc_getfrequency(void)
1376{
1377
1378        return (timehands->th_counter->tc_frequency);
1379}
1380#endif /* __rtems__ */
1381
1382/*
1383 * Step our concept of UTC.  This is done by modifying our estimate of
1384 * when we booted.
1385 * XXX: not locked.
1386 */
1387void
1388#ifndef __rtems__
1389tc_setclock(struct timespec *ts)
1390#else /* __rtems__ */
1391_Timecounter_Set_clock(const struct bintime *_bt,
1392    ISR_lock_Context *lock_context)
1393#endif /* __rtems__ */
1394{
1395#ifndef __rtems__
1396        struct timespec tbef, taft;
1397#endif /* __rtems__ */
1398        struct bintime bt, bt2;
1399
1400#ifndef __rtems__
1401        cpu_tick_calibrate(1);
1402        nanotime(&tbef);
1403        timespec2bintime(ts, &bt);
1404#else /* __rtems__ */
1405        bt = *_bt;
1406#endif /* __rtems__ */
1407        binuptime(&bt2);
1408        bintime_sub(&bt, &bt2);
1409        bintime_add(&bt2, &boottimebin);
1410        boottimebin = bt;
1411#ifndef __rtems__
1412        bintime2timeval(&bt, &boottime);
1413
1414        /* XXX fiddle all the little crinkly bits around the fiords... */
1415        tc_windup();
1416        nanotime(&taft);
1417        if (timestepwarnings) {
1418                log(LOG_INFO,
1419                    "Time stepped from %jd.%09ld to %jd.%09ld (%jd.%09ld)\n",
1420                    (intmax_t)tbef.tv_sec, tbef.tv_nsec,
1421                    (intmax_t)taft.tv_sec, taft.tv_nsec,
1422                    (intmax_t)ts->tv_sec, ts->tv_nsec);
1423        }
1424        cpu_tick_calibrate(1);
1425#else /* __rtems__ */
1426        _Timecounter_Windup(lock_context);
1427#endif /* __rtems__ */
1428}
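/*
 * Example (illustrative): stepping the clock reduces to recomputing the
 * boot time.  If the current uptime is 100s and the requested wall-clock
 * time is 1000000s, boottimebin becomes 999900s, so that
 * boot time + uptime == wall-clock time holds for subsequent reads.
 */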
1429
1430/*
1431 * Initialize the next struct timehands in the ring and make
1432 * it the active timehands.  Along the way we might switch to a different
1433 * timecounter and/or do seconds processing in NTP.  Slightly magic.
1434 */
1435static void
1436tc_windup(void)
1437#ifdef __rtems__
1438{
1439        ISR_lock_Context lock_context;
1440
1441        _Timecounter_Acquire(&lock_context);
1442        _Timecounter_Windup(&lock_context);
1443}
1444
1445static void
1446_Timecounter_Windup(ISR_lock_Context *lock_context)
1447#endif /* __rtems__ */
1448{
1449        struct bintime bt;
1450        struct timehands *th, *tho;
1451        uint64_t scale;
1452        uint32_t delta, ncount, ogen;
1453        int i;
1454        time_t t;
1455
1456        /*
1457         * Make the next timehands a copy of the current one, but do
1458         * not overwrite the generation or next pointer.  While we
1459         * update the contents, the generation must be zero.  We need
1460         * to ensure that the zero generation is visible before the
1461         * data updates become visible, which requires release fence.
1462         * For similar reasons, re-reading of the generation after the
1463         * data is read should use acquire fence.
1464         */
1465        tho = timehands;
1466#if defined(RTEMS_SMP)
1467        th = tho->th_next;
1468#else
1469        th = tho;
1470#endif
1471        ogen = th->th_generation;
1472        th->th_generation = 0;
1473        atomic_thread_fence_rel();
1474#if defined(RTEMS_SMP)
1475        bcopy(tho, th, offsetof(struct timehands, th_generation));
1476#endif
1477
1478        /*
1479         * Capture a timecounter delta on the current timecounter and if
1480         * changing timecounters, a counter value from the new timecounter.
1481         * Update the offset fields accordingly.
1482         */
1483        delta = tc_delta(th);
1484        if (th->th_counter != timecounter)
1485                ncount = timecounter->tc_get_timecount(timecounter);
1486        else
1487                ncount = 0;
1488#ifdef FFCLOCK
1489        ffclock_windup(delta);
1490#endif
1491        th->th_offset_count += delta;
1492        th->th_offset_count &= th->th_counter->tc_counter_mask;
1493        while (delta > th->th_counter->tc_frequency) {
1494                /* Eat complete unadjusted seconds. */
1495                delta -= th->th_counter->tc_frequency;
1496                th->th_offset.sec++;
1497        }
1498        if ((delta > th->th_counter->tc_frequency / 2) &&
1499            (th->th_scale * delta < ((uint64_t)1 << 63))) {
1500                /* The product th_scale * delta just barely overflows. */
1501                th->th_offset.sec++;
1502        }
1503        bintime_addx(&th->th_offset, th->th_scale * delta);
1504
1505        /*
1506         * Hardware latching timecounters may not generate interrupts on
1507         * PPS events, so instead we poll them.  There is a finite risk that
1508         * the hardware might capture a count which is later than the one we
1509         * got above, and therefore possibly in the next NTP second which might
1510         * have a different rate than the current NTP second.  It doesn't
1511         * matter in practice.
1512         */
1513        if (tho->th_counter->tc_poll_pps)
1514                tho->th_counter->tc_poll_pps(tho->th_counter);
1515
1516        /*
1517         * Deal with NTP second processing.  The for loop normally
1518         * iterates at most once, but in extreme situations it might
1519         * keep NTP sane if timeouts are not run for several seconds.
1520         * At boot, the time step can be large when the TOD hardware
1521         * has been read, so on really large steps, we call
1522         * ntp_update_second only twice.  We need to call it twice in
1523         * case we missed a leap second.
1524         */
1525        bt = th->th_offset;
1526        bintime_add(&bt, &boottimebin);
1527        i = bt.sec - tho->th_microtime.tv_sec;
1528        if (i > LARGE_STEP)
1529                i = 2;
1530        for (; i > 0; i--) {
1531                t = bt.sec;
1532                ntp_update_second(&th->th_adjustment, &bt.sec);
1533                if (bt.sec != t)
1534                        boottimebin.sec += bt.sec - t;
1535        }
1536        /* Update the UTC timestamps used by the get*() functions. */
1537        /* XXX shouldn't do this here.  Should force non-`get' versions. */
1538        bintime2timeval(&bt, &th->th_microtime);
1539        bintime2timespec(&bt, &th->th_nanotime);
1540
1541        /* Now is a good time to change timecounters. */
1542        if (th->th_counter != timecounter) {
1543#ifndef __rtems__
1544#ifndef __arm__
1545                if ((timecounter->tc_flags & TC_FLAGS_C2STOP) != 0)
1546                        cpu_disable_c2_sleep++;
1547                if ((th->th_counter->tc_flags & TC_FLAGS_C2STOP) != 0)
1548                        cpu_disable_c2_sleep--;
1549#endif
1550#endif /* __rtems__ */
1551                th->th_counter = timecounter;
1552                th->th_offset_count = ncount;
1553#ifndef __rtems__
1554                tc_min_ticktock_freq = max(1, timecounter->tc_frequency /
1555                    (((uint64_t)timecounter->tc_counter_mask + 1) / 3));
1556#endif /* __rtems__ */
1557#ifdef FFCLOCK
1558                ffclock_change_tc(th);
1559#endif
1560        }
1561
1562        /*-
1563         * Recalculate the scaling factor.  We want the number of 1/2^64
1564         * fractions of a second per period of the hardware counter, taking
1565         * into account the th_adjustment factor which the NTP PLL/adjtime(2)
1566         * processing provides us with.
1567         *
1568         * The th_adjustment is nanoseconds per second with 32 bit binary
1569         * fraction and we want 64 bit binary fraction of second:
1570         *
1571         *       x = a * 2^32 / 10^9 = a * 4.294967296
1572         *
1573         * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
1574         * we can only multiply by about 850 without overflowing, that
1575         * leaves no suitably precise fractions for multiply before divide.
1576         *
1577         * Divide before multiply with a fraction of 2199/512 results in a
1578         * systematic undercompensation of 10PPM of th_adjustment.  On a
1579         * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
1580         *
1581         * We happily sacrifice the lowest of the 64 bits of our result
1582         * to the goddess of code clarity.
1583         *
1584         */
1585        scale = (uint64_t)1 << 63;
1586        scale += (th->th_adjustment / 1024) * 2199;
1587        scale /= th->th_counter->tc_frequency;
1588        th->th_scale = scale * 2;
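/*
 * Example (illustrative): with a 1 MHz counter and zero NTP adjustment,
 * scale == 2^63 / 1000000 and th_scale == 2 * scale ~= 2^64 / 1000000,
 * i.e. each counter tick advances the 64 bit fraction by one
 * microsecond's worth.
 */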
1589
1590        /*
1591         * Now that the struct timehands is again consistent, set the new
1592         * generation number, making sure to not make it zero.
1593         */
1594        if (++ogen == 0)
1595                ogen = 1;
1596        atomic_store_rel_int(&th->th_generation, ogen);
1597
1598        /* Go live with the new struct timehands. */
1599#ifdef FFCLOCK
1600        switch (sysclock_active) {
1601        case SYSCLOCK_FBCK:
1602#endif
1603                time_second = th->th_microtime.tv_sec;
1604                time_uptime = th->th_offset.sec;
1605#ifdef FFCLOCK
1606                break;
1607        case SYSCLOCK_FFWD:
1608                time_second = fftimehands->tick_time_lerp.sec;
1609                time_uptime = fftimehands->tick_time_lerp.sec - ffclock_boottime.sec;
1610                break;
1611        }
1612#endif
1613
1614#if defined(RTEMS_SMP)
1615        timehands = th;
1616#endif
1617#ifndef __rtems__
1618        timekeep_push_vdso();
1619#endif /* __rtems__ */
1620#ifdef __rtems__
1621        _Timecounter_Release(lock_context);
1622#endif /* __rtems__ */
1623}

#ifndef __rtems__
/* Report or change the active timecounter hardware. */
static int
sysctl_kern_timecounter_hardware(SYSCTL_HANDLER_ARGS)
{
	char newname[32];
	struct timecounter *newtc, *tc;
	int error;

	tc = timecounter;
	strlcpy(newname, tc->tc_name, sizeof(newname));

	error = sysctl_handle_string(oidp, &newname[0], sizeof(newname), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	/* Record that the tc in use now was specifically chosen. */
	tc_chosen = 1;
	if (strcmp(newname, tc->tc_name) == 0)
		return (0);
	for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) {
		if (strcmp(newname, newtc->tc_name) != 0)
			continue;

		/* Warm up new timecounter. */
		(void)newtc->tc_get_timecount(newtc);
		(void)newtc->tc_get_timecount(newtc);

		timecounter = newtc;

		/*
		 * The vdso timehands update is deferred until the next
		 * 'tc_windup()'.
		 *
		 * This is prudent given that 'timekeep_push_vdso()' does not
		 * use any locking and that it can be called in hard interrupt
		 * context via 'tc_windup()'.
		 */
		return (0);
	}
	return (EINVAL);
}

SYSCTL_PROC(_kern_timecounter, OID_AUTO, hardware, CTLTYPE_STRING | CTLFLAG_RW,
    0, 0, sysctl_kern_timecounter_hardware, "A",
    "Timecounter hardware selected");
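
/*
 * Illustrative shell usage (a sketch; the counter names are assumptions
 * and depend on the hardware present):
 *
 *	# sysctl kern.timecounter.hardware
 *	kern.timecounter.hardware: ACPI-fast
 *	# sysctl kern.timecounter.hardware=HPET
 *	kern.timecounter.hardware: ACPI-fast -> HPET
 *
 * Writing a name that matches no registered timecounter makes the
 * handler above return EINVAL.
 */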


/* Report the available timecounter hardware. */
static int
sysctl_kern_timecounter_choice(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sb;
	struct timecounter *tc;
	int error;

	sbuf_new_for_sysctl(&sb, NULL, 0, req);
	for (tc = timecounters; tc != NULL; tc = tc->tc_next) {
		if (tc != timecounters)
			sbuf_putc(&sb, ' ');
		sbuf_printf(&sb, "%s(%d)", tc->tc_name, tc->tc_quality);
	}
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);
	return (error);
}

SYSCTL_PROC(_kern_timecounter, OID_AUTO, choice, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, sysctl_kern_timecounter_choice, "A", "Timecounter hardware detected");
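
/*
 * Illustrative output of the handler above (an assumed example; names
 * and qualities vary by machine):
 *
 *	$ sysctl kern.timecounter.choice
 *	kern.timecounter.choice: TSC-low(1000) HPET(950) ACPI-fast(900) i8254(0)
 *
 * Larger tc_quality values mark counters the kernel considers better;
 * counters with negative quality are only used when explicitly chosen.
 */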
#endif /* __rtems__ */

#ifndef __rtems__
/*
 * RFC 2783 PPS-API implementation.
 */

/*
 *  Return true if the driver is aware of the abi version extensions in the
 *  pps_state structure, and it supports at least the given abi version number.
 */
static inline int
abi_aware(struct pps_state *pps, int vers)
{

	return ((pps->kcmode & KCMODE_ABIFLAG) && pps->driver_abi >= vers);
}

static int
pps_fetch(struct pps_fetch_args *fapi, struct pps_state *pps)
{
	int err, timo;
	pps_seq_t aseq, cseq;
	struct timeval tv;

	if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
		return (EINVAL);

	/*
	 * If no timeout is requested, immediately return whatever values were
	 * most recently captured.  If timeout seconds is -1, that's a request
	 * to block without a timeout.  WITNESS won't let us sleep forever
	 * without a lock (we really don't need a lock), so just repeatedly
	 * sleep a long time.
	 */
	if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec) {
		if (fapi->timeout.tv_sec == -1)
			timo = 0x7fffffff;
		else {
			tv.tv_sec = fapi->timeout.tv_sec;
			tv.tv_usec = fapi->timeout.tv_nsec / 1000;
			timo = tvtohz(&tv);
		}
		aseq = pps->ppsinfo.assert_sequence;
		cseq = pps->ppsinfo.clear_sequence;
		while (aseq == pps->ppsinfo.assert_sequence &&
		    cseq == pps->ppsinfo.clear_sequence) {
			if (abi_aware(pps, 1) && pps->driver_mtx != NULL) {
				if (pps->flags & PPSFLAG_MTX_SPIN) {
					err = msleep_spin(pps, pps->driver_mtx,
					    "ppsfch", timo);
				} else {
					err = msleep(pps, pps->driver_mtx, PCATCH,
					    "ppsfch", timo);
				}
			} else {
				err = tsleep(pps, PCATCH, "ppsfch", timo);
			}
			if (err == EWOULDBLOCK) {
				if (fapi->timeout.tv_sec == -1) {
					continue;
				} else {
					return (ETIMEDOUT);
				}
			} else if (err != 0) {
				return (err);
			}
		}
	}

	pps->ppsinfo.current_mode = pps->ppsparam.mode;
	fapi->pps_info_buf = pps->ppsinfo;

	return (0);
}
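
/*
 * Illustrative user-space consumer (a sketch using the RFC 2783 wrappers
 * from <sys/timepps.h>; "/dev/pps0" and the omitted error handling are
 * assumptions):
 *
 *	pps_handle_t handle;
 *	pps_info_t info;
 *	struct timespec timeout = { 3, 0 };
 *	int fd = open("/dev/pps0", O_RDONLY);
 *
 *	time_pps_create(fd, &handle);
 *	if (time_pps_fetch(handle, PPS_TSFMT_TSPEC, &info, &timeout) == 0)
 *		printf("assert #%lu at %jd.%09ld\n",
 *		    (unsigned long)info.assert_sequence,
 *		    (intmax_t)info.assert_timestamp.tv_sec,
 *		    info.assert_timestamp.tv_nsec);
 *
 * A fetch with a non-zero timeout blocks in pps_fetch() above until one
 * of the sequence numbers advances or the timeout converted by tvtohz()
 * expires.
 */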

int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	struct pps_fetch_args *fapi;
#ifdef FFCLOCK
	struct pps_fetch_ffc_args *fapi_ffc;
#endif
#ifdef PPS_SYNC
	struct pps_kcbind_args *kapi;
#endif

	KASSERT(pps != NULL, ("NULL pps pointer in pps_ioctl"));
	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
#ifdef FFCLOCK
		/* Ensure only a single clock is selected for ffc timestamp. */
		if ((app->mode & PPS_TSCLK_MASK) == PPS_TSCLK_MASK)
			return (EINVAL);
#endif
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		fapi = (struct pps_fetch_args *)data;
		return (pps_fetch(fapi, pps));
#ifdef FFCLOCK
	case PPS_IOC_FETCH_FFCOUNTER:
		fapi_ffc = (struct pps_fetch_ffc_args *)data;
		if (fapi_ffc->tsformat && fapi_ffc->tsformat !=
		    PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (fapi_ffc->timeout.tv_sec || fapi_ffc->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo_ffc.current_mode = pps->ppsparam.mode;
		fapi_ffc->pps_info_buf_ffc = pps->ppsinfo_ffc;
		/* Overwrite timestamps if feedback clock selected. */
		switch (pps->ppsparam.mode & PPS_TSCLK_MASK) {
		case PPS_TSCLK_FBCK:
			fapi_ffc->pps_info_buf_ffc.assert_timestamp =
			    pps->ppsinfo.assert_timestamp;
			fapi_ffc->pps_info_buf_ffc.clear_timestamp =
			    pps->ppsinfo.clear_timestamp;
			break;
		case PPS_TSCLK_FFWD:
			break;
		default:
			break;
		}
		return (0);
#endif /* FFCLOCK */
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		kapi = (struct pps_kcbind_args *)data;
		/* XXX Only root should be able to do this */
		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
			return (EINVAL);
		if (kapi->edge & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = (kapi->edge & KCMODE_EDGEMASK) |
		    (pps->kcmode & KCMODE_ABIFLAG);
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (ENOIOCTL);
	}
}

void
pps_init(struct pps_state *pps)
{
	pps->ppscap |= PPS_TSFMT_TSPEC | PPS_CANWAIT;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
#ifdef FFCLOCK
	pps->ppscap |= PPS_TSCLK_MASK;
#endif
	pps->kcmode &= ~KCMODE_ABIFLAG;
}

void
pps_init_abi(struct pps_state *pps)
{

	pps_init(pps);
	if (pps->driver_abi > 0) {
		pps->kcmode |= KCMODE_ABIFLAG;
		pps->kernel_abi = PPS_ABI_VERSION;
	}
}

void
pps_capture(struct pps_state *pps)
{
	struct timehands *th;

	KASSERT(pps != NULL, ("NULL pps pointer in pps_capture"));
	th = timehands;
	pps->capgen = atomic_load_acq_int(&th->th_generation);
	pps->capth = th;
#ifdef FFCLOCK
	pps->capffth = fftimehands;
#endif
	pps->capcount = th->th_counter->tc_get_timecount(th->th_counter);
	atomic_thread_fence_acq();
	if (pps->capgen != th->th_generation)
		pps->capgen = 0;
}
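
/*
 * Illustrative driver usage (a sketch; "foo_softc" and the handler name
 * are assumptions).  pps_capture() is cheap and should run as early as
 * possible in the interrupt handler; the more expensive pps_event()
 * then completes the capture:
 *
 *	static void
 *	foo_intr(struct foo_softc *sc)
 *	{
 *		pps_capture(&sc->pps);
 *		...
 *		pps_event(&sc->pps, PPS_CAPTUREASSERT);
 *	}
 */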

void
pps_event(struct pps_state *pps, int event)
{
	struct bintime bt;
	struct timespec ts, *tsp, *osp;
	uint32_t tcount, *pcount;
	int foff;
	pps_seq_t *pseq;
#ifdef FFCLOCK
	struct timespec *tsp_ffc;
	pps_seq_t *pseq_ffc;
	ffcounter *ffcount;
#endif
#ifdef PPS_SYNC
	int fhard;
#endif

	KASSERT(pps != NULL, ("NULL pps pointer in pps_event"));
	/* Nothing to do if not currently set to capture this event type. */
	if ((event & pps->ppsparam.mode) == 0)
		return;
	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen == 0 || pps->capgen !=
	    atomic_load_acq_int(&pps->capth->th_generation))
		return;

	/* Things would be easier with arrays. */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
#ifdef PPS_SYNC
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
#endif
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
#ifdef FFCLOCK
		ffcount = &pps->ppsinfo_ffc.assert_ffcount;
		tsp_ffc = &pps->ppsinfo_ffc.assert_timestamp;
		pseq_ffc = &pps->ppsinfo_ffc.assert_sequence;
#endif
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
#ifdef PPS_SYNC
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
#endif
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
#ifdef FFCLOCK
		ffcount = &pps->ppsinfo_ffc.clear_ffcount;
		tsp_ffc = &pps->ppsinfo_ffc.clear_timestamp;
		pseq_ffc = &pps->ppsinfo_ffc.clear_sequence;
#endif
	}

	/*
	 * If the timecounter changed, we cannot compare the count values, so
	 * we have to drop the rest of the PPS-stuff until the next event.
	 */
	if (pps->ppstc != pps->capth->th_counter) {
		pps->ppstc = pps->capth->th_counter;
		*pcount = pps->capcount;
		pps->ppscount[2] = pps->capcount;
		return;
	}

	/* Convert the count to a timespec. */
	tcount = pps->capcount - pps->capth->th_offset_count;
	tcount &= pps->capth->th_counter->tc_counter_mask;
	bt = pps->capth->th_offset;
	bintime_addx(&bt, pps->capth->th_scale * tcount);
	bintime_add(&bt, &boottimebin);
	bintime2timespec(&bt, &ts);

	/* If the timecounter was wound up underneath us, bail out. */
	atomic_thread_fence_acq();
	if (pps->capgen != pps->capth->th_generation)
		return;

	*pcount = pps->capcount;
	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}

#ifdef FFCLOCK
	*ffcount = pps->capffth->tick_ffcount + tcount;
	bt = pps->capffth->tick_time;
	ffclock_convert_delta(tcount, pps->capffth->cest.period, &bt);
	bintime_add(&bt, &pps->capffth->tick_time);
	bintime2timespec(&bt, &ts);
	(*pseq_ffc)++;
	*tsp_ffc = ts;
#endif

#ifdef PPS_SYNC
	if (fhard) {
		uint64_t scale;

		/*
		 * Feed the NTP PLL/FLL.
		 * The FLL wants to know how many (hardware) nanoseconds
		 * elapsed since the previous event.
		 */
		tcount = pps->capcount - pps->ppscount[2];
		pps->ppscount[2] = pps->capcount;
		tcount &= pps->capth->th_counter->tc_counter_mask;
		scale = (uint64_t)1 << 63;
		scale /= pps->capth->th_counter->tc_frequency;
		scale *= 2;
		bt.sec = 0;
		bt.frac = 0;
		bintime_addx(&bt, scale * tcount);
		bintime2timespec(&bt, &ts);
		hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec);
	}
#endif

	/* Wake up anyone sleeping in pps_fetch().  */
	wakeup(pps);
}
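
/*
 * Worked example for the PPS_SYNC interval conversion above (an
 * illustrative check; the 10 MHz counter frequency is an assumption):
 *
 *	scale = 2 * (2^63 / 10^7) ~ 2^64 / 10^7
 *
 * so scale * tcount accumulates tcount / 10^7 seconds in 1/2^64
 * fractions.  For tcount = 10^7, i.e. one full second between events,
 * hardpps() is fed very nearly 10^9 nanoseconds; the low bit sacrificed
 * in the scale computation costs at most a few nanoseconds of
 * truncation, matching what a 1 PPS source should deliver.
 */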
#else /* __rtems__ */
/* FIXME: https://devel.rtems.org/ticket/2349 */
#endif /* __rtems__ */

/*
 * Timecounters need to be updated every so often to prevent the hardware
 * counter from overflowing.  Updating also recalculates the cached values
 * used by the get*() family of functions, so their precision depends on
 * the update frequency.
 */

#ifndef __rtems__
static int tc_tick;
SYSCTL_INT(_kern_timecounter, OID_AUTO, tick, CTLFLAG_RD, &tc_tick, 0,
    "Approximate number of hardclock ticks in a millisecond");
#endif /* __rtems__ */

#ifndef __rtems__
void
tc_ticktock(int cnt)
{
	static int count;

	count += cnt;
	if (count < tc_tick)
		return;
	count = 0;
	tc_windup();
}
#else /* __rtems__ */
void
_Timecounter_Tick(void)
{
	Per_CPU_Control *cpu_self = _Per_CPU_Get();

	if (_Per_CPU_Is_boot_processor(cpu_self)) {
		tc_windup();
	}

	_Watchdog_Tick(cpu_self);
}

void
_Timecounter_Tick_simple(uint32_t delta, uint32_t offset,
    ISR_lock_Context *lock_context)
{
	struct bintime bt;
	struct timehands *th;
	uint32_t ogen;

	th = timehands;
	ogen = th->th_generation;
	th->th_offset_count = offset;
	bintime_addx(&th->th_offset, th->th_scale * delta);

	bt = th->th_offset;
	bintime_add(&bt, &boottimebin);

	/* Update the UTC timestamps used by the get*() functions. */
	/* XXX shouldn't do this here.  Should force non-`get' versions. */
	bintime2timeval(&bt, &th->th_microtime);
	bintime2timespec(&bt, &th->th_nanotime);

	/*
	 * Now that the struct timehands is again consistent, set the new
	 * generation number, making sure to not make it zero.
	 */
	if (++ogen == 0)
		ogen = 1;
	th->th_generation = ogen;

	/* Go live with the new struct timehands. */
	time_second = th->th_microtime.tv_sec;
	time_uptime = th->th_offset.sec;

	_Timecounter_Release(lock_context);

	_Watchdog_Tick(_Per_CPU_Get_snapshot());
}
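
/*
 * Illustrative caller of _Timecounter_Tick_simple() (a sketch; the
 * helper names ticks_per_clock_tick and clock_counter_read() are
 * assumptions, and _Timecounter_Acquire() is expected to come from
 * <rtems/score/timecounterimpl.h>).  A simple RTEMS clock driver would
 * do something like:
 *
 *	ISR_lock_Context lock_context;
 *
 *	_Timecounter_Acquire(&lock_context);
 *	_Timecounter_Tick_simple(ticks_per_clock_tick,
 *	    clock_counter_read(), &lock_context);
 *
 * Note that _Timecounter_Tick_simple() releases the lock on behalf of
 * the caller.
 */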
#endif /* __rtems__ */

#ifndef __rtems__
static void __inline
tc_adjprecision(void)
{
	int t;

	if (tc_timepercentage > 0) {
		t = (99 + tc_timepercentage) / tc_timepercentage;
		tc_precexp = fls(t + (t >> 1)) - 1;
		FREQ2BT(hz / tc_tick, &bt_timethreshold);
		FREQ2BT(hz, &bt_tickthreshold);
		bintime_shift(&bt_timethreshold, tc_precexp);
		bintime_shift(&bt_tickthreshold, tc_precexp);
	} else {
		tc_precexp = 31;
		bt_timethreshold.sec = INT_MAX;
		bt_timethreshold.frac = ~(uint64_t)0;
		bt_tickthreshold = bt_timethreshold;
	}
	sbt_timethreshold = bttosbt(bt_timethreshold);
	sbt_tickthreshold = bttosbt(bt_tickthreshold);
}
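
/*
 * Worked example (illustrative): with tc_timepercentage = 5, i.e. an
 * allowed deviation of 5%,
 *
 *	t = (99 + 5) / 5 = 20,   t + t/2 = 30,   fls(30) = 5,
 *
 * so tc_precexp = 4 and the thresholds above are shifted left by four
 * bits.  The resulting precision window is 1/16 ~ 6.25%, the smallest
 * power-of-two fraction that is not below the requested 5%.
 */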
#endif /* __rtems__ */

#ifndef __rtems__
static int
sysctl_kern_timecounter_adjprecision(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = tc_timepercentage;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	tc_timepercentage = val;
	if (cold)
		goto done;
	tc_adjprecision();
done:
	return (0);
}

static void
inittimecounter(void *dummy)
{
	u_int p;
	int tick_rate;

	/*
	 * Set the initial timeout to
	 * max(1, <approx. number of hardclock ticks in a millisecond>).
	 * People should probably not use the sysctl to set the timeout
	 * to smaller than its initial value, since that value is the
	 * smallest reasonable one.  If they want better timestamps they
	 * should use the non-"get"* functions.
	 */
	if (hz > 1000)
		tc_tick = (hz + 500) / 1000;
	else
		tc_tick = 1;
	tc_adjprecision();
	FREQ2BT(hz, &tick_bt);
	tick_sbt = bttosbt(tick_bt);
	tick_rate = hz / tc_tick;
	FREQ2BT(tick_rate, &tc_tick_bt);
	tc_tick_sbt = bttosbt(tc_tick_bt);
	p = (tc_tick * 1000000) / hz;
	printf("Timecounters tick every %d.%03u msec\n", p / 1000, p % 1000);

#ifdef FFCLOCK
	ffclock_init();
#endif
	/* warm up new timecounter (again) and get rolling. */
	(void)timecounter->tc_get_timecount(timecounter);
	(void)timecounter->tc_get_timecount(timecounter);
	tc_windup();
}

SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL);
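
/*
 * Worked example (illustrative): with hz = 4000, tc_tick becomes
 * (4000 + 500) / 1000 = 4, so tc_windup() runs every fourth hardclock
 * tick; p = (4 * 1000000) / 4000 = 1000 and the boot message reads
 * "Timecounters tick every 1.000 msec".  With hz = 100, tc_tick = 1
 * and the cached values behind the get*() functions are refreshed only
 * every 10 msec.
 */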

/* Cpu tick handling -------------------------------------------------*/

static int cpu_tick_variable;
static uint64_t cpu_tick_frequency;

static DPCPU_DEFINE(uint64_t, tc_cpu_ticks_base);
static DPCPU_DEFINE(unsigned, tc_cpu_ticks_last);

static uint64_t
tc_cpu_ticks(void)
{
	struct timecounter *tc;
	uint64_t res, *base;
	unsigned u, *last;

	critical_enter();
	base = DPCPU_PTR(tc_cpu_ticks_base);
	last = DPCPU_PTR(tc_cpu_ticks_last);
	tc = timehands->th_counter;
	u = tc->tc_get_timecount(tc) & tc->tc_counter_mask;
	if (u < *last)
		*base += (uint64_t)tc->tc_counter_mask + 1;
	*last = u;
	res = u + *base;
	critical_exit();
	return (res);
}
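
/*
 * Worked example (illustrative): for a 32-bit counter
 * (tc_counter_mask = 0xffffffff) with *last = 0xfffffff0, a new reading
 * of u = 0x00000010 is smaller than *last, which indicates a wrap, so
 * *base grows by 2^32 and the returned u + *base keeps increasing
 * monotonically.  The scheme assumes tc_cpu_ticks() is called at least
 * once per counter period.
 */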

void
cpu_tick_calibration(void)
{
	static time_t last_calib;

	if (time_uptime != last_calib && !(time_uptime & 0xf)) {
		cpu_tick_calibrate(0);
		last_calib = time_uptime;
	}
}

/*
 * This function gets called every 16 seconds on only one designated
 * CPU in the system from hardclock() via cpu_tick_calibration().
 *
 * Whenever the real time clock is stepped we get called with reset=1
 * to make sure we handle suspend/resume and similar events correctly.
 */

static void
cpu_tick_calibrate(int reset)
{
	static uint64_t c_last;
	uint64_t c_this, c_delta;
	static struct bintime  t_last;
	struct bintime t_this, t_delta;
	uint32_t divi;

	if (reset) {
		/* The clock was stepped, abort & reset */
		t_last.sec = 0;
		return;
	}

	/* we don't calibrate fixed rate cputicks */
	if (!cpu_tick_variable)
		return;

	getbinuptime(&t_this);
	c_this = cpu_ticks();
	if (t_last.sec != 0) {
		c_delta = c_this - c_last;
		t_delta = t_this;
		bintime_sub(&t_delta, &t_last);
		/*
		 * Headroom:
		 *	2^(64-20) / 16[s] =
		 *	2^(44) / 16[s] =
		 *	17.592.186.044.416 / 16 =
		 *	1.099.511.627.776 [Hz]
		 */
		divi = t_delta.sec << 20;
		divi |= t_delta.frac >> (64 - 20);
		c_delta <<= 20;
		c_delta /= divi;
		if (c_delta > cpu_tick_frequency) {
			if (0 && bootverbose)
				printf("cpu_tick increased to %ju Hz\n",
				    c_delta);
			cpu_tick_frequency = c_delta;
		}
	}
	c_last = c_this;
	t_last = t_this;
}
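
/*
 * Worked example (illustrative): suppose exactly 16 seconds elapsed
 * between calibrations and the CPU ticked c_delta = 2^34 times.  The
 * elapsed time is converted to fixed point with 20 fraction bits:
 *
 *	divi    = 16 << 20            = 2^24
 *	c_delta = (2^34 << 20) / 2^24 = 2^30 ~ 1.07e9 [Hz]
 *
 * The headroom comment above gives the limit of this scheme: tick
 * rates up to about 1.1 THz fit before "c_delta <<= 20" overflows
 * 64 bits.
 */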

void
set_cputicker(cpu_tick_f *func, uint64_t freq, unsigned var)
{

	if (func == NULL) {
		cpu_ticks = tc_cpu_ticks;
	} else {
		cpu_tick_frequency = freq;
		cpu_tick_variable = var;
		cpu_ticks = func;
	}
}

uint64_t
cpu_tickrate(void)
{

	if (cpu_ticks == tc_cpu_ticks)
		return (tc_getfrequency());
	return (cpu_tick_frequency);
}

/*
 * We need to be slightly careful converting cputicks to microseconds.
 * There is plenty of margin in 64 bits of microseconds (half a million
 * years) and in 64 bits at 4 GHz (146 years), but if we do a multiply
 * before divide conversion (to retain precision) we find that the
 * margin shrinks to 1.5 hours (one millionth of 146y).
 * With a three prong approach we never lose significant bits, no
 * matter what the cputick rate and length of timeinterval is.
 */

uint64_t
cputick2usec(uint64_t tick)
{

	if (tick > 18446744073709551LL)		/* floor(2^64 / 1000) */
		return (tick / (cpu_tickrate() / 1000000LL));
	else if (tick > 18446744073709LL)	/* floor(2^64 / 1000000) */
		return ((tick * 1000LL) / (cpu_tickrate() / 1000LL));
	else
		return ((tick * 1000000LL) / cpu_tickrate());
}
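
/*
 * Worked example (illustrative, assuming cpu_tickrate() = 2 GHz): a
 * small count such as tick = 2000 takes the last branch and multiplies
 * first,
 *
 *	(2000 * 1000000) / 2000000000 = 1 [us]
 *
 * while a huge count such as tick = 2^60 (about 18 years at 2 GHz)
 * exceeds floor(2^64 / 1000) and takes the first branch, dividing
 * first: 2^60 / (2 * 10^9 / 10^6) = 2^60 / 2000 microseconds, with no
 * risk of overflowing the 64-bit multiply.
 */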

cpu_tick_f	*cpu_ticks = tc_cpu_ticks;
#endif /* __rtems__ */

#ifndef __rtems__
static int vdso_th_enable = 1;
static int
sysctl_fast_gettime(SYSCTL_HANDLER_ARGS)
{
	int old_vdso_th_enable, error;

	old_vdso_th_enable = vdso_th_enable;
	error = sysctl_handle_int(oidp, &old_vdso_th_enable, 0, req);
	if (error != 0)
		return (error);
	vdso_th_enable = old_vdso_th_enable;
	return (0);
}
SYSCTL_PROC(_kern_timecounter, OID_AUTO, fast_gettime,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    NULL, 0, sysctl_fast_gettime, "I", "Enable fast time of day");

uint32_t
tc_fill_vdso_timehands(struct vdso_timehands *vdso_th)
{
	struct timehands *th;
	uint32_t enabled;

	th = timehands;
	vdso_th->th_algo = VDSO_TH_ALGO_1;
	vdso_th->th_scale = th->th_scale;
	vdso_th->th_offset_count = th->th_offset_count;
	vdso_th->th_counter_mask = th->th_counter->tc_counter_mask;
	vdso_th->th_offset = th->th_offset;
	vdso_th->th_boottime = boottimebin;
	enabled = cpu_fill_vdso_timehands(vdso_th, th->th_counter);
	if (!vdso_th_enable)
		enabled = 0;
	return (enabled);
}
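
/*
 * For reference (editorial sketch): userland consumes the snapshot
 * filled in above much like binuptime() does, e.g.
 *
 *	delta = (counter_now - vdso_th->th_offset_count) &
 *	    vdso_th->th_counter_mask;
 *	bt = vdso_th->th_offset;
 *	bintime_addx(&bt, vdso_th->th_scale * delta);
 *
 * adding vdso_th->th_boottime for wall-clock queries and using a
 * generation check to detect concurrent updates, without entering the
 * kernel.
 */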
#endif /* __rtems__ */

#ifdef COMPAT_FREEBSD32
uint32_t
tc_fill_vdso_timehands32(struct vdso_timehands32 *vdso_th32)
{
	struct timehands *th;
	uint32_t enabled;

	th = timehands;
	vdso_th32->th_algo = VDSO_TH_ALGO_1;
	*(uint64_t *)&vdso_th32->th_scale[0] = th->th_scale;
	vdso_th32->th_offset_count = th->th_offset_count;
	vdso_th32->th_counter_mask = th->th_counter->tc_counter_mask;
	vdso_th32->th_offset.sec = th->th_offset.sec;
	*(uint64_t *)&vdso_th32->th_offset.frac[0] = th->th_offset.frac;
	vdso_th32->th_boottime.sec = boottimebin.sec;
	*(uint64_t *)&vdso_th32->th_boottime.frac[0] = boottimebin.frac;
	enabled = cpu_fill_vdso_timehands32(vdso_th32, th->th_counter);
	if (!vdso_th_enable)
		enabled = 0;
	return (enabled);
}
#endif