source: rtems/cpukit/score/src/kern_tc.c @ f718e79

4.115
Last change on this file since f718e79 was f718e79, checked in by Sebastian Huber <sebastian.huber@…>, on Jun 11, 2015 at 7:56:18 AM

Revert "timecounter: No _Timecounter_Tick_simple() for SMP"

This reverts commit 46ae1d7a2b49b8f973dd6ba44fbbd38383798524.

The _Timecounter_Tick_simple() function actually doesn't switch to the
next timehand, so it is all right to use the simple timecounter approach
even on SMP configurations. The use of simple timecounters is not
recommended however since they impose a performance penalty.

  • Property mode set to 100644
File size: 56.5 KB
Line 
1/*-
2 * ----------------------------------------------------------------------------
3 * "THE BEER-WARE LICENSE" (Revision 42):
4 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
5 * can do whatever you want with this stuff. If we meet some day, and you think
6 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
7 * ----------------------------------------------------------------------------
8 *
9 * Copyright (c) 2011 The FreeBSD Foundation
10 * All rights reserved.
11 *
12 * Portions of this software were developed by Julien Ridoux at the University
13 * of Melbourne under sponsorship from the FreeBSD Foundation.
14 */
15
16#ifdef __rtems__
17#define _KERNEL
18#define bintime _Timecounter_Bintime
19#define binuptime _Timecounter_Binuptime
20#define boottimebin _Timecounter_Boottimebin
21#define getbintime _Timecounter_Getbintime
22#define getbinuptime _Timecounter_Getbinuptime
23#define getmicrotime _Timecounter_Getmicrotime
24#define getmicrouptime _Timecounter_Getmicrouptime
25#define getnanotime _Timecounter_Getnanotime
26#define getnanouptime _Timecounter_Getnanouptime
27#define microtime _Timecounter_Microtime
28#define microuptime _Timecounter_Microuptime
29#define nanotime _Timecounter_Nanotime
30#define nanouptime _Timecounter_Nanouptime
31#define tc_init _Timecounter_Install
32#define timecounter _Timecounter
33#define time_second _Timecounter_Time_second
34#define time_uptime _Timecounter_Time_uptime
35#include <rtems/score/timecounterimpl.h>
36#include <rtems/score/watchdogimpl.h>
37#endif /* __rtems__ */
38#include <sys/cdefs.h>
39__FBSDID("$FreeBSD r284178 2015-06-09T11:49:56Z$");
40
41#include "opt_compat.h"
42#include "opt_ntp.h"
43#include "opt_ffclock.h"
44
45#include <sys/param.h>
46#ifndef __rtems__
47#include <sys/kernel.h>
48#include <sys/limits.h>
49#include <sys/lock.h>
50#include <sys/mutex.h>
51#include <sys/sbuf.h>
52#include <sys/sysctl.h>
53#include <sys/syslog.h>
54#include <sys/systm.h>
55#endif /* __rtems__ */
56#include <sys/timeffc.h>
57#include <sys/timepps.h>
58#include <sys/timetc.h>
59#include <sys/timex.h>
60#ifndef __rtems__
61#include <sys/vdso.h>
62#include <machine/atomic.h>
63#endif /* __rtems__ */
64#ifdef __rtems__
65#include <limits.h>
66#include <rtems.h>
67ISR_LOCK_DEFINE(static, _Timecounter_Lock, "Timecounter");
68#define hz rtems_clock_get_ticks_per_second()
69#define printf(...)
70#define log(...)
/*
 * Find the last (most significant) bit set in a word.  Bits are numbered
 * starting at 1; fls(0) returns 0.  Negative arguments are converted to
 * unsigned by __builtin_clz(), so fls(-1) == 32 on 32-bit int.
 */
static inline int
fls(int x)
{

        if (x == 0)
                return (0);
        return ((int)(sizeof(x) * 8) - __builtin_clz(x));
}
76/* FIXME: https://devel.rtems.org/ticket/2348 */
77#define ntp_update_second(a, b) do { (void) a; (void) b; } while (0)
78#endif /* __rtems__ */
79
80/*
81 * A large step happens on boot.  This constant detects such steps.
82 * It is relatively small so that ntp_update_second gets called enough
83 * in the typical 'missed a couple of seconds' case, but doesn't loop
84 * forever when the time step is large.
85 */
86#define LARGE_STEP      200
87
88/*
89 * Implement a dummy timecounter which we can use until we get a real one
90 * in the air.  This allows the console and other early stuff to use
91 * time services.
92 */
93
/*
 * Counter read method of the dummy timecounter.  The 'tc' argument is
 * unused; it is present only to match the timecounter method signature.
 */
static uint32_t
dummy_get_timecount(struct timecounter *tc)
{
#ifndef __rtems__
        /* Monotonically increasing fake count so early callers see progress. */
        static uint32_t now;

        return (++now);
#else /* __rtems__ */
        /* On RTEMS a constant is sufficient until a real counter is installed. */
        return 0;
#endif /* __rtems__ */
}
105
/*
 * Placeholder 1 MHz timecounter.  The large negative quality (-1000000)
 * guarantees it is never preferred over any real hardware timecounter.
 */
static struct timecounter dummy_timecounter = {
        dummy_get_timecount, 0, ~0u, 1000000, "dummy", -1000000
};
109
/*
 * A timehands structure is one consistent snapshot of timecounter state.
 * tc_windup() publishes updates by filling the next hand in a ring and
 * bumping th_generation; lock-free readers retry while the generation is
 * 0 or changes under them.
 */
struct timehands {
        /* These fields must be initialized by the driver. */
        struct timecounter      *th_counter;      /* hardware counter in use */
        int64_t                 th_adjustment;    /* rate adjustment (applied elsewhere) */
        uint64_t                th_scale;         /* seconds per counter tick, 64-bit frac */
        uint32_t                th_offset_count;  /* counter value at th_offset */
        struct bintime          th_offset;        /* uptime at the last windup */
        struct timeval          th_microtime;     /* cached wall time (us resolution) */
        struct timespec         th_nanotime;      /* cached wall time (ns resolution) */
        /* Fields not to be copied in tc_windup start with th_generation. */
#ifndef __rtems__
        u_int                   th_generation;    /* 0 while the hand is being updated */
#else /* __rtems__ */
        Atomic_Ulong            th_generation;    /* as above, with acquire/release atomics */
#endif /* __rtems__ */
        struct timehands        *th_next;         /* next hand in the ring */
};
127
/*
 * The timehands ring.  On SMP ten hands are chained so readers can keep
 * using an old snapshot while tc_windup() prepares the next one; on
 * uniprocessor configurations a single hand points at itself.  th0 starts
 * on the dummy timecounter with generation 1, so early readers never spin
 * waiting for generation 0 to clear.
 */
#if defined(RTEMS_SMP)
static struct timehands th0;
static struct timehands th9 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th0};
static struct timehands th8 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th9};
static struct timehands th7 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th8};
static struct timehands th6 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th7};
static struct timehands th5 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th6};
static struct timehands th4 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th5};
static struct timehands th3 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th4};
static struct timehands th2 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th3};
static struct timehands th1 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th2};
#endif
static struct timehands th0 = {
        &dummy_timecounter,
        0,
        (uint64_t)-1 / 1000000,  /* scale matching the dummy's 1 MHz rate */
        0,
        {1, 0},
        {0, 0},
        {0, 0},
        1,                       /* non-zero generation: valid from the start */
#if defined(RTEMS_SMP)
        &th1
#else
        &th0
#endif
};
155
/* Currently published hand; readers load this once per generation-checked loop. */
static struct timehands *volatile timehands = &th0;
/* Active hardware timecounter and the head of the registration list. */
struct timecounter *timecounter = &dummy_timecounter;
static struct timecounter *timecounters = &dummy_timecounter;

#ifndef __rtems__
int tc_min_ticktock_freq = 1;
#endif /* __rtems__ */

/* Coarse one-second clocks; start at 1 so a value of 0 is never observed. */
volatile time_t time_second = 1;
volatile time_t time_uptime = 1;

/* Boot time in bintime format; added to uptime to produce wall-clock time. */
struct bintime boottimebin;
#ifndef __rtems__
struct timeval boottime;
static int sysctl_kern_boottime(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime, CTLTYPE_STRUCT|CTLFLAG_RD,
    NULL, 0, sysctl_kern_boottime, "S,timeval", "System boottime");

SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, "");
static SYSCTL_NODE(_kern_timecounter, OID_AUTO, tc, CTLFLAG_RW, 0, "");

static int timestepwarnings;
SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RW,
    &timestepwarnings, 0, "Log time steps");

struct bintime bt_timethreshold;
struct bintime bt_tickthreshold;
sbintime_t sbt_timethreshold;
sbintime_t sbt_tickthreshold;
struct bintime tc_tick_bt;
sbintime_t tc_tick_sbt;
int tc_precexp;
int tc_timepercentage = TC_DEFAULTPERC;
static int sysctl_kern_timecounter_adjprecision(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern_timecounter, OID_AUTO, alloweddeviation,
    CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, 0, 0,
    sysctl_kern_timecounter_adjprecision, "I",
    "Allowed time interval deviation in percents");
#endif /* __rtems__ */

static void tc_windup(void);
#ifndef __rtems__
static void cpu_tick_calibrate(int);
#endif /* __rtems__ */

void dtrace_getnanotime(struct timespec *tsp);
202
203#ifndef __rtems__
/*
 * Sysctl handler for kern.boottime.  For 32-bit requests on 64-bit
 * kernels (SCTL_MASK32, except on MIPS) the timeval is repacked into two
 * 32-bit ints; otherwise the struct is copied out verbatim.
 */
static int
sysctl_kern_boottime(SYSCTL_HANDLER_ARGS)
{
#ifndef __mips__
#ifdef SCTL_MASK32
        int tv[2];

        if (req->flags & SCTL_MASK32) {
                tv[0] = boottime.tv_sec;
                tv[1] = boottime.tv_usec;
                return SYSCTL_OUT(req, tv, sizeof(tv));
        } else
#endif
#endif
                return SYSCTL_OUT(req, &boottime, sizeof(boottime));
}
220
221static int
222sysctl_kern_timecounter_get(SYSCTL_HANDLER_ARGS)
223{
224        uint32_t ncount;
225        struct timecounter *tc = arg1;
226
227        ncount = tc->tc_get_timecount(tc);
228        return sysctl_handle_int(oidp, &ncount, 0, req);
229}
230
231static int
232sysctl_kern_timecounter_freq(SYSCTL_HANDLER_ARGS)
233{
234        uint64_t freq;
235        struct timecounter *tc = arg1;
236
237        freq = tc->tc_frequency;
238        return sysctl_handle_64(oidp, &freq, 0, req);
239}
240#endif /* __rtems__ */
241
242/*
243 * Return the difference between the timehands' counter value now and what
244 * was when we copied it to the timehands' offset_count.
245 */
246static __inline uint32_t
247tc_delta(struct timehands *th)
248{
249        struct timecounter *tc;
250
251        tc = th->th_counter;
252        return ((tc->tc_get_timecount(tc) - th->th_offset_count) &
253            tc->tc_counter_mask);
254}
255
/*
 * Load a timehands generation with acquire semantics, so all reads of the
 * hand's fields that follow are ordered after the generation load.
 */
static u_int
tc_getgen(struct timehands *th)
{

#ifndef __rtems__
#ifdef SMP
        return (atomic_load_acq_int(&th->th_generation));
#else
        u_int gen;

        /* UP: a compiler barrier is enough to order the reads. */
        gen = th->th_generation;
        __compiler_membar();
        return (gen);
#endif
#else /* __rtems__ */
        return (_Atomic_Load_ulong(&th->th_generation, ATOMIC_ORDER_ACQUIRE));
#endif /* __rtems__ */
}
274
/*
 * Store a timehands generation with release semantics, so all writes to
 * the hand's fields are visible before the new generation is.
 */
static void
tc_setgen(struct timehands *th, u_int newgen)
{

#ifndef __rtems__
#ifdef SMP
        atomic_store_rel_int(&th->th_generation, newgen);
#else
        /* UP: a compiler barrier is enough to order the writes. */
        __compiler_membar();
        th->th_generation = newgen;
#endif
#else /* __rtems__ */
        _Atomic_Store_ulong(&th->th_generation, newgen, ATOMIC_ORDER_RELEASE);
#endif /* __rtems__ */
}
290
291/*
292 * Functions for reading the time.  We have to loop until we are sure that
293 * the timehands that we operated on was not updated under our feet.  See
294 * the comment in <sys/time.h> for a description of these 12 functions.
295 */
296
297#ifdef FFCLOCK
/*
 * Full-precision uptime in bintime format: the tick-time base plus the
 * scaled counter delta since the tick.  The loop retries whenever
 * tc_windup() changed the hands mid-read (generation 0 means "in update").
 */
void
fbclock_binuptime(struct bintime *bt)
{
        struct timehands *th;
        unsigned int gen;

        do {
                th = timehands;
                gen = tc_getgen(th);
                *bt = th->th_offset;
                bintime_addx(bt, th->th_scale * tc_delta(th));
        } while (gen == 0 || gen != tc_getgen(th));
}
311
/* Full-precision uptime, delivered as a timespec. */
void
fbclock_nanouptime(struct timespec *tsp)
{
        struct bintime bt;

        fbclock_binuptime(&bt);
        bintime2timespec(&bt, tsp);
}
320
/* Full-precision uptime, delivered as a timeval. */
void
fbclock_microuptime(struct timeval *tvp)
{
        struct bintime bt;

        fbclock_binuptime(&bt);
        bintime2timeval(&bt, tvp);
}
329
/* Full-precision wall-clock time: uptime plus the boot-time offset. */
void
fbclock_bintime(struct bintime *bt)
{

        fbclock_binuptime(bt);
        bintime_add(bt, &boottimebin);
}
337
/* Full-precision wall-clock time, delivered as a timespec. */
void
fbclock_nanotime(struct timespec *tsp)
{
        struct bintime bt;

        fbclock_bintime(&bt);
        bintime2timespec(&bt, tsp);
}
346
/* Full-precision wall-clock time, delivered as a timeval. */
void
fbclock_microtime(struct timeval *tvp)
{
        struct bintime bt;

        fbclock_bintime(&bt);
        bintime2timeval(&bt, tvp);
}
355
/* Coarse (tick-resolution) uptime: the cached th_offset, no counter read. */
void
fbclock_getbinuptime(struct bintime *bt)
{
        struct timehands *th;
        unsigned int gen;

        do {
                th = timehands;
                gen = tc_getgen(th);
                *bt = th->th_offset;
        } while (gen == 0 || gen != tc_getgen(th));
}
368
/* Coarse uptime, delivered as a timespec. */
void
fbclock_getnanouptime(struct timespec *tsp)
{
        struct timehands *th;
        unsigned int gen;

        do {
                th = timehands;
                gen = tc_getgen(th);
                bintime2timespec(&th->th_offset, tsp);
        } while (gen == 0 || gen != tc_getgen(th));
}
381
/* Coarse uptime, delivered as a timeval. */
void
fbclock_getmicrouptime(struct timeval *tvp)
{
        struct timehands *th;
        unsigned int gen;

        do {
                th = timehands;
                gen = tc_getgen(th);
                bintime2timeval(&th->th_offset, tvp);
        } while (gen == 0 || gen != tc_getgen(th));
}
394
/* Coarse wall-clock time: cached uptime plus the boot-time offset. */
void
fbclock_getbintime(struct bintime *bt)
{
        struct timehands *th;
        unsigned int gen;

        do {
                th = timehands;
                gen = tc_getgen(th);
                *bt = th->th_offset;
        } while (gen == 0 || gen != tc_getgen(th));
        bintime_add(bt, &boottimebin);
}
408
/* Coarse wall-clock time from the cached th_nanotime snapshot. */
void
fbclock_getnanotime(struct timespec *tsp)
{
        struct timehands *th;
        unsigned int gen;

        do {
                th = timehands;
                gen = tc_getgen(th);
                *tsp = th->th_nanotime;
        } while (gen == 0 || gen != tc_getgen(th));
}
421
/* Coarse wall-clock time from the cached th_microtime snapshot. */
void
fbclock_getmicrotime(struct timeval *tvp)
{
        struct timehands *th;
        unsigned int gen;

        do {
                th = timehands;
                gen = tc_getgen(th);
                *tvp = th->th_microtime;
        } while (gen == 0 || gen != tc_getgen(th));
}
434#else /* !FFCLOCK */
/*
 * Full-precision uptime in bintime format: the tick-time base plus the
 * scaled counter delta since the tick.  The loop retries whenever
 * tc_windup() changed the hands mid-read (generation 0 means "in update").
 */
void
binuptime(struct bintime *bt)
{
        struct timehands *th;
        uint32_t gen;

        do {
                th = timehands;
                gen = tc_getgen(th);
                *bt = th->th_offset;
                bintime_addx(bt, th->th_scale * tc_delta(th));
        } while (gen == 0 || gen != tc_getgen(th));
}
448
/* Full-precision uptime, delivered as a timespec. */
void
nanouptime(struct timespec *tsp)
{
        struct bintime bt;

        binuptime(&bt);
        bintime2timespec(&bt, tsp);
}
457
/* Full-precision uptime, delivered as a timeval. */
void
microuptime(struct timeval *tvp)
{
        struct bintime bt;

        binuptime(&bt);
        bintime2timeval(&bt, tvp);
}
466
/* Full-precision wall-clock time: uptime plus the boot-time offset. */
void
bintime(struct bintime *bt)
{

        binuptime(bt);
        bintime_add(bt, &boottimebin);
}
474
/* Full-precision wall-clock time, delivered as a timespec. */
void
nanotime(struct timespec *tsp)
{
        struct bintime bt;

        bintime(&bt);
        bintime2timespec(&bt, tsp);
}
483
/* Full-precision wall-clock time, delivered as a timeval. */
void
microtime(struct timeval *tvp)
{
        struct bintime bt;

        bintime(&bt);
        bintime2timeval(&bt, tvp);
}
492
/* Coarse (tick-resolution) uptime: the cached th_offset, no counter read. */
void
getbinuptime(struct bintime *bt)
{
        struct timehands *th;
        uint32_t gen;

        do {
                th = timehands;
                gen = tc_getgen(th);
                *bt = th->th_offset;
        } while (gen == 0 || gen != tc_getgen(th));
}
505
/* Coarse uptime, delivered as a timespec. */
void
getnanouptime(struct timespec *tsp)
{
        struct timehands *th;
        uint32_t gen;

        do {
                th = timehands;
                gen = tc_getgen(th);
                bintime2timespec(&th->th_offset, tsp);
        } while (gen == 0 || gen != tc_getgen(th));
}
518
/* Coarse uptime, delivered as a timeval. */
void
getmicrouptime(struct timeval *tvp)
{
        struct timehands *th;
        uint32_t gen;

        do {
                th = timehands;
                gen = tc_getgen(th);
                bintime2timeval(&th->th_offset, tvp);
        } while (gen == 0 || gen != tc_getgen(th));
}
531
/* Coarse wall-clock time: cached uptime plus the boot-time offset. */
void
getbintime(struct bintime *bt)
{
        struct timehands *th;
        uint32_t gen;

        do {
                th = timehands;
                gen = tc_getgen(th);
                *bt = th->th_offset;
        } while (gen == 0 || gen != tc_getgen(th));
        bintime_add(bt, &boottimebin);
}
545
/* Coarse wall-clock time from the cached th_nanotime snapshot. */
void
getnanotime(struct timespec *tsp)
{
        struct timehands *th;
        uint32_t gen;

        do {
                th = timehands;
                gen = tc_getgen(th);
                *tsp = th->th_nanotime;
        } while (gen == 0 || gen != tc_getgen(th));
}
558
/* Coarse wall-clock time from the cached th_microtime snapshot. */
void
getmicrotime(struct timeval *tvp)
{
        struct timehands *th;
        uint32_t gen;

        do {
                th = timehands;
                gen = tc_getgen(th);
                *tvp = th->th_microtime;
        } while (gen == 0 || gen != tc_getgen(th));
}
571#endif /* FFCLOCK */
572
573#ifdef FFCLOCK
574/*
575 * Support for feed-forward synchronization algorithms. This is heavily inspired
576 * by the timehands mechanism but kept independent from it. *_windup() functions
577 * have some connection to avoid accessing the timecounter hardware more than
578 * necessary.
579 */
580
/* Feed-forward clock estimates kept updated by the synchronization daemon. */
struct ffclock_estimate ffclock_estimate;
struct bintime ffclock_boottime;        /* Feed-forward boot time estimate. */
uint32_t ffclock_status;                /* Feed-forward clock status. */
int8_t ffclock_updated;                 /* New estimates are available. */
struct mtx ffclock_mtx;                 /* Mutex on ffclock_estimate. */

/*
 * One snapshot of feed-forward clock state; published the same way as
 * timehands, via a ring and a generation counter (gen 0 = being updated).
 */
struct fftimehands {
        struct ffclock_estimate cest;           /* estimates at this tick */
        struct bintime          tick_time;      /* absolute time at tick */
        struct bintime          tick_time_lerp; /* monotonic interpolation */
        ffcounter               tick_ffcount;   /* ffcounter at tick */
        uint64_t                period_lerp;    /* interpolated period */
        volatile uint8_t        gen;            /* 0 while being updated */
        struct fftimehands      *next;          /* next hand in the ring */
};

#define NUM_ELEMENTS(x) (sizeof(x) / sizeof(*x))

static struct fftimehands ffth[10];
static struct fftimehands *volatile fftimehands = ffth;
602
603static void
604ffclock_init(void)
605{
606        struct fftimehands *cur;
607        struct fftimehands *last;
608
609        memset(ffth, 0, sizeof(ffth));
610
611        last = ffth + NUM_ELEMENTS(ffth) - 1;
612        for (cur = ffth; cur < last; cur++)
613                cur->next = cur + 1;
614        last->next = ffth;
615
616        ffclock_updated = 0;
617        ffclock_status = FFCLOCK_STA_UNSYNC;
618        mtx_init(&ffclock_mtx, "ffclock lock", NULL, MTX_DEF);
619}
620
621/*
622 * Reset the feed-forward clock estimates. Called from inittodr() to get things
623 * kick started and uses the timecounter nominal frequency as a first period
624 * estimate. Note: this function may be called several time just after boot.
625 * Note: this is the only function that sets the value of boot time for the
626 * monotonic (i.e. uptime) version of the feed-forward clock.
627 */
void
ffclock_reset_clock(struct timespec *ts)
{
        struct timecounter *tc;
        struct ffclock_estimate cest;

        tc = timehands->th_counter;
        memset(&cest, 0, sizeof(struct ffclock_estimate));

        timespec2bintime(ts, &ffclock_boottime);
        timespec2bintime(ts, &(cest.update_time));
        ffclock_read_counter(&cest.update_ffcount);
        cest.leapsec_next = 0;
        /* Nominal period = 2^64 / frequency, computed without overflow. */
        cest.period = ((1ULL << 63) / tc->tc_frequency) << 1;
        cest.errb_abs = 0;
        cest.errb_rate = 0;
        cest.status = FFCLOCK_STA_UNSYNC;
        cest.leapsec_total = 0;
        cest.leapsec = 0;

        /* Publish under the mutex shared with the synchronization daemon. */
        mtx_lock(&ffclock_mtx);
        bcopy(&cest, &ffclock_estimate, sizeof(struct ffclock_estimate));
        ffclock_updated = INT8_MAX;     /* force next windup to adopt it */
        mtx_unlock(&ffclock_mtx);

        printf("ffclock reset: %s (%llu Hz), time = %ld.%09lu\n", tc->tc_name,
            (unsigned long long)tc->tc_frequency, (long)ts->tv_sec,
            (unsigned long)ts->tv_nsec);
}
657
658/*
659 * Sub-routine to convert a time interval measured in RAW counter units to time
660 * in seconds stored in bintime format.
661 * NOTE: bintime_mul requires u_int, but the value of the ffcounter may be
662 * larger than the max value of u_int (on 32 bit architecture). Loop to consume
663 * extra cycles.
664 */
static void
ffclock_convert_delta(ffcounter ffdelta, uint64_t period, struct bintime *bt)
{
        struct bintime bt2;
        ffcounter delta, delta_max;

        /* Largest chunk bintime_mul() can consume as an unsigned int. */
        delta_max = (1ULL << (8 * sizeof(unsigned int))) - 1;
        bintime_clear(bt);
        /* Accumulate period * ffdelta in chunks of at most delta_max. */
        do {
                if (ffdelta > delta_max)
                        delta = delta_max;
                else
                        delta = ffdelta;
                bt2.sec = 0;
                bt2.frac = period;
                bintime_mul(&bt2, (unsigned int)delta);
                bintime_add(bt, &bt2);
                ffdelta -= delta;
        } while (ffdelta > 0);
}
685
686/*
687 * Update the fftimehands.
688 * Push the tick ffcount and time(s) forward based on current clock estimate.
689 * The conversion from ffcounter to bintime relies on the difference clock
690 * principle, whose accuracy relies on computing small time intervals. If a new
691 * clock estimate has been passed by the synchronisation daemon, make it
692 * current, and compute the linear interpolation for monotonic time if needed.
693 */
static void
ffclock_windup(unsigned int delta)
{
        struct ffclock_estimate *cest;
        struct fftimehands *ffth;
        struct bintime bt, gap_lerp;
        ffcounter ffdelta;
        uint64_t frac;
        unsigned int polling;
        uint8_t forward_jump, ogen;

        /*
         * Pick the next timehand, copy current ffclock estimates and move tick
         * times and counter forward.
         */
        forward_jump = 0;
        ffth = fftimehands->next;
        ogen = ffth->gen;
        ffth->gen = 0;  /* generation 0 makes lock-free readers retry */
        cest = &ffth->cest;
        bcopy(&fftimehands->cest, cest, sizeof(struct ffclock_estimate));
        ffdelta = (ffcounter)delta;
        ffth->period_lerp = fftimehands->period_lerp;

        ffth->tick_time = fftimehands->tick_time;
        ffclock_convert_delta(ffdelta, cest->period, &bt);
        bintime_add(&ffth->tick_time, &bt);

        ffth->tick_time_lerp = fftimehands->tick_time_lerp;
        ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt);
        bintime_add(&ffth->tick_time_lerp, &bt);

        ffth->tick_ffcount = fftimehands->tick_ffcount + ffdelta;

        /*
         * Assess the status of the clock, if the last update is too old, it is
         * likely the synchronisation daemon is dead and the clock is free
         * running.
         */
        if (ffclock_updated == 0) {
                ffdelta = ffth->tick_ffcount - cest->update_ffcount;
                ffclock_convert_delta(ffdelta, cest->period, &bt);
                if (bt.sec > 2 * FFCLOCK_SKM_SCALE)
                        ffclock_status |= FFCLOCK_STA_UNSYNC;
        }

        /*
         * If available, grab updated clock estimates and make them current.
         * Recompute time at this tick using the updated estimates. The clock
         * estimates passed the feed-forward synchronisation daemon may result
         * in time conversion that is not monotonically increasing (just after
         * the update). time_lerp is a particular linear interpolation over the
         * synchronisation algo polling period that ensures monotonicity for the
         * clock ids requesting it.
         */
        if (ffclock_updated > 0) {
                bcopy(&ffclock_estimate, cest, sizeof(struct ffclock_estimate));
                ffdelta = ffth->tick_ffcount - cest->update_ffcount;
                ffth->tick_time = cest->update_time;
                ffclock_convert_delta(ffdelta, cest->period, &bt);
                bintime_add(&ffth->tick_time, &bt);

                /* ffclock_reset sets ffclock_updated to INT8_MAX */
                if (ffclock_updated == INT8_MAX)
                        ffth->tick_time_lerp = ffth->tick_time;

                if (bintime_cmp(&ffth->tick_time, &ffth->tick_time_lerp, >))
                        forward_jump = 1;
                else
                        forward_jump = 0;

                /* gap_lerp = |tick_time - tick_time_lerp| */
                bintime_clear(&gap_lerp);
                if (forward_jump) {
                        gap_lerp = ffth->tick_time;
                        bintime_sub(&gap_lerp, &ffth->tick_time_lerp);
                } else {
                        gap_lerp = ffth->tick_time_lerp;
                        bintime_sub(&gap_lerp, &ffth->tick_time);
                }

                /*
                 * The reset from the RTC clock may be far from accurate, and
                 * reducing the gap between real time and interpolated time
                 * could take a very long time if the interpolated clock insists
                 * on strict monotonicity. The clock is reset under very strict
                 * conditions (kernel time is known to be wrong and
                 * synchronization daemon has been restarted recently).
                 * ffclock_boottime absorbs the jump to ensure boot time is
                 * correct and uptime functions stay consistent.
                 */
                if (((ffclock_status & FFCLOCK_STA_UNSYNC) == FFCLOCK_STA_UNSYNC) &&
                    ((cest->status & FFCLOCK_STA_UNSYNC) == 0) &&
                    ((cest->status & FFCLOCK_STA_WARMUP) == FFCLOCK_STA_WARMUP)) {
                        if (forward_jump)
                                bintime_add(&ffclock_boottime, &gap_lerp);
                        else
                                bintime_sub(&ffclock_boottime, &gap_lerp);
                        ffth->tick_time_lerp = ffth->tick_time;
                        bintime_clear(&gap_lerp);
                }

                ffclock_status = cest->status;
                ffth->period_lerp = cest->period;

                /*
                 * Compute corrected period used for the linear interpolation of
                 * time. The rate of linear interpolation is capped to 5000PPM
                 * (5ms/s).
                 */
                if (bintime_isset(&gap_lerp)) {
                        ffdelta = cest->update_ffcount;
                        ffdelta -= fftimehands->cest.update_ffcount;
                        ffclock_convert_delta(ffdelta, cest->period, &bt);
                        polling = bt.sec;
                        bt.sec = 0;
                        bt.frac = 5000000 * (uint64_t)18446744073LL;
                        bintime_mul(&bt, polling);
                        if (bintime_cmp(&gap_lerp, &bt, >))
                                gap_lerp = bt;

                        /* Approximate 1 sec by 1-(1/2^64) to ease arithmetic */
                        frac = 0;
                        if (gap_lerp.sec > 0) {
                                frac -= 1;
                                frac /= ffdelta / gap_lerp.sec;
                        }
                        frac += gap_lerp.frac / ffdelta;

                        if (forward_jump)
                                ffth->period_lerp += frac;
                        else
                                ffth->period_lerp -= frac;
                }

                ffclock_updated = 0;
        }
        /* Publish: bump generation, skipping the reserved value 0. */
        if (++ogen == 0)
                ogen = 1;
        ffth->gen = ogen;
        fftimehands = ffth;
}
835
/*
 * Adjust the fftimehands when the timecounter is changed. Stating the obvious,
 * the old and new hardware counter cannot be read simultaneously. tc_windup()
 * does read the two counters 'back to back', but a few cycles are effectively
 * lost, and not accumulated in tick_ffcount. This is a fairly radical
 * operation for a feed-forward synchronization daemon, and it is the daemon's
 * job not to push irrelevant data to the kernel. Because there is no locking
 * here, simply force to ignore pending or next update to give the daemon a
 * chance to realize the counter has changed.
 */
static void
ffclock_change_tc(struct timehands *th)
{
        struct fftimehands *ffth;
        struct ffclock_estimate *cest;
        struct timecounter *tc;
        uint8_t ogen;

        tc = th->th_counter;
        ffth = fftimehands->next;
        ogen = ffth->gen;
        ffth->gen = 0;  /* readers retry while this hand is rebuilt */

        cest = &ffth->cest;
        bcopy(&(fftimehands->cest), cest, sizeof(struct ffclock_estimate));
        /* Restart from the new counter's nominal period, marked unsynced. */
        cest->period = ((1ULL << 63) / tc->tc_frequency ) << 1;
        cest->errb_abs = 0;
        cest->errb_rate = 0;
        cest->status |= FFCLOCK_STA_UNSYNC;

        ffth->tick_ffcount = fftimehands->tick_ffcount;
        ffth->tick_time_lerp = fftimehands->tick_time_lerp;
        ffth->tick_time = fftimehands->tick_time;
        ffth->period_lerp = cest->period;

        /* Do not lock but ignore next update from synchronization daemon. */
        ffclock_updated--;

        if (++ogen == 0)
                ogen = 1;       /* generation 0 is reserved for "in update" */
        ffth->gen = ogen;
        fftimehands = ffth;
}
879
880/*
881 * Retrieve feed-forward counter and time of last kernel tick.
882 */
void
ffclock_last_tick(ffcounter *ffcount, struct bintime *bt, uint32_t flags)
{
        struct fftimehands *ffth;
        uint8_t gen;

        /*
         * No locking but check generation has not changed. Also need to make
         * sure ffdelta is positive, i.e. ffcount > tick_ffcount.
         */
        do {
                ffth = fftimehands;
                gen = ffth->gen;
                /* FFCLOCK_LERP selects the monotonic interpolated time. */
                if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP)
                        *bt = ffth->tick_time_lerp;
                else
                        *bt = ffth->tick_time;
                *ffcount = ffth->tick_ffcount;
        } while (gen == 0 || gen != ffth->gen);
}
903
904/*
905 * Absolute clock conversion. Low level function to convert ffcounter to
906 * bintime. The ffcounter is converted using the current ffclock period estimate
907 * or the "interpolated period" to ensure monotonicity.
908 * NOTE: this conversion may have been deferred, and the clock updated since the
909 * hardware counter has been read.
910 */
void
ffclock_convert_abs(ffcounter ffcount, struct bintime *bt, uint32_t flags)
{
        struct fftimehands *ffth;
        struct bintime bt2;
        ffcounter ffdelta;
        uint8_t gen;

        /*
         * No locking but check generation has not changed. Also need to make
         * sure ffdelta is positive, i.e. ffcount > tick_ffcount.
         */
        do {
                ffth = fftimehands;
                gen = ffth->gen;
                /* Take |ffcount - tick_ffcount| so the delta stays positive. */
                if (ffcount > ffth->tick_ffcount)
                        ffdelta = ffcount - ffth->tick_ffcount;
                else
                        ffdelta = ffth->tick_ffcount - ffcount;

                /* FFCLOCK_LERP selects the monotonic interpolated clock. */
                if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP) {
                        *bt = ffth->tick_time_lerp;
                        ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt2);
                } else {
                        *bt = ffth->tick_time;
                        ffclock_convert_delta(ffdelta, ffth->cest.period, &bt2);
                }

                /* Apply the delta with the sign determined above. */
                if (ffcount > ffth->tick_ffcount)
                        bintime_add(bt, &bt2);
                else
                        bintime_sub(bt, &bt2);
        } while (gen == 0 || gen != ffth->gen);
}
945
946/*
947 * Difference clock conversion.
948 * Low level function to Convert a time interval measured in RAW counter units
949 * into bintime. The difference clock allows measuring small intervals much more
950 * reliably than the absolute clock.
951 */
952void
953ffclock_convert_diff(ffcounter ffdelta, struct bintime *bt)
954{
955        struct fftimehands *ffth;
956        uint8_t gen;
957
958        /* No locking but check generation has not changed. */
959        do {
960                ffth = fftimehands;
961                gen = ffth->gen;
962                ffclock_convert_delta(ffdelta, ffth->cest.period, bt);
963        } while (gen == 0 || gen != ffth->gen);
964}
965
966/*
967 * Access to current ffcounter value.
968 */
void
ffclock_read_counter(ffcounter *ffcount)
{
        struct timehands *th;
        struct fftimehands *ffth;
        unsigned int gen, delta;

        /*
         * ffclock_windup() called from tc_windup(), safe to rely on
         * th->th_generation only, for correct delta and ffcounter.
         */
        do {
                th = timehands;
                gen = tc_getgen(th);
                /* fftimehands and the delta must come from the same tick. */
                ffth = fftimehands;
                delta = tc_delta(th);
                *ffcount = ffth->tick_ffcount;
        } while (gen == 0 || gen != tc_getgen(th));

        /* Counter units elapsed since the tick, added outside the loop. */
        *ffcount += delta;
}
990
/* Monotonic uptime as a bintime, from the active system clock. */
void
binuptime(struct bintime *bt)
{

        binuptime_fromclock(bt, sysclock_active);
}
997
/* Monotonic uptime as a timespec, from the active system clock. */
void
nanouptime(struct timespec *tsp)
{

        nanouptime_fromclock(tsp, sysclock_active);
}
1004
/* Monotonic uptime as a timeval, from the active system clock. */
void
microuptime(struct timeval *tvp)
{

        microuptime_fromclock(tvp, sysclock_active);
}
1011
/* Wall-clock time as a bintime, from the active system clock. */
void
bintime(struct bintime *bt)
{

        bintime_fromclock(bt, sysclock_active);
}
1018
/* Wall-clock time as a timespec, from the active system clock. */
void
nanotime(struct timespec *tsp)
{

        nanotime_fromclock(tsp, sysclock_active);
}
1025
/* Wall-clock time as a timeval, from the active system clock. */
void
microtime(struct timeval *tvp)
{

        microtime_fromclock(tvp, sysclock_active);
}
1032
/* Low-cost uptime as a bintime, via the cached get* path. */
void
getbinuptime(struct bintime *bt)
{

        getbinuptime_fromclock(bt, sysclock_active);
}
1039
/* Low-cost uptime as a timespec, via the cached get* path. */
void
getnanouptime(struct timespec *tsp)
{

        getnanouptime_fromclock(tsp, sysclock_active);
}
1046
/* Low-cost uptime as a timeval, via the cached get* path. */
void
getmicrouptime(struct timeval *tvp)
{

        getmicrouptime_fromclock(tvp, sysclock_active);
}
1053
/* Low-cost wall-clock time as a bintime, via the cached get* path. */
void
getbintime(struct bintime *bt)
{

        getbintime_fromclock(bt, sysclock_active);
}
1060
/* Low-cost wall-clock time as a timespec, via the cached get* path. */
void
getnanotime(struct timespec *tsp)
{

        getnanotime_fromclock(tsp, sysclock_active);
}
1067
1068void
1069getmicrotime(struct timeval *tvp)
1070{
1071
1072        getmicrouptime_fromclock(tvp, sysclock_active);
1073}
1074
1075#endif /* FFCLOCK */
1076
1077#ifndef __rtems__
1078/*
1079 * This is a clone of getnanotime and used for walltimestamps.
1080 * The dtrace_ prefix prevents fbt from creating probes for
1081 * it so walltimestamp can be safely used in all fbt probes.
1082 */
1083void
1084dtrace_getnanotime(struct timespec *tsp)
1085{
1086        struct timehands *th;
1087        uint32_t gen;
1088
1089        do {
1090                th = timehands;
1091                gen = tc_getgen(th);
1092                *tsp = th->th_nanotime;
1093        } while (gen == 0 || gen != tc_getgen(th));
1094}
1095#endif /* __rtems__ */
1096
1097#ifdef FFCLOCK
1098/*
1099 * System clock currently providing time to the system. Modifiable via sysctl
1100 * when the FFCLOCK option is defined.
1101 */
1102int sysclock_active = SYSCLOCK_FBCK;
1103#endif
1104
1105/* Internal NTP status and error estimates. */
1106extern int time_status;
1107extern long time_esterror;
1108
1109#ifndef __rtems__
1110/*
1111 * Take a snapshot of sysclock data which can be used to compare system clocks
1112 * and generate timestamps after the fact.
1113 */
1114void
1115sysclock_getsnapshot(struct sysclock_snap *clock_snap, int fast)
1116{
1117        struct fbclock_info *fbi;
1118        struct timehands *th;
1119        struct bintime bt;
1120        unsigned int delta, gen;
1121#ifdef FFCLOCK
1122        ffcounter ffcount;
1123        struct fftimehands *ffth;
1124        struct ffclock_info *ffi;
1125        struct ffclock_estimate cest;
1126
1127        ffi = &clock_snap->ff_info;
1128#endif
1129
1130        fbi = &clock_snap->fb_info;
1131        delta = 0;
1132
1133        do {
1134                th = timehands;
1135                gen = tc_getgen(th);
1136                fbi->th_scale = th->th_scale;
1137                fbi->tick_time = th->th_offset;
1138#ifdef FFCLOCK
1139                ffth = fftimehands;
1140                ffi->tick_time = ffth->tick_time_lerp;
1141                ffi->tick_time_lerp = ffth->tick_time_lerp;
1142                ffi->period = ffth->cest.period;
1143                ffi->period_lerp = ffth->period_lerp;
1144                clock_snap->ffcount = ffth->tick_ffcount;
1145                cest = ffth->cest;
1146#endif
1147                if (!fast)
1148                        delta = tc_delta(th);
1149        } while (gen == 0 || gen != tc_getgen(th));
1150
1151        clock_snap->delta = delta;
1152#ifdef FFCLOCK
1153        clock_snap->sysclock_active = sysclock_active;
1154#endif
1155
1156        /* Record feedback clock status and error. */
1157        clock_snap->fb_info.status = time_status;
1158        /* XXX: Very crude estimate of feedback clock error. */
1159        bt.sec = time_esterror / 1000000;
1160        bt.frac = ((time_esterror - bt.sec) * 1000000) *
1161            (uint64_t)18446744073709ULL;
1162        clock_snap->fb_info.error = bt;
1163
1164#ifdef FFCLOCK
1165        if (!fast)
1166                clock_snap->ffcount += delta;
1167
1168        /* Record feed-forward clock leap second adjustment. */
1169        ffi->leapsec_adjustment = cest.leapsec_total;
1170        if (clock_snap->ffcount > cest.leapsec_next)
1171                ffi->leapsec_adjustment -= cest.leapsec;
1172
1173        /* Record feed-forward clock status and error. */
1174        clock_snap->ff_info.status = cest.status;
1175        ffcount = clock_snap->ffcount - cest.update_ffcount;
1176        ffclock_convert_delta(ffcount, cest.period, &bt);
1177        /* 18446744073709 = int(2^64/1e12), err_bound_rate in [ps/s]. */
1178        bintime_mul(&bt, cest.errb_rate * (uint64_t)18446744073709ULL);
1179        /* 18446744073 = int(2^64 / 1e9), since err_abs in [ns]. */
1180        bintime_addx(&bt, cest.errb_abs * (uint64_t)18446744073ULL);
1181        clock_snap->ff_info.error = bt;
1182#endif
1183}
1184
1185/*
1186 * Convert a sysclock snapshot into a struct bintime based on the specified
1187 * clock source and flags.
1188 */
1189int
1190sysclock_snap2bintime(struct sysclock_snap *cs, struct bintime *bt,
1191    int whichclock, uint32_t flags)
1192{
1193#ifdef FFCLOCK
1194        struct bintime bt2;
1195        uint64_t period;
1196#endif
1197
1198        switch (whichclock) {
1199        case SYSCLOCK_FBCK:
1200                *bt = cs->fb_info.tick_time;
1201
1202                /* If snapshot was created with !fast, delta will be >0. */
1203                if (cs->delta > 0)
1204                        bintime_addx(bt, cs->fb_info.th_scale * cs->delta);
1205
1206                if ((flags & FBCLOCK_UPTIME) == 0)
1207                        bintime_add(bt, &boottimebin);
1208                break;
1209#ifdef FFCLOCK
1210        case SYSCLOCK_FFWD:
1211                if (flags & FFCLOCK_LERP) {
1212                        *bt = cs->ff_info.tick_time_lerp;
1213                        period = cs->ff_info.period_lerp;
1214                } else {
1215                        *bt = cs->ff_info.tick_time;
1216                        period = cs->ff_info.period;
1217                }
1218
1219                /* If snapshot was created with !fast, delta will be >0. */
1220                if (cs->delta > 0) {
1221                        ffclock_convert_delta(cs->delta, period, &bt2);
1222                        bintime_add(bt, &bt2);
1223                }
1224
1225                /* Leap second adjustment. */
1226                if (flags & FFCLOCK_LEAPSEC)
1227                        bt->sec -= cs->ff_info.leapsec_adjustment;
1228
1229                /* Boot time adjustment, for uptime/monotonic clocks. */
1230                if (flags & FFCLOCK_UPTIME)
1231                        bintime_sub(bt, &ffclock_boottime);
1232                break;
1233#endif
1234        default:
1235                return (EINVAL);
1236                break;
1237        }
1238
1239        return (0);
1240}
1241#endif /* __rtems__ */
1242
1243/*
1244 * Initialize a new timecounter and possibly use it.
1245 */
void
tc_init(struct timecounter *tc)
{
#ifndef __rtems__
        uint32_t u;
        struct sysctl_oid *tc_root;

        /*
         * u approximates how often per second the counter wraps (frequency
         * over mask, plus 10% margin).  If it wraps more often than tc_windup
         * runs (hz), deltas would be lost, so demote the counter's quality.
         */
        u = tc->tc_frequency / tc->tc_counter_mask;
        /* XXX: We need some margin here, 10% is a guess */
        u *= 11;
        u /= 10;
        if (u > hz && tc->tc_quality >= 0) {
                tc->tc_quality = -2000;
                if (bootverbose) {
                        printf("Timecounter \"%s\" frequency %ju Hz",
                            tc->tc_name, (uintmax_t)tc->tc_frequency);
                        printf(" -- Insufficient hz, needs at least %u\n", u);
                }
        } else if (tc->tc_quality >= 0 || bootverbose) {
                printf("Timecounter \"%s\" frequency %ju Hz quality %d\n",
                    tc->tc_name, (uintmax_t)tc->tc_frequency,
                    tc->tc_quality);
        }
#endif /* __rtems__ */

        /* Link the counter onto the head of the registered list. */
        tc->tc_next = timecounters;
        timecounters = tc;
#ifndef __rtems__
        /*
         * Set up sysctl tree for this counter.
         */
        tc_root = SYSCTL_ADD_NODE(NULL,
            SYSCTL_STATIC_CHILDREN(_kern_timecounter_tc), OID_AUTO, tc->tc_name,
            CTLFLAG_RW, 0, "timecounter description");
        SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
            "mask", CTLFLAG_RD, &(tc->tc_counter_mask), 0,
            "mask for implemented bits");
        SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
            "counter", CTLTYPE_UINT | CTLFLAG_RD, tc, sizeof(*tc),
            sysctl_kern_timecounter_get, "IU", "current timecounter value");
        SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
            "frequency", CTLTYPE_U64 | CTLFLAG_RD, tc, sizeof(*tc),
             sysctl_kern_timecounter_freq, "QU", "timecounter frequency");
        SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
            "quality", CTLFLAG_RD, &(tc->tc_quality), 0,
            "goodness of time counter");
        /*
         * Never automatically use a timecounter with negative quality.
         * Even though we run on the dummy counter, switching here may be
         * worse since this timecounter may not be monotonous.
         */
        if (tc->tc_quality < 0)
                return;
        if (tc->tc_quality < timecounter->tc_quality)
                return;
        if (tc->tc_quality == timecounter->tc_quality &&
            tc->tc_frequency < timecounter->tc_frequency)
                return;
#endif /* __rtems__ */
        /* Warm up the new counter with two reads before selecting it. */
        (void)tc->tc_get_timecount(tc);
        (void)tc->tc_get_timecount(tc);
        timecounter = tc;
#ifdef __rtems__
        /* On RTEMS, switch the timehands to the new counter immediately. */
        tc_windup();
#endif /* __rtems__ */
}
1312
1313#ifndef __rtems__
1314/* Report the frequency of the current timecounter. */
1315uint64_t
1316tc_getfrequency(void)
1317{
1318
1319        return (timehands->th_counter->tc_frequency);
1320}
1321#endif /* __rtems__ */
1322
1323/*
1324 * Step our concept of UTC.  This is done by modifying our estimate of
1325 * when we booted.
1326 * XXX: not locked.
1327 */
#ifndef __rtems__
void
tc_setclock(struct timespec *ts)
#else /* __rtems__ */
void
_Timecounter_Set_clock(const struct timespec *ts)
#endif /* __rtems__ */
{
#ifndef __rtems__
        struct timespec tbef, taft;
#endif /* __rtems__ */
        struct bintime bt, bt2;

#ifndef __rtems__
        cpu_tick_calibrate(1);
        nanotime(&tbef);
#endif /* __rtems__ */
        /* New boot time = requested UTC time minus current uptime. */
        timespec2bintime(ts, &bt);
        binuptime(&bt2);
        bintime_sub(&bt, &bt2);
        /*
         * NOTE(review): bt2 (uptime + old boot time, i.e. the pre-step UTC
         * time) is computed here but not used below; kept from upstream.
         */
        bintime_add(&bt2, &boottimebin);
        boottimebin = bt;
#ifndef __rtems__
        bintime2timeval(&bt, &boottime);
#endif /* __rtems__ */

        /* XXX fiddle all the little crinkly bits around the fiords... */
        tc_windup();
#ifndef __rtems__
        nanotime(&taft);
        if (timestepwarnings) {
                log(LOG_INFO,
                    "Time stepped from %jd.%09ld to %jd.%09ld (%jd.%09ld)\n",
                    (intmax_t)tbef.tv_sec, tbef.tv_nsec,
                    (intmax_t)taft.tv_sec, taft.tv_nsec,
                    (intmax_t)ts->tv_sec, ts->tv_nsec);
        }
        cpu_tick_calibrate(1);
#endif /* __rtems__ */
}
1368
1369/*
1370 * Initialize the next struct timehands in the ring and make
1371 * it the active timehands.  Along the way we might switch to a different
1372 * timecounter and/or do seconds processing in NTP.  Slightly magic.
1373 */
static void
tc_windup(void)
{
        struct bintime bt;
        struct timehands *th, *tho;
        uint64_t scale;
        uint32_t delta, ncount, ogen;
        int i;
        time_t t;
#ifdef __rtems__
        ISR_lock_Context lock_context;

        /* Serialize windup against other writers on RTEMS. */
        _ISR_lock_ISR_disable_and_acquire(&_Timecounter_Lock, &lock_context);
#endif /* __rtems__ */

        /*
         * Make the next timehands a copy of the current one, but do not
         * overwrite the generation or next pointer.  While we update
         * the contents, the generation must be zero.
         */
        tho = timehands;
        th = tho->th_next;
        ogen = th->th_generation;
        /* Generation 0 makes lock-free readers retry while we update. */
        tc_setgen(th, 0);
        bcopy(tho, th, offsetof(struct timehands, th_generation));

        /*
         * Capture a timecounter delta on the current timecounter and if
         * changing timecounters, a counter value from the new timecounter.
         * Update the offset fields accordingly.
         */
        delta = tc_delta(th);
        if (th->th_counter != timecounter)
                ncount = timecounter->tc_get_timecount(timecounter);
        else
                ncount = 0;
#ifdef FFCLOCK
        ffclock_windup(delta);
#endif
        th->th_offset_count += delta;
        th->th_offset_count &= th->th_counter->tc_counter_mask;
        while (delta > th->th_counter->tc_frequency) {
                /* Eat complete unadjusted seconds. */
                delta -= th->th_counter->tc_frequency;
                th->th_offset.sec++;
        }
        if ((delta > th->th_counter->tc_frequency / 2) &&
            (th->th_scale * delta < ((uint64_t)1 << 63))) {
                /* The product th_scale * delta just barely overflows. */
                th->th_offset.sec++;
        }
        bintime_addx(&th->th_offset, th->th_scale * delta);

        /*
         * Hardware latching timecounters may not generate interrupts on
         * PPS events, so instead we poll them.  There is a finite risk that
         * the hardware might capture a count which is later than the one we
         * got above, and therefore possibly in the next NTP second which might
         * have a different rate than the current NTP second.  It doesn't
         * matter in practice.
         */
        if (tho->th_counter->tc_poll_pps)
                tho->th_counter->tc_poll_pps(tho->th_counter);

        /*
         * Deal with NTP second processing.  The for loop normally
         * iterates at most once, but in extreme situations it might
         * keep NTP sane if timeouts are not run for several seconds.
         * At boot, the time step can be large when the TOD hardware
         * has been read, so on really large steps, we call
         * ntp_update_second only twice.  We need to call it twice in
         * case we missed a leap second.
         */
        bt = th->th_offset;
        bintime_add(&bt, &boottimebin);
        i = bt.sec - tho->th_microtime.tv_sec;
        if (i > LARGE_STEP)
                i = 2;
        for (; i > 0; i--) {
                t = bt.sec;
                ntp_update_second(&th->th_adjustment, &bt.sec);
                /* A leap second moves UTC; fold it into the boot time. */
                if (bt.sec != t)
                        boottimebin.sec += bt.sec - t;
        }
        /* Update the UTC timestamps used by the get*() functions. */
        /* XXX shouldn't do this here.  Should force non-`get' versions. */
        bintime2timeval(&bt, &th->th_microtime);
        bintime2timespec(&bt, &th->th_nanotime);

        /* Now is a good time to change timecounters. */
        if (th->th_counter != timecounter) {
#ifndef __rtems__
#ifndef __arm__
                if ((timecounter->tc_flags & TC_FLAGS_C2STOP) != 0)
                        cpu_disable_c2_sleep++;
                if ((th->th_counter->tc_flags & TC_FLAGS_C2STOP) != 0)
                        cpu_disable_c2_sleep--;
#endif
#endif /* __rtems__ */
                th->th_counter = timecounter;
                th->th_offset_count = ncount;
#ifndef __rtems__
                tc_min_ticktock_freq = max(1, timecounter->tc_frequency /
                    (((uint64_t)timecounter->tc_counter_mask + 1) / 3));
#endif /* __rtems__ */
#ifdef FFCLOCK
                ffclock_change_tc(th);
#endif
        }

        /*-
         * Recalculate the scaling factor.  We want the number of 1/2^64
         * fractions of a second per period of the hardware counter, taking
         * into account the th_adjustment factor which the NTP PLL/adjtime(2)
         * processing provides us with.
         *
         * The th_adjustment is nanoseconds per second with 32 bit binary
         * fraction and we want 64 bit binary fraction of second:
         *
         *       x = a * 2^32 / 10^9 = a * 4.294967296
         *
         * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
         * we can only multiply by about 850 without overflowing, that
         * leaves no suitably precise fractions for multiply before divide.
         *
         * Divide before multiply with a fraction of 2199/512 results in a
         * systematic undercompensation of 10PPM of th_adjustment.  On a
         * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
         *
         * We happily sacrifice the lowest of the 64 bits of our result
         * to the goddess of code clarity.
         *
         */
        scale = (uint64_t)1 << 63;
        scale += (th->th_adjustment / 1024) * 2199;
        scale /= th->th_counter->tc_frequency;
        th->th_scale = scale * 2;

        /*
         * Now that the struct timehands is again consistent, set the new
         * generation number, making sure to not make it zero.
         */
        if (++ogen == 0)
                ogen = 1;
        tc_setgen(th, ogen);

        /* Go live with the new struct timehands. */
#ifdef FFCLOCK
        switch (sysclock_active) {
        case SYSCLOCK_FBCK:
#endif
                time_second = th->th_microtime.tv_sec;
                time_uptime = th->th_offset.sec;
#ifdef FFCLOCK
                break;
        case SYSCLOCK_FFWD:
                time_second = fftimehands->tick_time_lerp.sec;
                time_uptime = fftimehands->tick_time_lerp.sec - ffclock_boottime.sec;
                break;
        }
#endif

        /* Publish the new timehands; lock-free readers pick it up here. */
        timehands = th;
#ifndef __rtems__
        timekeep_push_vdso();
#endif /* __rtems__ */
#ifdef __rtems__
        _ISR_lock_Release_and_ISR_enable(&_Timecounter_Lock, &lock_context);
#endif /* __rtems__ */
}
1544
1545#ifndef __rtems__
1546/* Report or change the active timecounter hardware. */
1547static int
1548sysctl_kern_timecounter_hardware(SYSCTL_HANDLER_ARGS)
1549{
1550        char newname[32];
1551        struct timecounter *newtc, *tc;
1552        int error;
1553
1554        tc = timecounter;
1555        strlcpy(newname, tc->tc_name, sizeof(newname));
1556
1557        error = sysctl_handle_string(oidp, &newname[0], sizeof(newname), req);
1558        if (error != 0 || req->newptr == NULL ||
1559            strcmp(newname, tc->tc_name) == 0)
1560                return (error);
1561        for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) {
1562                if (strcmp(newname, newtc->tc_name) != 0)
1563                        continue;
1564
1565                /* Warm up new timecounter. */
1566                (void)newtc->tc_get_timecount(newtc);
1567                (void)newtc->tc_get_timecount(newtc);
1568
1569                timecounter = newtc;
1570
1571                /*
1572                 * The vdso timehands update is deferred until the next
1573                 * 'tc_windup()'.
1574                 *
1575                 * This is prudent given that 'timekeep_push_vdso()' does not
1576                 * use any locking and that it can be called in hard interrupt
1577                 * context via 'tc_windup()'.
1578                 */
1579                return (0);
1580        }
1581        return (EINVAL);
1582}
1583
/* Register "kern.timecounter.hardware": report/select the active counter. */
SYSCTL_PROC(_kern_timecounter, OID_AUTO, hardware, CTLTYPE_STRING | CTLFLAG_RW,
    0, 0, sysctl_kern_timecounter_hardware, "A",
    "Timecounter hardware selected");
1587
1588
1589/* Report or change the active timecounter hardware. */
1590static int
1591sysctl_kern_timecounter_choice(SYSCTL_HANDLER_ARGS)
1592{
1593        struct sbuf sb;
1594        struct timecounter *tc;
1595        int error;
1596
1597        sbuf_new_for_sysctl(&sb, NULL, 0, req);
1598        for (tc = timecounters; tc != NULL; tc = tc->tc_next) {
1599                if (tc != timecounters)
1600                        sbuf_putc(&sb, ' ');
1601                sbuf_printf(&sb, "%s(%d)", tc->tc_name, tc->tc_quality);
1602        }
1603        error = sbuf_finish(&sb);
1604        sbuf_delete(&sb);
1605        return (error);
1606}
1607
/* Register "kern.timecounter.choice": list all registered counters. */
SYSCTL_PROC(_kern_timecounter, OID_AUTO, choice, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, sysctl_kern_timecounter_choice, "A", "Timecounter hardware detected");
1610#endif /* __rtems__ */
1611
1612#ifndef __rtems__
1613/*
1614 * RFC 2783 PPS-API implementation.
1615 */
1616
1617/*
1618 *  Return true if the driver is aware of the abi version extensions in the
1619 *  pps_state structure, and it supports at least the given abi version number.
1620 */
1621static inline int
1622abi_aware(struct pps_state *pps, int vers)
1623{
1624
1625        return ((pps->kcmode & KCMODE_ABIFLAG) && pps->driver_abi >= vers);
1626}
1627
/*
 * Return the most recent PPS event data, optionally blocking until either
 * edge's sequence number advances (time_pps_fetch() backend).
 */
static int
pps_fetch(struct pps_fetch_args *fapi, struct pps_state *pps)
{
        int err, timo;
        pps_seq_t aseq, cseq;
        struct timeval tv;

        /* Only the timespec timestamp format is supported. */
        if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
                return (EINVAL);

        /*
         * If no timeout is requested, immediately return whatever values were
         * most recently captured.  If timeout seconds is -1, that's a request
         * to block without a timeout.  WITNESS won't let us sleep forever
         * without a lock (we really don't need a lock), so just repeatedly
         * sleep a long time.
         */
        if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec) {
                if (fapi->timeout.tv_sec == -1)
                        timo = 0x7fffffff;
                else {
                        tv.tv_sec = fapi->timeout.tv_sec;
                        tv.tv_usec = fapi->timeout.tv_nsec / 1000;
                        timo = tvtohz(&tv);
                }
                aseq = pps->ppsinfo.assert_sequence;
                cseq = pps->ppsinfo.clear_sequence;
                /* Sleep until either sequence number moves on. */
                while (aseq == pps->ppsinfo.assert_sequence &&
                    cseq == pps->ppsinfo.clear_sequence) {
                        /* Honor the driver's mutex (spin or sleep) if any. */
                        if (abi_aware(pps, 1) && pps->driver_mtx != NULL) {
                                if (pps->flags & PPSFLAG_MTX_SPIN) {
                                        err = msleep_spin(pps, pps->driver_mtx,
                                            "ppsfch", timo);
                                } else {
                                        err = msleep(pps, pps->driver_mtx, PCATCH,
                                            "ppsfch", timo);
                                }
                        } else {
                                err = tsleep(pps, PCATCH, "ppsfch", timo);
                        }
                        /* For an infinite wait, a timeout just re-sleeps. */
                        if (err == EWOULDBLOCK && fapi->timeout.tv_sec == -1) {
                                continue;
                        } else if (err != 0) {
                                return (err);
                        }
                }
        }

        /* Hand out the latest captured info. */
        pps->ppsinfo.current_mode = pps->ppsparam.mode;
        fapi->pps_info_buf = pps->ppsinfo;

        return (0);
}
1681
/*
 * RFC 2783 PPS-API ioctl dispatcher: create/destroy, parameter get/set,
 * capability query, event fetch and kernel consumer binding.
 */
int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
        pps_params_t *app;
        struct pps_fetch_args *fapi;
#ifdef FFCLOCK
        struct pps_fetch_ffc_args *fapi_ffc;
#endif
#ifdef PPS_SYNC
        struct pps_kcbind_args *kapi;
#endif

        KASSERT(pps != NULL, ("NULL pps pointer in pps_ioctl"));
        switch (cmd) {
        case PPS_IOC_CREATE:
                return (0);
        case PPS_IOC_DESTROY:
                return (0);
        case PPS_IOC_SETPARAMS:
                app = (pps_params_t *)data;
                /* Reject modes the provider did not advertise. */
                if (app->mode & ~pps->ppscap)
                        return (EINVAL);
#ifdef FFCLOCK
                /* Ensure only a single clock is selected for ffc timestamp. */
                if ((app->mode & PPS_TSCLK_MASK) == PPS_TSCLK_MASK)
                        return (EINVAL);
#endif
                pps->ppsparam = *app;
                return (0);
        case PPS_IOC_GETPARAMS:
                app = (pps_params_t *)data;
                *app = pps->ppsparam;
                app->api_version = PPS_API_VERS_1;
                return (0);
        case PPS_IOC_GETCAP:
                *(int*)data = pps->ppscap;
                return (0);
        case PPS_IOC_FETCH:
                fapi = (struct pps_fetch_args *)data;
                return (pps_fetch(fapi, pps));
#ifdef FFCLOCK
        case PPS_IOC_FETCH_FFCOUNTER:
                fapi_ffc = (struct pps_fetch_ffc_args *)data;
                if (fapi_ffc->tsformat && fapi_ffc->tsformat !=
                    PPS_TSFMT_TSPEC)
                        return (EINVAL);
                /* Blocking fetch is not supported for the ffcounter. */
                if (fapi_ffc->timeout.tv_sec || fapi_ffc->timeout.tv_nsec)
                        return (EOPNOTSUPP);
                pps->ppsinfo_ffc.current_mode = pps->ppsparam.mode;
                fapi_ffc->pps_info_buf_ffc = pps->ppsinfo_ffc;
                /* Overwrite timestamps if feedback clock selected. */
                switch (pps->ppsparam.mode & PPS_TSCLK_MASK) {
                case PPS_TSCLK_FBCK:
                        fapi_ffc->pps_info_buf_ffc.assert_timestamp =
                            pps->ppsinfo.assert_timestamp;
                        fapi_ffc->pps_info_buf_ffc.clear_timestamp =
                            pps->ppsinfo.clear_timestamp;
                        break;
                case PPS_TSCLK_FFWD:
                        break;
                default:
                        break;
                }
                return (0);
#endif /* FFCLOCK */
        case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
                /* Bind the in-kernel PPS consumer (hardpps) to an edge. */
                kapi = (struct pps_kcbind_args *)data;
                /* XXX Only root should be able to do this */
                if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
                        return (EINVAL);
                if (kapi->kernel_consumer != PPS_KC_HARDPPS)
                        return (EINVAL);
                if (kapi->edge & ~pps->ppscap)
                        return (EINVAL);
                pps->kcmode = (kapi->edge & KCMODE_EDGEMASK) |
                    (pps->kcmode & KCMODE_ABIFLAG);
                return (0);
#else
                return (EOPNOTSUPP);
#endif
        default:
                return (ENOIOCTL);
        }
}
1767
1768void
1769pps_init(struct pps_state *pps)
1770{
1771        pps->ppscap |= PPS_TSFMT_TSPEC | PPS_CANWAIT;
1772        if (pps->ppscap & PPS_CAPTUREASSERT)
1773                pps->ppscap |= PPS_OFFSETASSERT;
1774        if (pps->ppscap & PPS_CAPTURECLEAR)
1775                pps->ppscap |= PPS_OFFSETCLEAR;
1776#ifdef FFCLOCK
1777        pps->ppscap |= PPS_TSCLK_MASK;
1778#endif
1779        pps->kcmode &= ~KCMODE_ABIFLAG;
1780}
1781
1782void
1783pps_init_abi(struct pps_state *pps)
1784{
1785
1786        pps_init(pps);
1787        if (pps->driver_abi > 0) {
1788                pps->kcmode |= KCMODE_ABIFLAG;
1789                pps->kernel_abi = PPS_ABI_VERSION;
1790        }
1791}
1792
/*
 * Capture the raw material for a PPS timestamp: the current timehands, its
 * generation and a raw counter read.  Kept minimal so it can run in the
 * capture interrupt path; conversion is deferred to pps_event().
 */
void
pps_capture(struct pps_state *pps)
{
        struct timehands *th;

        KASSERT(pps != NULL, ("NULL pps pointer in pps_capture"));
        th = timehands;
        pps->capgen = tc_getgen(th);
        pps->capth = th;
#ifdef FFCLOCK
        pps->capffth = fftimehands;
#endif
        pps->capcount = th->th_counter->tc_get_timecount(th->th_counter);
        /* A generation change means the capture is inconsistent; void it. */
        if (pps->capgen != tc_getgen(th))
                pps->capgen = 0;
}
1809
/*
 * Process a PPS edge previously recorded by pps_capture(): convert the
 * captured counter value to a timespec, store it in the assert/clear
 * slot selected by 'event', and optionally feed the interval to the
 * kernel NTP PLL/FLL via hardpps().  Bails out (dropping the event)
 * whenever the timecounter generation changed under the capture.
 */
void
pps_event(struct pps_state *pps, int event)
{
	struct bintime bt;
	struct timespec ts, *tsp, *osp;
	uint32_t tcount, *pcount;
	int foff, fhard;
	pps_seq_t *pseq;
#ifdef FFCLOCK
	struct timespec *tsp_ffc;
	pps_seq_t *pseq_ffc;
	ffcounter *ffcount;
#endif

	KASSERT(pps != NULL, ("NULL pps pointer in pps_event"));
	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen == 0 || pps->capgen != tc_getgen(pps->capth))
		return;

	/* Things would be easier with arrays. */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
#ifdef FFCLOCK
		ffcount = &pps->ppsinfo_ffc.assert_ffcount;
		tsp_ffc = &pps->ppsinfo_ffc.assert_timestamp;
		pseq_ffc = &pps->ppsinfo_ffc.assert_sequence;
#endif
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
#ifdef FFCLOCK
		ffcount = &pps->ppsinfo_ffc.clear_ffcount;
		tsp_ffc = &pps->ppsinfo_ffc.clear_timestamp;
		pseq_ffc = &pps->ppsinfo_ffc.clear_sequence;
#endif
	}

	/*
	 * If the timecounter changed, we cannot compare the count values, so
	 * we have to drop the rest of the PPS-stuff until the next event.
	 */
	if (pps->ppstc != pps->capth->th_counter) {
		pps->ppstc = pps->capth->th_counter;
		*pcount = pps->capcount;
		pps->ppscount[2] = pps->capcount;
		return;
	}

	/* Convert the count to a timespec. */
	tcount = pps->capcount - pps->capth->th_offset_count;
	tcount &= pps->capth->th_counter->tc_counter_mask;
	bt = pps->capth->th_offset;
	bintime_addx(&bt, pps->capth->th_scale * tcount);
	/* th_offset is uptime; add the boot time to get UTC. */
	bintime_add(&bt, &boottimebin);
	bintime2timespec(&bt, &ts);

	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen != tc_getgen(pps->capth))
		return;

	*pcount = pps->capcount;
	(*pseq)++;
	*tsp = ts;

	/* Apply the user-configured assert/clear offset, if enabled. */
	if (foff) {
		timespecadd(tsp, osp);
		/* Renormalize after a possibly negative offset. */
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}

#ifdef FFCLOCK
	*ffcount = pps->capffth->tick_ffcount + tcount;
	/*
	 * NOTE(review): bt is assigned tick_time here and then appears to
	 * be overwritten by ffclock_convert_delta() before tick_time is
	 * added again below -- the first assignment looks redundant;
	 * confirm against ffclock_convert_delta()'s contract.
	 */
	bt = pps->capffth->tick_time;
	ffclock_convert_delta(tcount, pps->capffth->cest.period, &bt);
	bintime_add(&bt, &pps->capffth->tick_time);
	bintime2timespec(&bt, &ts);
	(*pseq_ffc)++;
	*tsp_ffc = ts;
#endif

#ifdef PPS_SYNC
	if (fhard) {
		uint64_t scale;

		/*
		 * Feed the NTP PLL/FLL.
		 * The FLL wants to know how many (hardware) nanoseconds
		 * elapsed since the previous event.
		 */
		tcount = pps->capcount - pps->ppscount[2];
		pps->ppscount[2] = pps->capcount;
		tcount &= pps->capth->th_counter->tc_counter_mask;
		/* scale = 2^64 / tc_frequency, built without overflow. */
		scale = (uint64_t)1 << 63;
		scale /= pps->capth->th_counter->tc_frequency;
		scale *= 2;
		bt.sec = 0;
		bt.frac = 0;
		bintime_addx(&bt, scale * tcount);
		bintime2timespec(&bt, &ts);
		hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec);
	}
#endif

	/* Wakeup anyone sleeping in pps_fetch().  */
	wakeup(pps);
}
1927#else /* __rtems__ */
1928/* FIXME: https://devel.rtems.org/ticket/2349 */
1929#endif /* __rtems__ */
1930
1931/*
1932 * Timecounters need to be updated every so often to prevent the hardware
1933 * counter from overflowing.  Updating also recalculates the cached values
1934 * used by the get*() family of functions, so their precision depends on
1935 * the update frequency.
1936 */
1937
1938#ifndef __rtems__
1939static int tc_tick;
1940SYSCTL_INT(_kern_timecounter, OID_AUTO, tick, CTLFLAG_RD, &tc_tick, 0,
1941    "Approximate number of hardclock ticks in a millisecond");
1942#endif /* __rtems__ */
1943
#ifndef __rtems__
/*
 * Periodic tick entry point, called from hardclock().  Accumulates
 * hardclock ticks and winds up the timecounter once every tc_tick ticks.
 */
void
tc_ticktock(int cnt)
{
	static int count;

	count += cnt;
	if (count < tc_tick)
		return;
	count = 0;
#else /* __rtems__ */
/*
 * RTEMS clock tick service: wind up the timecounter on every tick and
 * then run the watchdog tick.
 */
void
_Timecounter_Tick(void)
{
#endif /* __rtems__ */
	tc_windup();
#ifdef __rtems__
	_Watchdog_Tick();
#endif /* __rtems__ */
}
#ifdef __rtems__
/*
 * Simple clock tick for clock drivers that supply the counter delta and
 * new reference count themselves.  Unlike tc_windup() this updates the
 * current timehands in place and never switches to the next timehands
 * entry; updates are serialized by _Timecounter_Lock.
 *
 * delta:  hardware counter ticks elapsed since the previous call.
 * offset: new reference counter value stored in th_offset_count.
 *
 * NOTE(review): th_generation is bumped only after the update; it is not
 * zeroed while fields are being written as tc_windup() does -- presumably
 * readers are protected by the interrupt disable here; confirm against
 * the reader-side generation protocol.
 */
void
_Timecounter_Tick_simple(uint32_t delta, uint32_t offset)
{
	struct bintime bt;
	struct timehands *th;
	uint32_t ogen;
	ISR_lock_Context lock_context;

	_ISR_lock_ISR_disable_and_acquire(&_Timecounter_Lock, &lock_context);

	th = timehands;
	ogen = th->th_generation;
	th->th_offset_count = offset;
	/* Advance uptime by delta counter ticks scaled to fractions. */
	bintime_addx(&th->th_offset, th->th_scale * delta);

	bt = th->th_offset;
	bintime_add(&bt, &boottimebin);

	/* Update the UTC timestamps used by the get*() functions. */
	/* XXX shouldn't do this here.  Should force non-`get' versions. */
	bintime2timeval(&bt, &th->th_microtime);
	bintime2timespec(&bt, &th->th_nanotime);

	/*
	 * Now that the struct timehands is again consistent, set the new
	 * generation number, making sure to not make it zero.
	 */
	if (++ogen == 0)
		ogen = 1;
	th->th_generation = ogen;

	/* Go live with the new struct timehands. */
	time_second = th->th_microtime.tv_sec;
	time_uptime = th->th_offset.sec;

	_ISR_lock_Release_and_ISR_enable(&_Timecounter_Lock, &lock_context);

	_Watchdog_Tick();
}
#endif /* __rtems__ */
2005
2006#ifndef __rtems__
2007static void __inline
2008tc_adjprecision(void)
2009{
2010        int t;
2011
2012        if (tc_timepercentage > 0) {
2013                t = (99 + tc_timepercentage) / tc_timepercentage;
2014                tc_precexp = fls(t + (t >> 1)) - 1;
2015                FREQ2BT(hz / tc_tick, &bt_timethreshold);
2016                FREQ2BT(hz, &bt_tickthreshold);
2017                bintime_shift(&bt_timethreshold, tc_precexp);
2018                bintime_shift(&bt_tickthreshold, tc_precexp);
2019        } else {
2020                tc_precexp = 31;
2021                bt_timethreshold.sec = INT_MAX;
2022                bt_timethreshold.frac = ~(uint64_t)0;
2023                bt_tickthreshold = bt_timethreshold;
2024        }
2025        sbt_timethreshold = bttosbt(bt_timethreshold);
2026        sbt_tickthreshold = bttosbt(bt_tickthreshold);
2027}
2028#endif /* __rtems__ */
2029
2030#ifndef __rtems__
2031static int
2032sysctl_kern_timecounter_adjprecision(SYSCTL_HANDLER_ARGS)
2033{
2034        int error, val;
2035
2036        val = tc_timepercentage;
2037        error = sysctl_handle_int(oidp, &val, 0, req);
2038        if (error != 0 || req->newptr == NULL)
2039                return (error);
2040        tc_timepercentage = val;
2041        if (cold)
2042                goto done;
2043        tc_adjprecision();
2044done:
2045        return (0);
2046}
2047#endif /* __rtems__ */
2048
/*
 * One-time timecounter initialization: on FreeBSD, derive tc_tick and
 * the tick/precision bintimes from hz; on both systems, prime the
 * hardware counter and perform the first windup.
 */
#ifndef __rtems__
static void
inittimecounter(void *dummy)
#else /* __rtems__ */
void
_Timecounter_Initialize(void)
#endif /* __rtems__ */
{
#ifndef __rtems__
	u_int p;
	int tick_rate;

	/*
	 * Set the initial timeout to
	 * max(1, <approx. number of hardclock ticks in a millisecond>).
	 * People should probably not use the sysctl to set the timeout
	 * to smaller than its initial value, since that value is the
	 * smallest reasonable one.  If they want better timestamps they
	 * should use the non-"get"* functions.
	 */
	if (hz > 1000)
		tc_tick = (hz + 500) / 1000;
	else
		tc_tick = 1;
	tc_adjprecision();
	FREQ2BT(hz, &tick_bt);
	tick_sbt = bttosbt(tick_bt);
	tick_rate = hz / tc_tick;
	FREQ2BT(tick_rate, &tc_tick_bt);
	tc_tick_sbt = bttosbt(tc_tick_bt);
	p = (tc_tick * 1000000) / hz;
	printf("Timecounters tick every %d.%03u msec\n", p / 1000, p % 1000);
#endif /* __rtems__ */

#ifdef FFCLOCK
	ffclock_init();
#endif
	/* warm up new timecounter (again) and get rolling. */
	(void)timecounter->tc_get_timecount(timecounter);
	(void)timecounter->tc_get_timecount(timecounter);
	tc_windup();
}
2091
#ifndef __rtems__
/* Run inittimecounter() during the FreeBSD boot sequence. */
SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL);
#endif /* __rtems__ */
2095
#ifndef __rtems__
/* Cpu tick handling -------------------------------------------------*/

/* Nonzero if the cpu tick source has a variable rate needing calibration. */
static int cpu_tick_variable;
/* Last calibrated (or externally supplied) cpu tick frequency in Hz. */
static uint64_t cpu_tick_frequency;
2101
/*
 * Default cpu tick source: widen the active timecounter's (possibly
 * narrow) hardware counter to a monotonically increasing 64-bit value
 * by detecting wrap-arounds.
 */
static uint64_t
tc_cpu_ticks(void)
{
	static uint64_t base;
	static unsigned last;
	unsigned u;
	struct timecounter *tc;

	tc = timehands->th_counter;
	u = tc->tc_get_timecount(tc) & tc->tc_counter_mask;
	/*
	 * NOTE(review): the static base/last state is unsynchronized and
	 * at most one counter wrap per call interval is detectable --
	 * presumably callers invoke this often enough and serialized;
	 * confirm against the call sites.
	 */
	if (u < last)
		base += (uint64_t)tc->tc_counter_mask + 1;
	last = u;
	return (u + base);
}
2117
2118void
2119cpu_tick_calibration(void)
2120{
2121        static time_t last_calib;
2122
2123        if (time_uptime != last_calib && !(time_uptime & 0xf)) {
2124                cpu_tick_calibrate(0);
2125                last_calib = time_uptime;
2126        }
2127}
2128
2129/*
2130 * This function gets called every 16 seconds on only one designated
2131 * CPU in the system from hardclock() via cpu_tick_calibration()().
2132 *
2133 * Whenever the real time clock is stepped we get called with reset=1
2134 * to make sure we handle suspend/resume and similar events correctly.
2135 */
2136
2137static void
2138cpu_tick_calibrate(int reset)
2139{
2140        static uint64_t c_last;
2141        uint64_t c_this, c_delta;
2142        static struct bintime  t_last;
2143        struct bintime t_this, t_delta;
2144        uint32_t divi;
2145
2146        if (reset) {
2147                /* The clock was stepped, abort & reset */
2148                t_last.sec = 0;
2149                return;
2150        }
2151
2152        /* we don't calibrate fixed rate cputicks */
2153        if (!cpu_tick_variable)
2154                return;
2155
2156        getbinuptime(&t_this);
2157        c_this = cpu_ticks();
2158        if (t_last.sec != 0) {
2159                c_delta = c_this - c_last;
2160                t_delta = t_this;
2161                bintime_sub(&t_delta, &t_last);
2162                /*
2163                 * Headroom:
2164                 *      2^(64-20) / 16[s] =
2165                 *      2^(44) / 16[s] =
2166                 *      17.592.186.044.416 / 16 =
2167                 *      1.099.511.627.776 [Hz]
2168                 */
2169                divi = t_delta.sec << 20;
2170                divi |= t_delta.frac >> (64 - 20);
2171                c_delta <<= 20;
2172                c_delta /= divi;
2173                if (c_delta > cpu_tick_frequency) {
2174                        if (0 && bootverbose)
2175                                printf("cpu_tick increased to %ju Hz\n",
2176                                    c_delta);
2177                        cpu_tick_frequency = c_delta;
2178                }
2179        }
2180        c_last = c_this;
2181        t_last = t_this;
2182}
2183
2184void
2185set_cputicker(cpu_tick_f *func, uint64_t freq, unsigned var)
2186{
2187
2188        if (func == NULL) {
2189                cpu_ticks = tc_cpu_ticks;
2190        } else {
2191                cpu_tick_frequency = freq;
2192                cpu_tick_variable = var;
2193                cpu_ticks = func;
2194        }
2195}
2196
2197uint64_t
2198cpu_tickrate(void)
2199{
2200
2201        if (cpu_ticks == tc_cpu_ticks) 
2202                return (tc_getfrequency());
2203        return (cpu_tick_frequency);
2204}
2205
2206/*
2207 * We need to be slightly careful converting cputicks to microseconds.
2208 * There is plenty of margin in 64 bits of microseconds (half a million
2209 * years) and in 64 bits at 4 GHz (146 years), but if we do a multiply
2210 * before divide conversion (to retain precision) we find that the
2211 * margin shrinks to 1.5 hours (one millionth of 146y).
2212 * With a three prong approach we never lose significant bits, no
2213 * matter what the cputick rate and length of timeinterval is.
2214 */
2215
/*
 * Convert cpu ticks to microseconds using the three-prong scheme
 * described above: pick the multiply/divide order that keeps the
 * intermediate product inside 64 bits for the given magnitude of
 * 'tick', so no significant bits are lost.
 */
uint64_t
cputick2usec(uint64_t tick)
{
	uint64_t freq;

	freq = cpu_tickrate();
	if (tick > 18446744073709551LL)		/* floor(2^64 / 1000) */
		return (tick / (freq / 1000000LL));
	if (tick > 18446744073709LL)		/* floor(2^64 / 1000000) */
		return ((tick * 1000LL) / (freq / 1000LL));
	return ((tick * 1000000LL) / freq);
}
2227
/* Active cpu tick source; defaults to the timecounter-derived reader. */
cpu_tick_f	*cpu_ticks = tc_cpu_ticks;
#endif /* __rtems__ */
2230
2231#ifndef __rtems__
2232static int vdso_th_enable = 1;
2233static int
2234sysctl_fast_gettime(SYSCTL_HANDLER_ARGS)
2235{
2236        int old_vdso_th_enable, error;
2237
2238        old_vdso_th_enable = vdso_th_enable;
2239        error = sysctl_handle_int(oidp, &old_vdso_th_enable, 0, req);
2240        if (error != 0)
2241                return (error);
2242        vdso_th_enable = old_vdso_th_enable;
2243        return (0);
2244}
2245SYSCTL_PROC(_kern_timecounter, OID_AUTO, fast_gettime,
2246    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
2247    NULL, 0, sysctl_fast_gettime, "I", "Enable fast time of day");
2248
/*
 * Export the current timehands into the shared page structure used by
 * the userland (libc/vdso) time functions.  Returns nonzero iff the
 * fast userland path may use the exported data.
 */
uint32_t
tc_fill_vdso_timehands(struct vdso_timehands *vdso_th)
{
	struct timehands *th;
	uint32_t enabled;

	th = timehands;
	vdso_th->th_algo = VDSO_TH_ALGO_1;
	vdso_th->th_scale = th->th_scale;
	vdso_th->th_offset_count = th->th_offset_count;
	vdso_th->th_counter_mask = th->th_counter->tc_counter_mask;
	vdso_th->th_offset = th->th_offset;
	vdso_th->th_boottime = boottimebin;
	/* MD code decides whether this counter is userland-readable. */
	enabled = cpu_fill_vdso_timehands(vdso_th, th->th_counter);
	if (!vdso_th_enable)
		enabled = 0;
	return (enabled);
}
2267#endif /* __rtems__ */
2268
#ifdef COMPAT_FREEBSD32
/*
 * 32-bit-compat variant of tc_fill_vdso_timehands(): export the current
 * timehands into the COMPAT_FREEBSD32 shared page layout, where 64-bit
 * fields are represented as two-element 32-bit arrays.
 *
 * NOTE(review): the *(uint64_t *)&...[0] stores type-pun the 32-bit
 * array slots; this presumes suitable alignment/layout of the compat
 * structure -- confirm against the vdso_timehands32 definition.
 */
uint32_t
tc_fill_vdso_timehands32(struct vdso_timehands32 *vdso_th32)
{
	struct timehands *th;
	uint32_t enabled;

	th = timehands;
	vdso_th32->th_algo = VDSO_TH_ALGO_1;
	*(uint64_t *)&vdso_th32->th_scale[0] = th->th_scale;
	vdso_th32->th_offset_count = th->th_offset_count;
	vdso_th32->th_counter_mask = th->th_counter->tc_counter_mask;
	vdso_th32->th_offset.sec = th->th_offset.sec;
	*(uint64_t *)&vdso_th32->th_offset.frac[0] = th->th_offset.frac;
	vdso_th32->th_boottime.sec = boottimebin.sec;
	*(uint64_t *)&vdso_th32->th_boottime.frac[0] = boottimebin.frac;
	enabled = cpu_fill_vdso_timehands32(vdso_th32, th->th_counter);
	if (!vdso_th_enable)
		enabled = 0;
	return (enabled);
}
#endif
Note: See TracBrowser for help on using the repository browser.