source: rtems/cpukit/score/src/kern_tc.c @ 7cd2484

4.115
Last change on this file since 7cd2484 was 7cd2484, checked in by Alexander Krutwig <alexander.krutwig@…>, on 05/12/15 at 12:32:47

timecounter: Use in RTEMS

Replace timestamp implementation with FreeBSD bintime and timecounters.

New test sptests/sptimecounter02.

Update #2271.

  • Property mode set to 100644
File size: 55.1 KB
Line 
1/*-
2 * ----------------------------------------------------------------------------
3 * "THE BEER-WARE LICENSE" (Revision 42):
4 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
5 * can do whatever you want with this stuff. If we meet some day, and you think
6 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
7 * ----------------------------------------------------------------------------
8 *
9 * Copyright (c) 2011 The FreeBSD Foundation
10 * All rights reserved.
11 *
12 * Portions of this software were developed by Julien Ridoux at the University
13 * of Melbourne under sponsorship from the FreeBSD Foundation.
14 */
15
16#ifdef __rtems__
17#define _KERNEL
18#define bintime _Timecounter_Bintime
19#define binuptime _Timecounter_Binuptime
20#define boottimebin _Timecounter_Boottimebin
21#define getbintime _Timecounter_Getbintime
22#define getbinuptime _Timecounter_Getbinuptime
23#define getmicrotime _Timecounter_Getmicrotime
24#define getmicrouptime _Timecounter_Getmicrouptime
25#define getnanotime _Timecounter_Getnanotime
26#define getnanouptime _Timecounter_Getnanouptime
27#define microtime _Timecounter_Microtime
28#define microuptime _Timecounter_Microuptime
29#define nanotime _Timecounter_Nanotime
30#define nanouptime _Timecounter_Nanouptime
31#define tc_init _Timecounter_Install
32#define timecounter _Timecounter
33#define time_second _Timecounter_Time_second
34#define time_uptime _Timecounter_Time_uptime
35#include <rtems/score/timecounterimpl.h>
36#include <rtems/score/watchdogimpl.h>
37#endif /* __rtems__ */
38#include <sys/cdefs.h>
39__FBSDID("$FreeBSD r277406 2015-01-20T03:54:30Z$");
40
41#include "opt_compat.h"
42#include "opt_ntp.h"
43#include "opt_ffclock.h"
44
45#include <sys/param.h>
46#ifndef __rtems__
47#include <sys/kernel.h>
48#include <sys/limits.h>
49#else /* __rtems__ */
50#include <limits.h>
51#endif /* __rtems__ */
52#ifdef FFCLOCK
53#include <sys/lock.h>
54#include <sys/mutex.h>
55#endif
56#ifndef __rtems__
57#include <sys/sysctl.h>
58#include <sys/syslog.h>
59#include <sys/systm.h>
60#endif /* __rtems__ */
61#include <sys/timeffc.h>
62#include <sys/timepps.h>
63#include <sys/timetc.h>
64#include <sys/timex.h>
65#ifndef __rtems__
66#include <sys/vdso.h>
67#endif /* __rtems__ */
68#ifdef __rtems__
69#include <rtems.h>
70ISR_LOCK_DEFINE(static, _Timecounter_Lock, "Timecounter");
71#define hz rtems_clock_get_ticks_per_second()
72#define printf(...)
73#define log(...)
/*
 * Find the last (most significant) set bit, numbering bits from 1;
 * fls(0) is defined to be 0.  Minimal stand-in for the FreeBSD
 * libkern fls() used by the timecounter code.
 */
static inline int
fls(int x)
{

	if (x == 0)
		return 0;
	return (int)(sizeof(x) * 8) - __builtin_clz(x);
}
79/* FIXME: https://devel.rtems.org/ticket/2348 */
80#define ntp_update_second(a, b) do { (void) a; (void) b; } while (0)
81#endif /* __rtems__ */
82
83/*
84 * A large step happens on boot.  This constant detects such steps.
85 * It is relatively small so that ntp_update_second gets called enough
86 * in the typical 'missed a couple of seconds' case, but doesn't loop
87 * forever when the time step is large.
88 */
89#define LARGE_STEP      200
90
91/*
92 * Implement a dummy timecounter which we can use until we get a real one
93 * in the air.  This allows the console and other early stuff to use
94 * time services.
95 */
96
/*
 * Read method of the dummy timecounter.  On FreeBSD it returns a value
 * that increments by one per call, so early consumers see time advance;
 * on RTEMS it reads as a constant zero until a real timecounter is
 * installed.
 */
static uint32_t
dummy_get_timecount(struct timecounter *tc)
{
#ifndef __rtems__
	static uint32_t ticks;

	ticks++;
	return (ticks);
#else /* __rtems__ */
	return 0;
#endif /* __rtems__ */
}
108
/*
 * Dummy timecounter used until a real one is installed.  Fields, in
 * struct timecounter order (see sys/timetc.h): read method, PPS poll
 * hook (none), counter mask (full 32 bits), nominal frequency (1 MHz),
 * name, and quality; the strongly negative quality ensures any real
 * hardware timecounter is preferred over this one.
 */
static struct timecounter dummy_timecounter = {
        dummy_get_timecount, 0, ~0u, 1000000, "dummy", -1000000
};
112
/*
 * A consistent snapshot of timecounter state for one windup interval.
 * Readers use a lock-free generation protocol: th_generation is set to
 * 0 while an update is in progress and re-checked after every copy, so
 * a reader retries if it raced with tc_windup().
 */
struct timehands {
        /* These fields must be initialized by the driver. */
        struct timecounter      *th_counter;      /* hardware counter in use */
        int64_t                 th_adjustment;    /* rate adjustment (applied by tc_windup) */
        uint64_t                th_scale;         /* counter ticks -> bintime frac multiplier */
        uint32_t                th_offset_count;  /* raw counter value at last windup */
        struct bintime          th_offset;        /* uptime at last windup */
        struct timeval          th_microtime;     /* wall clock (us) snapshot */
        struct timespec         th_nanotime;      /* wall clock (ns) snapshot */
        /* Fields not to be copied in tc_windup start with th_generation. */
        volatile uint32_t       th_generation;    /* 0 while updating, else snapshot id */
        struct timehands        *th_next;         /* next element in the ring */
};
126
/*
 * The ring of timehands.  On SMP ten entries are linked th0 -> th1 ->
 * ... -> th9 -> th0 so a reader has more time to finish with an old
 * snapshot before tc_windup() recycles it; on UP a single self-linked
 * entry suffices.  th0 starts on the dummy timecounter with the scale
 * for its 1 MHz frequency, and with generation 1 so early readers never
 * observe the "update in progress" value 0.
 */
#if defined(RTEMS_SMP)
static struct timehands th0;
static struct timehands th9 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th0};
static struct timehands th8 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th9};
static struct timehands th7 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th8};
static struct timehands th6 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th7};
static struct timehands th5 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th6};
static struct timehands th4 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th5};
static struct timehands th3 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th4};
static struct timehands th2 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th3};
static struct timehands th1 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th2};
#endif
static struct timehands th0 = {
        &dummy_timecounter,
        0,
        (uint64_t)-1 / 1000000,        /* th_scale matching the 1 MHz dummy */
        0,
        {1, 0},
        {0, 0},
        {0, 0},
        1,                             /* start with a valid (nonzero) generation */
#if defined(RTEMS_SMP)
        &th1
#else
        &th0
#endif
};
154
/* Currently published snapshot; the pointer is swapped by tc_windup(). */
static struct timehands *volatile timehands = &th0;
/* Active timecounter and head of the list of all registered ones. */
struct timecounter *timecounter = &dummy_timecounter;
static struct timecounter *timecounters = &dummy_timecounter;

#ifndef __rtems__
int tc_min_ticktock_freq = 1;
#endif /* __rtems__ */

/* Coarse wall-clock and uptime seconds, advanced by tc_windup(). */
volatile time_t time_second = 1;
volatile time_t time_uptime = 1;

/* Boot time in bintime format; added to uptime to obtain wall-clock time. */
struct bintime boottimebin;
#ifndef __rtems__
struct timeval boottime;
static int sysctl_kern_boottime(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime, CTLTYPE_STRUCT|CTLFLAG_RD,
    NULL, 0, sysctl_kern_boottime, "S,timeval", "System boottime");

SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, "");
static SYSCTL_NODE(_kern_timecounter, OID_AUTO, tc, CTLFLAG_RW, 0, "");

static int timestepwarnings;
SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RW,
    &timestepwarnings, 0, "Log time steps");

/* Precision/threshold knobs exported via the alloweddeviation sysctl. */
struct bintime bt_timethreshold;
struct bintime bt_tickthreshold;
sbintime_t sbt_timethreshold;
sbintime_t sbt_tickthreshold;
struct bintime tc_tick_bt;
sbintime_t tc_tick_sbt;
int tc_precexp;
int tc_timepercentage = TC_DEFAULTPERC;
static int sysctl_kern_timecounter_adjprecision(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern_timecounter, OID_AUTO, alloweddeviation,
    CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, 0, 0,
    sysctl_kern_timecounter_adjprecision, "I",
    "Allowed time interval deviation in percents");
#endif /* __rtems__ */

static void tc_windup(void);
#ifndef __rtems__
static void cpu_tick_calibrate(int);
#endif /* __rtems__ */

void dtrace_getnanotime(struct timespec *tsp);
201
202#ifndef __rtems__
/*
 * sysctl handler for kern.boottime.  For 32-bit requests on a 64-bit
 * kernel (SCTL_MASK32 set, and not on MIPS) the timeval is repacked
 * into two 32-bit ints; otherwise the native struct is copied out.
 * Note the else-branch deliberately falls through the #endif pairs to
 * the final SYSCTL_OUT statement.
 */
static int
sysctl_kern_boottime(SYSCTL_HANDLER_ARGS)
{
#ifndef __mips__
#ifdef SCTL_MASK32
        int tv[2];

        if (req->flags & SCTL_MASK32) {
                tv[0] = boottime.tv_sec;
                tv[1] = boottime.tv_usec;
                return SYSCTL_OUT(req, tv, sizeof(tv));
        } else
#endif
#endif
                return SYSCTL_OUT(req, &boottime, sizeof(boottime));
}
219
220static int
221sysctl_kern_timecounter_get(SYSCTL_HANDLER_ARGS)
222{
223        uint32_t ncount;
224        struct timecounter *tc = arg1;
225
226        ncount = tc->tc_get_timecount(tc);
227        return sysctl_handle_int(oidp, &ncount, 0, req);
228}
229
230static int
231sysctl_kern_timecounter_freq(SYSCTL_HANDLER_ARGS)
232{
233        uint64_t freq;
234        struct timecounter *tc = arg1;
235
236        freq = tc->tc_frequency;
237        return sysctl_handle_64(oidp, &freq, 0, req);
238}
239#endif /* __rtems__ */
240
241/*
242 * Return the difference between the timehands' counter value now and what
243 * was when we copied it to the timehands' offset_count.
244 */
245static __inline uint32_t
246tc_delta(struct timehands *th)
247{
248        struct timecounter *tc;
249
250        tc = th->th_counter;
251        return ((tc->tc_get_timecount(tc) - th->th_offset_count) &
252            tc->tc_counter_mask);
253}
254
255/*
256 * Functions for reading the time.  We have to loop until we are sure that
257 * the timehands that we operated on was not updated under our feet.  See
258 * the comment in <sys/time.h> for a description of these 12 functions.
259 */
260
261#ifdef FFCLOCK
262void
263fbclock_binuptime(struct bintime *bt)
264{
265        struct timehands *th;
266        unsigned int gen;
267
268        do {
269                th = timehands;
270                gen = th->th_generation;
271                *bt = th->th_offset;
272                bintime_addx(bt, th->th_scale * tc_delta(th));
273        } while (gen == 0 || gen != th->th_generation);
274}
275
/* Uptime as a timespec, via fbclock_binuptime(). */
void
fbclock_nanouptime(struct timespec *tsp)
{
        struct bintime bt;

        fbclock_binuptime(&bt);
        bintime2timespec(&bt, tsp);
}

/* Uptime as a timeval, via fbclock_binuptime(). */
void
fbclock_microuptime(struct timeval *tvp)
{
        struct bintime bt;

        fbclock_binuptime(&bt);
        bintime2timeval(&bt, tvp);
}

/* Wall-clock bintime: uptime plus boot time. */
void
fbclock_bintime(struct bintime *bt)
{

        fbclock_binuptime(bt);
        bintime_add(bt, &boottimebin);
}

/* Wall-clock time as a timespec. */
void
fbclock_nanotime(struct timespec *tsp)
{
        struct bintime bt;

        fbclock_bintime(&bt);
        bintime2timespec(&bt, tsp);
}

/* Wall-clock time as a timeval. */
void
fbclock_microtime(struct timeval *tvp)
{
        struct bintime bt;

        fbclock_bintime(&bt);
        bintime2timeval(&bt, tvp);
}
319
/*
 * The fbclock_get*() variants below return the cached per-tick values
 * (resolution of one windup interval) without reading the hardware
 * counter; each uses the same lock-free generation retry loop.
 */
void
fbclock_getbinuptime(struct bintime *bt)
{
        struct timehands *th;
        unsigned int gen;

        do {
                th = timehands;
                gen = th->th_generation;
                *bt = th->th_offset;
        } while (gen == 0 || gen != th->th_generation);
}

/* Cached uptime as a timespec. */
void
fbclock_getnanouptime(struct timespec *tsp)
{
        struct timehands *th;
        unsigned int gen;

        do {
                th = timehands;
                gen = th->th_generation;
                bintime2timespec(&th->th_offset, tsp);
        } while (gen == 0 || gen != th->th_generation);
}

/* Cached uptime as a timeval. */
void
fbclock_getmicrouptime(struct timeval *tvp)
{
        struct timehands *th;
        unsigned int gen;

        do {
                th = timehands;
                gen = th->th_generation;
                bintime2timeval(&th->th_offset, tvp);
        } while (gen == 0 || gen != th->th_generation);
}

/* Cached wall-clock bintime: cached uptime plus boot time. */
void
fbclock_getbintime(struct bintime *bt)
{
        struct timehands *th;
        unsigned int gen;

        do {
                th = timehands;
                gen = th->th_generation;
                *bt = th->th_offset;
        } while (gen == 0 || gen != th->th_generation);
        bintime_add(bt, &boottimebin);
}

/* Cached wall-clock time as a timespec (th_nanotime snapshot). */
void
fbclock_getnanotime(struct timespec *tsp)
{
        struct timehands *th;
        unsigned int gen;

        do {
                th = timehands;
                gen = th->th_generation;
                *tsp = th->th_nanotime;
        } while (gen == 0 || gen != th->th_generation);
}

/* Cached wall-clock time as a timeval (th_microtime snapshot). */
void
fbclock_getmicrotime(struct timeval *tvp)
{
        struct timehands *th;
        unsigned int gen;

        do {
                th = timehands;
                gen = th->th_generation;
                *tvp = th->th_microtime;
        } while (gen == 0 || gen != th->th_generation);
}
398#else /* !FFCLOCK */
399void
400binuptime(struct bintime *bt)
401{
402        struct timehands *th;
403        uint32_t gen;
404
405        do {
406                th = timehands;
407                gen = th->th_generation;
408                *bt = th->th_offset;
409                bintime_addx(bt, th->th_scale * tc_delta(th));
410        } while (gen == 0 || gen != th->th_generation);
411}
412
/* Uptime as a timespec, via binuptime(). */
void
nanouptime(struct timespec *tsp)
{
        struct bintime bt;

        binuptime(&bt);
        bintime2timespec(&bt, tsp);
}

/* Uptime as a timeval, via binuptime(). */
void
microuptime(struct timeval *tvp)
{
        struct bintime bt;

        binuptime(&bt);
        bintime2timeval(&bt, tvp);
}

/* Wall-clock bintime: uptime plus boot time. */
void
bintime(struct bintime *bt)
{

        binuptime(bt);
        bintime_add(bt, &boottimebin);
}

/* Wall-clock time as a timespec. */
void
nanotime(struct timespec *tsp)
{
        struct bintime bt;

        bintime(&bt);
        bintime2timespec(&bt, tsp);
}

/* Wall-clock time as a timeval. */
void
microtime(struct timeval *tvp)
{
        struct bintime bt;

        bintime(&bt);
        bintime2timeval(&bt, tvp);
}
456
/*
 * The get*() variants below return cached per-tick values (resolution
 * of one windup interval) without touching the hardware counter; each
 * uses the same lock-free generation retry loop.
 */
void
getbinuptime(struct bintime *bt)
{
        struct timehands *th;
        uint32_t gen;

        do {
                th = timehands;
                gen = th->th_generation;
                *bt = th->th_offset;
        } while (gen == 0 || gen != th->th_generation);
}

/* Cached uptime as a timespec. */
void
getnanouptime(struct timespec *tsp)
{
        struct timehands *th;
        uint32_t gen;

        do {
                th = timehands;
                gen = th->th_generation;
                bintime2timespec(&th->th_offset, tsp);
        } while (gen == 0 || gen != th->th_generation);
}

/* Cached uptime as a timeval. */
void
getmicrouptime(struct timeval *tvp)
{
        struct timehands *th;
        uint32_t gen;

        do {
                th = timehands;
                gen = th->th_generation;
                bintime2timeval(&th->th_offset, tvp);
        } while (gen == 0 || gen != th->th_generation);
}

/* Cached wall-clock bintime: cached uptime plus boot time. */
void
getbintime(struct bintime *bt)
{
        struct timehands *th;
        uint32_t gen;

        do {
                th = timehands;
                gen = th->th_generation;
                *bt = th->th_offset;
        } while (gen == 0 || gen != th->th_generation);
        bintime_add(bt, &boottimebin);
}

/* Cached wall-clock time as a timespec (th_nanotime snapshot). */
void
getnanotime(struct timespec *tsp)
{
        struct timehands *th;
        uint32_t gen;

        do {
                th = timehands;
                gen = th->th_generation;
                *tsp = th->th_nanotime;
        } while (gen == 0 || gen != th->th_generation);
}

/* Cached wall-clock time as a timeval (th_microtime snapshot). */
void
getmicrotime(struct timeval *tvp)
{
        struct timehands *th;
        uint32_t gen;

        do {
                th = timehands;
                gen = th->th_generation;
                *tvp = th->th_microtime;
        } while (gen == 0 || gen != th->th_generation);
}
535#endif /* FFCLOCK */
536
537#ifdef FFCLOCK
538/*
539 * Support for feed-forward synchronization algorithms. This is heavily inspired
540 * by the timehands mechanism but kept independent from it. *_windup() functions
541 * have some connection to avoid accessing the timecounter hardware more than
542 * necessary.
543 */
544
/* Feed-forward clock estimates kept updated by the synchronization daemon. */
struct ffclock_estimate ffclock_estimate;
struct bintime ffclock_boottime;        /* Feed-forward boot time estimate. */
uint32_t ffclock_status;                /* Feed-forward clock status. */
int8_t ffclock_updated;                 /* New estimates are available. */
struct mtx ffclock_mtx;                 /* Mutex on ffclock_estimate. */

/*
 * Per-tick state of the feed-forward clock.  Ring-buffered like the
 * timehands, with the same generation protocol: gen == 0 means an
 * update is in progress and readers must retry.
 */
struct fftimehands {
        struct ffclock_estimate cest;            /* estimates current at this tick */
        struct bintime          tick_time;       /* absolute time of this tick */
        struct bintime          tick_time_lerp;  /* monotonic (interpolated) variant */
        ffcounter               tick_ffcount;    /* ffcounter value at this tick */
        uint64_t                period_lerp;     /* interpolated period estimate */
        volatile uint8_t        gen;             /* generation; 0 while updating */
        struct fftimehands      *next;           /* ring link */
};

#define NUM_ELEMENTS(x) (sizeof(x) / sizeof(*x))

static struct fftimehands ffth[10];
static struct fftimehands *volatile fftimehands = ffth;
566
567static void
568ffclock_init(void)
569{
570        struct fftimehands *cur;
571        struct fftimehands *last;
572
573        memset(ffth, 0, sizeof(ffth));
574
575        last = ffth + NUM_ELEMENTS(ffth) - 1;
576        for (cur = ffth; cur < last; cur++)
577                cur->next = cur + 1;
578        last->next = ffth;
579
580        ffclock_updated = 0;
581        ffclock_status = FFCLOCK_STA_UNSYNC;
582        mtx_init(&ffclock_mtx, "ffclock lock", NULL, MTX_DEF);
583}
584
/*
 * Reset the feed-forward clock estimates. Called from inittodr() to get things
 * kick started and uses the timecounter nominal frequency as a first period
 * estimate. Note: this function may be called several times just after boot.
 * Note: this is the only function that sets the value of boot time for the
 * monotonic (i.e. uptime) version of the feed-forward clock.
 */
void
ffclock_reset_clock(struct timespec *ts)
{
        struct timecounter *tc;
        struct ffclock_estimate cest;

        tc = timehands->th_counter;
        memset(&cest, 0, sizeof(struct ffclock_estimate));

        /* Boot time and update time both start at the supplied timestamp. */
        timespec2bintime(ts, &ffclock_boottime);
        timespec2bintime(ts, &(cest.update_time));
        ffclock_read_counter(&cest.update_ffcount);
        cest.leapsec_next = 0;
        /* Period = 2^64 / frequency, computed without overflowing 64 bits. */
        cest.period = ((1ULL << 63) / tc->tc_frequency) << 1;
        cest.errb_abs = 0;
        cest.errb_rate = 0;
        cest.status = FFCLOCK_STA_UNSYNC;
        cest.leapsec_total = 0;
        cest.leapsec = 0;

        /* Publish atomically; INT8_MAX tells ffclock_windup() a reset occurred. */
        mtx_lock(&ffclock_mtx);
        bcopy(&cest, &ffclock_estimate, sizeof(struct ffclock_estimate));
        ffclock_updated = INT8_MAX;
        mtx_unlock(&ffclock_mtx);

        printf("ffclock reset: %s (%llu Hz), time = %ld.%09lu\n", tc->tc_name,
            (unsigned long long)tc->tc_frequency, (long)ts->tv_sec,
            (unsigned long)ts->tv_nsec);
}
621
622/*
623 * Sub-routine to convert a time interval measured in RAW counter units to time
624 * in seconds stored in bintime format.
625 * NOTE: bintime_mul requires u_int, but the value of the ffcounter may be
626 * larger than the max value of u_int (on 32 bit architecture). Loop to consume
627 * extra cycles.
628 */
629static void
630ffclock_convert_delta(ffcounter ffdelta, uint64_t period, struct bintime *bt)
631{
632        struct bintime bt2;
633        ffcounter delta, delta_max;
634
635        delta_max = (1ULL << (8 * sizeof(unsigned int))) - 1;
636        bintime_clear(bt);
637        do {
638                if (ffdelta > delta_max)
639                        delta = delta_max;
640                else
641                        delta = ffdelta;
642                bt2.sec = 0;
643                bt2.frac = period;
644                bintime_mul(&bt2, (unsigned int)delta);
645                bintime_add(bt, &bt2);
646                ffdelta -= delta;
647        } while (ffdelta > 0);
648}
649
650/*
651 * Update the fftimehands.
652 * Push the tick ffcount and time(s) forward based on current clock estimate.
653 * The conversion from ffcounter to bintime relies on the difference clock
654 * principle, whose accuracy relies on computing small time intervals. If a new
655 * clock estimate has been passed by the synchronisation daemon, make it
656 * current, and compute the linear interpolation for monotonic time if needed.
657 */
/*
 * Advance the fftimehands by one tick of 'delta' raw counter units,
 * publishing the result via the generation protocol (gen = 0 while the
 * next ring entry is being filled, then a fresh nonzero generation).
 */
static void
ffclock_windup(unsigned int delta)
{
        struct ffclock_estimate *cest;
        struct fftimehands *ffth;
        struct bintime bt, gap_lerp;
        ffcounter ffdelta;
        uint64_t frac;
        unsigned int polling;
        uint8_t forward_jump, ogen;

        /*
         * Pick the next timehand, copy current ffclock estimates and move tick
         * times and counter forward.
         */
        forward_jump = 0;
        ffth = fftimehands->next;
        ogen = ffth->gen;
        ffth->gen = 0;
        cest = &ffth->cest;
        bcopy(&fftimehands->cest, cest, sizeof(struct ffclock_estimate));
        ffdelta = (ffcounter)delta;
        ffth->period_lerp = fftimehands->period_lerp;

        ffth->tick_time = fftimehands->tick_time;
        ffclock_convert_delta(ffdelta, cest->period, &bt);
        bintime_add(&ffth->tick_time, &bt);

        ffth->tick_time_lerp = fftimehands->tick_time_lerp;
        ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt);
        bintime_add(&ffth->tick_time_lerp, &bt);

        ffth->tick_ffcount = fftimehands->tick_ffcount + ffdelta;

        /*
         * Assess the status of the clock, if the last update is too old, it is
         * likely the synchronisation daemon is dead and the clock is free
         * running.
         */
        if (ffclock_updated == 0) {
                ffdelta = ffth->tick_ffcount - cest->update_ffcount;
                ffclock_convert_delta(ffdelta, cest->period, &bt);
                if (bt.sec > 2 * FFCLOCK_SKM_SCALE)
                        ffclock_status |= FFCLOCK_STA_UNSYNC;
        }

        /*
         * If available, grab updated clock estimates and make them current.
         * Recompute time at this tick using the updated estimates. The clock
         * estimates passed the feed-forward synchronisation daemon may result
         * in time conversion that is not monotonically increasing (just after
         * the update). time_lerp is a particular linear interpolation over the
         * synchronisation algo polling period that ensures monotonicity for the
         * clock ids requesting it.
         */
        if (ffclock_updated > 0) {
                bcopy(&ffclock_estimate, cest, sizeof(struct ffclock_estimate));
                ffdelta = ffth->tick_ffcount - cest->update_ffcount;
                ffth->tick_time = cest->update_time;
                ffclock_convert_delta(ffdelta, cest->period, &bt);
                bintime_add(&ffth->tick_time, &bt);

                /* ffclock_reset sets ffclock_updated to INT8_MAX */
                if (ffclock_updated == INT8_MAX)
                        ffth->tick_time_lerp = ffth->tick_time;

                if (bintime_cmp(&ffth->tick_time, &ffth->tick_time_lerp, >))
                        forward_jump = 1;
                else
                        forward_jump = 0;

                /* gap_lerp = |tick_time - tick_time_lerp| (bintime is unsigned). */
                bintime_clear(&gap_lerp);
                if (forward_jump) {
                        gap_lerp = ffth->tick_time;
                        bintime_sub(&gap_lerp, &ffth->tick_time_lerp);
                } else {
                        gap_lerp = ffth->tick_time_lerp;
                        bintime_sub(&gap_lerp, &ffth->tick_time);
                }

                /*
                 * The reset from the RTC clock may be far from accurate, and
                 * reducing the gap between real time and interpolated time
                 * could take a very long time if the interpolated clock insists
                 * on strict monotonicity. The clock is reset under very strict
                 * conditions (kernel time is known to be wrong and
                 * synchronization daemon has been restarted recently).
                 * ffclock_boottime absorbs the jump to ensure boot time is
                 * correct and uptime functions stay consistent.
                 */
                if (((ffclock_status & FFCLOCK_STA_UNSYNC) == FFCLOCK_STA_UNSYNC) &&
                    ((cest->status & FFCLOCK_STA_UNSYNC) == 0) &&
                    ((cest->status & FFCLOCK_STA_WARMUP) == FFCLOCK_STA_WARMUP)) {
                        if (forward_jump)
                                bintime_add(&ffclock_boottime, &gap_lerp);
                        else
                                bintime_sub(&ffclock_boottime, &gap_lerp);
                        ffth->tick_time_lerp = ffth->tick_time;
                        bintime_clear(&gap_lerp);
                }

                ffclock_status = cest->status;
                ffth->period_lerp = cest->period;

                /*
                 * Compute corrected period used for the linear interpolation of
                 * time. The rate of linear interpolation is capped to 5000PPM
                 * (5ms/s).
                 */
                if (bintime_isset(&gap_lerp)) {
                        ffdelta = cest->update_ffcount;
                        ffdelta -= fftimehands->cest.update_ffcount;
                        ffclock_convert_delta(ffdelta, cest->period, &bt);
                        polling = bt.sec;
                        /* 18446744073 ~= 2^64 / 10^9, i.e. 1 ns in frac units. */
                        bt.sec = 0;
                        bt.frac = 5000000 * (uint64_t)18446744073LL;
                        bintime_mul(&bt, polling);
                        if (bintime_cmp(&gap_lerp, &bt, >))
                                gap_lerp = bt;

                        /* Approximate 1 sec by 1-(1/2^64) to ease arithmetic */
                        frac = 0;
                        if (gap_lerp.sec > 0) {
                                frac -= 1;
                                frac /= ffdelta / gap_lerp.sec;
                        }
                        frac += gap_lerp.frac / ffdelta;

                        if (forward_jump)
                                ffth->period_lerp += frac;
                        else
                                ffth->period_lerp -= frac;
                }

                ffclock_updated = 0;
        }
        /* Publish: bump generation, skipping the reserved value 0. */
        if (++ogen == 0)
                ogen = 1;
        ffth->gen = ogen;
        fftimehands = ffth;
}
799
800/*
801 * Adjust the fftimehands when the timecounter is changed. Stating the obvious,
802 * the old and new hardware counter cannot be read simultaneously. tc_windup()
803 * does read the two counters 'back to back', but a few cycles are effectively
804 * lost, and not accumulated in tick_ffcount. This is a fairly radical
805 * operation for a feed-forward synchronization daemon, and it is its job to not
806 * pushing irrelevant data to the kernel. Because there is no locking here,
807 * simply force to ignore pending or next update to give daemon a chance to
808 * realize the counter has changed.
809 */
/*
 * Switch the fftimehands to a new timecounter: reseed the period from
 * the new counter's nominal frequency, flag the clock unsynchronized,
 * and publish via the generation protocol.
 */
static void
ffclock_change_tc(struct timehands *th)
{
        struct fftimehands *ffth;
        struct ffclock_estimate *cest;
        struct timecounter *tc;
        uint8_t ogen;

        tc = th->th_counter;
        ffth = fftimehands->next;
        ogen = ffth->gen;
        ffth->gen = 0;          /* mark update in progress */

        cest = &ffth->cest;
        bcopy(&(fftimehands->cest), cest, sizeof(struct ffclock_estimate));
        /* Period = 2^64 / frequency, computed without overflowing 64 bits. */
        cest->period = ((1ULL << 63) / tc->tc_frequency ) << 1;
        cest->errb_abs = 0;
        cest->errb_rate = 0;
        cest->status |= FFCLOCK_STA_UNSYNC;

        ffth->tick_ffcount = fftimehands->tick_ffcount;
        ffth->tick_time_lerp = fftimehands->tick_time_lerp;
        ffth->tick_time = fftimehands->tick_time;
        ffth->period_lerp = cest->period;

        /* Do not lock but ignore next update from synchronization daemon. */
        ffclock_updated--;

        /* Publish: bump generation, skipping the reserved value 0. */
        if (++ogen == 0)
                ogen = 1;
        ffth->gen = ogen;
        fftimehands = ffth;
}
843
844/*
845 * Retrieve feed-forward counter and time of last kernel tick.
846 */
847void
848ffclock_last_tick(ffcounter *ffcount, struct bintime *bt, uint32_t flags)
849{
850        struct fftimehands *ffth;
851        uint8_t gen;
852
853        /*
854         * No locking but check generation has not changed. Also need to make
855         * sure ffdelta is positive, i.e. ffcount > tick_ffcount.
856         */
857        do {
858                ffth = fftimehands;
859                gen = ffth->gen;
860                if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP)
861                        *bt = ffth->tick_time_lerp;
862                else
863                        *bt = ffth->tick_time;
864                *ffcount = ffth->tick_ffcount;
865        } while (gen == 0 || gen != ffth->gen);
866}
867
868/*
869 * Absolute clock conversion. Low level function to convert ffcounter to
870 * bintime. The ffcounter is converted using the current ffclock period estimate
871 * or the "interpolated period" to ensure monotonicity.
872 * NOTE: this conversion may have been deferred, and the clock updated since the
873 * hardware counter has been read.
874 */
/*
 * Convert an absolute ffcounter value to bintime, anchored at the last
 * tick and using either the raw or the interpolated (FFCLOCK_LERP)
 * period.  The delta is computed as a positive magnitude and then added
 * or subtracted depending on which side of the tick 'ffcount' lies.
 */
void
ffclock_convert_abs(ffcounter ffcount, struct bintime *bt, uint32_t flags)
{
        struct fftimehands *ffth;
        struct bintime bt2;
        ffcounter ffdelta;
        uint8_t gen;

        /*
         * No locking but check generation has not changed. Also need to make
         * sure ffdelta is positive, i.e. ffcount > tick_ffcount.
         */
        do {
                ffth = fftimehands;
                gen = ffth->gen;
                if (ffcount > ffth->tick_ffcount)
                        ffdelta = ffcount - ffth->tick_ffcount;
                else
                        ffdelta = ffth->tick_ffcount - ffcount;

                if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP) {
                        *bt = ffth->tick_time_lerp;
                        ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt2);
                } else {
                        *bt = ffth->tick_time;
                        ffclock_convert_delta(ffdelta, ffth->cest.period, &bt2);
                }

                if (ffcount > ffth->tick_ffcount)
                        bintime_add(bt, &bt2);
                else
                        bintime_sub(bt, &bt2);
        } while (gen == 0 || gen != ffth->gen);
}
909
910/*
911 * Difference clock conversion.
912 * Low level function to Convert a time interval measured in RAW counter units
913 * into bintime. The difference clock allows measuring small intervals much more
914 * reliably than the absolute clock.
915 */
916void
917ffclock_convert_diff(ffcounter ffdelta, struct bintime *bt)
918{
919        struct fftimehands *ffth;
920        uint8_t gen;
921
922        /* No locking but check generation has not changed. */
923        do {
924                ffth = fftimehands;
925                gen = ffth->gen;
926                ffclock_convert_delta(ffdelta, ffth->cest.period, bt);
927        } while (gen == 0 || gen != ffth->gen);
928}
929
930/*
931 * Access to current ffcounter value.
932 */
/*
 * Current ffcounter value: the ffcounter recorded at the last tick plus
 * the hardware delta since that tick.  Consistency of the pair
 * (tick_ffcount, delta) is guarded by the timehands generation because
 * ffclock_windup() runs from tc_windup().
 */
void
ffclock_read_counter(ffcounter *ffcount)
{
        struct timehands *th;
        struct fftimehands *ffth;
        unsigned int gen, delta;

        /*
         * ffclock_windup() called from tc_windup(), safe to rely on
         * th->th_generation only, for correct delta and ffcounter.
         */
        do {
                th = timehands;
                gen = th->th_generation;
                ffth = fftimehands;
                delta = tc_delta(th);
                *ffcount = ffth->tick_ffcount;
        } while (gen == 0 || gen != th->th_generation);

        *ffcount += delta;
}
954
/*
 * FFCLOCK variant of binuptime(): uptime as a bintime, delegated to
 * whichever system clock (feedback or feed-forward) is currently active.
 */
void
binuptime(struct bintime *bt)
{

	binuptime_fromclock(bt, sysclock_active);
}
961
/* FFCLOCK variant of nanouptime(): uptime as a timespec via the active clock. */
void
nanouptime(struct timespec *tsp)
{

	nanouptime_fromclock(tsp, sysclock_active);
}
968
/* FFCLOCK variant of microuptime(): uptime as a timeval via the active clock. */
void
microuptime(struct timeval *tvp)
{

	microuptime_fromclock(tvp, sysclock_active);
}
975
/* FFCLOCK variant of bintime(): wall-clock time via the active system clock. */
void
bintime(struct bintime *bt)
{

	bintime_fromclock(bt, sysclock_active);
}
982
/* FFCLOCK variant of nanotime(): wall-clock timespec via the active clock. */
void
nanotime(struct timespec *tsp)
{

	nanotime_fromclock(tsp, sysclock_active);
}
989
/* FFCLOCK variant of microtime(): wall-clock timeval via the active clock. */
void
microtime(struct timeval *tvp)
{

	microtime_fromclock(tvp, sysclock_active);
}
996
/*
 * FFCLOCK variant of getbinuptime(): fast, per-tick-resolution uptime
 * via the active system clock.
 */
void
getbinuptime(struct bintime *bt)
{

	getbinuptime_fromclock(bt, sysclock_active);
}
1003
/* FFCLOCK variant of getnanouptime(): cached uptime via the active clock. */
void
getnanouptime(struct timespec *tsp)
{

	getnanouptime_fromclock(tsp, sysclock_active);
}
1010
/* FFCLOCK variant of getmicrouptime(): cached uptime via the active clock. */
void
getmicrouptime(struct timeval *tvp)
{

	getmicrouptime_fromclock(tvp, sysclock_active);
}
1017
/* FFCLOCK variant of getbintime(): cached wall-clock time via the active clock. */
void
getbintime(struct bintime *bt)
{

	getbintime_fromclock(bt, sysclock_active);
}
1024
/* FFCLOCK variant of getnanotime(): cached wall-clock time via the active clock. */
void
getnanotime(struct timespec *tsp)
{

	getnanotime_fromclock(tsp, sysclock_active);
}
1031
1032void
1033getmicrotime(struct timeval *tvp)
1034{
1035
1036        getmicrouptime_fromclock(tvp, sysclock_active);
1037}
1038
1039#endif /* FFCLOCK */
1040
1041#ifndef __rtems__
1042/*
1043 * This is a clone of getnanotime and used for walltimestamps.
1044 * The dtrace_ prefix prevents fbt from creating probes for
1045 * it so walltimestamp can be safely used in all fbt probes.
1046 */
void
dtrace_getnanotime(struct timespec *tsp)
{
	struct timehands *th;
	uint32_t gen;

	/*
	 * Lock-free read of the cached per-tick wall-clock timestamp:
	 * retry until a stable timehands generation is observed
	 * (gen == 0 means an update was in progress).
	 */
	do {
		th = timehands;
		gen = th->th_generation;
		*tsp = th->th_nanotime;
	} while (gen == 0 || gen != th->th_generation);
}
1059#endif /* __rtems__ */
1060
1061#ifdef FFCLOCK
1062/*
1063 * System clock currently providing time to the system. Modifiable via sysctl
1064 * when the FFCLOCK option is defined.
1065 */
1066int sysclock_active = SYSCLOCK_FBCK;
1067#endif
1068
1069/* Internal NTP status and error estimates. */
1070extern int time_status;
1071extern long time_esterror;
1072
1073#ifndef __rtems__
1074/*
1075 * Take a snapshot of sysclock data which can be used to compare system clocks
1076 * and generate timestamps after the fact.
1077 */
1078void
1079sysclock_getsnapshot(struct sysclock_snap *clock_snap, int fast)
1080{
1081        struct fbclock_info *fbi;
1082        struct timehands *th;
1083        struct bintime bt;
1084        unsigned int delta, gen;
1085#ifdef FFCLOCK
1086        ffcounter ffcount;
1087        struct fftimehands *ffth;
1088        struct ffclock_info *ffi;
1089        struct ffclock_estimate cest;
1090
1091        ffi = &clock_snap->ff_info;
1092#endif
1093
1094        fbi = &clock_snap->fb_info;
1095        delta = 0;
1096
1097        do {
1098                th = timehands;
1099                gen = th->th_generation;
1100                fbi->th_scale = th->th_scale;
1101                fbi->tick_time = th->th_offset;
1102#ifdef FFCLOCK
1103                ffth = fftimehands;
1104                ffi->tick_time = ffth->tick_time_lerp;
1105                ffi->tick_time_lerp = ffth->tick_time_lerp;
1106                ffi->period = ffth->cest.period;
1107                ffi->period_lerp = ffth->period_lerp;
1108                clock_snap->ffcount = ffth->tick_ffcount;
1109                cest = ffth->cest;
1110#endif
1111                if (!fast)
1112                        delta = tc_delta(th);
1113        } while (gen == 0 || gen != th->th_generation);
1114
1115        clock_snap->delta = delta;
1116#ifdef FFCLOCK
1117        clock_snap->sysclock_active = sysclock_active;
1118#endif
1119
1120        /* Record feedback clock status and error. */
1121        clock_snap->fb_info.status = time_status;
1122        /* XXX: Very crude estimate of feedback clock error. */
1123        bt.sec = time_esterror / 1000000;
1124        bt.frac = ((time_esterror - bt.sec) * 1000000) *
1125            (uint64_t)18446744073709ULL;
1126        clock_snap->fb_info.error = bt;
1127
1128#ifdef FFCLOCK
1129        if (!fast)
1130                clock_snap->ffcount += delta;
1131
1132        /* Record feed-forward clock leap second adjustment. */
1133        ffi->leapsec_adjustment = cest.leapsec_total;
1134        if (clock_snap->ffcount > cest.leapsec_next)
1135                ffi->leapsec_adjustment -= cest.leapsec;
1136
1137        /* Record feed-forward clock status and error. */
1138        clock_snap->ff_info.status = cest.status;
1139        ffcount = clock_snap->ffcount - cest.update_ffcount;
1140        ffclock_convert_delta(ffcount, cest.period, &bt);
1141        /* 18446744073709 = int(2^64/1e12), err_bound_rate in [ps/s]. */
1142        bintime_mul(&bt, cest.errb_rate * (uint64_t)18446744073709ULL);
1143        /* 18446744073 = int(2^64 / 1e9), since err_abs in [ns]. */
1144        bintime_addx(&bt, cest.errb_abs * (uint64_t)18446744073ULL);
1145        clock_snap->ff_info.error = bt;
1146#endif
1147}
1148
1149/*
1150 * Convert a sysclock snapshot into a struct bintime based on the specified
1151 * clock source and flags.
1152 */
int
sysclock_snap2bintime(struct sysclock_snap *cs, struct bintime *bt,
    int whichclock, uint32_t flags)
{
#ifdef FFCLOCK
	struct bintime bt2;
	uint64_t period;
#endif

	switch (whichclock) {
	case SYSCLOCK_FBCK:
		/* Feedback (NTP-disciplined) clock. */
		*bt = cs->fb_info.tick_time;

		/* If snapshot was created with !fast, delta will be >0. */
		if (cs->delta > 0)
			bintime_addx(bt, cs->fb_info.th_scale * cs->delta);

		/* Absolute time = uptime + boot time. */
		if ((flags & FBCLOCK_UPTIME) == 0)
			bintime_add(bt, &boottimebin);
		break;
#ifdef FFCLOCK
	case SYSCLOCK_FFWD:
		/* Choose interpolated (monotonic) or native FF time base. */
		if (flags & FFCLOCK_LERP) {
			*bt = cs->ff_info.tick_time_lerp;
			period = cs->ff_info.period_lerp;
		} else {
			*bt = cs->ff_info.tick_time;
			period = cs->ff_info.period;
		}

		/* If snapshot was created with !fast, delta will be >0. */
		if (cs->delta > 0) {
			ffclock_convert_delta(cs->delta, period, &bt2);
			bintime_add(bt, &bt2);
		}

		/* Leap second adjustment. */
		if (flags & FFCLOCK_LEAPSEC)
			bt->sec -= cs->ff_info.leapsec_adjustment;

		/* Boot time adjustment, for uptime/monotonic clocks. */
		if (flags & FFCLOCK_UPTIME)
			bintime_sub(bt, &ffclock_boottime);
		break;
#endif
	default:
		return (EINVAL);
		break;	/* NOTREACHED */
	}

	return (0);
}
1205#endif /* __rtems__ */
1206
1207/*
1208 * Initialize a new timecounter and possibly use it.
1209 */
void
tc_init(struct timecounter *tc)
{
#ifndef __rtems__
	uint32_t u;
	struct sysctl_oid *tc_root;

	/*
	 * A counter must be sampled before it wraps; warn if the hardclock
	 * rate is too low to guarantee that for this counter's width.
	 */
	u = tc->tc_frequency / tc->tc_counter_mask;
	/* XXX: We need some margin here, 10% is a guess */
	u *= 11;
	u /= 10;
	if (u > hz && tc->tc_quality >= 0) {
		/* Demote the counter so it is never selected automatically. */
		tc->tc_quality = -2000;
		if (bootverbose) {
			printf("Timecounter \"%s\" frequency %ju Hz",
			    tc->tc_name, (uintmax_t)tc->tc_frequency);
			printf(" -- Insufficient hz, needs at least %u\n", u);
		}
	} else if (tc->tc_quality >= 0 || bootverbose) {
		printf("Timecounter \"%s\" frequency %ju Hz quality %d\n",
		    tc->tc_name, (uintmax_t)tc->tc_frequency,
		    tc->tc_quality);
	}
#endif /* __rtems__ */

	/* Link the counter into the global list (newest first). */
	tc->tc_next = timecounters;
	timecounters = tc;
#ifndef __rtems__
	/*
	 * Set up sysctl tree for this counter.
	 */
	tc_root = SYSCTL_ADD_NODE(NULL,
	    SYSCTL_STATIC_CHILDREN(_kern_timecounter_tc), OID_AUTO, tc->tc_name,
	    CTLFLAG_RW, 0, "timecounter description");
	SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
	    "mask", CTLFLAG_RD, &(tc->tc_counter_mask), 0,
	    "mask for implemented bits");
	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
	    "counter", CTLTYPE_UINT | CTLFLAG_RD, tc, sizeof(*tc),
	    sysctl_kern_timecounter_get, "IU", "current timecounter value");
	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
	    "frequency", CTLTYPE_U64 | CTLFLAG_RD, tc, sizeof(*tc),
	     sysctl_kern_timecounter_freq, "QU", "timecounter frequency");
	SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
	    "quality", CTLFLAG_RD, &(tc->tc_quality), 0,
	    "goodness of time counter");
	/*
	 * Never automatically use a timecounter with negative quality.
	 * Even though we run on the dummy counter, switching here may be
	 * worse since this timecounter may not be monotonous.
	 */
	if (tc->tc_quality < 0)
		return;
	if (tc->tc_quality < timecounter->tc_quality)
		return;
	if (tc->tc_quality == timecounter->tc_quality &&
	    tc->tc_frequency < timecounter->tc_frequency)
		return;
#endif /* __rtems__ */
	/* Warm up the new counter (two reads, result discarded). */
	(void)tc->tc_get_timecount(tc);
	(void)tc->tc_get_timecount(tc);
	timecounter = tc;
#ifdef __rtems__
	/* RTEMS switches to the new counter immediately. */
	tc_windup();
#endif /* __rtems__ */
}
1276
1277#ifndef __rtems__
1278/* Report the frequency of the current timecounter. */
/* Report the frequency (Hz) of the timecounter currently in use. */
uint64_t
tc_getfrequency(void)
{

	return (timehands->th_counter->tc_frequency);
}
1285#endif /* __rtems__ */
1286
1287/*
1288 * Step our concept of UTC.  This is done by modifying our estimate of
1289 * when we booted.
1290 * XXX: not locked.
1291 */
#ifndef __rtems__
void
tc_setclock(struct timespec *ts)
#else /* __rtems__ */
void
_Timecounter_Set_clock(const struct timespec *ts)
#endif /* __rtems__ */
{
#ifndef __rtems__
	struct timespec tbef, taft;
#endif /* __rtems__ */
	struct bintime bt, bt2;

#ifndef __rtems__
	cpu_tick_calibrate(1);
	nanotime(&tbef);
#endif /* __rtems__ */
	/*
	 * new boottime = requested wall time - current uptime.
	 * The uptime itself is left untouched; only the boot-time offset
	 * is stepped.
	 */
	timespec2bintime(ts, &bt);
	binuptime(&bt2);
	bintime_sub(&bt, &bt2);
	bintime_add(&bt2, &boottimebin);
	boottimebin = bt;
#ifndef __rtems__
	bintime2timeval(&bt, &boottime);
#endif /* __rtems__ */

	/* XXX fiddle all the little crinkly bits around the fiords... */
	tc_windup();
#ifndef __rtems__
	nanotime(&taft);
	if (timestepwarnings) {
		log(LOG_INFO,
		    "Time stepped from %jd.%09ld to %jd.%09ld (%jd.%09ld)\n",
		    (intmax_t)tbef.tv_sec, tbef.tv_nsec,
		    (intmax_t)taft.tv_sec, taft.tv_nsec,
		    (intmax_t)ts->tv_sec, ts->tv_nsec);
	}
	cpu_tick_calibrate(1);
#endif /* __rtems__ */
}
1332
1333/*
1334 * Initialize the next struct timehands in the ring and make
1335 * it the active timehands.  Along the way we might switch to a different
1336 * timecounter and/or do seconds processing in NTP.  Slightly magic.
1337 */
static void
tc_windup(void)
{
	struct bintime bt;
	struct timehands *th, *tho;
	uint64_t scale;
	uint32_t delta, ncount, ogen;
	int i;
	time_t t;
#ifdef __rtems__
	ISR_lock_Context lock_context;

	/* RTEMS serializes windup against concurrent callers/interrupts. */
	_ISR_lock_ISR_disable_and_acquire(&_Timecounter_Lock, &lock_context);
#endif /* __rtems__ */

	/*
	 * Make the next timehands a copy of the current one, but do not
	 * overwrite the generation or next pointer.  While we update
	 * the contents, the generation must be zero.
	 */
	tho = timehands;
	th = tho->th_next;
	ogen = th->th_generation;
	th->th_generation = 0;
	bcopy(tho, th, offsetof(struct timehands, th_generation));

	/*
	 * Capture a timecounter delta on the current timecounter and if
	 * changing timecounters, a counter value from the new timecounter.
	 * Update the offset fields accordingly.
	 */
	delta = tc_delta(th);
	if (th->th_counter != timecounter)
		ncount = timecounter->tc_get_timecount(timecounter);
	else
		ncount = 0;
#ifdef FFCLOCK
	ffclock_windup(delta);
#endif
	th->th_offset_count += delta;
	/* Wrap at the hardware counter's implemented width. */
	th->th_offset_count &= th->th_counter->tc_counter_mask;
	while (delta > th->th_counter->tc_frequency) {
		/* Eat complete unadjusted seconds. */
		delta -= th->th_counter->tc_frequency;
		th->th_offset.sec++;
	}
	if ((delta > th->th_counter->tc_frequency / 2) &&
	    (th->th_scale * delta < ((uint64_t)1 << 63))) {
		/* The product th_scale * delta just barely overflows. */
		th->th_offset.sec++;
	}
	bintime_addx(&th->th_offset, th->th_scale * delta);

	/*
	 * Hardware latching timecounters may not generate interrupts on
	 * PPS events, so instead we poll them.  There is a finite risk that
	 * the hardware might capture a count which is later than the one we
	 * got above, and therefore possibly in the next NTP second which might
	 * have a different rate than the current NTP second.  It doesn't
	 * matter in practice.
	 */
	if (tho->th_counter->tc_poll_pps)
		tho->th_counter->tc_poll_pps(tho->th_counter);

	/*
	 * Deal with NTP second processing.  The for loop normally
	 * iterates at most once, but in extreme situations it might
	 * keep NTP sane if timeouts are not run for several seconds.
	 * At boot, the time step can be large when the TOD hardware
	 * has been read, so on really large steps, we call
	 * ntp_update_second only twice.  We need to call it twice in
	 * case we missed a leap second.
	 */
	bt = th->th_offset;
	bintime_add(&bt, &boottimebin);
	i = bt.sec - tho->th_microtime.tv_sec;
	if (i > LARGE_STEP)
		i = 2;
	for (; i > 0; i--) {
		t = bt.sec;
		ntp_update_second(&th->th_adjustment, &bt.sec);
		/* A leap second moved UTC; fold it into the boot time. */
		if (bt.sec != t)
			boottimebin.sec += bt.sec - t;
	}
	/* Update the UTC timestamps used by the get*() functions. */
	/* XXX shouldn't do this here.  Should force non-`get' versions. */
	bintime2timeval(&bt, &th->th_microtime);
	bintime2timespec(&bt, &th->th_nanotime);

	/* Now is a good time to change timecounters. */
	if (th->th_counter != timecounter) {
#ifndef __rtems__
#ifndef __arm__
		/* Keep C2 sleep disabled while a TC_FLAGS_C2STOP counter is active. */
		if ((timecounter->tc_flags & TC_FLAGS_C2STOP) != 0)
			cpu_disable_c2_sleep++;
		if ((th->th_counter->tc_flags & TC_FLAGS_C2STOP) != 0)
			cpu_disable_c2_sleep--;
#endif
#endif /* __rtems__ */
		th->th_counter = timecounter;
		th->th_offset_count = ncount;
#ifndef __rtems__
		tc_min_ticktock_freq = max(1, timecounter->tc_frequency /
		    (((uint64_t)timecounter->tc_counter_mask + 1) / 3));
#endif /* __rtems__ */
#ifdef FFCLOCK
		ffclock_change_tc(th);
#endif
	}

	/*-
	 * Recalculate the scaling factor.  We want the number of 1/2^64
	 * fractions of a second per period of the hardware counter, taking
	 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
	 * processing provides us with.
	 *
	 * The th_adjustment is nanoseconds per second with 32 bit binary
	 * fraction and we want 64 bit binary fraction of second:
	 *
	 *	 x = a * 2^32 / 10^9 = a * 4.294967296
	 *
	 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
	 * we can only multiply by about 850 without overflowing, that
	 * leaves no suitably precise fractions for multiply before divide.
	 *
	 * Divide before multiply with a fraction of 2199/512 results in a
	 * systematic undercompensation of 10PPM of th_adjustment.  On a
	 * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
	 *
	 * We happily sacrifice the lowest of the 64 bits of our result
	 * to the goddess of code clarity.
	 *
	 */
	scale = (uint64_t)1 << 63;
	scale += (th->th_adjustment / 1024) * 2199;
	scale /= th->th_counter->tc_frequency;
	th->th_scale = scale * 2;

	/*
	 * Now that the struct timehands is again consistent, set the new
	 * generation number, making sure to not make it zero.
	 */
	if (++ogen == 0)
		ogen = 1;
	th->th_generation = ogen;

	/* Go live with the new struct timehands. */
#ifdef FFCLOCK
	switch (sysclock_active) {
	case SYSCLOCK_FBCK:
#endif
		time_second = th->th_microtime.tv_sec;
		time_uptime = th->th_offset.sec;
#ifdef FFCLOCK
		break;
	case SYSCLOCK_FFWD:
		time_second = fftimehands->tick_time_lerp.sec;
		time_uptime = fftimehands->tick_time_lerp.sec - ffclock_boottime.sec;
		break;
	}
#endif

	timehands = th;
#ifndef __rtems__
	timekeep_push_vdso();
#endif /* __rtems__ */
#ifdef __rtems__
	_ISR_lock_Release_and_ISR_enable(&_Timecounter_Lock, &lock_context);
#endif /* __rtems__ */
}
1508
1509#ifndef __rtems__
1510/* Report or change the active timecounter hardware. */
1511static int
1512sysctl_kern_timecounter_hardware(SYSCTL_HANDLER_ARGS)
1513{
1514        char newname[32];
1515        struct timecounter *newtc, *tc;
1516        int error;
1517
1518        tc = timecounter;
1519        strlcpy(newname, tc->tc_name, sizeof(newname));
1520
1521        error = sysctl_handle_string(oidp, &newname[0], sizeof(newname), req);
1522        if (error != 0 || req->newptr == NULL ||
1523            strcmp(newname, tc->tc_name) == 0)
1524                return (error);
1525        for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) {
1526                if (strcmp(newname, newtc->tc_name) != 0)
1527                        continue;
1528
1529                /* Warm up new timecounter. */
1530                (void)newtc->tc_get_timecount(newtc);
1531                (void)newtc->tc_get_timecount(newtc);
1532
1533                timecounter = newtc;
1534
1535                /*
1536                 * The vdso timehands update is deferred until the next
1537                 * 'tc_windup()'.
1538                 *
1539                 * This is prudent given that 'timekeep_push_vdso()' does not
1540                 * use any locking and that it can be called in hard interrupt
1541                 * context via 'tc_windup()'.
1542                 */
1543                return (0);
1544        }
1545        return (EINVAL);
1546}
1547
1548SYSCTL_PROC(_kern_timecounter, OID_AUTO, hardware, CTLTYPE_STRING | CTLFLAG_RW,
1549    0, 0, sysctl_kern_timecounter_hardware, "A",
1550    "Timecounter hardware selected");
1551
1552
1553/* Report or change the active timecounter hardware. */
1554static int
1555sysctl_kern_timecounter_choice(SYSCTL_HANDLER_ARGS)
1556{
1557        char buf[32], *spc;
1558        struct timecounter *tc;
1559        int error;
1560
1561        spc = "";
1562        error = 0;
1563        for (tc = timecounters; error == 0 && tc != NULL; tc = tc->tc_next) {
1564                sprintf(buf, "%s%s(%d)",
1565                    spc, tc->tc_name, tc->tc_quality);
1566                error = SYSCTL_OUT(req, buf, strlen(buf));
1567                spc = " ";
1568        }
1569        return (error);
1570}
1571
1572SYSCTL_PROC(_kern_timecounter, OID_AUTO, choice, CTLTYPE_STRING | CTLFLAG_RD,
1573    0, 0, sysctl_kern_timecounter_choice, "A", "Timecounter hardware detected");
1574#endif /* __rtems__ */
1575
1576#ifndef __rtems__
1577/*
1578 * RFC 2783 PPS-API implementation.
1579 */
1580
/*
 * Fetch the most recent PPS event info, optionally blocking until a new
 * assert or clear event arrives (RFC 2783 time_pps_fetch backend).
 */
static int
pps_fetch(struct pps_fetch_args *fapi, struct pps_state *pps)
{
	int err, timo;
	pps_seq_t aseq, cseq;
	struct timeval tv;

	/* Only the timespec timestamp format is supported. */
	if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
		return (EINVAL);

	/*
	 * If no timeout is requested, immediately return whatever values were
	 * most recently captured.  If timeout seconds is -1, that's a request
	 * to block without a timeout.  WITNESS won't let us sleep forever
	 * without a lock (we really don't need a lock), so just repeatedly
	 * sleep a long time.
	 */
	if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec) {
		if (fapi->timeout.tv_sec == -1)
			timo = 0x7fffffff;
		else {
			tv.tv_sec = fapi->timeout.tv_sec;
			tv.tv_usec = fapi->timeout.tv_nsec / 1000;
			timo = tvtohz(&tv);
		}
		/* Remember current sequence numbers to detect a new event. */
		aseq = pps->ppsinfo.assert_sequence;
		cseq = pps->ppsinfo.clear_sequence;
		while (aseq == pps->ppsinfo.assert_sequence &&
		    cseq == pps->ppsinfo.clear_sequence) {
			err = tsleep(pps, PCATCH, "ppsfch", timo);
			if (err == EWOULDBLOCK && fapi->timeout.tv_sec == -1) {
				/* Infinite wait: keep sleeping. */
				continue;
			} else if (err != 0) {
				return (err);
			}
		}
	}

	/* Copy out the captured info. */
	pps->ppsinfo.current_mode = pps->ppsparam.mode;
	fapi->pps_info_buf = pps->ppsinfo;

	return (0);
}
1624
/*
 * Backend for the RFC 2783 PPS-API ioctls operating on a pps_state.
 * Returns 0 on success, an errno value on failure, or ENOIOCTL for
 * unrecognized commands.
 */
int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	struct pps_fetch_args *fapi;
#ifdef FFCLOCK
	struct pps_fetch_ffc_args *fapi_ffc;
#endif
#ifdef PPS_SYNC
	struct pps_kcbind_args *kapi;
#endif

	KASSERT(pps != NULL, ("NULL pps pointer in pps_ioctl"));
	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		/* Reject modes the source does not advertise. */
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
#ifdef FFCLOCK
		/* Ensure only a single clock is selected for ffc timestamp. */
		if ((app->mode & PPS_TSCLK_MASK) == PPS_TSCLK_MASK)
			return (EINVAL);
#endif
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		fapi = (struct pps_fetch_args *)data;
		return (pps_fetch(fapi, pps));
#ifdef FFCLOCK
	case PPS_IOC_FETCH_FFCOUNTER:
		fapi_ffc = (struct pps_fetch_ffc_args *)data;
		if (fapi_ffc->tsformat && fapi_ffc->tsformat !=
		    PPS_TSFMT_TSPEC)
			return (EINVAL);
		/* Blocking fetch is not supported for the ffcounter variant. */
		if (fapi_ffc->timeout.tv_sec || fapi_ffc->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo_ffc.current_mode = pps->ppsparam.mode;
		fapi_ffc->pps_info_buf_ffc = pps->ppsinfo_ffc;
		/* Overwrite timestamps if feedback clock selected. */
		switch (pps->ppsparam.mode & PPS_TSCLK_MASK) {
		case PPS_TSCLK_FBCK:
			fapi_ffc->pps_info_buf_ffc.assert_timestamp =
			    pps->ppsinfo.assert_timestamp;
			fapi_ffc->pps_info_buf_ffc.clear_timestamp =
			    pps->ppsinfo.clear_timestamp;
			break;
		case PPS_TSCLK_FFWD:
			/* Feed-forward timestamps already in place. */
			break;
		default:
			break;
		}
		return (0);
#endif /* FFCLOCK */
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		kapi = (struct pps_kcbind_args *)data;
		/* XXX Only root should be able to do this */
		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
			return (EINVAL);
		if (kapi->edge & ~pps->ppscap)
			return (EINVAL);
		/* Remember which edge(s) feed the kernel PPS discipline. */
		pps->kcmode = kapi->edge;
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (ENOIOCTL);
	}
}
1709
1710void
1711pps_init(struct pps_state *pps)
1712{
1713        pps->ppscap |= PPS_TSFMT_TSPEC | PPS_CANWAIT;
1714        if (pps->ppscap & PPS_CAPTUREASSERT)
1715                pps->ppscap |= PPS_OFFSETASSERT;
1716        if (pps->ppscap & PPS_CAPTURECLEAR)
1717                pps->ppscap |= PPS_OFFSETCLEAR;
1718#ifdef FFCLOCK
1719        pps->ppscap |= PPS_TSCLK_MASK;
1720#endif
1721}
1722
/*
 * Capture the raw timecounter state at a PPS edge.  Must be cheap enough
 * to run in interrupt context; conversion to a timestamp is deferred to
 * pps_event().
 */
void
pps_capture(struct pps_state *pps)
{
	struct timehands *th;

	KASSERT(pps != NULL, ("NULL pps pointer in pps_capture"));
	/* Record generation/timehands first, then read the counter. */
	th = timehands;
	pps->capgen = th->th_generation;
	pps->capth = th;
#ifdef FFCLOCK
	pps->capffth = fftimehands;
#endif
	pps->capcount = th->th_counter->tc_get_timecount(th->th_counter);
	/* If timehands changed underneath us, invalidate the capture. */
	if (pps->capgen != th->th_generation)
		pps->capgen = 0;
}
1739
/*
 * Process a previously captured PPS edge (PPS_CAPTUREASSERT or
 * PPS_CAPTURECLEAR): convert the captured count to timestamps, bump the
 * event sequence number, optionally apply the configured offset, feed
 * the kernel PPS discipline, and wake sleepers in pps_fetch().
 */
void
pps_event(struct pps_state *pps, int event)
{
	struct bintime bt;
	struct timespec ts, *tsp, *osp;
	uint32_t tcount, *pcount;
	int foff, fhard;
	pps_seq_t *pseq;
#ifdef FFCLOCK
	struct timespec *tsp_ffc;
	pps_seq_t *pseq_ffc;
	ffcounter *ffcount;
#endif

	KASSERT(pps != NULL, ("NULL pps pointer in pps_event"));
	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen == 0 || pps->capgen != pps->capth->th_generation)
		return;

	/* Things would be easier with arrays. */
	/* Select the assert- or clear-edge bookkeeping pointers. */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
#ifdef FFCLOCK
		ffcount = &pps->ppsinfo_ffc.assert_ffcount;
		tsp_ffc = &pps->ppsinfo_ffc.assert_timestamp;
		pseq_ffc = &pps->ppsinfo_ffc.assert_sequence;
#endif
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
#ifdef FFCLOCK
		ffcount = &pps->ppsinfo_ffc.clear_ffcount;
		tsp_ffc = &pps->ppsinfo_ffc.clear_timestamp;
		pseq_ffc = &pps->ppsinfo_ffc.clear_sequence;
#endif
	}

	/*
	 * If the timecounter changed, we cannot compare the count values, so
	 * we have to drop the rest of the PPS-stuff until the next event.
	 */
	if (pps->ppstc != pps->capth->th_counter) {
		pps->ppstc = pps->capth->th_counter;
		*pcount = pps->capcount;
		pps->ppscount[2] = pps->capcount;
		return;
	}

	/* Convert the count to a timespec. */
	tcount = pps->capcount - pps->capth->th_offset_count;
	tcount &= pps->capth->th_counter->tc_counter_mask;
	bt = pps->capth->th_offset;
	bintime_addx(&bt, pps->capth->th_scale * tcount);
	bintime_add(&bt, &boottimebin);
	bintime2timespec(&bt, &ts);

	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen != pps->capth->th_generation)
		return;

	/* Publish the new event: count, sequence number, timestamp. */
	*pcount = pps->capcount;
	(*pseq)++;
	*tsp = ts;

	if (foff) {
		/* Apply the user-configured offset, normalizing tv_nsec. */
		timespecadd(tsp, osp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}

#ifdef FFCLOCK
	*ffcount = pps->capffth->tick_ffcount + tcount;
	/*
	 * NOTE(review): this assignment appears to be immediately
	 * overwritten by ffclock_convert_delta() below (which is used
	 * elsewhere as a pure output) -- presumably redundant; confirm
	 * against ffclock_convert_delta()'s definition.
	 */
	bt = pps->capffth->tick_time;
	ffclock_convert_delta(tcount, pps->capffth->cest.period, &bt);
	bintime_add(&bt, &pps->capffth->tick_time);
	bintime2timespec(&bt, &ts);
	(*pseq_ffc)++;
	*tsp_ffc = ts;
#endif

#ifdef PPS_SYNC
	if (fhard) {
		uint64_t scale;

		/*
		 * Feed the NTP PLL/FLL.
		 * The FLL wants to know how many (hardware) nanoseconds
		 * elapsed since the previous event.
		 */
		tcount = pps->capcount - pps->ppscount[2];
		pps->ppscount[2] = pps->capcount;
		tcount &= pps->capth->th_counter->tc_counter_mask;
		/* scale = 2^64 / counter frequency. */
		scale = (uint64_t)1 << 63;
		scale /= pps->capth->th_counter->tc_frequency;
		scale *= 2;
		bt.sec = 0;
		bt.frac = 0;
		bintime_addx(&bt, scale * tcount);
		bintime2timespec(&bt, &ts);
		hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec);
	}
#endif

	/* Wakeup anyone sleeping in pps_fetch().  */
	wakeup(pps);
}
1857#else /* __rtems__ */
1858/* FIXME: https://devel.rtems.org/ticket/2349 */
1859#endif /* __rtems__ */
1860
1861/*
1862 * Timecounters need to be updated every so often to prevent the hardware
1863 * counter from overflowing.  Updating also recalculates the cached values
1864 * used by the get*() family of functions, so their precision depends on
1865 * the update frequency.
1866 */
1867
#ifndef __rtems__
/*
 * Number of hardclock ticks between successive tc_windup() calls; computed
 * in inittimecounter() so that the windup runs roughly once per millisecond.
 * Exported read-only through the kern.timecounter.tick sysctl.
 */
static int tc_tick;
SYSCTL_INT(_kern_timecounter, OID_AUTO, tick, CTLFLAG_RD, &tc_tick, 0,
    "Approximate number of hardclock ticks in a millisecond");
#endif /* __rtems__ */
1873
#ifndef __rtems__
/*
 * Periodic tick entry: accumulate hardclock ticks and run tc_windup() once
 * every tc_tick ticks, so the cached timehands stay fresh and the hardware
 * counter cannot wrap unnoticed.
 *
 * cnt: number of hardclock ticks that elapsed since the previous call.
 */
void
tc_ticktock(int cnt)
{
	static int count;	/* ticks accumulated since the last windup */

	count += cnt;
	if (count < tc_tick)
		return;
	count = 0;
#else /* __rtems__ */
/*
 * RTEMS clock tick service: wind up the timecounter on every tick and then
 * process the watchdog timers.
 */
void
_Timecounter_Tick(void)
{
#endif /* __rtems__ */
	tc_windup();
#ifdef __rtems__
	_Watchdog_Tick();
#endif /* __rtems__ */
}
#ifdef __rtems__
/*
 * Simplified clock tick service: advance the uptime by a fixed amount of
 * counter ticks and refresh the cached UTC timestamps, all under the
 * timecounter ISR lock, then run the watchdog tick.
 *
 * delta:  counter ticks elapsed since the previous call (scaled into
 *         bintime via th_scale, as in the windup path).
 * offset: raw counter value to record as the new th_offset_count.
 *
 * NOTE(review): unlike tc_windup(), the active timehands is modified in
 * place and the generation number is only bumped afterwards; lock-free
 * readers are presumably excluded by the ISR lock / interrupt disable --
 * confirm against the reader side.
 */
void
_Timecounter_Tick_simple(uint32_t delta, uint32_t offset)
{
	struct bintime bt;
	struct timehands *th;
	uint32_t ogen;
	ISR_lock_Context lock_context;

	_ISR_lock_ISR_disable_and_acquire(&_Timecounter_Lock, &lock_context);

	th = timehands;
	ogen = th->th_generation;
	th->th_offset_count = offset;
	bintime_addx(&th->th_offset, th->th_scale * delta);

	/* Uptime plus boot time gives the wall-clock time. */
	bt = th->th_offset;
	bintime_add(&bt, &boottimebin);

	/* Update the UTC timestamps used by the get*() functions. */
	/* XXX shouldn't do this here.  Should force non-`get' versions. */
	bintime2timeval(&bt, &th->th_microtime);
	bintime2timespec(&bt, &th->th_nanotime);

	/*
	 * Now that the struct timehands is again consistent, set the new
	 * generation number, making sure to not make it zero.
	 */
	if (++ogen == 0)
		ogen = 1;
	th->th_generation = ogen;

	/* Go live with the new struct timehands. */
	time_second = th->th_microtime.tv_sec;
	time_uptime = th->th_offset.sec;

	_ISR_lock_Release_and_ISR_enable(&_Timecounter_Lock, &lock_context);

	_Watchdog_Tick();
}
#endif /* __rtems__ */
1935
1936#ifndef __rtems__
1937static void __inline
1938tc_adjprecision(void)
1939{
1940        int t;
1941
1942        if (tc_timepercentage > 0) {
1943                t = (99 + tc_timepercentage) / tc_timepercentage;
1944                tc_precexp = fls(t + (t >> 1)) - 1;
1945                FREQ2BT(hz / tc_tick, &bt_timethreshold);
1946                FREQ2BT(hz, &bt_tickthreshold);
1947                bintime_shift(&bt_timethreshold, tc_precexp);
1948                bintime_shift(&bt_tickthreshold, tc_precexp);
1949        } else {
1950                tc_precexp = 31;
1951                bt_timethreshold.sec = INT_MAX;
1952                bt_timethreshold.frac = ~(uint64_t)0;
1953                bt_tickthreshold = bt_timethreshold;
1954        }
1955        sbt_timethreshold = bttosbt(bt_timethreshold);
1956        sbt_tickthreshold = bttosbt(bt_tickthreshold);
1957}
1958#endif /* __rtems__ */
1959
1960#ifndef __rtems__
1961static int
1962sysctl_kern_timecounter_adjprecision(SYSCTL_HANDLER_ARGS)
1963{
1964        int error, val;
1965
1966        val = tc_timepercentage;
1967        error = sysctl_handle_int(oidp, &val, 0, req);
1968        if (error != 0 || req->newptr == NULL)
1969                return (error);
1970        tc_timepercentage = val;
1971        if (cold)
1972                goto done;
1973        tc_adjprecision();
1974done:
1975        return (0);
1976}
1977#endif /* __rtems__ */
1978
#ifndef __rtems__
/*
 * Boot-time initialization of the timecounter machinery: choose the
 * tc_tick windup divider, precompute the tick-related bintime/sbintime
 * constants, warm up the active timecounter and perform the first windup.
 */
static void
inittimecounter(void *dummy)
#else /* __rtems__ */
/*
 * RTEMS: warm up the installed timecounter and perform the first windup.
 */
void
_Timecounter_Initialize(void)
#endif /* __rtems__ */
{
#ifndef __rtems__
	u_int p;		/* windup period in microseconds, for the banner */
	int tick_rate;		/* windup frequency in Hz */

	/*
	 * Set the initial timeout to
	 * max(1, <approx. number of hardclock ticks in a millisecond>).
	 * People should probably not use the sysctl to set the timeout
	 * to smaller than its initial value, since that value is the
	 * smallest reasonable one.  If they want better timestamps they
	 * should use the non-"get"* functions.
	 */
	if (hz > 1000)
		tc_tick = (hz + 500) / 1000;
	else
		tc_tick = 1;
	tc_adjprecision();
	FREQ2BT(hz, &tick_bt);
	tick_sbt = bttosbt(tick_bt);
	tick_rate = hz / tc_tick;
	FREQ2BT(tick_rate, &tc_tick_bt);
	tc_tick_sbt = bttosbt(tc_tick_bt);
	p = (tc_tick * 1000000) / hz;
	printf("Timecounters tick every %d.%03u msec\n", p / 1000, p % 1000);
#endif /* __rtems__ */

#ifdef FFCLOCK
	ffclock_init();
#endif
	/* warm up new timecounter (again) and get rolling. */
	(void)timecounter->tc_get_timecount(timecounter);
	(void)timecounter->tc_get_timecount(timecounter);
	tc_windup();
}
2021
#ifndef __rtems__
/* Run inittimecounter() during boot, after the clock subsystem comes up. */
SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL);
#endif /* __rtems__ */
2025
#ifndef __rtems__
/* Cpu tick handling -------------------------------------------------*/

/*
 * Nonzero when the registered cputicker has a variable rate and therefore
 * needs periodic recalibration against the timecounter.
 */
static int cpu_tick_variable;
/*
 * Best known cputicker frequency estimate [Hz].  Set by set_cputicker();
 * cpu_tick_calibrate() only ever raises it.
 */
static uint64_t cpu_tick_frequency;
2031
2032static uint64_t
2033tc_cpu_ticks(void)
2034{
2035        static uint64_t base;
2036        static unsigned last;
2037        unsigned u;
2038        struct timecounter *tc;
2039
2040        tc = timehands->th_counter;
2041        u = tc->tc_get_timecount(tc) & tc->tc_counter_mask;
2042        if (u < last)
2043                base += (uint64_t)tc->tc_counter_mask + 1;
2044        last = u;
2045        return (u + base);
2046}
2047
2048void
2049cpu_tick_calibration(void)
2050{
2051        static time_t last_calib;
2052
2053        if (time_uptime != last_calib && !(time_uptime & 0xf)) {
2054                cpu_tick_calibrate(0);
2055                last_calib = time_uptime;
2056        }
2057}
2058
2059/*
2060 * This function gets called every 16 seconds on only one designated
2061 * CPU in the system from hardclock() via cpu_tick_calibration()().
2062 *
2063 * Whenever the real time clock is stepped we get called with reset=1
2064 * to make sure we handle suspend/resume and similar events correctly.
2065 */
2066
2067static void
2068cpu_tick_calibrate(int reset)
2069{
2070        static uint64_t c_last;
2071        uint64_t c_this, c_delta;
2072        static struct bintime  t_last;
2073        struct bintime t_this, t_delta;
2074        uint32_t divi;
2075
2076        if (reset) {
2077                /* The clock was stepped, abort & reset */
2078                t_last.sec = 0;
2079                return;
2080        }
2081
2082        /* we don't calibrate fixed rate cputicks */
2083        if (!cpu_tick_variable)
2084                return;
2085
2086        getbinuptime(&t_this);
2087        c_this = cpu_ticks();
2088        if (t_last.sec != 0) {
2089                c_delta = c_this - c_last;
2090                t_delta = t_this;
2091                bintime_sub(&t_delta, &t_last);
2092                /*
2093                 * Headroom:
2094                 *      2^(64-20) / 16[s] =
2095                 *      2^(44) / 16[s] =
2096                 *      17.592.186.044.416 / 16 =
2097                 *      1.099.511.627.776 [Hz]
2098                 */
2099                divi = t_delta.sec << 20;
2100                divi |= t_delta.frac >> (64 - 20);
2101                c_delta <<= 20;
2102                c_delta /= divi;
2103                if (c_delta > cpu_tick_frequency) {
2104                        if (0 && bootverbose)
2105                                printf("cpu_tick increased to %ju Hz\n",
2106                                    c_delta);
2107                        cpu_tick_frequency = c_delta;
2108                }
2109        }
2110        c_last = c_this;
2111        t_last = t_this;
2112}
2113
2114void
2115set_cputicker(cpu_tick_f *func, uint64_t freq, unsigned var)
2116{
2117
2118        if (func == NULL) {
2119                cpu_ticks = tc_cpu_ticks;
2120        } else {
2121                cpu_tick_frequency = freq;
2122                cpu_tick_variable = var;
2123                cpu_ticks = func;
2124        }
2125}
2126
2127uint64_t
2128cpu_tickrate(void)
2129{
2130
2131        if (cpu_ticks == tc_cpu_ticks)
2132                return (tc_getfrequency());
2133        return (cpu_tick_frequency);
2134}
2135
2136/*
2137 * We need to be slightly careful converting cputicks to microseconds.
2138 * There is plenty of margin in 64 bits of microseconds (half a million
2139 * years) and in 64 bits at 4 GHz (146 years), but if we do a multiply
2140 * before divide conversion (to retain precision) we find that the
2141 * margin shrinks to 1.5 hours (one millionth of 146y).
2142 * With a three prong approach we never lose significant bits, no
2143 * matter what the cputick rate and length of timeinterval is.
2144 */
2145
2146uint64_t
2147cputick2usec(uint64_t tick)
2148{
2149
2150        if (tick > 18446744073709551LL)         /* floor(2^64 / 1000) */
2151                return (tick / (cpu_tickrate() / 1000000LL));
2152        else if (tick > 18446744073709LL)       /* floor(2^64 / 1000000) */
2153                return ((tick * 1000LL) / (cpu_tickrate() / 1000LL));
2154        else
2155                return ((tick * 1000000LL) / cpu_tickrate());
2156}
2157
/*
 * Active cputicker function pointer; defaults to the timecounter-backed
 * tc_cpu_ticks() until set_cputicker() installs a hardware source.
 */
cpu_tick_f	*cpu_ticks = tc_cpu_ticks;
#endif /* __rtems__ */
2160
#ifndef __rtems__
/* Master enable for the userland (vDSO) fast time-of-day path; on by default. */
static int vdso_th_enable = 1;
/*
 * Sysctl handler: read or update vdso_th_enable.  The current value is
 * handed to sysctl_handle_int() and written back on success.
 */
static int
sysctl_fast_gettime(SYSCTL_HANDLER_ARGS)
{
	int old_vdso_th_enable, error;

	old_vdso_th_enable = vdso_th_enable;
	error = sysctl_handle_int(oidp, &old_vdso_th_enable, 0, req);
	if (error != 0)
		return (error);
	vdso_th_enable = old_vdso_th_enable;
	return (0);
}
SYSCTL_PROC(_kern_timecounter, OID_AUTO, fast_gettime,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    NULL, 0, sysctl_fast_gettime, "I", "Enable fast time of day");
2178
/*
 * Copy the current timehands into the structure consumed by the userland
 * vDSO time functions.  Returns nonzero when userland may use the fast
 * path, 0 when it must fall back to a system call (machine-dependent
 * cpu_fill_vdso_timehands() vetoes unsupported counters, and
 * vdso_th_enable overrides everything).
 *
 * NOTE(review): timehands is read here without a generation re-check;
 * presumably the caller serializes against tc_windup() -- confirm.
 */
uint32_t
tc_fill_vdso_timehands(struct vdso_timehands *vdso_th)
{
	struct timehands *th;
	uint32_t enabled;

	th = timehands;
	vdso_th->th_algo = VDSO_TH_ALGO_1;
	vdso_th->th_scale = th->th_scale;
	vdso_th->th_offset_count = th->th_offset_count;
	vdso_th->th_counter_mask = th->th_counter->tc_counter_mask;
	vdso_th->th_offset = th->th_offset;
	vdso_th->th_boottime = boottimebin;
	enabled = cpu_fill_vdso_timehands(vdso_th, th->th_counter);
	if (!vdso_th_enable)
		enabled = 0;
	return (enabled);
}
#endif /* __rtems__ */
2198
#ifdef COMPAT_FREEBSD32
/*
 * 32-bit compat variant of tc_fill_vdso_timehands(): 64-bit quantities
 * (scale, offset fraction, boottime fraction) are stored into pairs of
 * 32-bit words in the compat structure.
 *
 * NOTE(review): the *(uint64_t *)&...[0] stores type-pun uint32_t arrays
 * as uint64_t; this relies on the compat struct giving those word pairs
 * 64-bit alignment and is formally undefined behavior in ISO C -- confirm
 * the vdso_timehands32 layout guarantees the required alignment.
 */
uint32_t
tc_fill_vdso_timehands32(struct vdso_timehands32 *vdso_th32)
{
	struct timehands *th;
	uint32_t enabled;

	th = timehands;
	vdso_th32->th_algo = VDSO_TH_ALGO_1;
	*(uint64_t *)&vdso_th32->th_scale[0] = th->th_scale;
	vdso_th32->th_offset_count = th->th_offset_count;
	vdso_th32->th_counter_mask = th->th_counter->tc_counter_mask;
	vdso_th32->th_offset.sec = th->th_offset.sec;
	*(uint64_t *)&vdso_th32->th_offset.frac[0] = th->th_offset.frac;
	vdso_th32->th_boottime.sec = boottimebin.sec;
	*(uint64_t *)&vdso_th32->th_boottime.frac[0] = boottimebin.frac;
	enabled = cpu_fill_vdso_timehands32(vdso_th32, th->th_counter);
	if (!vdso_th_enable)
		enabled = 0;
	return (enabled);
}
#endif
Note: See TracBrowser for help on using the repository browser.