source: rtems/cpukit/score/src/kern_tc.c

Last change on this file was d5c386f, checked in by Sebastian Huber <sebastian.huber@…>, on 02/27/23 at 21:49:10

pps: Round to closest integer in pps_event()

The comment above bintime2timespec() says:

When converting between timestamps on parallel timescales of differing
resolutions it is historical and scientific practice to round down.

However, the delta_nsec value is a time difference and not a timestamp. Also
the rounding errors accumulate in the frequency accumulator, see hardpps().
So, rounding to the closest integer is probably slightly better.

Reviewed by: imp
Pull Request: https://github.com/freebsd/freebsd-src/pull/604
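
For illustration, a minimal sketch of the rounding in question (hypothetical
helper, assuming a compiler with __uint128_t such as GCC or Clang): a 64-bit
binary fraction of a second converts to nanoseconds rounded to the closest
integer by adding half a unit in the last place before truncating.

    #include <stdint.h>

    /* Convert a 2^-64 fraction of a second to nanoseconds, rounding to
     * the closest integer instead of down. */
    static inline uint32_t
    frac_to_nsec_rounded(uint64_t frac)
    {
            __uint128_t t;

            t = (__uint128_t)frac * 1000000000;  /* ns, scaled by 2^64 */
            t += (__uint128_t)1 << 63;           /* add 0.5 => round */
            return ((uint32_t)(t >> 64));
    }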

1/**
2 * @file
3 *
4 * @ingroup RTEMSScoreTimecounter
5 *
6 * @brief This source file contains the definition of
7 *  ::_Timecounter, ::_Timecounter_Time_second, and ::_Timecounter_Time_uptime
8 *  and the implementation of _Timecounter_Set_NTP_update_second(),
9 *  _Timecounter_Binuptime(), _Timecounter_Nanouptime(),
10 *  _Timecounter_Microuptime(), _Timecounter_Bintime(),
11 *  _Timecounter_Nanotime(), _Timecounter_Microtime(),
12 *  _Timecounter_Getbinuptime(), _Timecounter_Getnanouptime(),
13 *  _Timecounter_Getmicrouptime(), _Timecounter_Getbintime(),
14 *  _Timecounter_Getnanotime(), _Timecounter_Getmicrotime(),
15 *  _Timecounter_Getboottime(), _Timecounter_Getboottimebin(), and
16 *  _Timecounter_Install().
17 */
18
19/*-
20 * SPDX-License-Identifier: Beerware
21 *
22 * ----------------------------------------------------------------------------
23 * "THE BEER-WARE LICENSE" (Revision 42):
24 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
25 * can do whatever you want with this stuff. If we meet some day, and you think
26 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
27 * ----------------------------------------------------------------------------
28 *
29 * Copyright (c) 2011, 2015, 2016 The FreeBSD Foundation
30 *
31 * Portions of this software were developed by Julien Ridoux at the University
32 * of Melbourne under sponsorship from the FreeBSD Foundation.
33 *
34 * Portions of this software were developed by Konstantin Belousov
35 * under sponsorship from the FreeBSD Foundation.
36 */
37
38#ifdef __rtems__
39#include <sys/lock.h>
40#define _KERNEL
41#define binuptime(_bt) _Timecounter_Binuptime(_bt)
42#define nanouptime(_tsp) _Timecounter_Nanouptime(_tsp)
43#define microuptime(_tvp) _Timecounter_Microuptime(_tvp)
44#define bintime(_bt) _Timecounter_Bintime(_bt)
45#define nanotime(_tsp) _Timecounter_Nanotime(_tsp)
46#define microtime(_tvp) _Timecounter_Microtime(_tvp)
47#define getbinuptime(_bt) _Timecounter_Getbinuptime(_bt)
48#define getnanouptime(_tsp) _Timecounter_Getnanouptime(_tsp)
49#define getmicrouptime(_tvp) _Timecounter_Getmicrouptime(_tvp)
50#define getbintime(_bt) _Timecounter_Getbintime(_bt)
51#define getnanotime(_tsp) _Timecounter_Getnanotime(_tsp)
52#define getmicrotime(_tvp) _Timecounter_Getmicrotime(_tvp)
53#define getboottime(_tvp) _Timecounter_Getboottime(_tvp)
54#define getboottimebin(_bt) _Timecounter_Getboottimebin(_bt)
55#define tc_init _Timecounter_Install
56#define timecounter _Timecounter
57#define time_second _Timecounter_Time_second
58#define time_uptime _Timecounter_Time_uptime
59
60#include <rtems/score/timecounterimpl.h>
61#include <rtems/score/assert.h>
62#include <rtems/score/atomic.h>
63#include <rtems/score/smp.h>
64#include <rtems/score/todimpl.h>
65#include <rtems/score/watchdogimpl.h>
66#include <rtems/rtems/clock.h>
67
68#define ENOIOCTL EINVAL
69#define KASSERT(exp, arg) _Assert(exp)
70#endif /* __rtems__ */
71#include <sys/cdefs.h>
72__FBSDID("$FreeBSD$");
73
74#include "opt_ntp.h"
75#include "opt_ffclock.h"
76
77#include <sys/param.h>
78#ifndef __rtems__
79#include <sys/kernel.h>
80#include <sys/limits.h>
81#include <sys/lock.h>
82#include <sys/mutex.h>
83#include <sys/proc.h>
84#include <sys/sbuf.h>
85#include <sys/sleepqueue.h>
86#include <sys/sysctl.h>
87#include <sys/syslog.h>
88#include <sys/systm.h>
89#endif /* __rtems__ */
90#include <sys/timeffc.h>
91#include <sys/timepps.h>
92#include <sys/timetc.h>
93#include <sys/timex.h>
94#ifndef __rtems__
95#include <sys/vdso.h>
96#endif /* __rtems__ */
97#ifdef __rtems__
98#include <errno.h>
99#include <limits.h>
100#include <string.h>
101#include <rtems.h>
102ISR_LOCK_DEFINE(, _Timecounter_Lock, "Timecounter")
103#define _Timecounter_Release(lock_context) \
104  _ISR_lock_Release_and_ISR_enable(&_Timecounter_Lock, lock_context)
105#define hz rtems_clock_get_ticks_per_second()
106#define printf(...)
107#define log(...)
108
109static inline void
110atomic_thread_fence_acq(void)
111{
112
113        _Atomic_Fence(ATOMIC_ORDER_ACQUIRE);
114}
115
116static inline void
117atomic_thread_fence_rel(void)
118{
119
120        _Atomic_Fence(ATOMIC_ORDER_RELEASE);
121}
122
123static inline u_int
124atomic_load_int(Atomic_Uint *i)
125{
126
127        return (_Atomic_Load_uint(i, ATOMIC_ORDER_RELAXED));
128}
129
130static inline u_int
131atomic_load_acq_int(Atomic_Uint *i)
132{
133
134        return (_Atomic_Load_uint(i, ATOMIC_ORDER_ACQUIRE));
135}
136
137static inline void
138atomic_store_rel_int(Atomic_Uint *i, u_int val)
139{
140
141        _Atomic_Store_uint(i, val, ATOMIC_ORDER_RELEASE);
142}
143
144static inline void *
145atomic_load_ptr(void *ptr)
146{
147
148        return ((void *)_Atomic_Load_uintptr(ptr, ATOMIC_ORDER_RELAXED));
149}
150
151static Timecounter_NTP_update_second _Timecounter_NTP_update_second_handler;
152
153void
154_Timecounter_Set_NTP_update_second(Timecounter_NTP_update_second handler)
155{
156
157        _Timecounter_NTP_update_second_handler = handler;
158}
159
160#define ntp_update_second(a, b) (*ntp_update_second_handler)(a, b)
161#endif /* __rtems__ */
162
163/*
164 * A large step happens on boot.  This constant detects such steps.
165 * It is relatively small so that ntp_update_second gets called enough
166 * in the typical 'missed a couple of seconds' case, but doesn't loop
167 * forever when the time step is large.
168 */
169#define LARGE_STEP      200
170
171/*
172 * Implement a dummy timecounter which we can use until we get a real one
173 * in the air.  This allows the console and other early stuff to use
174 * time services.
175 */
176
177static uint32_t
178dummy_get_timecount(struct timecounter *tc)
179{
180#ifndef __rtems__
181        static uint32_t now;
182
183        return (++now);
184#else /* __rtems__ */
185        return 0;
186#endif /* __rtems__ */
187}
188
189static struct timecounter dummy_timecounter = {
190#ifndef __rtems__
191        dummy_get_timecount, 0, ~0u, 1000000, "dummy", -1000000
192#else /* __rtems__ */
193        dummy_get_timecount, ~(uint32_t)0, 1000000, "dummy", -1000000
194#endif /* __rtems__ */
195};
196
197struct timehands {
198        /* These fields must be initialized by the driver. */
199        struct timecounter      *th_counter;
200        int64_t                 th_adjustment;
201        uint64_t                th_scale;
202        uint32_t                th_large_delta;
203        uint32_t                th_offset_count;
204        struct bintime          th_offset;
205        struct bintime          th_bintime;
206        struct timeval          th_microtime;
207        struct timespec         th_nanotime;
208        struct bintime          th_boottime;
209        /* Fields not to be copied in tc_windup start with th_generation. */
210#ifndef __rtems__
211        u_int                   th_generation;
212#else /* __rtems__ */
213        Atomic_Uint             th_generation;
214#endif /* __rtems__ */
215        struct timehands        *th_next;
216};
217
218#ifndef __rtems__
219static struct timehands ths[16] = {
220    [0] =  {
221        .th_counter = &dummy_timecounter,
222        .th_scale = (uint64_t)-1 / 1000000,
223        .th_large_delta = 1000000,
224        .th_offset = { .sec = 1 },
225        .th_generation = 1,
226    },
227};
228
229static struct timehands *volatile timehands = &ths[0];
230struct timecounter *timecounter = &dummy_timecounter;
231static struct timecounter *timecounters = &dummy_timecounter;
232
233/* Mutex to protect the timecounter list. */
234static struct mtx tc_lock;
235
236int tc_min_ticktock_freq = 1;
237#else /* __rtems__ */
238/*
239 * In FreeBSD, the timehands count is a tuning option from two to 16.  The
240 * tuning option was added since it is inexpensive and some FreeBSD users
241 * asked for it to experiment with.  The default value is two.  One system
242 * which did not work with two timehands was a system with one processor and
243 * a specific PPS device.
244 *
245 * For RTEMS, in uniprocessor configurations, just use one timehands instance
246 * since the update is done with interrupts disabled.
247 *
248 * In SMP configurations, use a fixed set of two timehands until someone
249 * reports an issue.
250 */
251#if defined(RTEMS_SMP)
252static struct timehands th0;
253static struct timehands th1 = {
254        .th_next = &th0
255};
256#endif
257static struct timehands th0 = {
258        .th_counter = &dummy_timecounter,
259        .th_scale = (uint64_t)-1 / 1000000,
260        .th_offset = { .sec = 1 },
261        .th_large_delta = 1000000,
262        .th_generation = UINT_MAX,
263#ifdef __rtems__
264        .th_bintime = { .sec = TOD_SECONDS_1970_THROUGH_1988 },
265        .th_microtime = { TOD_SECONDS_1970_THROUGH_1988, 0 },
266        .th_nanotime = { TOD_SECONDS_1970_THROUGH_1988, 0 },
267        .th_boottime = { .sec = TOD_SECONDS_1970_THROUGH_1988 - 1 },
268#endif /* __rtems__ */
269#if defined(RTEMS_SMP)
270        .th_next = &th1
271#else
272        .th_next = &th0
273#endif
274};
275
276static struct timehands *volatile timehands = &th0;
277struct timecounter *timecounter = &dummy_timecounter;
278#endif /* __rtems__ */
279
280#ifndef __rtems__
281volatile time_t time_second = 1;
282volatile time_t time_uptime = 1;
283#else /* __rtems__ */
284volatile time_t time_second = TOD_SECONDS_1970_THROUGH_1988;
285volatile int32_t time_uptime = 1;
286#endif /* __rtems__ */
287
288#ifndef __rtems__
289/*
290 * The system time is always computed by summing the estimated boot time and the
291 * system uptime. The timehands track boot time, but it changes when the system
292 * time is set by the user, stepped by ntpd or adjusted when resuming. It
293 * is set to new_time - uptime.
294 */
295static int sysctl_kern_boottime(SYSCTL_HANDLER_ARGS);
296SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
297    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
298    sysctl_kern_boottime, "S,timeval",
299    "Estimated system boottime");
300
301SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
302    "");
303static SYSCTL_NODE(_kern_timecounter, OID_AUTO, tc,
304    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
305    "");
306
307static int timestepwarnings;
308SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RWTUN,
309    &timestepwarnings, 0, "Log time steps");
310
311static int timehands_count = 2;
312SYSCTL_INT(_kern_timecounter, OID_AUTO, timehands_count,
313    CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
314    &timehands_count, 0, "Count of timehands in rotation");
315
316struct bintime bt_timethreshold;
317struct bintime bt_tickthreshold;
318sbintime_t sbt_timethreshold;
319sbintime_t sbt_tickthreshold;
320struct bintime tc_tick_bt;
321sbintime_t tc_tick_sbt;
322int tc_precexp;
323int tc_timepercentage = TC_DEFAULTPERC;
324static int sysctl_kern_timecounter_adjprecision(SYSCTL_HANDLER_ARGS);
325SYSCTL_PROC(_kern_timecounter, OID_AUTO, alloweddeviation,
326    CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, 0, 0,
327    sysctl_kern_timecounter_adjprecision, "I",
328    "Allowed time interval deviation in percents");
329
330volatile int rtc_generation = 1;
331
332static int tc_chosen;   /* Non-zero if a specific tc was chosen via sysctl. */
333static char tc_from_tunable[16];
334#endif /* __rtems__ */
335
336static void tc_windup(struct bintime *new_boottimebin);
337#ifndef __rtems__
338static void cpu_tick_calibrate(int);
339#else /* __rtems__ */
340static void _Timecounter_Windup(struct bintime *new_boottimebin,
341    ISR_lock_Context *lock_context);
342#endif /* __rtems__ */
343
344void dtrace_getnanotime(struct timespec *tsp);
345void dtrace_getnanouptime(struct timespec *tsp);
346
347#ifndef __rtems__
348static int
349sysctl_kern_boottime(SYSCTL_HANDLER_ARGS)
350{
351        struct timeval boottime;
352
353        getboottime(&boottime);
354
355/* i386 is the only arch which uses a 32bits time_t */
356#ifdef __amd64__
357#ifdef SCTL_MASK32
358        int tv[2];
359
360        if (req->flags & SCTL_MASK32) {
361                tv[0] = boottime.tv_sec;
362                tv[1] = boottime.tv_usec;
363                return (SYSCTL_OUT(req, tv, sizeof(tv)));
364        }
365#endif
366#endif
367        return (SYSCTL_OUT(req, &boottime, sizeof(boottime)));
368}
369
370static int
371sysctl_kern_timecounter_get(SYSCTL_HANDLER_ARGS)
372{
373        uint32_t ncount;
374        struct timecounter *tc = arg1;
375
376        ncount = tc->tc_get_timecount(tc);
377        return (sysctl_handle_int(oidp, &ncount, 0, req));
378}
379
380static int
381sysctl_kern_timecounter_freq(SYSCTL_HANDLER_ARGS)
382{
383        uint64_t freq;
384        struct timecounter *tc = arg1;
385
386        freq = tc->tc_frequency;
387        return (sysctl_handle_64(oidp, &freq, 0, req));
388}
389#endif /* __rtems__ */
390
391/*
392 * Return the difference between the timehands' counter value now and what
393 * it was when we copied it to the timehands' offset_count.
394 */
395static __inline uint32_t
396tc_delta(struct timehands *th)
397{
398        struct timecounter *tc;
399
400        tc = th->th_counter;
401        return ((tc->tc_get_timecount(tc) - th->th_offset_count) &
402            tc->tc_counter_mask);
403}
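
/*
 * A small sketch (hypothetical values, not part of the original source) of
 * why the masking above keeps tc_delta() correct across a counter wrap: the
 * unsigned subtraction happens modulo 2^32 and the mask reduces the result
 * modulo the implemented counter width, here 24 bits.
 */
static inline uint32_t
tc_delta_wrap_example(void)
{
        uint32_t mask = 0x00ffffff;     /* 24 implemented counter bits */
        uint32_t then = 0x00fffff0;     /* read shortly before the wrap */
        uint32_t now = 0x00000010;      /* read shortly after the wrap */

        return ((now - then) & mask);   /* 0x20, the true distance */
}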
404
405static __inline void
406bintime_add_tc_delta(struct bintime *bt, uint64_t scale,
407    uint64_t large_delta, uint64_t delta)
408{
409        uint64_t x;
410
411        if (__predict_false(delta >= large_delta)) {
412                /* Avoid overflow for scale * delta. */
413                x = (scale >> 32) * delta;
414                bt->sec += x >> 32;
415                bintime_addx(bt, x << 32);
416                bintime_addx(bt, (scale & 0xffffffff) * delta);
417        } else {
418                bintime_addx(bt, scale * delta);
419        }
420}
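
/*
 * The branch above is plain 64x64 -> 128 bit decomposition: with
 * scale = (hi << 32) + lo, scale * delta == ((hi * delta) << 32) + lo * delta,
 * and each partial product fits in 64 bits for a 32-bit delta.  A sketch
 * checking the identity (hypothetical helper, assumes __uint128_t):
 */
static inline int
bintime_split_mul_example(uint64_t scale, uint32_t delta)
{
        __uint128_t full, split;

        full = (__uint128_t)scale * delta;
        split = (((__uint128_t)(scale >> 32) * delta) << 32) +
            (__uint128_t)(scale & 0xffffffff) * delta;
        return (full == split);         /* always true */
}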
421
422/*
423 * Functions for reading the time.  We have to loop until we are sure that
424 * the timehands that we operated on was not updated under our feet.  See
425 * the comment in <sys/time.h> for a description of these 12 functions.
426 */
427
428static __inline void
429bintime_off(struct bintime *bt, u_int off)
430{
431        struct timehands *th;
432        struct bintime *btp;
433        uint64_t scale;
434#ifndef __rtems__
435        u_int delta, gen, large_delta;
436#else /* __rtems__ */
437        uint32_t delta, large_delta;
438        u_int gen;
439#endif /* __rtems__ */
440
441        do {
442                th = timehands;
443                gen = atomic_load_acq_int(&th->th_generation);
444                btp = (struct bintime *)((vm_offset_t)th + off);
445                *bt = *btp;
446                scale = th->th_scale;
447                delta = tc_delta(th);
448                large_delta = th->th_large_delta;
449                atomic_thread_fence_acq();
450#if defined(RTEMS_SMP)
451        } while (gen == 0 || gen != th->th_generation);
452#else
453        } while (gen != th->th_generation);
454#endif
455
456        bintime_add_tc_delta(bt, scale, large_delta, delta);
457}
458#define GETTHBINTIME(dst, member)                                       \
459do {                                                                    \
460        _Static_assert(_Generic(((struct timehands *)NULL)->member,     \
461            struct bintime: 1, default: 0) == 1,                        \
462            "struct timehands member is not of struct bintime type");   \
463        bintime_off(dst, __offsetof(struct timehands, member));         \
464} while (0)
465
466static __inline void
467getthmember(void *out, size_t out_size, u_int off)
468{
469        struct timehands *th;
470        u_int gen;
471
472        do {
473                th = timehands;
474                gen = atomic_load_acq_int(&th->th_generation);
475                memcpy(out, (char *)th + off, out_size);
476                atomic_thread_fence_acq();
477#if defined(RTEMS_SMP)
478        } while (gen == 0 || gen != th->th_generation);
479#else
480        } while (gen != th->th_generation);
481#endif
482}
483#define GETTHMEMBER(dst, member)                                        \
484do {                                                                    \
485        _Static_assert(_Generic(*dst,                                   \
486            __typeof(((struct timehands *)NULL)->member): 1,            \
487            default: 0) == 1,                                           \
488            "*dst and struct timehands member have different types");   \
489        getthmember(dst, sizeof(*dst), __offsetof(struct timehands,     \
490            member));                                                   \
491} while (0)
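
/*
 * The GETTH* readers follow a seqlock-like protocol: the writer zeroes the
 * generation, publishes the new data behind a release fence, and then stores
 * a new non-zero generation; readers retry when they observe zero or a
 * changed generation.  A condensed sketch of the reader side (hypothetical
 * helper built from this file's primitives):
 */
static inline uint32_t
timehands_read_example(void)
{
        struct timehands *th;
        uint32_t value;
        u_int gen;

        do {
                th = timehands;
                gen = atomic_load_acq_int(&th->th_generation);
                value = th->th_offset_count;    /* copy while gen is stable */
                atomic_thread_fence_acq();
        } while (gen == 0 || gen != th->th_generation);

        return (value);
}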
492
493#ifdef FFCLOCK
494void
495fbclock_binuptime(struct bintime *bt)
496{
497
498        GETTHBINTIME(bt, th_offset);
499}
500
501void
502fbclock_nanouptime(struct timespec *tsp)
503{
504        struct bintime bt;
505
506        fbclock_binuptime(&bt);
507        bintime2timespec(&bt, tsp);
508}
509
510void
511fbclock_microuptime(struct timeval *tvp)
512{
513        struct bintime bt;
514
515        fbclock_binuptime(&bt);
516        bintime2timeval(&bt, tvp);
517}
518
519void
520fbclock_bintime(struct bintime *bt)
521{
522
523        GETTHBINTIME(bt, th_bintime);
524}
525
526void
527fbclock_nanotime(struct timespec *tsp)
528{
529        struct bintime bt;
530
531        fbclock_bintime(&bt);
532        bintime2timespec(&bt, tsp);
533}
534
535void
536fbclock_microtime(struct timeval *tvp)
537{
538        struct bintime bt;
539
540        fbclock_bintime(&bt);
541        bintime2timeval(&bt, tvp);
542}
543
544void
545fbclock_getbinuptime(struct bintime *bt)
546{
547
548        GETTHMEMBER(bt, th_offset);
549}
550
551void
552fbclock_getnanouptime(struct timespec *tsp)
553{
554        struct bintime bt;
555
556        GETTHMEMBER(&bt, th_offset);
557        bintime2timespec(&bt, tsp);
558}
559
560void
561fbclock_getmicrouptime(struct timeval *tvp)
562{
563        struct bintime bt;
564
565        GETTHMEMBER(&bt, th_offset);
566        bintime2timeval(&bt, tvp);
567}
568
569void
570fbclock_getbintime(struct bintime *bt)
571{
572
573        GETTHMEMBER(bt, th_bintime);
574}
575
576void
577fbclock_getnanotime(struct timespec *tsp)
578{
579
580        GETTHMEMBER(tsp, th_nanotime);
581}
582
583void
584fbclock_getmicrotime(struct timeval *tvp)
585{
586
587        GETTHMEMBER(tvp, th_microtime);
588}
589#else /* !FFCLOCK */
590
591void
592binuptime(struct bintime *bt)
593{
594
595        GETTHBINTIME(bt, th_offset);
596}
597#ifdef __rtems__
598sbintime_t
599_Timecounter_Sbinuptime(void)
600{
601        struct timehands *th;
602        sbintime_t sbt;
603        uint64_t scale;
604        uint32_t delta;
605        uint32_t large_delta;
606        u_int gen;
607
608        do {
609                th = timehands;
610                gen = atomic_load_acq_int(&th->th_generation);
611                sbt = bttosbt(th->th_offset);
612                scale = th->th_scale;
613                delta = tc_delta(th);
614                large_delta = th->th_large_delta;
615                atomic_thread_fence_acq();
616#if defined(RTEMS_SMP)
617        } while (gen == 0 || gen != th->th_generation);
618#else
619        } while (gen != th->th_generation);
620#endif
621
622        if (__predict_false(delta >= large_delta)) {
623                /* Avoid overflow for scale * delta. */
624                sbt += (scale >> 32) * delta;
625                sbt += ((scale & 0xffffffff) * delta) >> 32;
626        } else {
627                sbt += (scale * delta) >> 32;
628        }
629
630        return (sbt);
631}
632#endif /* __rtems__ */
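
/*
 * For reference, sbintime_t packs a bintime into a single 64-bit value with
 * 32 bits of seconds and the top 32 bits of the fraction.  A sketch of the
 * conversion performed by bttosbt() above (hypothetical helper name):
 */
static inline sbintime_t
bttosbt_example(struct bintime bt)
{

        return (((sbintime_t)bt.sec << 32) + (bt.frac >> 32));
}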
633
634void
635nanouptime(struct timespec *tsp)
636{
637        struct bintime bt;
638
639        binuptime(&bt);
640        bintime2timespec(&bt, tsp);
641}
642
643void
644microuptime(struct timeval *tvp)
645{
646        struct bintime bt;
647
648        binuptime(&bt);
649        bintime2timeval(&bt, tvp);
650}
651
652void
653bintime(struct bintime *bt)
654{
655
656        GETTHBINTIME(bt, th_bintime);
657}
658
659void
660nanotime(struct timespec *tsp)
661{
662        struct bintime bt;
663
664        bintime(&bt);
665        bintime2timespec(&bt, tsp);
666}
667
668void
669microtime(struct timeval *tvp)
670{
671        struct bintime bt;
672
673        bintime(&bt);
674        bintime2timeval(&bt, tvp);
675}
676
677void
678getbinuptime(struct bintime *bt)
679{
680
681        GETTHMEMBER(bt, th_offset);
682}
683
684void
685getnanouptime(struct timespec *tsp)
686{
687        struct bintime bt;
688
689        GETTHMEMBER(&bt, th_offset);
690        bintime2timespec(&bt, tsp);
691}
692
693void
694getmicrouptime(struct timeval *tvp)
695{
696        struct bintime bt;
697
698        GETTHMEMBER(&bt, th_offset);
699        bintime2timeval(&bt, tvp);
700}
701
702void
703getbintime(struct bintime *bt)
704{
705
706        GETTHMEMBER(bt, th_bintime);
707}
708
709void
710getnanotime(struct timespec *tsp)
711{
712
713        GETTHMEMBER(tsp, th_nanotime);
714}
715
716void
717getmicrotime(struct timeval *tvp)
718{
719
720        GETTHMEMBER(tvp, th_microtime);
721}
722#endif /* FFCLOCK */
723
724#ifdef __rtems__
725void
726rtems_clock_get_boot_time(struct timespec *boottime)
727{
728        struct bintime boottimebin;
729
730        getboottimebin(&boottimebin);
731        bintime2timespec(&boottimebin, boottime);
732}
733#endif /* __rtems__ */
734void
735getboottime(struct timeval *boottime)
736{
737        struct bintime boottimebin;
738
739        getboottimebin(&boottimebin);
740        bintime2timeval(&boottimebin, boottime);
741}
742
743void
744getboottimebin(struct bintime *boottimebin)
745{
746
747        GETTHMEMBER(boottimebin, th_boottime);
748}
749
750#ifdef FFCLOCK
751/*
752 * Support for feed-forward synchronization algorithms. This is heavily inspired
753 * by the timehands mechanism but kept independent from it. The *_windup()
754 * functions are connected just enough to avoid accessing the timecounter
755 * hardware more than necessary.
756 */
757
758/* Feed-forward clock estimates kept updated by the synchronization daemon. */
759struct ffclock_estimate ffclock_estimate;
760struct bintime ffclock_boottime;        /* Feed-forward boot time estimate. */
761uint32_t ffclock_status;                /* Feed-forward clock status. */
762int8_t ffclock_updated;                 /* New estimates are available. */
763struct mtx ffclock_mtx;                 /* Mutex on ffclock_estimate. */
764
765struct fftimehands {
766        struct ffclock_estimate cest;
767        struct bintime          tick_time;
768        struct bintime          tick_time_lerp;
769        ffcounter               tick_ffcount;
770        uint64_t                period_lerp;
771        volatile uint8_t        gen;
772        struct fftimehands      *next;
773};
774
775#define NUM_ELEMENTS(x) (sizeof(x) / sizeof(*x))
776
777static struct fftimehands ffth[10];
778static struct fftimehands *volatile fftimehands = ffth;
779
780static void
781ffclock_init(void)
782{
783        struct fftimehands *cur;
784        struct fftimehands *last;
785
786        memset(ffth, 0, sizeof(ffth));
787
788        last = ffth + NUM_ELEMENTS(ffth) - 1;
789        for (cur = ffth; cur < last; cur++)
790                cur->next = cur + 1;
791        last->next = ffth;
792
793        ffclock_updated = 0;
794        ffclock_status = FFCLOCK_STA_UNSYNC;
795        mtx_init(&ffclock_mtx, "ffclock lock", NULL, MTX_DEF);
796}
797
798/*
799 * Reset the feed-forward clock estimates. Called from inittodr() to get things
800 * kick-started; it uses the timecounter nominal frequency as a first period
801 * estimate. Note: this function may be called several times just after boot.
802 * Note: this is the only function that sets the value of boot time for the
803 * monotonic (i.e. uptime) version of the feed-forward clock.
804 */
805void
806ffclock_reset_clock(struct timespec *ts)
807{
808        struct timecounter *tc;
809        struct ffclock_estimate cest;
810
811        tc = timehands->th_counter;
812        memset(&cest, 0, sizeof(struct ffclock_estimate));
813
814        timespec2bintime(ts, &ffclock_boottime);
815        timespec2bintime(ts, &(cest.update_time));
816        ffclock_read_counter(&cest.update_ffcount);
817        cest.leapsec_next = 0;
818        cest.period = ((1ULL << 63) / tc->tc_frequency) << 1;
819        cest.errb_abs = 0;
820        cest.errb_rate = 0;
821        cest.status = FFCLOCK_STA_UNSYNC;
822        cest.leapsec_total = 0;
823        cest.leapsec = 0;
824
825        mtx_lock(&ffclock_mtx);
826        bcopy(&cest, &ffclock_estimate, sizeof(struct ffclock_estimate));
827        ffclock_updated = INT8_MAX;
828        mtx_unlock(&ffclock_mtx);
829
830        printf("ffclock reset: %s (%llu Hz), time = %ld.%09lu\n", tc->tc_name,
831            (unsigned long long)tc->tc_frequency, (long)ts->tv_sec,
832            (unsigned long)ts->tv_nsec);
833}
834
835/*
836 * Subroutine to convert a time interval measured in raw counter units to time
837 * in seconds stored in bintime format.
838 * NOTE: bintime_mul() requires a u_int argument, but the value of the
839 * ffcounter may be larger than the maximum value of u_int (on 32-bit
840 * architectures). Loop to consume the extra cycles.
841 */
842static void
843ffclock_convert_delta(ffcounter ffdelta, uint64_t period, struct bintime *bt)
844{
845        struct bintime bt2;
846        ffcounter delta, delta_max;
847
848        delta_max = (1ULL << (8 * sizeof(unsigned int))) - 1;
849        bintime_clear(bt);
850        do {
851                if (ffdelta > delta_max)
852                        delta = delta_max;
853                else
854                        delta = ffdelta;
855                bt2.sec = 0;
856                bt2.frac = period;
857                bintime_mul(&bt2, (unsigned int)delta);
858                bintime_add(bt, &bt2);
859                ffdelta -= delta;
860        } while (ffdelta > 0);
861}
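
/*
 * Numeric sketch of the conversion above (hypothetical values): the period is
 * the counter tick length as a 64-bit binary fraction of a second, so for a
 * 1 MHz counter period ~= 2^64 / 10^6 and 500 raw units convert to roughly
 * 500 microseconds.
 */
static void
ffclock_convert_delta_example(struct bintime *bt)
{
        uint64_t period_1mhz = ((1ULL << 63) / 1000000) << 1;

        ffclock_convert_delta(500, period_1mhz, bt);    /* bt ~= 0.000500 s */
}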
862
863/*
864 * Update the fftimehands.
865 * Push the tick ffcount and time(s) forward based on current clock estimate.
866 * The conversion from ffcounter to bintime relies on the difference clock
867 * principle, whose accuracy relies on computing small time intervals. If a new
868 * clock estimate has been passed by the synchronisation daemon, make it
869 * current, and compute the linear interpolation for monotonic time if needed.
870 */
871static void
872ffclock_windup(unsigned int delta)
873{
874        struct ffclock_estimate *cest;
875        struct fftimehands *ffth;
876        struct bintime bt, gap_lerp;
877        ffcounter ffdelta;
878        uint64_t frac;
879        unsigned int polling;
880        uint8_t forward_jump, ogen;
881
882        /*
883         * Pick the next timehand, copy current ffclock estimates and move tick
884         * times and counter forward.
885         */
886        forward_jump = 0;
887        ffth = fftimehands->next;
888        ogen = ffth->gen;
889        ffth->gen = 0;
890        cest = &ffth->cest;
891        bcopy(&fftimehands->cest, cest, sizeof(struct ffclock_estimate));
892        ffdelta = (ffcounter)delta;
893        ffth->period_lerp = fftimehands->period_lerp;
894
895        ffth->tick_time = fftimehands->tick_time;
896        ffclock_convert_delta(ffdelta, cest->period, &bt);
897        bintime_add(&ffth->tick_time, &bt);
898
899        ffth->tick_time_lerp = fftimehands->tick_time_lerp;
900        ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt);
901        bintime_add(&ffth->tick_time_lerp, &bt);
902
903        ffth->tick_ffcount = fftimehands->tick_ffcount + ffdelta;
904
905        /*
906         * Assess the status of the clock.  If the last update is too old,
907         * it is likely that the synchronisation daemon is dead and the
908         * clock is free running.
909         */
910        if (ffclock_updated == 0) {
911                ffdelta = ffth->tick_ffcount - cest->update_ffcount;
912                ffclock_convert_delta(ffdelta, cest->period, &bt);
913                if (bt.sec > 2 * FFCLOCK_SKM_SCALE)
914                        ffclock_status |= FFCLOCK_STA_UNSYNC;
915        }
916
917        /*
918         * If available, grab updated clock estimates and make them current.
919         * Recompute time at this tick using the updated estimates. The clock
920         * estimates passed in by the feed-forward synchronisation daemon may
921         * result in a time conversion that is not monotonically increasing
922         * (just after the update). time_lerp is a particular linear
923         * interpolation over the synchronisation algorithm's polling period
924         * that ensures monotonicity for the clock ids requesting it.
925         */
926        if (ffclock_updated > 0) {
927                bcopy(&ffclock_estimate, cest, sizeof(struct ffclock_estimate));
928                ffdelta = ffth->tick_ffcount - cest->update_ffcount;
929                ffth->tick_time = cest->update_time;
930                ffclock_convert_delta(ffdelta, cest->period, &bt);
931                bintime_add(&ffth->tick_time, &bt);
932
933                /* ffclock_reset sets ffclock_updated to INT8_MAX */
934                if (ffclock_updated == INT8_MAX)
935                        ffth->tick_time_lerp = ffth->tick_time;
936
937                if (bintime_cmp(&ffth->tick_time, &ffth->tick_time_lerp, >))
938                        forward_jump = 1;
939                else
940                        forward_jump = 0;
941
942                bintime_clear(&gap_lerp);
943                if (forward_jump) {
944                        gap_lerp = ffth->tick_time;
945                        bintime_sub(&gap_lerp, &ffth->tick_time_lerp);
946                } else {
947                        gap_lerp = ffth->tick_time_lerp;
948                        bintime_sub(&gap_lerp, &ffth->tick_time);
949                }
950
951                /*
952                 * The reset from the RTC clock may be far from accurate, and
953                 * reducing the gap between real time and interpolated time
954                 * could take a very long time if the interpolated clock insists
955                 * on strict monotonicity. The clock is reset under very strict
956                 * conditions (kernel time is known to be wrong and the
957                 * synchronization daemon has been restarted recently).
958                 * ffclock_boottime absorbs the jump to ensure boot time is
959                 * correct and uptime functions stay consistent.
960                 */
961                if (((ffclock_status & FFCLOCK_STA_UNSYNC) == FFCLOCK_STA_UNSYNC) &&
962                    ((cest->status & FFCLOCK_STA_UNSYNC) == 0) &&
963                    ((cest->status & FFCLOCK_STA_WARMUP) == FFCLOCK_STA_WARMUP)) {
964                        if (forward_jump)
965                                bintime_add(&ffclock_boottime, &gap_lerp);
966                        else
967                                bintime_sub(&ffclock_boottime, &gap_lerp);
968                        ffth->tick_time_lerp = ffth->tick_time;
969                        bintime_clear(&gap_lerp);
970                }
971
972                ffclock_status = cest->status;
973                ffth->period_lerp = cest->period;
974
975                /*
976                 * Compute corrected period used for the linear interpolation of
977                 * time. The rate of linear interpolation is capped to 5000PPM
978                 * (5ms/s).
979                 */
980                if (bintime_isset(&gap_lerp)) {
981                        ffdelta = cest->update_ffcount;
982                        ffdelta -= fftimehands->cest.update_ffcount;
983                        ffclock_convert_delta(ffdelta, cest->period, &bt);
984                        polling = bt.sec;
985                        bt.sec = 0;
986                        bt.frac = 5000000 * (uint64_t)18446744073LL;
987                        bintime_mul(&bt, polling);
988                        if (bintime_cmp(&gap_lerp, &bt, >))
989                                gap_lerp = bt;
990
991                        /* Approximate 1 sec by 1-(1/2^64) to ease arithmetic */
992                        frac = 0;
993                        if (gap_lerp.sec > 0) {
994                                frac -= 1;
995                                frac /= ffdelta / gap_lerp.sec;
996                        }
997                        frac += gap_lerp.frac / ffdelta;
998
999                        if (forward_jump)
1000                                ffth->period_lerp += frac;
1001                        else
1002                                ffth->period_lerp -= frac;
1003                }
1004
1005                ffclock_updated = 0;
1006        }
1007        if (++ogen == 0)
1008                ogen = 1;
1009        ffth->gen = ogen;
1010        fftimehands = ffth;
1011}
1012
1013/*
1014 * Adjust the fftimehands when the timecounter is changed. Stating the obvious,
1015 * the old and new hardware counters cannot be read simultaneously. tc_windup()
1016 * does read the two counters 'back to back', but a few cycles are effectively
1017 * lost, and not accumulated in tick_ffcount. This is a fairly radical
1018 * operation for a feed-forward synchronization daemon, and it is the daemon's
1019 * job not to push irrelevant data to the kernel. Because there is no locking
1020 * here, simply force the pending or next update to be ignored, to give the
1021 * daemon a chance to realize the counter has changed.
1022 */
1023static void
1024ffclock_change_tc(struct timehands *th)
1025{
1026        struct fftimehands *ffth;
1027        struct ffclock_estimate *cest;
1028        struct timecounter *tc;
1029        uint8_t ogen;
1030
1031        tc = th->th_counter;
1032        ffth = fftimehands->next;
1033        ogen = ffth->gen;
1034        ffth->gen = 0;
1035
1036        cest = &ffth->cest;
1037        bcopy(&(fftimehands->cest), cest, sizeof(struct ffclock_estimate));
1038        cest->period = ((1ULL << 63) / tc->tc_frequency ) << 1;
1039        cest->errb_abs = 0;
1040        cest->errb_rate = 0;
1041        cest->status |= FFCLOCK_STA_UNSYNC;
1042
1043        ffth->tick_ffcount = fftimehands->tick_ffcount;
1044        ffth->tick_time_lerp = fftimehands->tick_time_lerp;
1045        ffth->tick_time = fftimehands->tick_time;
1046        ffth->period_lerp = cest->period;
1047
1048        /* Do not lock but ignore next update from synchronization daemon. */
1049        ffclock_updated--;
1050
1051        if (++ogen == 0)
1052                ogen = 1;
1053        ffth->gen = ogen;
1054        fftimehands = ffth;
1055}
1056
1057/*
1058 * Retrieve feed-forward counter and time of last kernel tick.
1059 */
1060void
1061ffclock_last_tick(ffcounter *ffcount, struct bintime *bt, uint32_t flags)
1062{
1063        struct fftimehands *ffth;
1064        uint8_t gen;
1065
1066        /*
1067         * No locking, but check that the generation has not changed.  The
1068         * last tick state is returned as is; no counter delta is applied.
1069         */
1070        do {
1071                ffth = fftimehands;
1072                gen = ffth->gen;
1073                if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP)
1074                        *bt = ffth->tick_time_lerp;
1075                else
1076                        *bt = ffth->tick_time;
1077                *ffcount = ffth->tick_ffcount;
1078        } while (gen == 0 || gen != ffth->gen);
1079}
1080
1081/*
1082 * Absolute clock conversion. Low level function to convert ffcounter to
1083 * bintime. The ffcounter is converted using the current ffclock period estimate
1084 * or the "interpolated period" to ensure monotonicity.
1085 * NOTE: this conversion may have been deferred, and the clock updated since the
1086 * hardware counter has been read.
1087 */
1088void
1089ffclock_convert_abs(ffcounter ffcount, struct bintime *bt, uint32_t flags)
1090{
1091        struct fftimehands *ffth;
1092        struct bintime bt2;
1093        ffcounter ffdelta;
1094        uint8_t gen;
1095
1096        /*
1097         * No locking but check generation has not changed. Also need to make
1098         * sure ffdelta is positive, i.e. ffcount > tick_ffcount.
1099         */
1100        do {
1101                ffth = fftimehands;
1102                gen = ffth->gen;
1103                if (ffcount > ffth->tick_ffcount)
1104                        ffdelta = ffcount - ffth->tick_ffcount;
1105                else
1106                        ffdelta = ffth->tick_ffcount - ffcount;
1107
1108                if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP) {
1109                        *bt = ffth->tick_time_lerp;
1110                        ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt2);
1111                } else {
1112                        *bt = ffth->tick_time;
1113                        ffclock_convert_delta(ffdelta, ffth->cest.period, &bt2);
1114                }
1115
1116                if (ffcount > ffth->tick_ffcount)
1117                        bintime_add(bt, &bt2);
1118                else
1119                        bintime_sub(bt, &bt2);
1120        } while (gen == 0 || gen != ffth->gen);
1121}
1122
1123/*
1124 * Difference clock conversion.
1125 * Low level function to convert a time interval measured in raw counter units
1126 * into bintime. The difference clock allows measuring small intervals much more
1127 * reliably than the absolute clock.
1128 */
1129void
1130ffclock_convert_diff(ffcounter ffdelta, struct bintime *bt)
1131{
1132        struct fftimehands *ffth;
1133        uint8_t gen;
1134
1135        /* No locking but check generation has not changed. */
1136        do {
1137                ffth = fftimehands;
1138                gen = ffth->gen;
1139                ffclock_convert_delta(ffdelta, ffth->cest.period, bt);
1140        } while (gen == 0 || gen != ffth->gen);
1141}
1142
1143/*
1144 * Access to current ffcounter value.
1145 */
1146void
1147ffclock_read_counter(ffcounter *ffcount)
1148{
1149        struct timehands *th;
1150        struct fftimehands *ffth;
1151        unsigned int gen, delta;
1152
1153        /*
1154         * ffclock_windup() called from tc_windup(), safe to rely on
1155         * th->th_generation only, for correct delta and ffcounter.
1156         */
1157        do {
1158                th = timehands;
1159                gen = atomic_load_acq_int(&th->th_generation);
1160                ffth = fftimehands;
1161                delta = tc_delta(th);
1162                *ffcount = ffth->tick_ffcount;
1163                atomic_thread_fence_acq();
1164        } while (gen == 0 || gen != th->th_generation);
1165
1166        *ffcount += delta;
1167}
1168
1169void
1170binuptime(struct bintime *bt)
1171{
1172
1173        binuptime_fromclock(bt, sysclock_active);
1174}
1175
1176void
1177nanouptime(struct timespec *tsp)
1178{
1179
1180        nanouptime_fromclock(tsp, sysclock_active);
1181}
1182
1183void
1184microuptime(struct timeval *tvp)
1185{
1186
1187        microuptime_fromclock(tvp, sysclock_active);
1188}
1189
1190void
1191bintime(struct bintime *bt)
1192{
1193
1194        bintime_fromclock(bt, sysclock_active);
1195}
1196
1197void
1198nanotime(struct timespec *tsp)
1199{
1200
1201        nanotime_fromclock(tsp, sysclock_active);
1202}
1203
1204void
1205microtime(struct timeval *tvp)
1206{
1207
1208        microtime_fromclock(tvp, sysclock_active);
1209}
1210
1211void
1212getbinuptime(struct bintime *bt)
1213{
1214
1215        getbinuptime_fromclock(bt, sysclock_active);
1216}
1217
1218void
1219getnanouptime(struct timespec *tsp)
1220{
1221
1222        getnanouptime_fromclock(tsp, sysclock_active);
1223}
1224
1225void
1226getmicrouptime(struct timeval *tvp)
1227{
1228
1229        getmicrouptime_fromclock(tvp, sysclock_active);
1230}
1231
1232void
1233getbintime(struct bintime *bt)
1234{
1235
1236        getbintime_fromclock(bt, sysclock_active);
1237}
1238
1239void
1240getnanotime(struct timespec *tsp)
1241{
1242
1243        getnanotime_fromclock(tsp, sysclock_active);
1244}
1245
1246void
1247getmicrotime(struct timeval *tvp)
1248{
1249
1250        getmicrotime_fromclock(tvp, sysclock_active);
1251}
1252
1253#endif /* FFCLOCK */
1254
1255#ifndef __rtems__
1256/*
1257 * This is a clone of getnanotime and used for walltimestamps.
1258 * The dtrace_ prefix prevents fbt from creating probes for
1259 * it so walltimestamp can be safely used in all fbt probes.
1260 */
1261void
1262dtrace_getnanotime(struct timespec *tsp)
1263{
1264
1265        GETTHMEMBER(tsp, th_nanotime);
1266}
1267
1268/*
1269 * This is a clone of getnanouptime used for time since boot.
1270 * The dtrace_ prefix prevents fbt from creating probes for
1271 * it so the uptime can be safely used in all fbt probes.
1272 */
1273void
1274dtrace_getnanouptime(struct timespec *tsp)
1275{
1276        struct bintime bt;
1277
1278        GETTHMEMBER(&bt, th_offset);
1279        bintime2timespec(&bt, tsp);
1280}
1281#endif /* __rtems__ */
1282
1283#ifdef FFCLOCK
1284/*
1285 * System clock currently providing time to the system. Modifiable via sysctl
1286 * when the FFCLOCK option is defined.
1287 */
1288int sysclock_active = SYSCLOCK_FBCK;
1289#endif
1290
1291/* Internal NTP status and error estimates. */
1292extern int time_status;
1293extern long time_esterror;
1294
1295#ifndef __rtems__
1296/*
1297 * Take a snapshot of sysclock data which can be used to compare system clocks
1298 * and generate timestamps after the fact.
1299 */
1300void
1301sysclock_getsnapshot(struct sysclock_snap *clock_snap, int fast)
1302{
1303        struct fbclock_info *fbi;
1304        struct timehands *th;
1305        struct bintime bt;
1306        unsigned int delta, gen;
1307#ifdef FFCLOCK
1308        ffcounter ffcount;
1309        struct fftimehands *ffth;
1310        struct ffclock_info *ffi;
1311        struct ffclock_estimate cest;
1312
1313        ffi = &clock_snap->ff_info;
1314#endif
1315
1316        fbi = &clock_snap->fb_info;
1317        delta = 0;
1318
1319        do {
1320                th = timehands;
1321                gen = atomic_load_acq_int(&th->th_generation);
1322                fbi->th_scale = th->th_scale;
1323                fbi->tick_time = th->th_offset;
1324#ifdef FFCLOCK
1325                ffth = fftimehands;
1326                ffi->tick_time = ffth->tick_time_lerp;
1327                ffi->tick_time_lerp = ffth->tick_time_lerp;
1328                ffi->period = ffth->cest.period;
1329                ffi->period_lerp = ffth->period_lerp;
1330                clock_snap->ffcount = ffth->tick_ffcount;
1331                cest = ffth->cest;
1332#endif
1333                if (!fast)
1334                        delta = tc_delta(th);
1335                atomic_thread_fence_acq();
1336        } while (gen == 0 || gen != th->th_generation);
1337
1338        clock_snap->delta = delta;
1339#ifdef FFCLOCK
1340        clock_snap->sysclock_active = sysclock_active;
1341#endif
1342
1343        /* Record feedback clock status and error. */
1344        clock_snap->fb_info.status = time_status;
1345        /* XXX: Very crude estimate of feedback clock error. */
1346        bt.sec = time_esterror / 1000000;
1347        bt.frac = (time_esterror - bt.sec * 1000000) *
1348            (uint64_t)18446744073709ULL;
1349        clock_snap->fb_info.error = bt;
1350
1351#ifdef FFCLOCK
1352        if (!fast)
1353                clock_snap->ffcount += delta;
1354
1355        /* Record feed-forward clock leap second adjustment. */
1356        ffi->leapsec_adjustment = cest.leapsec_total;
1357        if (clock_snap->ffcount > cest.leapsec_next)
1358                ffi->leapsec_adjustment -= cest.leapsec;
1359
1360        /* Record feed-forward clock status and error. */
1361        clock_snap->ff_info.status = cest.status;
1362        ffcount = clock_snap->ffcount - cest.update_ffcount;
1363        ffclock_convert_delta(ffcount, cest.period, &bt);
1364        /* 18446744073709 = int(2^64/1e12), err_bound_rate in [ps/s]. */
1365        bintime_mul(&bt, cest.errb_rate * (uint64_t)18446744073709ULL);
1366        /* 18446744073 = int(2^64 / 1e9), since err_abs in [ns]. */
1367        bintime_addx(&bt, cest.errb_abs * (uint64_t)18446744073ULL);
1368        clock_snap->ff_info.error = bt;
1369#endif
1370}
1371
1372/*
1373 * Convert a sysclock snapshot into a struct bintime based on the specified
1374 * clock source and flags.
1375 */
1376int
1377sysclock_snap2bintime(struct sysclock_snap *cs, struct bintime *bt,
1378    int whichclock, uint32_t flags)
1379{
1380        struct bintime boottimebin;
1381#ifdef FFCLOCK
1382        struct bintime bt2;
1383        uint64_t period;
1384#endif
1385
1386        switch (whichclock) {
1387        case SYSCLOCK_FBCK:
1388                *bt = cs->fb_info.tick_time;
1389
1390                /* If snapshot was created with !fast, delta will be >0. */
1391                if (cs->delta > 0)
1392                        bintime_addx(bt, cs->fb_info.th_scale * cs->delta);
1393
1394                if ((flags & FBCLOCK_UPTIME) == 0) {
1395                        getboottimebin(&boottimebin);
1396                        bintime_add(bt, &boottimebin);
1397                }
1398                break;
1399#ifdef FFCLOCK
1400        case SYSCLOCK_FFWD:
1401                if (flags & FFCLOCK_LERP) {
1402                        *bt = cs->ff_info.tick_time_lerp;
1403                        period = cs->ff_info.period_lerp;
1404                } else {
1405                        *bt = cs->ff_info.tick_time;
1406                        period = cs->ff_info.period;
1407                }
1408
1409                /* If snapshot was created with !fast, delta will be >0. */
1410                if (cs->delta > 0) {
1411                        ffclock_convert_delta(cs->delta, period, &bt2);
1412                        bintime_add(bt, &bt2);
1413                }
1414
1415                /* Leap second adjustment. */
1416                if (flags & FFCLOCK_LEAPSEC)
1417                        bt->sec -= cs->ff_info.leapsec_adjustment;
1418
1419                /* Boot time adjustment, for uptime/monotonic clocks. */
1420                if (flags & FFCLOCK_UPTIME)
1421                        bintime_sub(bt, &ffclock_boottime);
1422                break;
1423#endif
1424        default:
1425                return (EINVAL);
1426                break;
1427        }
1428
1429        return (0);
1430}
1431#endif /* __rtems__ */
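
#ifndef __rtems__
/*
 * Usage sketch for the snapshot API above (hypothetical helper): capture the
 * clock state once, then derive an uptime and a UTC timestamp for the same
 * instant.
 */
static void
sysclock_snapshot_example(struct bintime *up, struct bintime *utc)
{
        struct sysclock_snap snap;

        sysclock_getsnapshot(&snap, 0); /* !fast: capture a counter delta */
        sysclock_snap2bintime(&snap, up, SYSCLOCK_FBCK, FBCLOCK_UPTIME);
        sysclock_snap2bintime(&snap, utc, SYSCLOCK_FBCK, 0);
}
#endif /* __rtems__ */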
1432
1433/*
1434 * Initialize a new timecounter and possibly use it.
1435 */
1436void
1437tc_init(struct timecounter *tc)
1438{
1439#ifndef __rtems__
1440        uint32_t u;
1441        struct sysctl_oid *tc_root;
1442
1443        u = tc->tc_frequency / tc->tc_counter_mask;
1444        /* XXX: We need some margin here, 10% is a guess */
1445        u *= 11;
1446        u /= 10;
1447        if (u > hz && tc->tc_quality >= 0) {
1448                tc->tc_quality = -2000;
1449                if (bootverbose) {
1450                        printf("Timecounter \"%s\" frequency %ju Hz",
1451                            tc->tc_name, (uintmax_t)tc->tc_frequency);
1452                        printf(" -- Insufficient hz, needs at least %u\n", u);
1453                }
1454        } else if (tc->tc_quality >= 0 || bootverbose) {
1455                printf("Timecounter \"%s\" frequency %ju Hz quality %d\n",
1456                    tc->tc_name, (uintmax_t)tc->tc_frequency,
1457                    tc->tc_quality);
1458        }
1459
1460        /*
1461         * Set up sysctl tree for this counter.
1462         */
1463        tc_root = SYSCTL_ADD_NODE_WITH_LABEL(NULL,
1464            SYSCTL_STATIC_CHILDREN(_kern_timecounter_tc), OID_AUTO, tc->tc_name,
1465            CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1466            "timecounter description", "timecounter");
1467        SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
1468            "mask", CTLFLAG_RD, &(tc->tc_counter_mask), 0,
1469            "mask for implemented bits");
1470        SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
1471            "counter", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, tc,
1472            sizeof(*tc), sysctl_kern_timecounter_get, "IU",
1473            "current timecounter value");
1474        SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
1475            "frequency", CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, tc,
1476            sizeof(*tc), sysctl_kern_timecounter_freq, "QU",
1477            "timecounter frequency");
1478        SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
1479            "quality", CTLFLAG_RD, &(tc->tc_quality), 0,
1480            "goodness of time counter");
1481
1482        mtx_lock(&tc_lock);
1483        tc->tc_next = timecounters;
1484        timecounters = tc;
1485
1486        /*
1487         * Do not automatically switch if the current tc was specifically
1488         * chosen.  Never automatically use a timecounter with negative quality.
1489         * Even though we run on the dummy counter, switching here may be
1490         * worse since this timecounter may not be monotonic.
1491         */
1492        if (tc_chosen)
1493                goto unlock;
1494        if (tc->tc_quality < 0)
1495                goto unlock;
1496        if (tc_from_tunable[0] != '\0' &&
1497            strcmp(tc->tc_name, tc_from_tunable) == 0) {
1498                tc_chosen = 1;
1499                tc_from_tunable[0] = '\0';
1500        } else {
1501                if (tc->tc_quality < timecounter->tc_quality)
1502                        goto unlock;
1503                if (tc->tc_quality == timecounter->tc_quality &&
1504                    tc->tc_frequency < timecounter->tc_frequency)
1505                        goto unlock;
1506        }
1507        (void)tc->tc_get_timecount(tc);
1508        timecounter = tc;
1509unlock:
1510        mtx_unlock(&tc_lock);
1511#else /* __rtems__ */
1512        if (tc->tc_quality < timecounter->tc_quality)
1513                return;
1514        if (tc->tc_quality == timecounter->tc_quality &&
1515            tc->tc_frequency < timecounter->tc_frequency)
1516                return;
1517        timecounter = tc;
1518        tc_windup(NULL);
1519#endif /* __rtems__ */
1520}
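
/*
 * Sketch of a clock driver registering its counter with tc_init()
 * (hypothetical hardware and numbers):
 */
static uint32_t
example_get_timecount(struct timecounter *tc)
{

        return (0);     /* read the free-running hardware counter here */
}

static struct timecounter example_timecounter = {
        .tc_get_timecount = example_get_timecount,
        .tc_counter_mask = 0xffffffff,  /* 32 implemented bits */
        .tc_frequency = 24000000,       /* 24 MHz */
        .tc_name = "example",
        .tc_quality = 800
};

static void
example_install(void)
{

        tc_init(&example_timecounter);
}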
1521
1522/* Report the frequency of the current timecounter. */
1523uint64_t
1524tc_getfrequency(void)
1525{
1526
1527        return (timehands->th_counter->tc_frequency);
1528}
1529
1530#ifndef __rtems__
1531static bool
1532sleeping_on_old_rtc(struct thread *td)
1533{
1534
1535        /*
1536         * td_rtcgen is modified by curthread when it is running,
1537         * and by other threads in this function.  By finding the thread
1538         * on a sleepqueue and holding the lock on the sleepqueue
1539         * chain, we guarantee that the thread is not running and that
1540         * modifying td_rtcgen is safe.  Setting td_rtcgen to zero informs
1541         * the thread that it was woken due to a real-time clock adjustment.
1542         * (The declaration of td_rtcgen refers to this comment.)
1543         */
1544        if (td->td_rtcgen != 0 && td->td_rtcgen != rtc_generation) {
1545                td->td_rtcgen = 0;
1546                return (true);
1547        }
1548        return (false);
1549}
1550
1551static struct mtx tc_setclock_mtx;
1552MTX_SYSINIT(tc_setclock_init, &tc_setclock_mtx, "tcsetc", MTX_SPIN);
1553#endif /* __rtems__ */
1554
1555/*
1556 * Step our concept of UTC.  This is done by modifying our estimate of
1557 * when we booted.
1558 */
1559void
1560#ifndef __rtems__
1561tc_setclock(struct timespec *ts)
1562#else /* __rtems__ */
1563_Timecounter_Set_clock(const struct bintime *_bt,
1564    ISR_lock_Context *lock_context)
1565#endif /* __rtems__ */
1566{
1567#ifndef __rtems__
1568        struct timespec tbef, taft;
1569#endif /* __rtems__ */
1570        struct bintime bt, bt2;
1571
1572#ifndef __rtems__
1573        timespec2bintime(ts, &bt);
1574        nanotime(&tbef);
1575        mtx_lock_spin(&tc_setclock_mtx);
1576        cpu_tick_calibrate(1);
1577#else /* __rtems__ */
1578        bt = *_bt;
1579#endif /* __rtems__ */
1580        binuptime(&bt2);
1581        bintime_sub(&bt, &bt2);
1582
1583        /* XXX fiddle all the little crinkly bits around the fiords... */
1584#ifndef __rtems__
1585        tc_windup(&bt);
1586        mtx_unlock_spin(&tc_setclock_mtx);
1587
1588        /* Avoid rtc_generation == 0, since td_rtcgen == 0 is special. */
1589        atomic_add_rel_int(&rtc_generation, 2);
1590        sleepq_chains_remove_matching(sleeping_on_old_rtc);
1591        if (timestepwarnings) {
1592                nanotime(&taft);
1593                log(LOG_INFO,
1594                    "Time stepped from %jd.%09ld to %jd.%09ld (%jd.%09ld)\n",
1595                    (intmax_t)tbef.tv_sec, tbef.tv_nsec,
1596                    (intmax_t)taft.tv_sec, taft.tv_nsec,
1597                    (intmax_t)ts->tv_sec, ts->tv_nsec);
1598        }
1599#else /* __rtems__ */
1600        _Timecounter_Windup(&bt, lock_context);
1601#endif /* __rtems__ */
1602}
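
/*
 * The step above reduces to boot time arithmetic: the new boot time is the
 * requested time minus the current uptime, so that UTC = boottime + uptime
 * lands on the requested time.  A sketch (hypothetical helper):
 */
static void
setclock_math_example(const struct bintime *requested,
    struct bintime *new_boottime)
{
        struct bintime uptime;

        binuptime(&uptime);
        *new_boottime = *requested;
        bintime_sub(new_boottime, &uptime);
}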
1603
1604/*
1605 * Recalculate the scaling factor.  We want the number of 1/2^64
1606 * fractions of a second per period of the hardware counter, taking
1607 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
1608 * processing provides us with.
1609 *
1610 * The th_adjustment is nanoseconds per second with 32 bit binary
1611 * fraction and we want 64 bit binary fraction of second:
1612 *
1613 *       x = a * 2^32 / 10^9 = a * 4.294967296
1614 *
1615 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
1616 * we can only multiply by about 850 without overflowing, that
1617 * leaves no suitably precise fractions for multiply before divide.
1618 *
1619 * Divide before multiply with a fraction of 2199/512 results in a
1620 * systematic undercompensation of 10PPM of th_adjustment.  On a
1621 * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
1622 *
1623 * We happily sacrifice the lowest of the 64 bits of our result
1624 * to the goddess of code clarity.
1625 */
1626static void
1627recalculate_scaling_factor_and_large_delta(struct timehands *th)
1628{
1629        uint64_t scale;
1630
1631        scale = (uint64_t)1 << 63;
1632        scale += (th->th_adjustment / 1024) * 2199;
1633        scale /= th->th_counter->tc_frequency;
1634        th->th_scale = scale * 2;
1635        th->th_large_delta = MIN(((uint64_t)1 << 63) / scale, UINT_MAX);
1636}
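
/*
 * Worked example of the scaling math (hypothetical 1 MHz counter with a zero
 * NTP adjustment): scale = 2 * (2^63 / 10^6) ~= 2^64 / 10^6, so one counter
 * tick advances the 64-bit fraction by about one microsecond, and
 * th_large_delta = 2^63 / scale ~= 500000 ticks (about half a second).  With
 * a non-zero adjustment, 2 * 2199 / 1024 = 4.294921875 approximates
 * 2^32 / 10^9 = 4.294967296, the ~10PPM undercompensation mentioned above.
 */
static uint64_t
scale_example(void)
{
        uint64_t scale;

        scale = (uint64_t)1 << 63;      /* no NTP adjustment term */
        scale /= 1000000;               /* 1 MHz counter frequency */
        return (scale * 2);             /* ~2^64 / 1e6 */
}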
1637
1638/*
1639 * Initialize the next struct timehands in the ring and make
1640 * it the active timehands.  Along the way we might switch to a different
1641 * timecounter and/or do seconds processing in NTP.  Slightly magic.
1642 */
1643static void
1644tc_windup(struct bintime *new_boottimebin)
1645#ifdef __rtems__
1646{
1647        ISR_lock_Context lock_context;
1648
1649        _Timecounter_Acquire(&lock_context);
1650        _Timecounter_Windup(new_boottimebin, &lock_context);
1651}
1652
1653static void
1654_Timecounter_Windup(struct bintime *new_boottimebin,
1655    ISR_lock_Context *lock_context)
1656#endif /* __rtems__ */
1657{
1658        struct bintime bt;
1659        struct timecounter *tc;
1660        struct timehands *th, *tho;
1661        uint32_t delta, ncount;
1662#if defined(RTEMS_SMP)
1663        u_int ogen;
1664#endif
1665        int i;
1666        time_t t;
1667#ifdef __rtems__
1668        Timecounter_NTP_update_second ntp_update_second_handler;
1669#endif
1670
1671        /*
1672         * Make the next timehands a copy of the current one, but do
1673         * not overwrite the generation or next pointer.  While we
1674         * update the contents, the generation must be zero.  We need
1675         * to ensure that the zero generation is visible before the
1676         * data updates become visible, which requires a release fence.
1677         * For similar reasons, re-reading of the generation after the
1678         * data is read should use an acquire fence.
1679         */
1680        tho = timehands;
1681#if defined(RTEMS_SMP)
1682        th = tho->th_next;
1683        ogen = th->th_generation;
1684        th->th_generation = 0;
1685        atomic_thread_fence_rel();
1686        memcpy(th, tho, offsetof(struct timehands, th_generation));
1687#else
1688        th = tho;
1689#endif
1690        if (new_boottimebin != NULL)
1691                th->th_boottime = *new_boottimebin;
1692
1693        /*
1694         * Capture a timecounter delta on the current timecounter and if
1695         * changing timecounters, a counter value from the new timecounter.
1696         * Update the offset fields accordingly.
1697         */
1698        tc = atomic_load_ptr(&timecounter);
1699        delta = tc_delta(th);
1700        if (th->th_counter != tc)
1701                ncount = tc->tc_get_timecount(tc);
1702        else
1703                ncount = 0;
1704#ifdef FFCLOCK
1705        ffclock_windup(delta);
1706#endif
1707        th->th_offset_count += delta;
1708        th->th_offset_count &= th->th_counter->tc_counter_mask;
1709        bintime_add_tc_delta(&th->th_offset, th->th_scale,
1710            th->th_large_delta, delta);
1711
1712#ifndef __rtems__
1713        /*
1714         * Hardware latching timecounters may not generate interrupts on
1715         * PPS events, so instead we poll them.  There is a finite risk that
1716         * the hardware might capture a count which is later than the one we
1717         * got above, and therefore possibly in the next NTP second which might
1718         * have a different rate than the current NTP second.  It doesn't
1719         * matter in practice.
1720         */
1721        if (tho->th_counter->tc_poll_pps)
1722                tho->th_counter->tc_poll_pps(tho->th_counter);
1723#endif /* __rtems__ */
1724
1725        /*
1726         * Deal with NTP second processing.  The loop normally
1727         * iterates at most once, but in extreme situations it might
1728         * keep NTP sane if timeouts are not run for several seconds.
1729         * At boot, the time step can be large when the TOD hardware
1730         * has been read, so on really large steps, we call
1731         * ntp_update_second only twice.  We need to call it twice in
1732         * case we missed a leap second.
1733         */
1734        bt = th->th_offset;
1735        bintime_add(&bt, &th->th_boottime);
1736#ifdef __rtems__
1737        ntp_update_second_handler = _Timecounter_NTP_update_second_handler;
1738        if (ntp_update_second_handler != NULL) {
1739#endif /* __rtems__ */
1740        i = bt.sec - tho->th_microtime.tv_sec;
1741        if (i > 0) {
1742                if (i > LARGE_STEP)
1743                        i = 2;
1744
1745                do {
1746                        t = bt.sec;
1747                        ntp_update_second(&th->th_adjustment, &bt.sec);
1748                        if (bt.sec != t)
1749                                th->th_boottime.sec += bt.sec - t;
1750                        --i;
1751                } while (i > 0);
1752
1753                recalculate_scaling_factor_and_large_delta(th);
1754        }
1755#ifdef __rtems__
1756        }
1757#endif /* __rtems__ */
1758
1759        /* Update the UTC timestamps used by the get*() functions. */
1760        th->th_bintime = bt;
1761        bintime2timeval(&bt, &th->th_microtime);
1762        bintime2timespec(&bt, &th->th_nanotime);
1763
1764        /* Now is a good time to change timecounters. */
1765        if (th->th_counter != tc) {
1766#ifndef __rtems__
1767#ifndef __arm__
1768                if ((tc->tc_flags & TC_FLAGS_C2STOP) != 0)
1769                        cpu_disable_c2_sleep++;
1770                if ((th->th_counter->tc_flags & TC_FLAGS_C2STOP) != 0)
1771                        cpu_disable_c2_sleep--;
1772#endif
1773#endif /* __rtems__ */
1774                th->th_counter = tc;
1775                th->th_offset_count = ncount;
1776#ifndef __rtems__
1777                tc_min_ticktock_freq = max(1, tc->tc_frequency /
1778                    (((uint64_t)tc->tc_counter_mask + 1) / 3));
1779#endif /* __rtems__ */
1780                recalculate_scaling_factor_and_large_delta(th);
1781#ifdef FFCLOCK
1782                ffclock_change_tc(th);
1783#endif
1784        }
1785
1786#if defined(RTEMS_SMP)
1787        /*
1788         * Now that the struct timehands is again consistent, set the new
1789         * generation number, making sure to not make it zero.
1790         */
1791        if (++ogen == 0)
1792                ogen = 1;
1793        atomic_store_rel_int(&th->th_generation, ogen);
1794#else
1795        atomic_store_rel_int(&th->th_generation, th->th_generation + 1);
1796#endif
1797
1798        /* Go live with the new struct timehands. */
1799#ifdef FFCLOCK
1800        switch (sysclock_active) {
1801        case SYSCLOCK_FBCK:
1802#endif
1803                time_second = th->th_microtime.tv_sec;
1804                time_uptime = th->th_offset.sec;
1805#ifdef FFCLOCK
1806                break;
1807        case SYSCLOCK_FFWD:
1808                time_second = fftimehands->tick_time_lerp.sec;
1809                time_uptime = fftimehands->tick_time_lerp.sec - ffclock_boottime.sec;
1810                break;
1811        }
1812#endif
1813
1814#if defined(RTEMS_SMP)
1815        timehands = th;
1816#endif
1817#ifndef __rtems__
1818        timekeep_push_vdso();
1819#endif /* __rtems__ */
1820#ifdef __rtems__
1821        _Timecounter_Release(lock_context);
1822#endif /* __rtems__ */
1823}
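
/*
 * For reference, the reader side of this generation protocol is a loop
 * of the following shape (a simplified sketch of what binuptime() and
 * friends earlier in this file do):
 *
 *      do {
 *              th = timehands;
 *              gen = atomic_load_acq_int(&th->th_generation);
 *              *bt = th->th_offset;
 *              bintime_add_tc_delta(bt, th->th_scale, th->th_large_delta,
 *                  tc_delta(th));
 *              atomic_thread_fence_acq();
 *      } while (gen == 0 || gen != th->th_generation);
 *
 * If the generation is non-zero and unchanged after the acquire fence,
 * everything read in between belongs to one consistent timehands.
 */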
1824
1825#ifndef __rtems__
1826/* Report or change the active timecounter hardware. */
1827static int
1828sysctl_kern_timecounter_hardware(SYSCTL_HANDLER_ARGS)
1829{
1830        char newname[32];
1831        struct timecounter *newtc, *tc;
1832        int error;
1833
1834        mtx_lock(&tc_lock);
1835        tc = timecounter;
1836        strlcpy(newname, tc->tc_name, sizeof(newname));
1837        mtx_unlock(&tc_lock);
1838
1839        error = sysctl_handle_string(oidp, &newname[0], sizeof(newname), req);
1840        if (error != 0 || req->newptr == NULL)
1841                return (error);
1842
1843        mtx_lock(&tc_lock);
1844        /* Record that the tc in use now was specifically chosen. */
1845        tc_chosen = 1;
1846        if (strcmp(newname, tc->tc_name) == 0) {
1847                mtx_unlock(&tc_lock);
1848                return (0);
1849        }
1850        for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) {
1851                if (strcmp(newname, newtc->tc_name) != 0)
1852                        continue;
1853
1854                /* Warm up new timecounter. */
1855                (void)newtc->tc_get_timecount(newtc);
1856
1857                timecounter = newtc;
1858
1859                /*
1860                 * The vdso timehands update is deferred until the next
1861                 * 'tc_windup()'.
1862                 *
1863                 * This is prudent given that 'timekeep_push_vdso()' does not
1864                 * use any locking and that it can be called in hard interrupt
1865                 * context via 'tc_windup()'.
1866                 */
1867                break;
1868        }
1869        mtx_unlock(&tc_lock);
1870        return (newtc != NULL ? 0 : EINVAL);
1871}
1872SYSCTL_PROC(_kern_timecounter, OID_AUTO, hardware,
1873    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, 0, 0,
1874    sysctl_kern_timecounter_hardware, "A",
1875    "Timecounter hardware selected");
1876
1877/* Report the available timecounter hardware. */
1878static int
1879sysctl_kern_timecounter_choice(SYSCTL_HANDLER_ARGS)
1880{
1881        struct sbuf sb;
1882        struct timecounter *tc;
1883        int error;
1884
1885        error = sysctl_wire_old_buffer(req, 0);
1886        if (error != 0)
1887                return (error);
1888        sbuf_new_for_sysctl(&sb, NULL, 0, req);
1889        mtx_lock(&tc_lock);
1890        for (tc = timecounters; tc != NULL; tc = tc->tc_next) {
1891                if (tc != timecounters)
1892                        sbuf_putc(&sb, ' ');
1893                sbuf_printf(&sb, "%s(%d)", tc->tc_name, tc->tc_quality);
1894        }
1895        mtx_unlock(&tc_lock);
1896        error = sbuf_finish(&sb);
1897        sbuf_delete(&sb);
1898        return (error);
1899}
1900
1901SYSCTL_PROC(_kern_timecounter, OID_AUTO, choice,
1902    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0,
1903    sysctl_kern_timecounter_choice, "A",
1904    "Timecounter hardware detected");
1905#endif /* __rtems__ */
1906
1907/*
1908 * RFC 2783 PPS-API implementation.
1909 */
1910
1911/*
1912 *  Return true if the driver is aware of the abi version extensions in the
1913 *  pps_state structure, and it supports at least the given abi version number.
1914 */
1915static inline int
1916abi_aware(struct pps_state *pps, int vers)
1917{
1918
1919        return ((pps->kcmode & KCMODE_ABIFLAG) && pps->driver_abi >= vers);
1920}
1921
1922static int
1923pps_fetch(struct pps_fetch_args *fapi, struct pps_state *pps)
1924{
1925#ifndef __rtems__
1926        int err, timo;
1927#else /* __rtems__ */
1928        int err;
1929#endif /* __rtems__ */
1930        pps_seq_t aseq, cseq;
1931#ifndef __rtems__
1932        struct timeval tv;
1933#endif /* __rtems__ */
1934
1935        if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
1936                return (EINVAL);
1937
1938        /*
1939         * If no timeout is requested, immediately return whatever values were
1940         * most recently captured.  If timeout seconds is -1, that's a request
1941         * to block without a timeout.  WITNESS won't let us sleep forever
1942         * without a lock (we really don't need a lock), so just repeatedly
1943         * sleep a long time.
1944         */
1945        if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec) {
1946#ifndef __rtems__
1947                if (fapi->timeout.tv_sec == -1)
1948                        timo = 0x7fffffff;
1949                else {
1950                        tv.tv_sec = fapi->timeout.tv_sec;
1951                        tv.tv_usec = fapi->timeout.tv_nsec / 1000;
1952                        timo = tvtohz(&tv);
1953                }
1954#endif /* __rtems__ */
1955                aseq = atomic_load_int(&pps->ppsinfo.assert_sequence);
1956                cseq = atomic_load_int(&pps->ppsinfo.clear_sequence);
1957                while (aseq == atomic_load_int(&pps->ppsinfo.assert_sequence) &&
1958                    cseq == atomic_load_int(&pps->ppsinfo.clear_sequence)) {
1959#ifndef __rtems__
1960                        if (abi_aware(pps, 1) && pps->driver_mtx != NULL) {
1961                                if (pps->flags & PPSFLAG_MTX_SPIN) {
1962                                        err = msleep_spin(pps, pps->driver_mtx,
1963                                            "ppsfch", timo);
1964                                } else {
1965                                        err = msleep(pps, pps->driver_mtx, PCATCH,
1966                                            "ppsfch", timo);
1967                                }
1968                        } else {
1969                                err = tsleep(pps, PCATCH, "ppsfch", timo);
1970                        }
1971                        if (err == EWOULDBLOCK) {
1972                                if (fapi->timeout.tv_sec == -1) {
1973                                        continue;
1974                                } else {
1975                                        return (ETIMEDOUT);
1976                                }
1977                        } else if (err != 0) {
1978                                return (err);
1979                        }
1980#else /* __rtems__ */
1981                        _Assert(pps->wait != NULL);
1982                        err = (*pps->wait)(pps, fapi->timeout);
1983                        if (err != 0)
1984                                return (err);
1985#endif /* __rtems__ */
1986                }
1987        }
1988
1989        pps->ppsinfo.current_mode = pps->ppsparam.mode;
1990        fapi->pps_info_buf = pps->ppsinfo;
1991
1992        return (0);
1993}
1994
1995int
1996pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
1997{
1998        pps_params_t *app;
1999        struct pps_fetch_args *fapi;
2000#ifdef FFCLOCK
2001        struct pps_fetch_ffc_args *fapi_ffc;
2002#endif
2003#ifdef PPS_SYNC
2004        struct pps_kcbind_args *kapi;
2005#endif
2006
2007        KASSERT(pps != NULL, ("NULL pps pointer in pps_ioctl"));
2008        switch (cmd) {
2009        case PPS_IOC_CREATE:
2010                return (0);
2011        case PPS_IOC_DESTROY:
2012                return (0);
2013        case PPS_IOC_SETPARAMS:
2014                app = (pps_params_t *)data;
2015                if (app->mode & ~pps->ppscap)
2016                        return (EINVAL);
2017#ifdef FFCLOCK
2018                /* Ensure only a single clock is selected for ffc timestamp. */
2019                if ((app->mode & PPS_TSCLK_MASK) == PPS_TSCLK_MASK)
2020                        return (EINVAL);
2021#endif
2022                pps->ppsparam = *app;
2023                return (0);
2024        case PPS_IOC_GETPARAMS:
2025                app = (pps_params_t *)data;
2026                *app = pps->ppsparam;
2027                app->api_version = PPS_API_VERS_1;
2028                return (0);
2029        case PPS_IOC_GETCAP:
2030                *(int*)data = pps->ppscap;
2031                return (0);
2032        case PPS_IOC_FETCH:
2033                fapi = (struct pps_fetch_args *)data;
2034                return (pps_fetch(fapi, pps));
2035#ifdef FFCLOCK
2036        case PPS_IOC_FETCH_FFCOUNTER:
2037                fapi_ffc = (struct pps_fetch_ffc_args *)data;
2038                if (fapi_ffc->tsformat && fapi_ffc->tsformat !=
2039                    PPS_TSFMT_TSPEC)
2040                        return (EINVAL);
2041                if (fapi_ffc->timeout.tv_sec || fapi_ffc->timeout.tv_nsec)
2042                        return (EOPNOTSUPP);
2043                pps->ppsinfo_ffc.current_mode = pps->ppsparam.mode;
2044                fapi_ffc->pps_info_buf_ffc = pps->ppsinfo_ffc;
2045                /* Overwrite timestamps if feedback clock selected. */
2046                switch (pps->ppsparam.mode & PPS_TSCLK_MASK) {
2047                case PPS_TSCLK_FBCK:
2048                        fapi_ffc->pps_info_buf_ffc.assert_timestamp =
2049                            pps->ppsinfo.assert_timestamp;
2050                        fapi_ffc->pps_info_buf_ffc.clear_timestamp =
2051                            pps->ppsinfo.clear_timestamp;
2052                        break;
2053                case PPS_TSCLK_FFWD:
2054                        break;
2055                default:
2056                        break;
2057                }
2058                return (0);
2059#endif /* FFCLOCK */
2060        case PPS_IOC_KCBIND:
2061#ifdef PPS_SYNC
2062                kapi = (struct pps_kcbind_args *)data;
2063                /* XXX Only root should be able to do this */
2064                if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
2065                        return (EINVAL);
2066                if (kapi->kernel_consumer != PPS_KC_HARDPPS)
2067                        return (EINVAL);
2068                if (kapi->edge & ~pps->ppscap)
2069                        return (EINVAL);
2070                pps->kcmode = (kapi->edge & KCMODE_EDGEMASK) |
2071                    (pps->kcmode & KCMODE_ABIFLAG);
2072                return (0);
2073#else
2074                return (EOPNOTSUPP);
2075#endif
2076        default:
2077                return (ENOIOCTL);
2078        }
2079}
2080
2081#ifdef __rtems__
2082/*
2083 * The real implementation of hardpps() is defined in kern_ntptime.c.  It is
2084 * used only if the NTP support is needed by the application.
2085 */
2086RTEMS_WEAK void
2087hardpps(struct timespec *tsp, long nsec)
2088{
2089
2090        (void)tsp;
2091        (void)nsec;
2092}
2093
2094static int
2095default_wait(struct pps_state *pps, struct timespec timeout)
2096{
2097
2098        (void)pps;
2099        (void)timeout;
2100
2101        return (ETIMEDOUT);
2102}
2103
2104static void
2105default_wakeup(struct pps_state *pps)
2106{
2107
2108        (void)pps;
2109}
2110#endif /* __rtems__ */
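
/*
 * On RTEMS, the FreeBSD sleep/wakeup primitives are not available, so
 * struct pps_state carries wait() and wakeup() hooks instead.  The
 * defaults installed by pps_init() below never block: default_wait()
 * returns ETIMEDOUT immediately and default_wakeup() does nothing.  A
 * driver that needs blocking PPS_IOC_FETCH semantics replaces these
 * hooks after pps_init(); pps_fetch() invokes wait() and pps_event()
 * invokes wakeup().
 */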
2111void
2112pps_init(struct pps_state *pps)
2113{
2114#ifdef __rtems__
2115        pps->wait = default_wait;
2116        pps->wakeup = default_wakeup;
2117#endif /* __rtems__ */
2118        pps->ppscap |= PPS_TSFMT_TSPEC | PPS_CANWAIT;
2119        if (pps->ppscap & PPS_CAPTUREASSERT)
2120                pps->ppscap |= PPS_OFFSETASSERT;
2121        if (pps->ppscap & PPS_CAPTURECLEAR)
2122                pps->ppscap |= PPS_OFFSETCLEAR;
2123#ifdef FFCLOCK
2124        pps->ppscap |= PPS_TSCLK_MASK;
2125#endif
2126        pps->kcmode &= ~KCMODE_ABIFLAG;
2127}
2128
2129void
2130pps_init_abi(struct pps_state *pps)
2131{
2132
2133        pps_init(pps);
2134        if (pps->driver_abi > 0) {
2135                pps->kcmode |= KCMODE_ABIFLAG;
2136                pps->kernel_abi = PPS_ABI_VERSION;
2137        }
2138}
2139
2140void
2141pps_capture(struct pps_state *pps)
2142{
2143        struct timehands *th;
2144        struct timecounter *tc;
2145
2146        KASSERT(pps != NULL, ("NULL pps pointer in pps_capture"));
2147        th = timehands;
2148        pps->capgen = atomic_load_acq_int(&th->th_generation);
2149        pps->capth = th;
2150#ifdef FFCLOCK
2151        pps->capffth = fftimehands;
2152#endif
2153        tc = th->th_counter;
2154        pps->capcount = tc->tc_get_timecount(tc);
2155}
2156
2157void
2158pps_event(struct pps_state *pps, int event)
2159{
2160        struct timehands *capth;
2161        struct timecounter *captc;
2162        uint64_t capth_scale;
2163        struct bintime bt;
2164        struct timespec *tsp, *osp;
2165        uint32_t tcount, *pcount;
2166        int foff;
2167        pps_seq_t *pseq;
2168#ifdef FFCLOCK
2169        struct timespec *tsp_ffc;
2170        pps_seq_t *pseq_ffc;
2171        ffcounter *ffcount;
2172#endif
2173#ifdef PPS_SYNC
2174        int fhard;
2175#endif
2176
2177        KASSERT(pps != NULL, ("NULL pps pointer in pps_event"));
2178        /* Nothing to do if not currently set to capture this event type. */
2179        if ((event & pps->ppsparam.mode) == 0)
2180                return;
2181
2182        /* Make a snapshot of the captured timehands. */
2183        capth = pps->capth;
2184        captc = capth->th_counter;
2185        capth_scale = capth->th_scale;
2186        tcount = capth->th_offset_count;
2187        bt = capth->th_bintime;
2188
2189        /* If the timecounter was wound up underneath us, bail out. */
2190        atomic_thread_fence_acq();
2191#if defined(RTEMS_SMP)
2192        if (pps->capgen == 0 || pps->capgen != capth->th_generation)
2193#else
2194        if (pps->capgen != capth->th_generation)
2195#endif
2196                return;
2197
2198        /* Things would be easier with arrays. */
2199        if (event == PPS_CAPTUREASSERT) {
2200                tsp = &pps->ppsinfo.assert_timestamp;
2201                osp = &pps->ppsparam.assert_offset;
2202                foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
2203#ifdef PPS_SYNC
2204                fhard = pps->kcmode & PPS_CAPTUREASSERT;
2205#endif
2206                pcount = &pps->ppscount[0];
2207                pseq = &pps->ppsinfo.assert_sequence;
2208#ifdef FFCLOCK
2209                ffcount = &pps->ppsinfo_ffc.assert_ffcount;
2210                tsp_ffc = &pps->ppsinfo_ffc.assert_timestamp;
2211                pseq_ffc = &pps->ppsinfo_ffc.assert_sequence;
2212#endif
2213        } else {
2214                tsp = &pps->ppsinfo.clear_timestamp;
2215                osp = &pps->ppsparam.clear_offset;
2216                foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
2217#ifdef PPS_SYNC
2218                fhard = pps->kcmode & PPS_CAPTURECLEAR;
2219#endif
2220                pcount = &pps->ppscount[1];
2221                pseq = &pps->ppsinfo.clear_sequence;
2222#ifdef FFCLOCK
2223                ffcount = &pps->ppsinfo_ffc.clear_ffcount;
2224                tsp_ffc = &pps->ppsinfo_ffc.clear_timestamp;
2225                pseq_ffc = &pps->ppsinfo_ffc.clear_sequence;
2226#endif
2227        }
2228
2229        *pcount = pps->capcount;
2230
2231        /*
2232         * If the timecounter changed, we cannot compare the count values, so
2233         * we have to drop the rest of the PPS processing until the next event.
2234         */
2235        if (__predict_false(pps->ppstc != captc)) {
2236                pps->ppstc = captc;
2237                pps->ppscount[2] = pps->capcount;
2238                return;
2239        }
2240
2241        (*pseq)++;
2242
2243        /* Convert the count to a timespec. */
2244        tcount = pps->capcount - tcount;
2245        tcount &= captc->tc_counter_mask;
2246        bintime_addx(&bt, capth_scale * tcount);
2247        bintime2timespec(&bt, tsp);
2248
2249        if (foff) {
2250                timespecadd(tsp, osp, tsp);
2251                if (tsp->tv_nsec < 0) {
2252                        tsp->tv_nsec += 1000000000;
2253                        tsp->tv_sec -= 1;
2254                }
2255        }
2256
2257#ifdef FFCLOCK
2258        *ffcount = pps->capffth->tick_ffcount + tcount;
2259        bt = pps->capffth->tick_time;
2260        ffclock_convert_delta(tcount, pps->capffth->cest.period, &bt);
2261        bintime_add(&bt, &pps->capffth->tick_time);
2262        (*pseq_ffc)++;
2263        bintime2timespec(&bt, tsp_ffc);
2264#endif
2265
2266#ifdef PPS_SYNC
2267        if (fhard) {
2268                uint64_t delta_nsec;
2269                uint64_t freq;
2270
2271                /*
2272                 * Feed the NTP PLL/FLL.
2273                 * The FLL wants to know how many (hardware) nanoseconds
2274                 * elapsed since the previous event.
2275                 */
2276                tcount = pps->capcount - pps->ppscount[2];
2277                pps->ppscount[2] = pps->capcount;
2278                tcount &= captc->tc_counter_mask;
2279                delta_nsec = 1000000000;
2280                delta_nsec *= tcount;
2281                freq = captc->tc_frequency;
2282                delta_nsec = (delta_nsec + freq / 2) / freq;
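                /*
                 * The freq / 2 bias rounds to the closest integer
                 * rather than truncating: e.g. with tc_frequency =
                 * 3 MHz and tcount = 2, this yields
                 * (2000000000 + 1500000) / 3000000 = 667 ns, whereas
                 * plain division would yield 666 ns.
                 */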
2283                hardpps(tsp, (long)delta_nsec);
2284        }
2285#endif
2286
2287        /* Wake up anyone sleeping in pps_fetch(). */
2288#ifndef __rtems__
2289        wakeup(pps);
2290#else /* __rtems__ */
2291        _Assert(pps->wakeup != NULL);
2292        (*pps->wakeup)(pps);
2293#endif /* __rtems__ */
2294}
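
/*
 * A typical capture/event sequence in a PPS-capable driver looks like
 * the following sketch (hypothetical driver code, not part of this
 * file).  The interrupt handler latches the counter as early as
 * possible and then reports the event:
 *
 *      static void
 *      example_pps_isr(void *arg)
 *      {
 *              struct pps_state *pps = arg;
 *
 *              pps_capture(pps);
 *              pps_event(pps, PPS_CAPTUREASSERT);
 *      }
 *
 * pps_capture() is cheap enough to be called speculatively; the
 * snapshot is turned into timestamps only if pps_event() finds the
 * event type currently enabled in ppsparam.mode.
 */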
2295
2296/*
2297 * Timecounters need to be updated every so often to prevent the hardware
2298 * counter from overflowing.  Updating also recalculates the cached values
2299 * used by the get*() family of functions, so their precision depends on
2300 * the update frequency.
2301 */
2302
2303#ifndef __rtems__
2304static int tc_tick;
2305SYSCTL_INT(_kern_timecounter, OID_AUTO, tick, CTLFLAG_RD, &tc_tick, 0,
2306    "Approximate number of hardclock ticks in a millisecond");
2307#endif /* __rtems__ */
2308
2309#ifndef __rtems__
2310void
2311tc_ticktock(int cnt)
2312{
2313        static int count;
2314
2315        if (mtx_trylock_spin(&tc_setclock_mtx)) {
2316                count += cnt;
2317                if (count >= tc_tick) {
2318                        count = 0;
2319                        tc_windup(NULL);
2320                }
2321                mtx_unlock_spin(&tc_setclock_mtx);
2322        }
2323}
2324#else /* __rtems__ */
2325void
2326_Timecounter_Tick(void)
2327{
2328        Per_CPU_Control *cpu_self = _Per_CPU_Get();
2329
2330#if defined(RTEMS_SMP)
2331        if (_Per_CPU_Is_boot_processor(cpu_self)) {
2332#endif
2333                tc_windup(NULL);
2334#if defined(RTEMS_SMP)
2335        }
2336#endif
2337
2338        _Watchdog_Tick(cpu_self);
2339}
2340
2341void
2342_Timecounter_Tick_simple(uint32_t delta, uint32_t offset,
2343    ISR_lock_Context *lock_context)
2344{
2345        struct bintime bt;
2346        struct timehands *th;
2347#if defined(RTEMS_SMP)
2348        u_int ogen;
2349#endif
2350
2351        th = timehands;
2352#if defined(RTEMS_SMP)
2353        ogen = th->th_generation;
2354        th->th_generation = 0;
2355        atomic_thread_fence_rel();
2356#endif
2357
2358        th->th_offset_count = offset;
2359        bintime_addx(&th->th_offset, th->th_scale * delta);
2360        bt = th->th_offset;
2361        bintime_add(&bt, &th->th_boottime);
2362
2363        /* Update the UTC timestamps used by the get*() functions. */
2364        th->th_bintime = bt;
2365        bintime2timeval(&bt, &th->th_microtime);
2366        bintime2timespec(&bt, &th->th_nanotime);
2367
2368#if defined(RTEMS_SMP)
2369        /*
2370         * Now that the struct timehands is again consistent, set the new
2371         * generation number, making sure to not make it zero.
2372         */
2373        if (++ogen == 0)
2374                ogen = 1;
2375        atomic_store_rel_int(&th->th_generation, ogen);
2376#else
2377        atomic_store_rel_int(&th->th_generation, th->th_generation + 1);
2378#endif
2379
2380        /* Go live with the new struct timehands. */
2381        time_second = th->th_microtime.tv_sec;
2382        time_uptime = th->th_offset.sec;
2383
2384        _Timecounter_Release(lock_context);
2385
2386        _Watchdog_Tick(_Per_CPU_Get_snapshot());
2387}
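
/*
 * Sketch of the intended use of _Timecounter_Tick_simple() (compare the
 * simple timecounter support in <rtems/timecounter.h>): a clock
 * interrupt which occurs every "interval" counter ticks advances the
 * offset by this fixed delta while holding the timecounter lock, for
 * example:
 *
 *      ISR_lock_Context lock_context;
 *
 *      _Timecounter_Acquire(&lock_context);
 *      _Timecounter_Tick_simple(interval, new_offset, &lock_context);
 *
 * where "interval" and "new_offset" are placeholder names.  Note that
 * _Timecounter_Tick_simple() releases the lock on behalf of the caller.
 */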
2388#endif /* __rtems__ */
2389
2390#ifndef __rtems__
2391static void __inline
2392tc_adjprecision(void)
2393{
2394        int t;
2395
2396        if (tc_timepercentage > 0) {
2397                t = (99 + tc_timepercentage) / tc_timepercentage;
2398                tc_precexp = fls(t + (t >> 1)) - 1;
2399                FREQ2BT(hz / tc_tick, &bt_timethreshold);
2400                FREQ2BT(hz, &bt_tickthreshold);
2401                bintime_shift(&bt_timethreshold, tc_precexp);
2402                bintime_shift(&bt_tickthreshold, tc_precexp);
2403        } else {
2404                tc_precexp = 31;
2405                bt_timethreshold.sec = INT_MAX;
2406                bt_timethreshold.frac = ~(uint64_t)0;
2407                bt_tickthreshold = bt_timethreshold;
2408        }
2409        sbt_timethreshold = bttosbt(bt_timethreshold);
2410        sbt_tickthreshold = bttosbt(bt_tickthreshold);
2411}
2412#endif /* __rtems__ */
2413
2414#ifndef __rtems__
2415static int
2416sysctl_kern_timecounter_adjprecision(SYSCTL_HANDLER_ARGS)
2417{
2418        int error, val;
2419
2420        val = tc_timepercentage;
2421        error = sysctl_handle_int(oidp, &val, 0, req);
2422        if (error != 0 || req->newptr == NULL)
2423                return (error);
2424        tc_timepercentage = val;
2425        if (cold)
2426                goto done;
2427        tc_adjprecision();
2428done:
2429        return (0);
2430}
2431
2432/* Set up the requested number of timehands. */
2433static void
2434inittimehands(void *dummy)
2435{
2436        struct timehands *thp;
2437        int i;
2438
2439        TUNABLE_INT_FETCH("kern.timecounter.timehands_count",
2440            &timehands_count);
2441        if (timehands_count < 1)
2442                timehands_count = 1;
2443        if (timehands_count > nitems(ths))
2444                timehands_count = nitems(ths);
2445        for (i = 1, thp = &ths[0]; i < timehands_count;  thp = &ths[i++])
2446                thp->th_next = &ths[i];
2447        thp->th_next = &ths[0];
2448
2449        TUNABLE_STR_FETCH("kern.timecounter.hardware", tc_from_tunable,
2450            sizeof(tc_from_tunable));
2451
2452        mtx_init(&tc_lock, "tc", NULL, MTX_DEF);
2453}
2454SYSINIT(timehands, SI_SUB_TUNABLES, SI_ORDER_ANY, inittimehands, NULL);
2455
2456static void
2457inittimecounter(void *dummy)
2458{
2459        u_int p;
2460        int tick_rate;
2461
2462        /*
2463         * Set the initial timeout to
2464         * max(1, <approx. number of hardclock ticks in a millisecond>).
2465         * People should probably not use the sysctl to set the timeout
2466         * to smaller than its initial value, since that value is the
2467         * smallest reasonable one.  If they want better timestamps they
2468         * should use the non-"get"* functions.
2469         */
2470        if (hz > 1000)
2471                tc_tick = (hz + 500) / 1000;
2472        else
2473                tc_tick = 1;
2474        tc_adjprecision();
2475        FREQ2BT(hz, &tick_bt);
2476        tick_sbt = bttosbt(tick_bt);
2477        tick_rate = hz / tc_tick;
2478        FREQ2BT(tick_rate, &tc_tick_bt);
2479        tc_tick_sbt = bttosbt(tc_tick_bt);
2480        p = (tc_tick * 1000000) / hz;
2481        printf("Timecounters tick every %d.%03u msec\n", p / 1000, p % 1000);
2482
2483#ifdef FFCLOCK
2484        ffclock_init();
2485#endif
2486
2487        /* warm up new timecounter (again) and get rolling. */
2488        (void)timecounter->tc_get_timecount(timecounter);
2489        mtx_lock_spin(&tc_setclock_mtx);
2490        tc_windup(NULL);
2491        mtx_unlock_spin(&tc_setclock_mtx);
2492}
2493
2494SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL);
2495
2496/* Cpu tick handling -------------------------------------------------*/
2497
2498static bool cpu_tick_variable;
2499static uint64_t cpu_tick_frequency;
2500
2501DPCPU_DEFINE_STATIC(uint64_t, tc_cpu_ticks_base);
2502DPCPU_DEFINE_STATIC(unsigned, tc_cpu_ticks_last);
2503
2504static uint64_t
2505tc_cpu_ticks(void)
2506{
2507        struct timecounter *tc;
2508        uint64_t res, *base;
2509        unsigned u, *last;
2510
2511        critical_enter();
2512        base = DPCPU_PTR(tc_cpu_ticks_base);
2513        last = DPCPU_PTR(tc_cpu_ticks_last);
2514        tc = timehands->th_counter;
2515        u = tc->tc_get_timecount(tc) & tc->tc_counter_mask;
2516        if (u < *last)
2517                *base += (uint64_t)tc->tc_counter_mask + 1;
2518        *last = u;
2519        res = u + *base;
2520        critical_exit();
2521        return (res);
2522}
2523
2524void
2525cpu_tick_calibration(void)
2526{
2527        static time_t last_calib;
2528
2529        if (time_uptime != last_calib && !(time_uptime & 0xf)) {
2530                cpu_tick_calibrate(0);
2531                last_calib = time_uptime;
2532        }
2533}
2534
2535/*
2536 * This function gets called every 16 seconds on only one designated
2537 * CPU in the system from hardclock() via cpu_tick_calibration().
2538 *
2539 * Whenever the real time clock is stepped we get called with reset=1
2540 * to make sure we handle suspend/resume and similar events correctly.
2541 */
2542
2543static void
2544cpu_tick_calibrate(int reset)
2545{
2546        static uint64_t c_last;
2547        uint64_t c_this, c_delta;
2548        static struct bintime  t_last;
2549        struct bintime t_this, t_delta;
2550        uint32_t divi;
2551
2552        if (reset) {
2553                /* The clock was stepped, abort & reset */
2554                t_last.sec = 0;
2555                return;
2556        }
2557
2558        /* we don't calibrate fixed rate cputicks */
2559        if (!cpu_tick_variable)
2560                return;
2561
2562        getbinuptime(&t_this);
2563        c_this = cpu_ticks();
2564        if (t_last.sec != 0) {
2565                c_delta = c_this - c_last;
2566                t_delta = t_this;
2567                bintime_sub(&t_delta, &t_last);
2568                /*
2569                 * Headroom:
2570                 *      2^(64-20) / 16[s] =
2571                 *      2^(44) / 16[s] =
2572         *      17,592,186,044,416 / 16 =
2573         *      1,099,511,627,776 [Hz]
2574                 */
2575                divi = t_delta.sec << 20;
2576                divi |= t_delta.frac >> (64 - 20);
2577                c_delta <<= 20;
2578                c_delta /= divi;
2579                if (c_delta > cpu_tick_frequency) {
2580                        if (0 && bootverbose)
2581                                printf("cpu_tick increased to %ju Hz\n",
2582                                    (uintmax_t)c_delta);
2583                        cpu_tick_frequency = c_delta;
2584                }
2585        }
2586        c_last = c_this;
2587        t_last = t_this;
2588}
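
/*
 * The calibration above works in 20-bit fixed point: divi holds the
 * elapsed bintime in units of 2^-20 seconds, so (c_delta << 20) / divi
 * is in ticks per second.  For illustration, a 16 second interval on a
 * 1 GHz ticker gives divi = 16 << 20 and c_delta = 16000000000, hence
 * (16000000000 << 20) / (16 << 20) = 10^9 Hz.
 */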
2589
2590void
2591set_cputicker(cpu_tick_f *func, uint64_t freq, bool isvariable)
2592{
2593
2594        if (func == NULL) {
2595                cpu_ticks = tc_cpu_ticks;
2596        } else {
2597                cpu_tick_frequency = freq;
2598                cpu_tick_variable = isvariable;
2599                cpu_ticks = func;
2600        }
2601}
2602
2603uint64_t
2604cpu_tickrate(void)
2605{
2606
2607        if (cpu_ticks == tc_cpu_ticks)
2608                return (tc_getfrequency());
2609        return (cpu_tick_frequency);
2610}
2611
2612/*
2613 * We need to be slightly careful converting cputicks to microseconds.
2614 * There is plenty of margin in 64 bits of microseconds (half a million
2615 * years) and in 64 bits at 4 GHz (146 years), but if we do a multiply
2616 * before divide conversion (to retain precision) we find that the
2617 * margin shrinks to 1.5 hours (one millionth of 146y).
2618 */
2619
2620uint64_t
2621cputick2usec(uint64_t tick)
2622{
2623        uint64_t tr;
2624        tr = cpu_tickrate();
2625        return ((tick / tr) * 1000000ULL) + ((tick % tr) * 1000000ULL) / tr;
2626}
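
/*
 * Worked numbers for the comment above: at tr = 4 GHz, a plain
 * tick * 1000000 would overflow 64 bits once tick exceeds
 * 2^64 / 10^6 ~= 1.8 * 10^13, i.e. after roughly 77 minutes of CPU
 * ticks.  In the split form, tick % tr < tr, so the second product
 * stays below about 4 * 10^15 and the result is accurate to the
 * microsecond.
 */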
2627
2628cpu_tick_f      *cpu_ticks = tc_cpu_ticks;
2629#endif /* __rtems__ */
2630
2631#ifndef __rtems__
2632static int vdso_th_enable = 1;
2633static int
2634sysctl_fast_gettime(SYSCTL_HANDLER_ARGS)
2635{
2636        int old_vdso_th_enable, error;
2637
2638        old_vdso_th_enable = vdso_th_enable;
2639        error = sysctl_handle_int(oidp, &old_vdso_th_enable, 0, req);
2640        if (error != 0)
2641                return (error);
2642        vdso_th_enable = old_vdso_th_enable;
2643        return (0);
2644}
2645SYSCTL_PROC(_kern_timecounter, OID_AUTO, fast_gettime,
2646    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
2647    NULL, 0, sysctl_fast_gettime, "I", "Enable fast time of day");
2648
2649uint32_t
2650tc_fill_vdso_timehands(struct vdso_timehands *vdso_th)
2651{
2652        struct timehands *th;
2653        uint32_t enabled;
2654
2655        th = timehands;
2656        vdso_th->th_scale = th->th_scale;
2657        vdso_th->th_offset_count = th->th_offset_count;
2658        vdso_th->th_counter_mask = th->th_counter->tc_counter_mask;
2659        vdso_th->th_offset = th->th_offset;
2660        vdso_th->th_boottime = th->th_boottime;
2661        if (th->th_counter->tc_fill_vdso_timehands != NULL) {
2662                enabled = th->th_counter->tc_fill_vdso_timehands(vdso_th,
2663                    th->th_counter);
2664        } else
2665                enabled = 0;
2666        if (!vdso_th_enable)
2667                enabled = 0;
2668        return (enabled);
2669}
2670
2671#ifdef COMPAT_FREEBSD32
2672uint32_t
2673tc_fill_vdso_timehands32(struct vdso_timehands32 *vdso_th32)
2674{
2675        struct timehands *th;
2676        uint32_t enabled;
2677
2678        th = timehands;
2679        *(uint64_t *)&vdso_th32->th_scale[0] = th->th_scale;
2680        vdso_th32->th_offset_count = th->th_offset_count;
2681        vdso_th32->th_counter_mask = th->th_counter->tc_counter_mask;
2682        vdso_th32->th_offset.sec = th->th_offset.sec;
2683        *(uint64_t *)&vdso_th32->th_offset.frac[0] = th->th_offset.frac;
2684        vdso_th32->th_boottime.sec = th->th_boottime.sec;
2685        *(uint64_t *)&vdso_th32->th_boottime.frac[0] = th->th_boottime.frac;
2686        if (th->th_counter->tc_fill_vdso_timehands32 != NULL) {
2687                enabled = th->th_counter->tc_fill_vdso_timehands32(vdso_th32,
2688                    th->th_counter);
2689        } else
2690                enabled = 0;
2691        if (!vdso_th_enable)
2692                enabled = 0;
2693        return (enabled);
2694}
2695#endif
2696
2697#include "opt_ddb.h"
2698#ifdef DDB
2699#include <ddb/ddb.h>
2700
2701DB_SHOW_COMMAND(timecounter, db_show_timecounter)
2702{
2703        struct timehands *th;
2704        struct timecounter *tc;
2705        u_int val1, val2;
2706
2707        th = timehands;
2708        tc = th->th_counter;
2709        val1 = tc->tc_get_timecount(tc);
2710        __compiler_membar();
2711        val2 = tc->tc_get_timecount(tc);
2712
2713        db_printf("timecounter %p %s\n", tc, tc->tc_name);
2714        db_printf("  mask %#x freq %ju qual %d flags %#x priv %p\n",
2715            tc->tc_counter_mask, (uintmax_t)tc->tc_frequency, tc->tc_quality,
2716            tc->tc_flags, tc->tc_priv);
2717        db_printf("  val %#x %#x\n", val1, val2);
2718        db_printf("timehands adj %#jx scale %#jx ldelta %d off_cnt %d gen %d\n",
2719            (uintmax_t)th->th_adjustment, (uintmax_t)th->th_scale,
2720            th->th_large_delta, th->th_offset_count, th->th_generation);
2721        db_printf("  offset %jd %jd boottime %jd %jd\n",
2722            (intmax_t)th->th_offset.sec, (uintmax_t)th->th_offset.frac,
2723            (intmax_t)th->th_boottime.sec, (uintmax_t)th->th_boottime.frac);
2724}
2725#endif
2726#else /* __rtems__ */
2727RTEMS_ALIAS(_Timecounter_Nanotime)
2728void rtems_clock_get_realtime(struct timespec *);
2729
2730RTEMS_ALIAS(_Timecounter_Bintime)
2731void rtems_clock_get_realtime_bintime(struct bintime *);
2732
2733RTEMS_ALIAS(_Timecounter_Microtime)
2734void rtems_clock_get_realtime_timeval(struct timeval *);
2735
2736RTEMS_ALIAS(_Timecounter_Getnanotime)
2737void rtems_clock_get_realtime_coarse(struct timespec *);
2738
2739RTEMS_ALIAS(_Timecounter_Getbintime)
2740void rtems_clock_get_realtime_coarse_bintime(struct bintime *);
2741
2742RTEMS_ALIAS(_Timecounter_Getmicrotime)
2743void rtems_clock_get_realtime_coarse_timeval(struct timeval *);
2744
2745RTEMS_ALIAS(_Timecounter_Nanouptime)
2746void rtems_clock_get_monotonic(struct timespec *);
2747
2748RTEMS_ALIAS(_Timecounter_Binuptime)
2749void rtems_clock_get_monotonic_bintime(struct bintime *);
2750
2751RTEMS_ALIAS(_Timecounter_Sbinuptime)
2752sbintime_t rtems_clock_get_monotonic_sbintime(void);
2753
2754RTEMS_ALIAS(_Timecounter_Microuptime)
2755void rtems_clock_get_monotonic_timeval(struct timeval *);
2756
2757RTEMS_ALIAS(_Timecounter_Getnanouptime)
2758void rtems_clock_get_monotonic_coarse(struct timespec *);
2759
2760RTEMS_ALIAS(_Timecounter_Getbinuptime)
2761void rtems_clock_get_monotonic_coarse_bintime(struct bintime *);
2762
2763RTEMS_ALIAS(_Timecounter_Getmicrouptime)
2764void rtems_clock_get_monotonic_coarse_timeval(struct timeval *);
2765
2766RTEMS_ALIAS(_Timecounter_Getboottimebin)
2767void rtems_clock_get_boot_time_bintime(struct bintime *);
2768
2769RTEMS_ALIAS(_Timecounter_Getboottime)
2770void rtems_clock_get_boot_time_timeval(struct timeval *);
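
/*
 * For example, an application reads the monotonic clock through one of
 * the aliases above:
 *
 *      #include <rtems.h>
 *
 *      struct timespec ts;
 *
 *      rtems_clock_get_monotonic(&ts);
 *
 * which is equivalent to calling _Timecounter_Nanouptime(&ts).
 */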
2771#endif /* __rtems__ */