source: rtems/cpukit/score/src/kern_tc.c @ 9278f3d

Last change on this file since 9278f3d was 9278f3d, checked in by Sebastian Huber <sebastian.huber@…>, on 11/27/20 at 16:21:23

score: Canonicalize Doxygen @file comments

Use common phrases for the file brief descriptions.

Update #3706.

[9278f3d]1/**
2 * @file
3 *
4 * @ingroup RTEMSScoreTimecounter
5 *
6 * @brief This source file contains the definition of
7 *  ::_Timecounter, ::_Timecounter_Time_second, and ::_Timecounter_Time_uptime
8 *  and the implementation of _Timecounter_Binuptime(),
9 *  _Timecounter_Nanouptime(), _Timecounter_Microuptime(),
10 *  _Timecounter_Bintime(), _Timecounter_Nanotime(), _Timecounter_Microtime(),
11 *  _Timecounter_Getbinuptime(), _Timecounter_Getnanouptime(),
12 *  _Timecounter_Getmicrouptime(), _Timecounter_Getbintime(),
13 *  _Timecounter_Getnanotime(), _Timecounter_Getmicrotime(),
14 *  _Timecounter_Getboottime(), _Timecounter_Getboottimebin(), and
15 *  _Timecounter_Install().
16 */
17
[4117cd1]18/*-
19 * ----------------------------------------------------------------------------
20 * "THE BEER-WARE LICENSE" (Revision 42):
21 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
22 * can do whatever you want with this stuff. If we meet some day, and you think
23 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
24 * ----------------------------------------------------------------------------
25 *
[74887157]26 * Copyright (c) 2011, 2015, 2016 The FreeBSD Foundation
[4117cd1]27 * All rights reserved.
28 *
29 * Portions of this software were developed by Julien Ridoux at the University
30 * of Melbourne under sponsorship from the FreeBSD Foundation.
[74887157]31 *
32 * Portions of this software were developed by Konstantin Belousov
33 * under sponsorship from the FreeBSD Foundation.
[4117cd1]34 */
35
[31be416]36#ifdef __rtems__
[6de41c5f]37#include <sys/lock.h>
[01226ec]38#define _KERNEL
39#define binuptime(_bt) _Timecounter_Binuptime(_bt)
40#define nanouptime(_tsp) _Timecounter_Nanouptime(_tsp)
41#define microuptime(_tvp) _Timecounter_Microuptime(_tvp)
42#define bintime(_bt) _Timecounter_Bintime(_bt)
43#define nanotime(_tsp) _Timecounter_Nanotime(_tsp)
44#define microtime(_tvp) _Timecounter_Microtime(_tvp)
45#define getbinuptime(_bt) _Timecounter_Getbinuptime(_bt)
46#define getnanouptime(_tsp) _Timecounter_Getnanouptime(_tsp)
47#define getmicrouptime(_tvp) _Timecounter_Getmicrouptime(_tvp)
48#define getbintime(_bt) _Timecounter_Getbintime(_bt)
49#define getnanotime(_tsp) _Timecounter_Getnanotime(_tsp)
50#define getmicrotime(_tvp) _Timecounter_Getmicrotime(_tvp)
[d310aa7]51#define getboottime(_tvp) _Timecounter_Getboottime(_tvp)
52#define getboottimebin(_bt) _Timecounter_Getboottimebin(_bt)
[01226ec]53#define tc_init _Timecounter_Install
54#define timecounter _Timecounter
55#define time_second _Timecounter_Time_second
56#define time_uptime _Timecounter_Time_uptime
[31be416]57#include <rtems/score/timecounterimpl.h>
[0163063]58#include <rtems/score/atomic.h>
[1ef8e4a8]59#include <rtems/score/smp.h>
[599d71f]60#include <rtems/score/todimpl.h>
[7cd2484]61#include <rtems/score/watchdogimpl.h>
[31be416]62#endif /* __rtems__ */
[4117cd1]63#include <sys/cdefs.h>
[d8b6f1c]64__FBSDID("$FreeBSD: head/sys/kern/kern_tc.c 324528 2017-10-11 11:03:11Z kib $");
[4117cd1]65
66#include "opt_compat.h"
67#include "opt_ntp.h"
68#include "opt_ffclock.h"
69
70#include <sys/param.h>
[31be416]71#ifndef __rtems__
[4117cd1]72#include <sys/kernel.h>
73#include <sys/limits.h>
74#include <sys/lock.h>
75#include <sys/mutex.h>
[952b42b6]76#include <sys/proc.h>
[0aef6fb]77#include <sys/sbuf.h>
[952b42b6]78#include <sys/sleepqueue.h>
[4117cd1]79#include <sys/sysctl.h>
80#include <sys/syslog.h>
81#include <sys/systm.h>
[31be416]82#endif /* __rtems__ */
[4117cd1]83#include <sys/timeffc.h>
84#include <sys/timepps.h>
85#include <sys/timetc.h>
86#include <sys/timex.h>
[31be416]87#ifndef __rtems__
[4117cd1]88#include <sys/vdso.h>
[31be416]89#endif /* __rtems__ */
90#ifdef __rtems__
[b5b8116]91#include <limits.h>
[65012bf]92#include <string.h>
[31be416]93#include <rtems.h>
[76ac1ee3]94ISR_LOCK_DEFINE(, _Timecounter_Lock, "Timecounter")
95#define _Timecounter_Release(lock_context) \
96  _ISR_lock_Release_and_ISR_enable(&_Timecounter_Lock, lock_context)
[31be416]97#define hz rtems_clock_get_ticks_per_second()
98#define printf(...)
[07b76fd]99#define bcopy(x, y, z) memcpy(y, x, z)
[31be416]100#define log(...)
101static inline int
[4cd52cc4]102builtin_fls(int x)
[31be416]103{
104        return x ? sizeof(x) * 8 - __builtin_clz(x) : 0;
105}
[4cd52cc4]106#define fls(x) builtin_fls(x)
[31be416]107/* FIXME: https://devel.rtems.org/ticket/2348 */
108#define ntp_update_second(a, b) do { (void) a; (void) b; } while (0)
[0163063]109
110static inline void
111atomic_thread_fence_acq(void)
112{
113
114        _Atomic_Fence(ATOMIC_ORDER_ACQUIRE);
115}
116
117static inline void
118atomic_thread_fence_rel(void)
119{
120
121        _Atomic_Fence(ATOMIC_ORDER_RELEASE);
122}
123
124static inline u_int
125atomic_load_acq_int(Atomic_Uint *i)
126{
127
128        return (_Atomic_Load_uint(i, ATOMIC_ORDER_ACQUIRE));
129}
130
131static inline void
132atomic_store_rel_int(Atomic_Uint *i, u_int val)
133{
134
135        _Atomic_Store_uint(i, val, ATOMIC_ORDER_RELEASE);
136}
[31be416]137#endif /* __rtems__ */
[4117cd1]138
139/*
140 * A large step happens on boot.  This constant detects such steps.
141 * It is relatively small so that ntp_update_second gets called enough
142 * in the typical 'missed a couple of seconds' case, but doesn't loop
143 * forever when the time step is large.
144 */
145#define LARGE_STEP      200
146
147/*
148 * Implement a dummy timecounter which we can use until we get a real one
149 * in the air.  This allows the console and other early stuff to use
150 * time services.
151 */
152
[664f844]153static uint32_t
[4117cd1]154dummy_get_timecount(struct timecounter *tc)
155{
[31be416]156#ifndef __rtems__
[664f844]157        static uint32_t now;
[4117cd1]158
159        return (++now);
[31be416]160#else /* __rtems__ */
161        return 0;
162#endif /* __rtems__ */
[4117cd1]163}
164
165static struct timecounter dummy_timecounter = {
[2763f53]166#ifndef __rtems__
[4117cd1]167        dummy_get_timecount, 0, ~0u, 1000000, "dummy", -1000000
[2763f53]168#else /* __rtems__ */
169        dummy_get_timecount, ~(uint32_t)0, 1000000, "dummy", -1000000
170#endif /* __rtems__ */
[4117cd1]171};
172
173struct timehands {
174        /* These fields must be initialized by the driver. */
175        struct timecounter      *th_counter;
176        int64_t                 th_adjustment;
177        uint64_t                th_scale;
[664f844]178        uint32_t                th_offset_count;
[4117cd1]179        struct bintime          th_offset;
[c382cc83]180        struct bintime          th_bintime;
[4117cd1]181        struct timeval          th_microtime;
182        struct timespec         th_nanotime;
[b48aeaf]183        struct bintime          th_boottime;
[4117cd1]184        /* Fields not to be copied in tc_windup start with th_generation. */
[e1d61fe]185#ifndef __rtems__
186        u_int                   th_generation;
187#else /* __rtems__ */
[0163063]188        Atomic_Uint             th_generation;
[e1d61fe]189#endif /* __rtems__ */
[4117cd1]190        struct timehands        *th_next;
191};
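/*
 * Worked example (a sketch; the 1 MHz rate is assumed): th_scale holds
 * the counter period as a 0.64 fixed-point fraction of a second,
 * roughly 2^64 / tc_frequency.  With a 1 MHz counter a delta of one
 * tick contributes
 *
 *   th_scale * 1 == 2^64 / 1000000
 *
 * to the fraction, i.e. exactly one microsecond in 2^-64 second units,
 * which is what bintime_addx(&th->th_offset, th->th_scale * delta)
 * accumulates in the readers below.
 */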
192
[31be416]193#if defined(RTEMS_SMP)
[4117cd1]194static struct timehands th0;
[6d3c125]195static struct timehands th1 = {
196        .th_next = &th0
197};
[31be416]198#endif
[4117cd1]199static struct timehands th0 = {
[6d3c125]200        .th_counter = &dummy_timecounter,
201        .th_scale = (uint64_t)-1 / 1000000,
202        .th_offset = { .sec = 1 },
203        .th_generation = 1,
204#ifdef __rtems__
[c382cc83]205        .th_bintime = { .sec = TOD_SECONDS_1970_THROUGH_1988 },
[6d3c125]206        .th_microtime = { TOD_SECONDS_1970_THROUGH_1988, 0 },
207        .th_nanotime = { TOD_SECONDS_1970_THROUGH_1988, 0 },
[b48aeaf]208        .th_boottime = { .sec = TOD_SECONDS_1970_THROUGH_1988 - 1 },
[599d71f]209#endif /* __rtems__ */
[31be416]210#if defined(RTEMS_SMP)
[6d3c125]211        .th_next = &th1
[31be416]212#else
[6d3c125]213        .th_next = &th0
[31be416]214#endif
[4117cd1]215};
216
217static struct timehands *volatile timehands = &th0;
218struct timecounter *timecounter = &dummy_timecounter;
[2763f53]219#ifndef __rtems__
[4117cd1]220static struct timecounter *timecounters = &dummy_timecounter;
221
222int tc_min_ticktock_freq = 1;
[31be416]223#endif /* __rtems__ */
[4117cd1]224
[599d71f]225#ifndef __rtems__
[4117cd1]226volatile time_t time_second = 1;
[5f02a57]227volatile time_t time_uptime = 1;
[599d71f]228#else /* __rtems__ */
229volatile time_t time_second = TOD_SECONDS_1970_THROUGH_1988;
[5f02a57]230volatile int32_t time_uptime = 1;
[599d71f]231#endif /* __rtems__ */
[4117cd1]232
[599d71f]233#ifndef __rtems__
[4117cd1]234static int sysctl_kern_boottime(SYSCTL_HANDLER_ARGS);
235SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime, CTLTYPE_STRUCT|CTLFLAG_RD,
236    NULL, 0, sysctl_kern_boottime, "S,timeval", "System boottime");
237
238SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, "");
239static SYSCTL_NODE(_kern_timecounter, OID_AUTO, tc, CTLFLAG_RW, 0, "");
240
241static int timestepwarnings;
242SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RW,
243    &timestepwarnings, 0, "Log time steps");
244
245struct bintime bt_timethreshold;
246struct bintime bt_tickthreshold;
247sbintime_t sbt_timethreshold;
248sbintime_t sbt_tickthreshold;
249struct bintime tc_tick_bt;
250sbintime_t tc_tick_sbt;
251int tc_precexp;
252int tc_timepercentage = TC_DEFAULTPERC;
253static int sysctl_kern_timecounter_adjprecision(SYSCTL_HANDLER_ARGS);
254SYSCTL_PROC(_kern_timecounter, OID_AUTO, alloweddeviation,
255    CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, 0, 0,
256    sysctl_kern_timecounter_adjprecision, "I",
257    "Allowed time interval deviation in percents");
[f1463c8]258
[952b42b6]259volatile int rtc_generation = 1;
260
[f1463c8]261static int tc_chosen;   /* Non-zero if a specific tc was chosen via sysctl. */
[31be416]262#endif /* __rtems__ */
[4117cd1]263
[b48aeaf]264static void tc_windup(struct bintime *new_boottimebin);
[31be416]265#ifndef __rtems__
[4117cd1]266static void cpu_tick_calibrate(int);
[1ef8e4a8]267#else /* __rtems__ */
[b48aeaf]268static void _Timecounter_Windup(struct bintime *new_boottimebin,
269    ISR_lock_Context *lock_context);
[31be416]270#endif /* __rtems__ */
[4117cd1]271
272void dtrace_getnanotime(struct timespec *tsp);
273
[31be416]274#ifndef __rtems__
[4117cd1]275static int
276sysctl_kern_boottime(SYSCTL_HANDLER_ARGS)
277{
[d310aa7]278        struct timeval boottime;
279
280        getboottime(&boottime);
281
[4117cd1]282#ifndef __mips__
283#ifdef SCTL_MASK32
284        int tv[2];
285
286        if (req->flags & SCTL_MASK32) {
287                tv[0] = boottime.tv_sec;
288                tv[1] = boottime.tv_usec;
[d310aa7]289                return (SYSCTL_OUT(req, tv, sizeof(tv)));
290        }
[4117cd1]291#endif
292#endif
[d310aa7]293        return (SYSCTL_OUT(req, &boottime, sizeof(boottime)));
[4117cd1]294}
295
296static int
297sysctl_kern_timecounter_get(SYSCTL_HANDLER_ARGS)
298{
[664f844]299        uint32_t ncount;
[4117cd1]300        struct timecounter *tc = arg1;
301
302        ncount = tc->tc_get_timecount(tc);
[464fd5d]303        return (sysctl_handle_int(oidp, &ncount, 0, req));
[4117cd1]304}
305
306static int
307sysctl_kern_timecounter_freq(SYSCTL_HANDLER_ARGS)
308{
309        uint64_t freq;
310        struct timecounter *tc = arg1;
311
312        freq = tc->tc_frequency;
[464fd5d]313        return (sysctl_handle_64(oidp, &freq, 0, req));
[4117cd1]314}
[31be416]315#endif /* __rtems__ */
[4117cd1]316
317/*
318 * Return the difference between the timehands' counter value now and the
319 * value it had when we copied it into the timehands' offset_count.
320 */
[664f844]321static __inline uint32_t
[4117cd1]322tc_delta(struct timehands *th)
323{
324        struct timecounter *tc;
325
326        tc = th->th_counter;
327        return ((tc->tc_get_timecount(tc) - th->th_offset_count) &
328            tc->tc_counter_mask);
329}
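/*
 * Worked example (values assumed): with a 24-bit counter
 * (tc_counter_mask == 0xffffff), th_offset_count == 0xfffffe and a
 * current reading of 0x000001, the unsigned subtraction wraps and the
 * mask recovers the true delta of three ticks:
 *
 *   (0x000001 - 0xfffffe) & 0xffffff == 0x000003
 */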
330
331/*
332 * Functions for reading the time.  We have to loop until we are sure that
333 * the timehands that we operated on was not updated under our feet.  See
334 * the comment in <sys/time.h> for a description of these 12 functions.
335 */
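/*
 * All of these readers follow the same lock-free pattern, sketched
 * here once (mirroring the loops below):
 *
 *   do {
 *           th = timehands;
 *           gen = atomic_load_acq_int(&th->th_generation);
 *           ...copy the fields of interest from *th...
 *           atomic_thread_fence_acq();
 *   } while (gen == 0 || gen != th->th_generation);
 *
 * A generation of zero means tc_windup() is rewriting this timehands
 * right now; a changed generation means it was rewritten while we were
 * reading.  In both cases the copy may be torn, so the loop retries.
 */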
336
337#ifdef FFCLOCK
338void
339fbclock_binuptime(struct bintime *bt)
340{
341        struct timehands *th;
342        unsigned int gen;
343
344        do {
345                th = timehands;
[0163063]346                gen = atomic_load_acq_int(&th->th_generation);
[4117cd1]347                *bt = th->th_offset;
348                bintime_addx(bt, th->th_scale * tc_delta(th));
[0163063]349                atomic_thread_fence_acq();
350        } while (gen == 0 || gen != th->th_generation);
[4117cd1]351}
352
353void
354fbclock_nanouptime(struct timespec *tsp)
355{
356        struct bintime bt;
357
358        fbclock_binuptime(&bt);
359        bintime2timespec(&bt, tsp);
360}
361
362void
363fbclock_microuptime(struct timeval *tvp)
364{
365        struct bintime bt;
366
367        fbclock_binuptime(&bt);
368        bintime2timeval(&bt, tvp);
369}
370
371void
372fbclock_bintime(struct bintime *bt)
373{
[b48aeaf]374        struct timehands *th;
375        unsigned int gen;
[4117cd1]376
[b48aeaf]377        do {
378                th = timehands;
379                gen = atomic_load_acq_int(&th->th_generation);
[c382cc83]380                *bt = th->th_bintime;
[b48aeaf]381                bintime_addx(bt, th->th_scale * tc_delta(th));
382                atomic_thread_fence_acq();
383        } while (gen == 0 || gen != th->th_generation);
[4117cd1]384}
385
386void
387fbclock_nanotime(struct timespec *tsp)
388{
389        struct bintime bt;
390
391        fbclock_bintime(&bt);
392        bintime2timespec(&bt, tsp);
393}
394
395void
396fbclock_microtime(struct timeval *tvp)
397{
398        struct bintime bt;
399
400        fbclock_bintime(&bt);
401        bintime2timeval(&bt, tvp);
402}
403
404void
405fbclock_getbinuptime(struct bintime *bt)
406{
407        struct timehands *th;
408        unsigned int gen;
409
410        do {
411                th = timehands;
[0163063]412                gen = atomic_load_acq_int(&th->th_generation);
[4117cd1]413                *bt = th->th_offset;
[0163063]414                atomic_thread_fence_acq();
415        } while (gen == 0 || gen != th->th_generation);
[4117cd1]416}
417
418void
419fbclock_getnanouptime(struct timespec *tsp)
420{
421        struct timehands *th;
422        unsigned int gen;
423
424        do {
425                th = timehands;
[0163063]426                gen = atomic_load_acq_int(&th->th_generation);
[4117cd1]427                bintime2timespec(&th->th_offset, tsp);
[0163063]428                atomic_thread_fence_acq();
429        } while (gen == 0 || gen != th->th_generation);
[4117cd1]430}
431
432void
433fbclock_getmicrouptime(struct timeval *tvp)
434{
435        struct timehands *th;
436        unsigned int gen;
437
438        do {
439                th = timehands;
[0163063]440                gen = atomic_load_acq_int(&th->th_generation);
[4117cd1]441                bintime2timeval(&th->th_offset, tvp);
[0163063]442                atomic_thread_fence_acq();
443        } while (gen == 0 || gen != th->th_generation);
[4117cd1]444}
445
446void
447fbclock_getbintime(struct bintime *bt)
448{
449        struct timehands *th;
450        unsigned int gen;
451
452        do {
453                th = timehands;
[0163063]454                gen = atomic_load_acq_int(&th->th_generation);
[c382cc83]455                *bt = th->th_bintime;
[0163063]456                atomic_thread_fence_acq();
457        } while (gen == 0 || gen != th->th_generation);
[4117cd1]458}
459
460void
461fbclock_getnanotime(struct timespec *tsp)
462{
463        struct timehands *th;
464        unsigned int gen;
465
466        do {
467                th = timehands;
[0163063]468                gen = atomic_load_acq_int(&th->th_generation);
[4117cd1]469                *tsp = th->th_nanotime;
[0163063]470                atomic_thread_fence_acq();
471        } while (gen == 0 || gen != th->th_generation);
[4117cd1]472}
473
474void
475fbclock_getmicrotime(struct timeval *tvp)
476{
477        struct timehands *th;
478        unsigned int gen;
479
480        do {
481                th = timehands;
[0163063]482                gen = atomic_load_acq_int(&th->th_generation);
[4117cd1]483                *tvp = th->th_microtime;
[0163063]484                atomic_thread_fence_acq();
485        } while (gen == 0 || gen != th->th_generation);
[4117cd1]486}
487#else /* !FFCLOCK */
488void
489binuptime(struct bintime *bt)
490{
491        struct timehands *th;
[664f844]492        uint32_t gen;
[4117cd1]493
494        do {
495                th = timehands;
[0163063]496                gen = atomic_load_acq_int(&th->th_generation);
[4117cd1]497                *bt = th->th_offset;
498                bintime_addx(bt, th->th_scale * tc_delta(th));
[0163063]499                atomic_thread_fence_acq();
500        } while (gen == 0 || gen != th->th_generation);
[4117cd1]501}
[65012bf]502#ifdef __rtems__
503sbintime_t
504_Timecounter_Sbinuptime(void)
505{
506        struct timehands *th;
507        uint32_t gen;
508        sbintime_t sbt;
509
510        do {
511                th = timehands;
[0163063]512                gen = atomic_load_acq_int(&th->th_generation);
[65012bf]513                sbt = bttosbt(th->th_offset);
514                sbt += (th->th_scale * tc_delta(th)) >> 32;
[0163063]515                atomic_thread_fence_acq();
516        } while (gen == 0 || gen != th->th_generation);
[65012bf]517
518        return (sbt);
519}
520#endif /* __rtems__ */
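/*
 * Worked example for the 32.32 conversion above (values assumed):
 * th_scale * tc_delta(th) is a 0.64 fixed-point fraction of a second,
 * so shifting it right by 32 re-aligns it to the 32.32 sbintime_t
 * format; a half second, 0x8000000000000000 in 0.64 form, becomes
 * 0x0000000080000000.
 */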
[4117cd1]521
522void
523nanouptime(struct timespec *tsp)
524{
525        struct bintime bt;
526
527        binuptime(&bt);
528        bintime2timespec(&bt, tsp);
529}
530
531void
532microuptime(struct timeval *tvp)
533{
534        struct bintime bt;
535
536        binuptime(&bt);
537        bintime2timeval(&bt, tvp);
538}
539
540void
541bintime(struct bintime *bt)
542{
[b48aeaf]543        struct timehands *th;
544        u_int gen;
[4117cd1]545
[b48aeaf]546        do {
547                th = timehands;
548                gen = atomic_load_acq_int(&th->th_generation);
[c382cc83]549                *bt = th->th_bintime;
[b48aeaf]550                bintime_addx(bt, th->th_scale * tc_delta(th));
551                atomic_thread_fence_acq();
552        } while (gen == 0 || gen != th->th_generation);
[4117cd1]553}
554
555void
556nanotime(struct timespec *tsp)
557{
558        struct bintime bt;
559
560        bintime(&bt);
561        bintime2timespec(&bt, tsp);
562}
563
564void
565microtime(struct timeval *tvp)
566{
567        struct bintime bt;
568
569        bintime(&bt);
570        bintime2timeval(&bt, tvp);
571}
572
573void
574getbinuptime(struct bintime *bt)
575{
576        struct timehands *th;
[664f844]577        uint32_t gen;
[4117cd1]578
579        do {
580                th = timehands;
[0163063]581                gen = atomic_load_acq_int(&th->th_generation);
[4117cd1]582                *bt = th->th_offset;
[0163063]583                atomic_thread_fence_acq();
584        } while (gen == 0 || gen != th->th_generation);
[4117cd1]585}
586
587void
588getnanouptime(struct timespec *tsp)
589{
590        struct timehands *th;
[664f844]591        uint32_t gen;
[4117cd1]592
593        do {
594                th = timehands;
[0163063]595                gen = atomic_load_acq_int(&th->th_generation);
[4117cd1]596                bintime2timespec(&th->th_offset, tsp);
[0163063]597                atomic_thread_fence_acq();
598        } while (gen == 0 || gen != th->th_generation);
[4117cd1]599}
600
601void
602getmicrouptime(struct timeval *tvp)
603{
604        struct timehands *th;
[664f844]605        uint32_t gen;
[4117cd1]606
607        do {
608                th = timehands;
[0163063]609                gen = atomic_load_acq_int(&th->th_generation);
[4117cd1]610                bintime2timeval(&th->th_offset, tvp);
[0163063]611                atomic_thread_fence_acq();
612        } while (gen == 0 || gen != th->th_generation);
[4117cd1]613}
614
615void
616getbintime(struct bintime *bt)
617{
618        struct timehands *th;
[664f844]619        uint32_t gen;
[4117cd1]620
621        do {
622                th = timehands;
[0163063]623                gen = atomic_load_acq_int(&th->th_generation);
[c382cc83]624                *bt = th->th_bintime;
[0163063]625                atomic_thread_fence_acq();
626        } while (gen == 0 || gen != th->th_generation);
[4117cd1]627}
628
629void
630getnanotime(struct timespec *tsp)
631{
632        struct timehands *th;
[664f844]633        uint32_t gen;
[4117cd1]634
635        do {
636                th = timehands;
[0163063]637                gen = atomic_load_acq_int(&th->th_generation);
[4117cd1]638                *tsp = th->th_nanotime;
[0163063]639                atomic_thread_fence_acq();
640        } while (gen == 0 || gen != th->th_generation);
[4117cd1]641}
642
643void
644getmicrotime(struct timeval *tvp)
645{
646        struct timehands *th;
[664f844]647        uint32_t gen;
[4117cd1]648
649        do {
650                th = timehands;
[0163063]651                gen = atomic_load_acq_int(&th->th_generation);
[4117cd1]652                *tvp = th->th_microtime;
[0163063]653                atomic_thread_fence_acq();
654        } while (gen == 0 || gen != th->th_generation);
[4117cd1]655}
656#endif /* FFCLOCK */
657
[d310aa7]658void
659getboottime(struct timeval *boottime)
660{
[b48aeaf]661        struct bintime boottimebin;
[d310aa7]662
[b48aeaf]663        getboottimebin(&boottimebin);
664        bintime2timeval(&boottimebin, boottime);
[d310aa7]665}
666
667void
668getboottimebin(struct bintime *boottimebin)
669{
[b48aeaf]670        struct timehands *th;
671        u_int gen;
[d310aa7]672
[b48aeaf]673        do {
674                th = timehands;
675                gen = atomic_load_acq_int(&th->th_generation);
676                *boottimebin = th->th_boottime;
677                atomic_thread_fence_acq();
678        } while (gen == 0 || gen != th->th_generation);
[d310aa7]679}
680
[4117cd1]681#ifdef FFCLOCK
682/*
683 * Support for feed-forward synchronization algorithms. This is heavily inspired
684 * by the timehands mechanism but kept independent from it. *_windup() functions
685 * have some connection to avoid accessing the timecounter hardware more than
686 * necessary.
687 */
688
689/* Feed-forward clock estimates kept updated by the synchronization daemon. */
690struct ffclock_estimate ffclock_estimate;
691struct bintime ffclock_boottime;        /* Feed-forward boot time estimate. */
692uint32_t ffclock_status;                /* Feed-forward clock status. */
693int8_t ffclock_updated;                 /* New estimates are available. */
694struct mtx ffclock_mtx;                 /* Mutex on ffclock_estimate. */
695
696struct fftimehands {
697        struct ffclock_estimate cest;
698        struct bintime          tick_time;
699        struct bintime          tick_time_lerp;
700        ffcounter               tick_ffcount;
701        uint64_t                period_lerp;
702        volatile uint8_t        gen;
703        struct fftimehands      *next;
704};
705
706#define NUM_ELEMENTS(x) (sizeof(x) / sizeof(*x))
707
708static struct fftimehands ffth[10];
709static struct fftimehands *volatile fftimehands = ffth;
710
711static void
712ffclock_init(void)
713{
714        struct fftimehands *cur;
715        struct fftimehands *last;
716
717        memset(ffth, 0, sizeof(ffth));
718
719        last = ffth + NUM_ELEMENTS(ffth) - 1;
720        for (cur = ffth; cur < last; cur++)
721                cur->next = cur + 1;
722        last->next = ffth;
723
724        ffclock_updated = 0;
725        ffclock_status = FFCLOCK_STA_UNSYNC;
726        mtx_init(&ffclock_mtx, "ffclock lock", NULL, MTX_DEF);
727}
728
729/*
730 * Reset the feed-forward clock estimates. Called from inittodr() to kick
731 * things off, using the timecounter nominal frequency as a first period
732 * estimate. Note: this function may be called several times just after boot.
733 * Note: this is the only function that sets the value of boot time for the
734 * monotonic (i.e. uptime) version of the feed-forward clock.
735 */
736void
737ffclock_reset_clock(struct timespec *ts)
738{
739        struct timecounter *tc;
740        struct ffclock_estimate cest;
741
742        tc = timehands->th_counter;
743        memset(&cest, 0, sizeof(struct ffclock_estimate));
744
745        timespec2bintime(ts, &ffclock_boottime);
746        timespec2bintime(ts, &(cest.update_time));
747        ffclock_read_counter(&cest.update_ffcount);
748        cest.leapsec_next = 0;
749        cest.period = ((1ULL << 63) / tc->tc_frequency) << 1;
750        cest.errb_abs = 0;
751        cest.errb_rate = 0;
752        cest.status = FFCLOCK_STA_UNSYNC;
753        cest.leapsec_total = 0;
754        cest.leapsec = 0;
755
756        mtx_lock(&ffclock_mtx);
757        bcopy(&cest, &ffclock_estimate, sizeof(struct ffclock_estimate));
758        ffclock_updated = INT8_MAX;
759        mtx_unlock(&ffclock_mtx);
760
761        printf("ffclock reset: %s (%llu Hz), time = %ld.%09lu\n", tc->tc_name,
762            (unsigned long long)tc->tc_frequency, (long)ts->tv_sec,
763            (unsigned long)ts->tv_nsec);
764}
765
766/*
767 * Sub-routine to convert a time interval measured in RAW counter units to time
768 * in seconds stored in bintime format.
769 * NOTE: bintime_mul requires u_int, but the value of the ffcounter may be
770 * larger than the max value of u_int (on a 32-bit architecture). Loop to
771 * consume the extra counter cycles.
772 */
773static void
774ffclock_convert_delta(ffcounter ffdelta, uint64_t period, struct bintime *bt)
775{
776        struct bintime bt2;
777        ffcounter delta, delta_max;
778
779        delta_max = (1ULL << (8 * sizeof(unsigned int))) - 1;
780        bintime_clear(bt);
781        do {
782                if (ffdelta > delta_max)
783                        delta = delta_max;
784                else
785                        delta = ffdelta;
786                bt2.sec = 0;
787                bt2.frac = period;
788                bintime_mul(&bt2, (unsigned int)delta);
789                bintime_add(bt, &bt2);
790                ffdelta -= delta;
791        } while (ffdelta > 0);
792}
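/*
 * Worked example (numbers assumed): with a 32-bit u_int, delta_max is
 * 2^32 - 1, so converting ffdelta == 5000000000 takes two passes
 * through the loop, one for 4294967295 counter units and one for the
 * remaining 705032705, accumulating both partial bintime products
 * into *bt.
 */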
793
794/*
795 * Update the fftimehands.
796 * Push the tick ffcount and time(s) forward based on current clock estimate.
797 * The conversion from ffcounter to bintime relies on the difference clock
798 * principle, whose accuracy relies on computing small time intervals. If a new
799 * clock estimate has been passed by the synchronisation daemon, make it
800 * current, and compute the linear interpolation for monotonic time if needed.
801 */
802static void
803ffclock_windup(unsigned int delta)
804{
805        struct ffclock_estimate *cest;
806        struct fftimehands *ffth;
807        struct bintime bt, gap_lerp;
808        ffcounter ffdelta;
809        uint64_t frac;
810        unsigned int polling;
811        uint8_t forward_jump, ogen;
812
813        /*
814         * Pick the next timehand, copy current ffclock estimates and move tick
815         * times and counter forward.
816         */
817        forward_jump = 0;
818        ffth = fftimehands->next;
819        ogen = ffth->gen;
820        ffth->gen = 0;
821        cest = &ffth->cest;
822        bcopy(&fftimehands->cest, cest, sizeof(struct ffclock_estimate));
823        ffdelta = (ffcounter)delta;
824        ffth->period_lerp = fftimehands->period_lerp;
825
826        ffth->tick_time = fftimehands->tick_time;
827        ffclock_convert_delta(ffdelta, cest->period, &bt);
828        bintime_add(&ffth->tick_time, &bt);
829
830        ffth->tick_time_lerp = fftimehands->tick_time_lerp;
831        ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt);
832        bintime_add(&ffth->tick_time_lerp, &bt);
833
834        ffth->tick_ffcount = fftimehands->tick_ffcount + ffdelta;
835
836        /*
837         * Assess the status of the clock: if the last update is too old, the
838         * synchronisation daemon is likely dead and the clock is free
839         * running.
840         */
841        if (ffclock_updated == 0) {
842                ffdelta = ffth->tick_ffcount - cest->update_ffcount;
843                ffclock_convert_delta(ffdelta, cest->period, &bt);
844                if (bt.sec > 2 * FFCLOCK_SKM_SCALE)
845                        ffclock_status |= FFCLOCK_STA_UNSYNC;
846        }
847
848        /*
849         * If available, grab updated clock estimates and make them current.
850         * Recompute time at this tick using the updated estimates. The clock
851         * estimates passed by the feed-forward synchronisation daemon may result
852         * in time conversion that is not monotonically increasing (just after
853         * the update). time_lerp is a particular linear interpolation over the
854         * synchronisation algo polling period that ensures monotonicity for the
855         * clock ids requesting it.
856         */
857        if (ffclock_updated > 0) {
858                bcopy(&ffclock_estimate, cest, sizeof(struct ffclock_estimate));
859                ffdelta = ffth->tick_ffcount - cest->update_ffcount;
860                ffth->tick_time = cest->update_time;
861                ffclock_convert_delta(ffdelta, cest->period, &bt);
862                bintime_add(&ffth->tick_time, &bt);
863
864                /* ffclock_reset sets ffclock_updated to INT8_MAX */
865                if (ffclock_updated == INT8_MAX)
866                        ffth->tick_time_lerp = ffth->tick_time;
867
868                if (bintime_cmp(&ffth->tick_time, &ffth->tick_time_lerp, >))
869                        forward_jump = 1;
870                else
871                        forward_jump = 0;
872
873                bintime_clear(&gap_lerp);
874                if (forward_jump) {
875                        gap_lerp = ffth->tick_time;
876                        bintime_sub(&gap_lerp, &ffth->tick_time_lerp);
877                } else {
878                        gap_lerp = ffth->tick_time_lerp;
879                        bintime_sub(&gap_lerp, &ffth->tick_time);
880                }
881
882                /*
883                 * The reset from the RTC clock may be far from accurate, and
884                 * reducing the gap between real time and interpolated time
885                 * could take a very long time if the interpolated clock insists
886                 * on strict monotonicity. The clock is reset under very strict
887         * conditions (kernel time is known to be wrong and the
888         * synchronization daemon has been restarted recently).
889                 * ffclock_boottime absorbs the jump to ensure boot time is
890                 * correct and uptime functions stay consistent.
891                 */
892                if (((ffclock_status & FFCLOCK_STA_UNSYNC) == FFCLOCK_STA_UNSYNC) &&
893                    ((cest->status & FFCLOCK_STA_UNSYNC) == 0) &&
894                    ((cest->status & FFCLOCK_STA_WARMUP) == FFCLOCK_STA_WARMUP)) {
895                        if (forward_jump)
896                                bintime_add(&ffclock_boottime, &gap_lerp);
897                        else
898                                bintime_sub(&ffclock_boottime, &gap_lerp);
899                        ffth->tick_time_lerp = ffth->tick_time;
900                        bintime_clear(&gap_lerp);
901                }
902
903                ffclock_status = cest->status;
904                ffth->period_lerp = cest->period;
905
906                /*
907                 * Compute corrected period used for the linear interpolation of
908                 * time. The rate of linear interpolation is capped to 5000PPM
909                 * (5ms/s).
910                 */
911                if (bintime_isset(&gap_lerp)) {
912                        ffdelta = cest->update_ffcount;
913                        ffdelta -= fftimehands->cest.update_ffcount;
914                        ffclock_convert_delta(ffdelta, cest->period, &bt);
915                        polling = bt.sec;
916                        bt.sec = 0;
917                        bt.frac = 5000000 * (uint64_t)18446744073LL;
918                        bintime_mul(&bt, polling);
919                        if (bintime_cmp(&gap_lerp, &bt, >))
920                                gap_lerp = bt;
921
922                        /* Approximate 1 sec by 1-(1/2^64) to ease arithmetic */
923                        frac = 0;
924                        if (gap_lerp.sec > 0) {
925                                frac -= 1;
926                                frac /= ffdelta / gap_lerp.sec;
927                        }
928                        frac += gap_lerp.frac / ffdelta;
929
930                        if (forward_jump)
931                                ffth->period_lerp += frac;
932                        else
933                                ffth->period_lerp -= frac;
934                }
935
936                ffclock_updated = 0;
937        }
938        if (++ogen == 0)
939                ogen = 1;
940        ffth->gen = ogen;
941        fftimehands = ffth;
942}
943
944/*
945 * Adjust the fftimehands when the timecounter is changed. Stating the obvious,
946 * the old and new hardware counters cannot be read simultaneously. tc_windup()
947 * does read the two counters 'back to back', but a few cycles are effectively
948 * lost, and not accumulated in tick_ffcount. This is a fairly radical
949 * operation for a feed-forward synchronization daemon, and it is its job not
950 * to push irrelevant data to the kernel. Because there is no locking here,
951 * simply force the pending or next update to be ignored, giving the daemon
952 * a chance to realize the counter has changed.
953 */
954static void
955ffclock_change_tc(struct timehands *th)
956{
957        struct fftimehands *ffth;
958        struct ffclock_estimate *cest;
959        struct timecounter *tc;
960        uint8_t ogen;
961
962        tc = th->th_counter;
963        ffth = fftimehands->next;
964        ogen = ffth->gen;
965        ffth->gen = 0;
966
967        cest = &ffth->cest;
968        bcopy(&(fftimehands->cest), cest, sizeof(struct ffclock_estimate));
969        cest->period = ((1ULL << 63) / tc->tc_frequency ) << 1;
970        cest->errb_abs = 0;
971        cest->errb_rate = 0;
972        cest->status |= FFCLOCK_STA_UNSYNC;
973
974        ffth->tick_ffcount = fftimehands->tick_ffcount;
975        ffth->tick_time_lerp = fftimehands->tick_time_lerp;
976        ffth->tick_time = fftimehands->tick_time;
977        ffth->period_lerp = cest->period;
978
979        /* Do not lock but ignore next update from synchronization daemon. */
980        ffclock_updated--;
981
982        if (++ogen == 0)
983                ogen = 1;
984        ffth->gen = ogen;
985        fftimehands = ffth;
986}
987
988/*
989 * Retrieve feed-forward counter and time of last kernel tick.
990 */
991void
992ffclock_last_tick(ffcounter *ffcount, struct bintime *bt, uint32_t flags)
993{
994        struct fftimehands *ffth;
995        uint8_t gen;
996
997        /*
998         * No locking, but check that the generation has not changed while the
999         * tick state was being copied.
1000         */
1001        do {
1002                ffth = fftimehands;
1003                gen = ffth->gen;
1004                if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP)
1005                        *bt = ffth->tick_time_lerp;
1006                else
1007                        *bt = ffth->tick_time;
1008                *ffcount = ffth->tick_ffcount;
1009        } while (gen == 0 || gen != ffth->gen);
1010}
1011
1012/*
1013 * Absolute clock conversion. Low level function to convert ffcounter to
1014 * bintime. The ffcounter is converted using the current ffclock period estimate
1015 * or the "interpolated period" to ensure monotonicity.
1016 * NOTE: this conversion may have been deferred, and the clock updated since the
1017 * hardware counter has been read.
1018 */
1019void
1020ffclock_convert_abs(ffcounter ffcount, struct bintime *bt, uint32_t flags)
1021{
1022        struct fftimehands *ffth;
1023        struct bintime bt2;
1024        ffcounter ffdelta;
1025        uint8_t gen;
1026
1027        /*
1028         * No locking but check generation has not changed. Also need to make
1029         * sure ffdelta is positive, i.e. ffcount > tick_ffcount.
1030         */
1031        do {
1032                ffth = fftimehands;
1033                gen = ffth->gen;
1034                if (ffcount > ffth->tick_ffcount)
1035                        ffdelta = ffcount - ffth->tick_ffcount;
1036                else
1037                        ffdelta = ffth->tick_ffcount - ffcount;
1038
1039                if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP) {
1040                        *bt = ffth->tick_time_lerp;
1041                        ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt2);
1042                } else {
1043                        *bt = ffth->tick_time;
1044                        ffclock_convert_delta(ffdelta, ffth->cest.period, &bt2);
1045                }
1046
1047                if (ffcount > ffth->tick_ffcount)
1048                        bintime_add(bt, &bt2);
1049                else
1050                        bintime_sub(bt, &bt2);
1051        } while (gen == 0 || gen != ffth->gen);
1052}
1053
1054/*
1055 * Difference clock conversion.
1056 * Low level function to convert a time interval measured in RAW counter units
1057 * into bintime. The difference clock allows measuring small intervals much more
1058 * reliably than the absolute clock.
1059 */
1060void
1061ffclock_convert_diff(ffcounter ffdelta, struct bintime *bt)
1062{
1063        struct fftimehands *ffth;
1064        uint8_t gen;
1065
1066        /* No locking but check generation has not changed. */
1067        do {
1068                ffth = fftimehands;
1069                gen = ffth->gen;
1070                ffclock_convert_delta(ffdelta, ffth->cest.period, bt);
1071        } while (gen == 0 || gen != ffth->gen);
1072}
1073
1074/*
1075 * Access to current ffcounter value.
1076 */
1077void
1078ffclock_read_counter(ffcounter *ffcount)
1079{
1080        struct timehands *th;
1081        struct fftimehands *ffth;
1082        unsigned int gen, delta;
1083
1084        /*
1085         * ffclock_windup() is called from tc_windup(), so it is safe to rely
1086         * on th->th_generation alone for a correct delta and ffcounter.
1087         */
1088        do {
1089                th = timehands;
[0163063]1090                gen = atomic_load_acq_int(&th->th_generation);
[4117cd1]1091                ffth = fftimehands;
1092                delta = tc_delta(th);
1093                *ffcount = ffth->tick_ffcount;
[0163063]1094                atomic_thread_fence_acq();
1095        } while (gen == 0 || gen != th->th_generation);
[4117cd1]1096
1097        *ffcount += delta;
1098}
1099
1100void
1101binuptime(struct bintime *bt)
1102{
1103
1104        binuptime_fromclock(bt, sysclock_active);
1105}
1106
1107void
1108nanouptime(struct timespec *tsp)
1109{
1110
1111        nanouptime_fromclock(tsp, sysclock_active);
1112}
1113
1114void
1115microuptime(struct timeval *tvp)
1116{
1117
1118        microuptime_fromclock(tvp, sysclock_active);
1119}
1120
1121void
1122bintime(struct bintime *bt)
1123{
1124
1125        bintime_fromclock(bt, sysclock_active);
1126}
1127
1128void
1129nanotime(struct timespec *tsp)
1130{
1131
1132        nanotime_fromclock(tsp, sysclock_active);
1133}
1134
1135void
1136microtime(struct timeval *tvp)
1137{
1138
1139        microtime_fromclock(tvp, sysclock_active);
1140}
1141
1142void
1143getbinuptime(struct bintime *bt)
1144{
1145
1146        getbinuptime_fromclock(bt, sysclock_active);
1147}
1148
1149void
1150getnanouptime(struct timespec *tsp)
1151{
1152
1153        getnanouptime_fromclock(tsp, sysclock_active);
1154}
1155
1156void
1157getmicrouptime(struct timeval *tvp)
1158{
1159
1160        getmicrouptime_fromclock(tvp, sysclock_active);
1161}
1162
1163void
1164getbintime(struct bintime *bt)
1165{
1166
1167        getbintime_fromclock(bt, sysclock_active);
1168}
1169
1170void
1171getnanotime(struct timespec *tsp)
1172{
1173
1174        getnanotime_fromclock(tsp, sysclock_active);
1175}
1176
1177void
1178getmicrotime(struct timeval *tvp)
1179{
1180
1181        getmicrotime_fromclock(tvp, sysclock_active);
1182}
1183
1184#endif /* FFCLOCK */
1185
[31be416]1186#ifndef __rtems__
[4117cd1]1187/*
1188 * This is a clone of getnanotime and used for walltimestamps.
1189 * The dtrace_ prefix prevents fbt from creating probes for
1190 * it, so walltimestamp can be safely used in all fbt probes.
1191 */
1192void
1193dtrace_getnanotime(struct timespec *tsp)
1194{
1195        struct timehands *th;
[664f844]1196        uint32_t gen;
[4117cd1]1197
1198        do {
1199                th = timehands;
[0163063]1200                gen = atomic_load_acq_int(&th->th_generation);
[4117cd1]1201                *tsp = th->th_nanotime;
[0163063]1202                atomic_thread_fence_acq();
1203        } while (gen == 0 || gen != th->th_generation);
[4117cd1]1204}
[31be416]1205#endif /* __rtems__ */
[4117cd1]1206
[cc693845]1207#ifdef FFCLOCK
[4117cd1]1208/*
1209 * System clock currently providing time to the system. Modifiable via sysctl
1210 * when the FFCLOCK option is defined.
1211 */
1212int sysclock_active = SYSCLOCK_FBCK;
[cc693845]1213#endif
[4117cd1]1214
1215/* Internal NTP status and error estimates. */
1216extern int time_status;
1217extern long time_esterror;
1218
[31be416]1219#ifndef __rtems__
[4117cd1]1220/*
1221 * Take a snapshot of sysclock data which can be used to compare system clocks
1222 * and generate timestamps after the fact.
1223 */
1224void
1225sysclock_getsnapshot(struct sysclock_snap *clock_snap, int fast)
1226{
1227        struct fbclock_info *fbi;
1228        struct timehands *th;
1229        struct bintime bt;
1230        unsigned int delta, gen;
1231#ifdef FFCLOCK
1232        ffcounter ffcount;
1233        struct fftimehands *ffth;
1234        struct ffclock_info *ffi;
1235        struct ffclock_estimate cest;
1236
1237        ffi = &clock_snap->ff_info;
1238#endif
1239
1240        fbi = &clock_snap->fb_info;
1241        delta = 0;
1242
1243        do {
1244                th = timehands;
[0163063]1245                gen = atomic_load_acq_int(&th->th_generation);
[4117cd1]1246                fbi->th_scale = th->th_scale;
1247                fbi->tick_time = th->th_offset;
1248#ifdef FFCLOCK
1249                ffth = fftimehands;
1250                ffi->tick_time = ffth->tick_time_lerp;
1251                ffi->tick_time_lerp = ffth->tick_time_lerp;
1252                ffi->period = ffth->cest.period;
1253                ffi->period_lerp = ffth->period_lerp;
1254                clock_snap->ffcount = ffth->tick_ffcount;
1255                cest = ffth->cest;
1256#endif
1257                if (!fast)
1258                        delta = tc_delta(th);
[0163063]1259                atomic_thread_fence_acq();
1260        } while (gen == 0 || gen != th->th_generation);
[4117cd1]1261
1262        clock_snap->delta = delta;
[cc693845]1263#ifdef FFCLOCK
[4117cd1]1264        clock_snap->sysclock_active = sysclock_active;
[cc693845]1265#endif
[4117cd1]1266
1267        /* Record feedback clock status and error. */
1268        clock_snap->fb_info.status = time_status;
1269        /* XXX: Very crude estimate of feedback clock error. */
1270        bt.sec = time_esterror / 1000000;
1271        bt.frac = ((time_esterror - bt.sec) * 1000000) *
1272            (uint64_t)18446744073709ULL;
1273        clock_snap->fb_info.error = bt;
1274
1275#ifdef FFCLOCK
1276        if (!fast)
1277                clock_snap->ffcount += delta;
1278
1279        /* Record feed-forward clock leap second adjustment. */
1280        ffi->leapsec_adjustment = cest.leapsec_total;
1281        if (clock_snap->ffcount > cest.leapsec_next)
1282                ffi->leapsec_adjustment -= cest.leapsec;
1283
1284        /* Record feed-forward clock status and error. */
1285        clock_snap->ff_info.status = cest.status;
1286        ffcount = clock_snap->ffcount - cest.update_ffcount;
1287        ffclock_convert_delta(ffcount, cest.period, &bt);
1288        /* 18446744073709 = int(2^64/1e12), err_bound_rate in [ps/s]. */
1289        bintime_mul(&bt, cest.errb_rate * (uint64_t)18446744073709ULL);
1290        /* 18446744073 = int(2^64 / 1e9), since err_abs in [ns]. */
1291        bintime_addx(&bt, cest.errb_abs * (uint64_t)18446744073ULL);
1292        clock_snap->ff_info.error = bt;
1293#endif
1294}
1295
1296/*
1297 * Convert a sysclock snapshot into a struct bintime based on the specified
1298 * clock source and flags.
1299 */
1300int
1301sysclock_snap2bintime(struct sysclock_snap *cs, struct bintime *bt,
1302    int whichclock, uint32_t flags)
1303{
[d310aa7]1304        struct bintime boottimebin;
[4117cd1]1305#ifdef FFCLOCK
1306        struct bintime bt2;
1307        uint64_t period;
1308#endif
1309
1310        switch (whichclock) {
1311        case SYSCLOCK_FBCK:
1312                *bt = cs->fb_info.tick_time;
1313
1314                /* If snapshot was created with !fast, delta will be >0. */
1315                if (cs->delta > 0)
1316                        bintime_addx(bt, cs->fb_info.th_scale * cs->delta);
1317
[d310aa7]1318                if ((flags & FBCLOCK_UPTIME) == 0) {
1319                        getboottimebin(&boottimebin);
[4117cd1]1320                        bintime_add(bt, &boottimebin);
[d310aa7]1321                }
[4117cd1]1322                break;
1323#ifdef FFCLOCK
1324        case SYSCLOCK_FFWD:
1325                if (flags & FFCLOCK_LERP) {
1326                        *bt = cs->ff_info.tick_time_lerp;
1327                        period = cs->ff_info.period_lerp;
1328                } else {
1329                        *bt = cs->ff_info.tick_time;
1330                        period = cs->ff_info.period;
1331                }
1332
1333                /* If snapshot was created with !fast, delta will be >0. */
1334                if (cs->delta > 0) {
1335                        ffclock_convert_delta(cs->delta, period, &bt2);
1336                        bintime_add(bt, &bt2);
1337                }
1338
1339                /* Leap second adjustment. */
1340                if (flags & FFCLOCK_LEAPSEC)
1341                        bt->sec -= cs->ff_info.leapsec_adjustment;
1342
1343                /* Boot time adjustment, for uptime/monotonic clocks. */
1344                if (flags & FFCLOCK_UPTIME)
1345                        bintime_sub(bt, &ffclock_boottime);
1346                break;
1347#endif
1348        default:
1349                return (EINVAL);
1350                break;
1351        }
1352
1353        return (0);
1354}
[31be416]1355#endif /* __rtems__ */
[4117cd1]1356
1357/*
1358 * Initialize a new timecounter and possibly use it.
1359 */
1360void
1361tc_init(struct timecounter *tc)
1362{
[31be416]1363#ifndef __rtems__
[664f844]1364        uint32_t u;
[4117cd1]1365        struct sysctl_oid *tc_root;
1366
1367        u = tc->tc_frequency / tc->tc_counter_mask;
1368        /* XXX: We need some margin here, 10% is a guess */
1369        u *= 11;
1370        u /= 10;
1371        if (u > hz && tc->tc_quality >= 0) {
1372                tc->tc_quality = -2000;
1373                if (bootverbose) {
1374                        printf("Timecounter \"%s\" frequency %ju Hz",
1375                            tc->tc_name, (uintmax_t)tc->tc_frequency);
1376                        printf(" -- Insufficient hz, needs at least %u\n", u);
1377                }
1378        } else if (tc->tc_quality >= 0 || bootverbose) {
1379                printf("Timecounter \"%s\" frequency %ju Hz quality %d\n",
1380                    tc->tc_name, (uintmax_t)tc->tc_frequency,
1381                    tc->tc_quality);
1382        }
1383
1384        tc->tc_next = timecounters;
1385        timecounters = tc;
1386        /*
1387         * Set up sysctl tree for this counter.
1388         */
[a9219e7]1389        tc_root = SYSCTL_ADD_NODE_WITH_LABEL(NULL,
[4117cd1]1390            SYSCTL_STATIC_CHILDREN(_kern_timecounter_tc), OID_AUTO, tc->tc_name,
[a9219e7]1391            CTLFLAG_RW, 0, "timecounter description", "timecounter");
[4117cd1]1392        SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
1393            "mask", CTLFLAG_RD, &(tc->tc_counter_mask), 0,
1394            "mask for implemented bits");
1395        SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
1396            "counter", CTLTYPE_UINT | CTLFLAG_RD, tc, sizeof(*tc),
1397            sysctl_kern_timecounter_get, "IU", "current timecounter value");
1398        SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
1399            "frequency", CTLTYPE_U64 | CTLFLAG_RD, tc, sizeof(*tc),
1400             sysctl_kern_timecounter_freq, "QU", "timecounter frequency");
1401        SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
1402            "quality", CTLFLAG_RD, &(tc->tc_quality), 0,
1403            "goodness of time counter");
1404        /*
[f1463c8]1405         * Do not automatically switch if the current tc was specifically
1406         * chosen.  Never automatically use a timecounter with negative quality.
[4117cd1]1407         * Even though we run on the dummy counter, switching here may be
[f1463c8]1408         * worse since this timecounter may not be monotonic.
[4117cd1]1409         */
[f1463c8]1410        if (tc_chosen)
1411                return;
[4117cd1]1412        if (tc->tc_quality < 0)
1413                return;
[8d989c5]1414#endif /* __rtems__ */
[4117cd1]1415        if (tc->tc_quality < timecounter->tc_quality)
1416                return;
1417        if (tc->tc_quality == timecounter->tc_quality &&
1418            tc->tc_frequency < timecounter->tc_frequency)
1419                return;
[8d989c5]1420#ifndef __rtems__
[4117cd1]1421        (void)tc->tc_get_timecount(tc);
1422        (void)tc->tc_get_timecount(tc);
[47e5c1d]1423#endif /* __rtems__ */
[4117cd1]1424        timecounter = tc;
[31be416]1425#ifdef __rtems__
[b48aeaf]1426        tc_windup(NULL);
[31be416]1427#endif /* __rtems__ */
[4117cd1]1428}
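/*
 * A minimal sketch of how a clock driver defines and installs a
 * timecounter via tc_init(), i.e. _Timecounter_Install() on RTEMS.
 * The driver, register name, frequency, and quality value are
 * hypothetical:
 *
 *   static uint32_t
 *   example_get_timecount(struct timecounter *tc)
 *   {
 *           (void)tc;
 *           return (EXAMPLE_COUNTER_REG);
 *   }
 *
 *   static struct timecounter example_tc = {
 *           .tc_get_timecount = example_get_timecount,
 *           .tc_counter_mask = 0xffffffff,
 *           .tc_frequency = 24000000,
 *           .tc_name = "example",
 *           .tc_quality = 100,
 *   };
 *
 *   tc_init(&example_tc);
 */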
1429
[31be416]1430#ifndef __rtems__
[4117cd1]1431/* Report the frequency of the current timecounter. */
1432uint64_t
1433tc_getfrequency(void)
1434{
1435
1436        return (timehands->th_counter->tc_frequency);
1437}
[b48aeaf]1438
[952b42b6]1439static bool
1440sleeping_on_old_rtc(struct thread *td)
1441{
1442
[5167d0e]1443        /*
1444         * td_rtcgen is modified by curthread when it is running,
1445         * and by other threads in this function.  By finding the thread
1446         * on a sleepqueue and holding the lock on the sleepqueue
1447         * chain, we guarantee that the thread is not running and that
1448         * modifying td_rtcgen is safe.  Setting td_rtcgen to zero informs
1449         * the thread that it was woken due to a real-time clock adjustment.
1450         * (The declaration of td_rtcgen refers to this comment.)
1451         */
[952b42b6]1452        if (td->td_rtcgen != 0 && td->td_rtcgen != rtc_generation) {
1453                td->td_rtcgen = 0;
1454                return (true);
1455        }
1456        return (false);
1457}
1458
[b48aeaf]1459static struct mtx tc_setclock_mtx;
1460MTX_SYSINIT(tc_setclock_init, &tc_setclock_mtx, "tcsetc", MTX_SPIN);
[31be416]1461#endif /* __rtems__ */
[4117cd1]1462
1463/*
1464 * Step our concept of UTC.  This is done by modifying our estimate of
1465 * when we booted.
1466 */
1467void
[1ef8e4a8]1468#ifndef __rtems__
[4117cd1]1469tc_setclock(struct timespec *ts)
[31be416]1470#else /* __rtems__ */
[1ef8e4a8]1471_Timecounter_Set_clock(const struct bintime *_bt,
1472    ISR_lock_Context *lock_context)
[31be416]1473#endif /* __rtems__ */
[4117cd1]1474{
[31be416]1475#ifndef __rtems__
[4117cd1]1476        struct timespec tbef, taft;
[31be416]1477#endif /* __rtems__ */
[4117cd1]1478        struct bintime bt, bt2;
1479
[31be416]1480#ifndef __rtems__
[4117cd1]1481        timespec2bintime(ts, &bt);
[b48aeaf]1482        nanotime(&tbef);
1483        mtx_lock_spin(&tc_setclock_mtx);
1484        cpu_tick_calibrate(1);
[1ef8e4a8]1485#else /* __rtems__ */
1486        bt = *_bt;
1487#endif /* __rtems__ */
[4117cd1]1488        binuptime(&bt2);
1489        bintime_sub(&bt, &bt2);
1490
1491        /* XXX fiddle all the little crinkly bits around the fiords... */
[b48aeaf]1492#ifndef __rtems__
1493        tc_windup(&bt);
1494        mtx_unlock_spin(&tc_setclock_mtx);
[5167d0e]1495
[952b42b6]1496        /* Avoid rtc_generation == 0, since td_rtcgen == 0 is special. */
1497        atomic_add_rel_int(&rtc_generation, 2);
1498        sleepq_chains_remove_matching(sleeping_on_old_rtc);
[4117cd1]1499        if (timestepwarnings) {
[b48aeaf]1500                nanotime(&taft);
[4117cd1]1501                log(LOG_INFO,
1502                    "Time stepped from %jd.%09ld to %jd.%09ld (%jd.%09ld)\n",
1503                    (intmax_t)tbef.tv_sec, tbef.tv_nsec,
1504                    (intmax_t)taft.tv_sec, taft.tv_nsec,
1505                    (intmax_t)ts->tv_sec, ts->tv_nsec);
1506        }
[1ef8e4a8]1507#else /* __rtems__ */
[b48aeaf]1508        _Timecounter_Windup(&bt, lock_context);
[31be416]1509#endif /* __rtems__ */
[4117cd1]1510}
1511
1512/*
1513 * Initialize the next struct timehands in the ring and make
1514 * it the active timehands.  Along the way we might switch to a different
1515 * timecounter and/or do seconds processing in NTP.  Slightly magic.
1516 */
1517static void
[b48aeaf]1518tc_windup(struct bintime *new_boottimebin)
[1ef8e4a8]1519#ifdef __rtems__
1520{
1521        ISR_lock_Context lock_context;
1522
1523        _Timecounter_Acquire(&lock_context);
[b48aeaf]1524        _Timecounter_Windup(new_boottimebin, &lock_context);
[1ef8e4a8]1525}
1526
1527static void
[b48aeaf]1528_Timecounter_Windup(struct bintime *new_boottimebin,
1529    ISR_lock_Context *lock_context)
[1ef8e4a8]1530#endif /* __rtems__ */
[4117cd1]1531{
1532        struct bintime bt;
1533        struct timehands *th, *tho;
1534        uint64_t scale;
[664f844]1535        uint32_t delta, ncount, ogen;
[4117cd1]1536        int i;
1537        time_t t;
1538
1539        /*
[0163063]1540         * Make the next timehands a copy of the current one, but do
1541         * not overwrite the generation or next pointer.  While we
1542         * update the contents, the generation must be zero.  We need
1543         * to ensure that the zero generation is visible before the
1544         * data updates become visible, which requires release fence.
1545         * For similar reasons, re-reading of the generation after the
1546         * data is read should use acquire fence.
[4117cd1]1547         */
1548        tho = timehands;
[4bf79af]1549#if defined(RTEMS_SMP)
[4117cd1]1550        th = tho->th_next;
[4bf79af]1551#else
1552        th = tho;
1553#endif
[4117cd1]1554        ogen = th->th_generation;
[0163063]1555        th->th_generation = 0;
1556        atomic_thread_fence_rel();
[4bf79af]1557#if defined(RTEMS_SMP)
[4117cd1]1558        bcopy(tho, th, offsetof(struct timehands, th_generation));
[4bf79af]1559#endif
[b48aeaf]1560        if (new_boottimebin != NULL)
1561                th->th_boottime = *new_boottimebin;
[4117cd1]1562
1563        /*
1564         * Capture a timecounter delta on the current timecounter and if
1565         * changing timecounters, a counter value from the new timecounter.
1566         * Update the offset fields accordingly.
1567         */
1568        delta = tc_delta(th);
1569        if (th->th_counter != timecounter)
1570                ncount = timecounter->tc_get_timecount(timecounter);
1571        else
1572                ncount = 0;
1573#ifdef FFCLOCK
1574        ffclock_windup(delta);
1575#endif
1576        th->th_offset_count += delta;
1577        th->th_offset_count &= th->th_counter->tc_counter_mask;
1578        while (delta > th->th_counter->tc_frequency) {
1579                /* Eat complete unadjusted seconds. */
1580                delta -= th->th_counter->tc_frequency;
1581                th->th_offset.sec++;
1582        }
1583        if ((delta > th->th_counter->tc_frequency / 2) &&
1584            (th->th_scale * delta < ((uint64_t)1 << 63))) {
1585                /* The product th_scale * delta just barely overflows. */
1586                th->th_offset.sec++;
1587        }
1588        bintime_addx(&th->th_offset, th->th_scale * delta);
1589
[2763f53]1590#ifndef __rtems__
[4117cd1]1591        /*
1592         * Hardware latching timecounters may not generate interrupts on
1593         * PPS events, so instead we poll them.  There is a finite risk that
1594         * the hardware might capture a count which is later than the one we
1595         * got above, and therefore possibly in the next NTP second which might
1596         * have a different rate than the current NTP second.  It doesn't
1597         * matter in practice.
1598         */
1599        if (tho->th_counter->tc_poll_pps)
1600                tho->th_counter->tc_poll_pps(tho->th_counter);
[2763f53]1601#endif /* __rtems__ */
[4117cd1]1602
1603        /*
1604         * Deal with NTP second processing.  The for loop normally
1605         * iterates at most once, but in extreme situations it might
1606         * keep NTP sane if timeouts are not run for several seconds.
1607         * At boot, the time step can be large when the TOD hardware
1608         * has been read, so on really large steps, we call
1609         * ntp_update_second only twice.  We need to call it twice in
1610         * case we missed a leap second.
1611         */
1612        bt = th->th_offset;
[b48aeaf]1613        bintime_add(&bt, &th->th_boottime);
[4117cd1]1614        i = bt.sec - tho->th_microtime.tv_sec;
1615        if (i > LARGE_STEP)
1616                i = 2;
1617        for (; i > 0; i--) {
1618                t = bt.sec;
1619                ntp_update_second(&th->th_adjustment, &bt.sec);
1620                if (bt.sec != t)
[b48aeaf]1621                        th->th_boottime.sec += bt.sec - t;
[4117cd1]1622        }
1623        /* Update the UTC timestamps used by the get*() functions. */
[bcbbe76]1624        th->th_bintime = bt;
[4117cd1]1625        bintime2timeval(&bt, &th->th_microtime);
1626        bintime2timespec(&bt, &th->th_nanotime);
1627
1628        /* Now is a good time to change timecounters. */
1629        if (th->th_counter != timecounter) {
[31be416]1630#ifndef __rtems__
[4117cd1]1631#ifndef __arm__
1632                if ((timecounter->tc_flags & TC_FLAGS_C2STOP) != 0)
1633                        cpu_disable_c2_sleep++;
1634                if ((th->th_counter->tc_flags & TC_FLAGS_C2STOP) != 0)
1635                        cpu_disable_c2_sleep--;
1636#endif
[31be416]1637#endif /* __rtems__ */
[4117cd1]1638                th->th_counter = timecounter;
1639                th->th_offset_count = ncount;
[31be416]1640#ifndef __rtems__
[4117cd1]1641                tc_min_ticktock_freq = max(1, timecounter->tc_frequency /
1642                    (((uint64_t)timecounter->tc_counter_mask + 1) / 3));
[31be416]1643#endif /* __rtems__ */
[4117cd1]1644#ifdef FFCLOCK
1645                ffclock_change_tc(th);
1646#endif
1647        }
1648
1649        /*-
1650         * Recalculate the scaling factor.  We want the number of 1/2^64
1651         * fractions of a second per period of the hardware counter, taking
1652         * into account the th_adjustment factor which the NTP PLL/adjtime(2)
1653         * processing provides us with.
1654         *
1655         * The th_adjustment is nanoseconds per second with 32 bit binary
1656         * fraction and we want 64 bit binary fraction of second:
1657         *
1658         *       x = a * 2^32 / 10^9 = a * 4.294967296
1659         *
1660         * The range of th_adjustment is +/- 5000PPM, so inside a 64-bit int
1661         * we can only multiply by about 850 without overflowing, which
1662         * leaves no suitably precise fractions for multiply before divide.
1663         *
1664         * Divide before multiply with a fraction of 2199/512 results in a
1665         * systematic undercompensation of 10PPM of th_adjustment.  On a
1666         * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
1667         *
1668         * We happily sacrifice the lowest of the 64 bits of our result
1669         * to the goddess of code clarity.
1670         *
1671         */
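        /*
         * Worked check of the figures above: 2199 / 512 = 4.294921875
         * versus the exact factor 2^32 / 10^9 = 4.294967296, a relative
         * shortfall of about 1.06e-5, i.e. roughly 10PPM of the
         * adjustment; at the maximum 5000PPM adjustment that is about
         * 0.05PPM overall.  The (th_adjustment / 1024) * 2199 below,
         * doubled by the final "* 2", is how 2199/512 gets applied.
         */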
1672        scale = (uint64_t)1 << 63;
1673        scale += (th->th_adjustment / 1024) * 2199;
1674        scale /= th->th_counter->tc_frequency;
1675        th->th_scale = scale * 2;
1676
1677        /*
1678         * Now that the struct timehands is again consistent, set the new
1679         * generation number, making sure to not make it zero.
1680         */
1681        if (++ogen == 0)
1682                ogen = 1;
[0163063]1683        atomic_store_rel_int(&th->th_generation, ogen);
[4117cd1]1684
1685        /* Go live with the new struct timehands. */
1686#ifdef FFCLOCK
1687        switch (sysclock_active) {
1688        case SYSCLOCK_FBCK:
1689#endif
1690                time_second = th->th_microtime.tv_sec;
1691                time_uptime = th->th_offset.sec;
1692#ifdef FFCLOCK
1693                break;
1694        case SYSCLOCK_FFWD:
1695                time_second = fftimehands->tick_time_lerp.sec;
1696                time_uptime = fftimehands->tick_time_lerp.sec - ffclock_boottime.sec;
1697                break;
1698        }
1699#endif
1700
[4bf79af]1701#if defined(RTEMS_SMP)
[4117cd1]1702        timehands = th;
[4bf79af]1703#endif
[31be416]1704#ifndef __rtems__
[4117cd1]1705        timekeep_push_vdso();
[31be416]1706#endif /* __rtems__ */
1707#ifdef __rtems__
[1ef8e4a8]1708        _Timecounter_Release(lock_context);
[31be416]1709#endif /* __rtems__ */
[4117cd1]1710}
1711
[31be416]1712#ifndef __rtems__
[4117cd1]1713/* Report or change the active timecounter hardware. */
1714static int
1715sysctl_kern_timecounter_hardware(SYSCTL_HANDLER_ARGS)
1716{
1717        char newname[32];
1718        struct timecounter *newtc, *tc;
1719        int error;
1720
1721        tc = timecounter;
1722        strlcpy(newname, tc->tc_name, sizeof(newname));
1723
1724        error = sysctl_handle_string(oidp, &newname[0], sizeof(newname), req);
[f1463c8]1725        if (error != 0 || req->newptr == NULL)
[4117cd1]1726                return (error);
[f1463c8]1727        /* Record that the tc in use now was specifically chosen. */
1728        tc_chosen = 1;
1729        if (strcmp(newname, tc->tc_name) == 0)
1730                return (0);
[4117cd1]1731        for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) {
1732                if (strcmp(newname, newtc->tc_name) != 0)
1733                        continue;
1734
1735                /* Warm up new timecounter. */
1736                (void)newtc->tc_get_timecount(newtc);
1737                (void)newtc->tc_get_timecount(newtc);
1738
1739                timecounter = newtc;
1740
1741                /*
1742                 * The vdso timehands update is deferred until the next
1743                 * 'tc_windup()'.
1744                 *
1745                 * This is prudent given that 'timekeep_push_vdso()' does not
1746                 * use any locking and that it can be called in hard interrupt
1747                 * context via 'tc_windup()'.
1748                 */
1749                return (0);
1750        }
1751        return (EINVAL);
1752}
1753
1754SYSCTL_PROC(_kern_timecounter, OID_AUTO, hardware, CTLTYPE_STRING | CTLFLAG_RW,
1755    0, 0, sysctl_kern_timecounter_hardware, "A",
1756    "Timecounter hardware selected");
1757
1758
[f1463c8]1759/* Report the available timecounter hardware. */
[4117cd1]1760static int
1761sysctl_kern_timecounter_choice(SYSCTL_HANDLER_ARGS)
1762{
[cb6fde65]1763        struct sbuf sb;
[4117cd1]1764        struct timecounter *tc;
1765        int error;
1766
[cb6fde65]1767        sbuf_new_for_sysctl(&sb, NULL, 0, req);
1768        for (tc = timecounters; tc != NULL; tc = tc->tc_next) {
1769                if (tc != timecounters)
1770                        sbuf_putc(&sb, ' ');
1771                sbuf_printf(&sb, "%s(%d)", tc->tc_name, tc->tc_quality);
[4117cd1]1772        }
[cb6fde65]1773        error = sbuf_finish(&sb);
1774        sbuf_delete(&sb);
[4117cd1]1775        return (error);
1776}
1777
1778SYSCTL_PROC(_kern_timecounter, OID_AUTO, choice, CTLTYPE_STRING | CTLFLAG_RD,
1779    0, 0, sysctl_kern_timecounter_choice, "A", "Timecounter hardware detected");
[31be416]1780#endif /* __rtems__ */
[4117cd1]1781
[31be416]1782#ifndef __rtems__
[4117cd1]1783/*
1784 * RFC 2783 PPS-API implementation.
1785 */
1786
[7e1a9ac]1787/*
1788 *  Return true if the driver is aware of the abi version extensions in the
1789 *  pps_state structure, and it supports at least the given abi version number.
1790 */
1791static inline int
1792abi_aware(struct pps_state *pps, int vers)
1793{
1794
1795        return ((pps->kcmode & KCMODE_ABIFLAG) && pps->driver_abi >= vers);
1796}
1797
[4117cd1]1798static int
1799pps_fetch(struct pps_fetch_args *fapi, struct pps_state *pps)
1800{
1801        int err, timo;
1802        pps_seq_t aseq, cseq;
1803        struct timeval tv;
1804
1805        if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
1806                return (EINVAL);
1807
1808        /*
1809         * If no timeout is requested, immediately return whatever values were
1810         * most recently captured.  If timeout seconds is -1, that's a request
1811         * to block without a timeout.  WITNESS won't let us sleep forever
1812         * without a lock (we really don't need a lock), so just repeatedly
1813         * sleep a long time.
1814         */
1815        if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec) {
1816                if (fapi->timeout.tv_sec == -1)
1817                        timo = 0x7fffffff;
1818                else {
1819                        tv.tv_sec = fapi->timeout.tv_sec;
1820                        tv.tv_usec = fapi->timeout.tv_nsec / 1000;
1821                        timo = tvtohz(&tv);
1822                }
1823                aseq = pps->ppsinfo.assert_sequence;
1824                cseq = pps->ppsinfo.clear_sequence;
1825                while (aseq == pps->ppsinfo.assert_sequence &&
1826                    cseq == pps->ppsinfo.clear_sequence) {
[51304dde]1827                        if (abi_aware(pps, 1) && pps->driver_mtx != NULL) {
1828                                if (pps->flags & PPSFLAG_MTX_SPIN) {
1829                                        err = msleep_spin(pps, pps->driver_mtx,
1830                                            "ppsfch", timo);
1831                                } else {
1832                                        err = msleep(pps, pps->driver_mtx, PCATCH,
1833                                            "ppsfch", timo);
1834                                }
1835                        } else {
[b5b8116]1836                                err = tsleep(pps, PCATCH, "ppsfch", timo);
[51304dde]1837                        }
[ec349b58]1838                        if (err == EWOULDBLOCK) {
1839                                if (fapi->timeout.tv_sec == -1) {
1840                                        continue;
1841                                } else {
1842                                        return (ETIMEDOUT);
1843                                }
[4117cd1]1844                        } else if (err != 0) {
1845                                return (err);
1846                        }
1847                }
1848        }
1849
1850        pps->ppsinfo.current_mode = pps->ppsparam.mode;
1851        fapi->pps_info_buf = pps->ppsinfo;
1852
1853        return (0);
1854}
1855
1856int
1857pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
1858{
1859        pps_params_t *app;
1860        struct pps_fetch_args *fapi;
1861#ifdef FFCLOCK
1862        struct pps_fetch_ffc_args *fapi_ffc;
1863#endif
1864#ifdef PPS_SYNC
1865        struct pps_kcbind_args *kapi;
1866#endif
1867
1868        KASSERT(pps != NULL, ("NULL pps pointer in pps_ioctl"));
1869        switch (cmd) {
1870        case PPS_IOC_CREATE:
1871                return (0);
1872        case PPS_IOC_DESTROY:
1873                return (0);
1874        case PPS_IOC_SETPARAMS:
1875                app = (pps_params_t *)data;
1876                if (app->mode & ~pps->ppscap)
1877                        return (EINVAL);
1878#ifdef FFCLOCK
1879                /* Ensure only a single clock is selected for ffc timestamp. */
1880                if ((app->mode & PPS_TSCLK_MASK) == PPS_TSCLK_MASK)
1881                        return (EINVAL);
1882#endif
1883                pps->ppsparam = *app;
1884                return (0);
1885        case PPS_IOC_GETPARAMS:
1886                app = (pps_params_t *)data;
1887                *app = pps->ppsparam;
1888                app->api_version = PPS_API_VERS_1;
1889                return (0);
1890        case PPS_IOC_GETCAP:
1891                *(int*)data = pps->ppscap;
1892                return (0);
1893        case PPS_IOC_FETCH:
1894                fapi = (struct pps_fetch_args *)data;
1895                return (pps_fetch(fapi, pps));
1896#ifdef FFCLOCK
1897        case PPS_IOC_FETCH_FFCOUNTER:
1898                fapi_ffc = (struct pps_fetch_ffc_args *)data;
1899                if (fapi_ffc->tsformat && fapi_ffc->tsformat !=
1900                    PPS_TSFMT_TSPEC)
1901                        return (EINVAL);
1902                if (fapi_ffc->timeout.tv_sec || fapi_ffc->timeout.tv_nsec)
1903                        return (EOPNOTSUPP);
1904                pps->ppsinfo_ffc.current_mode = pps->ppsparam.mode;
1905                fapi_ffc->pps_info_buf_ffc = pps->ppsinfo_ffc;
1906                /* Overwrite timestamps if feedback clock selected. */
1907                switch (pps->ppsparam.mode & PPS_TSCLK_MASK) {
1908                case PPS_TSCLK_FBCK:
1909                        fapi_ffc->pps_info_buf_ffc.assert_timestamp =
1910                            pps->ppsinfo.assert_timestamp;
1911                        fapi_ffc->pps_info_buf_ffc.clear_timestamp =
1912                            pps->ppsinfo.clear_timestamp;
1913                        break;
1914                case PPS_TSCLK_FFWD:
1915                        break;
1916                default:
1917                        break;
1918                }
1919                return (0);
1920#endif /* FFCLOCK */
1921        case PPS_IOC_KCBIND:
1922#ifdef PPS_SYNC
1923                kapi = (struct pps_kcbind_args *)data;
1924                /* XXX Only root should be able to do this */
1925                if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
1926                        return (EINVAL);
1927                if (kapi->kernel_consumer != PPS_KC_HARDPPS)
1928                        return (EINVAL);
1929                if (kapi->edge & ~pps->ppscap)
1930                        return (EINVAL);
[7e1a9ac]1931                pps->kcmode = (kapi->edge & KCMODE_EDGEMASK) |
1932                    (pps->kcmode & KCMODE_ABIFLAG);
[4117cd1]1933                return (0);
1934#else
1935                return (EOPNOTSUPP);
1936#endif
1937        default:
1938                return (ENOIOCTL);
1939        }
1940}
1941
1942void
1943pps_init(struct pps_state *pps)
1944{
1945        pps->ppscap |= PPS_TSFMT_TSPEC | PPS_CANWAIT;
1946        if (pps->ppscap & PPS_CAPTUREASSERT)
1947                pps->ppscap |= PPS_OFFSETASSERT;
1948        if (pps->ppscap & PPS_CAPTURECLEAR)
1949                pps->ppscap |= PPS_OFFSETCLEAR;
1950#ifdef FFCLOCK
1951        pps->ppscap |= PPS_TSCLK_MASK;
1952#endif
[7e1a9ac]1953        pps->kcmode &= ~KCMODE_ABIFLAG;
1954}
1955
1956void
1957pps_init_abi(struct pps_state *pps)
1958{
1959
1960        pps_init(pps);
1961        if (pps->driver_abi > 0) {
1962                pps->kcmode |= KCMODE_ABIFLAG;
1963                pps->kernel_abi = PPS_ABI_VERSION;
1964        }
[4117cd1]1965}
1966
1967void
1968pps_capture(struct pps_state *pps)
1969{
1970        struct timehands *th;
1971
1972        KASSERT(pps != NULL, ("NULL pps pointer in pps_capture"));
1973        th = timehands;
[0163063]1974        pps->capgen = atomic_load_acq_int(&th->th_generation);
[4117cd1]1975        pps->capth = th;
1976#ifdef FFCLOCK
1977        pps->capffth = fftimehands;
1978#endif
1979        pps->capcount = th->th_counter->tc_get_timecount(th->th_counter);
[0163063]1980        atomic_thread_fence_acq();
1981        if (pps->capgen != th->th_generation)
[4117cd1]1982                pps->capgen = 0;
1983}
1984
1985void
1986pps_event(struct pps_state *pps, int event)
1987{
1988        struct bintime bt;
1989        struct timespec ts, *tsp, *osp;
[664f844]1990        uint32_t tcount, *pcount;
[4cd742e]1991        int foff;
[4117cd1]1992        pps_seq_t *pseq;
1993#ifdef FFCLOCK
1994        struct timespec *tsp_ffc;
1995        pps_seq_t *pseq_ffc;
1996        ffcounter *ffcount;
1997#endif
[4cd742e]1998#ifdef PPS_SYNC
1999        int fhard;
2000#endif
[4117cd1]2001
2002        KASSERT(pps != NULL, ("NULL pps pointer in pps_event"));
[7494681]2003        /* Nothing to do if not currently set to capture this event type. */
2004        if ((event & pps->ppsparam.mode) == 0)
2005                return;
[4117cd1]2006        /* If the timecounter was wound up underneath us, bail out. */
[0163063]2007        if (pps->capgen == 0 || pps->capgen !=
2008            atomic_load_acq_int(&pps->capth->th_generation))
[4117cd1]2009                return;
2010
2011        /* Things would be easier with arrays. */
2012        if (event == PPS_CAPTUREASSERT) {
2013                tsp = &pps->ppsinfo.assert_timestamp;
2014                osp = &pps->ppsparam.assert_offset;
2015                foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
[4cd742e]2016#ifdef PPS_SYNC
[4117cd1]2017                fhard = pps->kcmode & PPS_CAPTUREASSERT;
[4cd742e]2018#endif
[4117cd1]2019                pcount = &pps->ppscount[0];
2020                pseq = &pps->ppsinfo.assert_sequence;
2021#ifdef FFCLOCK
2022                ffcount = &pps->ppsinfo_ffc.assert_ffcount;
2023                tsp_ffc = &pps->ppsinfo_ffc.assert_timestamp;
2024                pseq_ffc = &pps->ppsinfo_ffc.assert_sequence;
2025#endif
2026        } else {
2027                tsp = &pps->ppsinfo.clear_timestamp;
2028                osp = &pps->ppsparam.clear_offset;
2029                foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
[4cd742e]2030#ifdef PPS_SYNC
[4117cd1]2031                fhard = pps->kcmode & PPS_CAPTURECLEAR;
[4cd742e]2032#endif
[4117cd1]2033                pcount = &pps->ppscount[1];
2034                pseq = &pps->ppsinfo.clear_sequence;
2035#ifdef FFCLOCK
2036                ffcount = &pps->ppsinfo_ffc.clear_ffcount;
2037                tsp_ffc = &pps->ppsinfo_ffc.clear_timestamp;
2038                pseq_ffc = &pps->ppsinfo_ffc.clear_sequence;
2039#endif
2040        }
2041
2042        /*
2043         * If the timecounter changed, we cannot compare the count values, so
2044         * we have to drop the rest of the PPS-stuff until the next event.
2045         */
2046        if (pps->ppstc != pps->capth->th_counter) {
2047                pps->ppstc = pps->capth->th_counter;
2048                *pcount = pps->capcount;
2049                pps->ppscount[2] = pps->capcount;
2050                return;
2051        }
2052
2053        /* Convert the count to a timespec. */
2054        tcount = pps->capcount - pps->capth->th_offset_count;
2055        tcount &= pps->capth->th_counter->tc_counter_mask;
[c382cc83]2056        bt = pps->capth->th_bintime;
[4117cd1]2057        bintime_addx(&bt, pps->capth->th_scale * tcount);
2058        bintime2timespec(&bt, &ts);
2059
2060        /* If the timecounter was wound up underneath us, bail out. */
[0163063]2061        atomic_thread_fence_acq();
2062        if (pps->capgen != pps->capth->th_generation)
[4117cd1]2063                return;
2064
2065        *pcount = pps->capcount;
2066        (*pseq)++;
2067        *tsp = ts;
2068
2069        if (foff) {
[6695d02]2070                timespecadd(tsp, osp, tsp);
[4117cd1]2071                if (tsp->tv_nsec < 0) {
2072                        tsp->tv_nsec += 1000000000;
2073                        tsp->tv_sec -= 1;
2074                }
2075        }
2076
2077#ifdef FFCLOCK
2078        *ffcount = pps->capffth->tick_ffcount + tcount;
2079        bt = pps->capffth->tick_time;
2080        ffclock_convert_delta(tcount, pps->capffth->cest.period, &bt);
2081        bintime_add(&bt, &pps->capffth->tick_time);
2082        bintime2timespec(&bt, &ts);
2083        (*pseq_ffc)++;
2084        *tsp_ffc = ts;
2085#endif
2086
2087#ifdef PPS_SYNC
2088        if (fhard) {
2089                uint64_t scale;
2090
2091                /*
2092                 * Feed the NTP PLL/FLL.
2093                 * The FLL wants to know how many (hardware) nanoseconds
2094                 * elapsed since the previous event.
2095                 */
2096                tcount = pps->capcount - pps->ppscount[2];
2097                pps->ppscount[2] = pps->capcount;
2098                tcount &= pps->capth->th_counter->tc_counter_mask;
2099                scale = (uint64_t)1 << 63;
2100                scale /= pps->capth->th_counter->tc_frequency;
2101                scale *= 2;
2102                bt.sec = 0;
2103                bt.frac = 0;
2104                bintime_addx(&bt, scale * tcount);
2105                bintime2timespec(&bt, &ts);
2106                hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec);
2107        }
2108#endif
2109
2110        /* Wake up anyone sleeping in pps_fetch(). */
2111        wakeup(pps);
2112}
[31be416]2113#else /* __rtems__ */
2114/* FIXME: https://devel.rtems.org/ticket/2349 */
2115#endif /* __rtems__ */
[4117cd1]2116
2117/*
2118 * Timecounters need to be updated every so often to prevent the hardware
2119 * counter from overflowing.  Updating also recalculates the cached values
2120 * used by the get*() family of functions, so their precision depends on
2121 * the update frequency.
2122 */
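/*
 * For example: a 32-bit counter running at 1GHz wraps after
 * 2^32 / 10^9 =~ 4.3 seconds, so the windup must run more often than that
 * for tc_delta() to remain unambiguous.
 */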
2123
[31be416]2124#ifndef __rtems__
[4117cd1]2125static int tc_tick;
2126SYSCTL_INT(_kern_timecounter, OID_AUTO, tick, CTLFLAG_RD, &tc_tick, 0,
2127    "Approximate number of hardclock ticks in a millisecond");
[31be416]2128#endif /* __rtems__ */
[4117cd1]2129
[31be416]2130#ifndef __rtems__
[4117cd1]2131void
2132tc_ticktock(int cnt)
2133{
2134        static int count;
2135
[b48aeaf]2136        if (mtx_trylock_spin(&tc_setclock_mtx)) {
2137                count += cnt;
2138                if (count >= tc_tick) {
2139                        count = 0;
2140                        tc_windup(NULL);
2141                }
2142                mtx_unlock_spin(&tc_setclock_mtx);
2143        }
[1ef8e4a8]2144}
[31be416]2145#else /* __rtems__ */
2146void
2147_Timecounter_Tick(void)
2148{
[90d8567]2149        Per_CPU_Control *cpu_self = _Per_CPU_Get();
2150
2151        if (_Per_CPU_Is_boot_processor(cpu_self)) {
[b48aeaf]2152                tc_windup(NULL);
[1ef8e4a8]2153        }
[90d8567]2154
2155        _Watchdog_Tick(cpu_self);
[4117cd1]2156}
[1ef8e4a8]2157
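/*
 * Simple tick path for clock drivers which already know the counter delta
 * and the new offset count: the current timehands is advanced by
 * scale * delta while the caller holds the timecounter lock, which is
 * released here before the watchdog tick runs.
 */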
[31be416]2158void
[76ac1ee3]2159_Timecounter_Tick_simple(uint32_t delta, uint32_t offset,
2160    ISR_lock_Context *lock_context)
[31be416]2161{
2162        struct bintime bt;
2163        struct timehands *th;
2164        uint32_t ogen;
2165
2166        th = timehands;
2167        ogen = th->th_generation;
2168        th->th_offset_count = offset;
2169        bintime_addx(&th->th_offset, th->th_scale * delta);
2170
2171        bt = th->th_offset;
[c382cc83]2172        bintime_add(&bt, &th->th_boottime);
[31be416]2173        /* Update the UTC timestamps used by the get*() functions. */
[c382cc83]2174        th->th_bintime = bt;
[31be416]2175        bintime2timeval(&bt, &th->th_microtime);
2176        bintime2timespec(&bt, &th->th_nanotime);
2177
2178        /*
2179         * Now that the struct timehands is again consistent, set the new
2180         * generation number, making sure to not make it zero.
2181         */
2182        if (++ogen == 0)
2183                ogen = 1;
2184        th->th_generation = ogen;
2185
2186        /* Go live with the new struct timehands. */
2187        time_second = th->th_microtime.tv_sec;
2188        time_uptime = th->th_offset.sec;
2189
[76ac1ee3]2190        _Timecounter_Release(lock_context);
[7cd2484]2191
[03b900d]2192        _Watchdog_Tick(_Per_CPU_Get_snapshot());
[31be416]2193}
2194#endif /* __rtems__ */
[4117cd1]2195
[31be416]2196#ifndef __rtems__
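/*
 * Example of the computation below, assuming tc_timepercentage = 5 (the
 * actual default is set earlier in this file): t = (99 + 5) / 5 = 20,
 * tc_precexp = fls(20 + 10) - 1 = 4, so both thresholds end up as the
 * base periods scaled by 2^4 = 16.
 */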
[4117cd1]2197static void __inline
2198tc_adjprecision(void)
2199{
2200        int t;
2201
2202        if (tc_timepercentage > 0) {
2203                t = (99 + tc_timepercentage) / tc_timepercentage;
2204                tc_precexp = fls(t + (t >> 1)) - 1;
2205                FREQ2BT(hz / tc_tick, &bt_timethreshold);
2206                FREQ2BT(hz, &bt_tickthreshold);
2207                bintime_shift(&bt_timethreshold, tc_precexp);
2208                bintime_shift(&bt_tickthreshold, tc_precexp);
2209        } else {
2210                tc_precexp = 31;
2211                bt_timethreshold.sec = INT_MAX;
2212                bt_timethreshold.frac = ~(uint64_t)0;
2213                bt_tickthreshold = bt_timethreshold;
2214        }
2215        sbt_timethreshold = bttosbt(bt_timethreshold);
2216        sbt_tickthreshold = bttosbt(bt_tickthreshold);
2217}
[31be416]2218#endif /* __rtems__ */
[4117cd1]2219
[31be416]2220#ifndef __rtems__
[4117cd1]2221static int
2222sysctl_kern_timecounter_adjprecision(SYSCTL_HANDLER_ARGS)
2223{
2224        int error, val;
2225
2226        val = tc_timepercentage;
2227        error = sysctl_handle_int(oidp, &val, 0, req);
2228        if (error != 0 || req->newptr == NULL)
2229                return (error);
2230        tc_timepercentage = val;
2231        if (cold)
2232                goto done;
2233        tc_adjprecision();
2234done:
2235        return (0);
2236}
2237
2238static void
2239inittimecounter(void *dummy)
2240{
2241        u_int p;
2242        int tick_rate;
2243
2244        /*
2245         * Set the initial timeout to
2246         * max(1, <approx. number of hardclock ticks in a millisecond>).
2247         * People should probably not use the sysctl to set the timeout
[65f2cd7a]2248         * to less than its initial value, since that value is the
[4117cd1]2249         * smallest reasonable one.  If they want better timestamps they
2250         * should use the non-"get"* functions.
2251         */
2252        if (hz > 1000)
2253                tc_tick = (hz + 500) / 1000;
2254        else
2255                tc_tick = 1;
2256        tc_adjprecision();
2257        FREQ2BT(hz, &tick_bt);
2258        tick_sbt = bttosbt(tick_bt);
2259        tick_rate = hz / tc_tick;
2260        FREQ2BT(tick_rate, &tc_tick_bt);
2261        tc_tick_sbt = bttosbt(tc_tick_bt);
2262        p = (tc_tick * 1000000) / hz;
2263        printf("Timecounters tick every %d.%03u msec\n", p / 1000, p % 1000);
2264
2265#ifdef FFCLOCK
2266        ffclock_init();
2267#endif
2268        /* warm up new timecounter (again) and get rolling. */
2269        (void)timecounter->tc_get_timecount(timecounter);
2270        (void)timecounter->tc_get_timecount(timecounter);
[b48aeaf]2271        mtx_lock_spin(&tc_setclock_mtx);
2272        tc_windup(NULL);
2273        mtx_unlock_spin(&tc_setclock_mtx);
[4117cd1]2274}
2275
2276SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL);
2277
2278/* Cpu tick handling -------------------------------------------------*/
2279
2280static int cpu_tick_variable;
2281static uint64_t cpu_tick_frequency;
2282
[f013c14]2283static DPCPU_DEFINE(uint64_t, tc_cpu_ticks_base);
2284static DPCPU_DEFINE(unsigned, tc_cpu_ticks_last);
2285
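/*
 * Extend the possibly narrow, wrapping hardware count into a monotonically
 * increasing 64-bit value: whenever the masked reading goes backwards the
 * counter must have wrapped, so one full counter period (mask + 1) is
 * folded into the per-CPU base.
 */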
[4117cd1]2286static uint64_t
2287tc_cpu_ticks(void)
2288{
2289        struct timecounter *tc;
[f013c14]2290        uint64_t res, *base;
2291        unsigned u, *last;
[4117cd1]2292
[f013c14]2293        critical_enter();
2294        base = DPCPU_PTR(tc_cpu_ticks_base);
2295        last = DPCPU_PTR(tc_cpu_ticks_last);
[4117cd1]2296        tc = timehands->th_counter;
2297        u = tc->tc_get_timecount(tc) & tc->tc_counter_mask;
[f013c14]2298        if (u < *last)
2299                *base += (uint64_t)tc->tc_counter_mask + 1;
2300        *last = u;
2301        res = u + *base;
2302        critical_exit();
2303        return (res);
[4117cd1]2304}
2305
2306void
2307cpu_tick_calibration(void)
2308{
2309        static time_t last_calib;
2310
2311        if (time_uptime != last_calib && !(time_uptime & 0xf)) {
2312                cpu_tick_calibrate(0);
2313                last_calib = time_uptime;
2314        }
2315}
2316
2317/*
2318 * This function gets called every 16 seconds on only one designated
2319 * CPU in the system, from hardclock() via cpu_tick_calibration().
2320 *
2321 * Whenever the real time clock is stepped we get called with reset=1
2322 * to make sure we handle suspend/resume and similar events correctly.
2323 */
2324
2325static void
2326cpu_tick_calibrate(int reset)
2327{
2328        static uint64_t c_last;
2329        uint64_t c_this, c_delta;
2330        static struct bintime  t_last;
2331        struct bintime t_this, t_delta;
2332        uint32_t divi;
2333
2334        if (reset) {
2335                /* The clock was stepped, abort & reset */
2336                t_last.sec = 0;
2337                return;
2338        }
2339
2340        /* We don't calibrate fixed-rate cputicks. */
2341        if (!cpu_tick_variable)
2342                return;
2343
2344        getbinuptime(&t_this);
2345        c_this = cpu_ticks();
2346        if (t_last.sec != 0) {
2347                c_delta = c_this - c_last;
2348                t_delta = t_this;
2349                bintime_sub(&t_delta, &t_last);
2350                /*
2351                 * Headroom:
2352                 *      2^(64-20) / 16[s] =
2353                 *      2^(44) / 16[s] =
2354                 *      17.592.186.044.416 / 16 =
2355                 *      1.099.511.627.776 [Hz]
2356                 */
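                /*
                 * divi is t_delta as a fixed-point value with 20
                 * fractional bits; shifting c_delta left by the same 20
                 * bits before the divide cancels the scale, so the
                 * quotient is in counts per second (Hz).
                 */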
2357                divi = t_delta.sec << 20;
2358                divi |= t_delta.frac >> (64 - 20);
2359                c_delta <<= 20;
2360                c_delta /= divi;
2361                if (c_delta > cpu_tick_frequency) {
2362                        if (0 && bootverbose)
2363                                printf("cpu_tick increased to %ju Hz\n",
2364                                    c_delta);
2365                        cpu_tick_frequency = c_delta;
2366                }
2367        }
2368        c_last = c_this;
2369        t_last = t_this;
2370}
2371
2372void
2373set_cputicker(cpu_tick_f *func, uint64_t freq, unsigned var)
2374{
2375
2376        if (func == NULL) {
2377                cpu_ticks = tc_cpu_ticks;
2378        } else {
2379                cpu_tick_frequency = freq;
2380                cpu_tick_variable = var;
2381                cpu_ticks = func;
2382        }
2383}
2384
2385uint64_t
2386cpu_tickrate(void)
2387{
2388
2389        if (cpu_ticks == tc_cpu_ticks)
2390                return (tc_getfrequency());
2391        return (cpu_tick_frequency);
2392}
2393
2394/*
2395 * We need to be slightly careful converting cputicks to microseconds.
2396 * There is plenty of margin in 64 bits of microseconds (half a million
2397 * years) and in 64 bits at 4 GHz (146 years), but if we do a multiply
2398 * before divide conversion (to retain precision) we find that the
2399 * margin shrinks to 1.5 hours (one millionth of 146y).
2400         * With a three-prong approach we never lose significant bits, no
2401         * matter what the cputick rate and length of time interval are.
2402 */
2403
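/*
 * For instance, at an assumed 4GHz cputick rate, ticks stay below
 * floor(2^64 / 10^6) for about 2^64 / 10^6 / 4e9 =~ 4600 seconds, so the
 * most precise multiply-first prong covers roughly the first 77 minutes
 * of uptime before the middle prong takes over.
 */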
2404uint64_t
2405cputick2usec(uint64_t tick)
2406{
2407
2408        if (tick > 18446744073709551LL)         /* floor(2^64 / 1000) */
2409                return (tick / (cpu_tickrate() / 1000000LL));
2410        else if (tick > 18446744073709LL)       /* floor(2^64 / 1000000) */
2411                return ((tick * 1000LL) / (cpu_tickrate() / 1000LL));
2412        else
2413                return ((tick * 1000000LL) / cpu_tickrate());
2414}
2415
2416cpu_tick_f      *cpu_ticks = tc_cpu_ticks;
[31be416]2417#endif /* __rtems__ */
[4117cd1]2418
[31be416]2419#ifndef __rtems__
[4117cd1]2420static int vdso_th_enable = 1;
2421static int
2422sysctl_fast_gettime(SYSCTL_HANDLER_ARGS)
2423{
2424        int old_vdso_th_enable, error;
2425
2426        old_vdso_th_enable = vdso_th_enable;
2427        error = sysctl_handle_int(oidp, &old_vdso_th_enable, 0, req);
2428        if (error != 0)
2429                return (error);
2430        vdso_th_enable = old_vdso_th_enable;
2431        return (0);
2432}
2433SYSCTL_PROC(_kern_timecounter, OID_AUTO, fast_gettime,
2434    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
2435    NULL, 0, sysctl_fast_gettime, "I", "Enable fast time of day");
2436
2437uint32_t
2438tc_fill_vdso_timehands(struct vdso_timehands *vdso_th)
2439{
2440        struct timehands *th;
2441        uint32_t enabled;
2442
2443        th = timehands;
2444        vdso_th->th_scale = th->th_scale;
2445        vdso_th->th_offset_count = th->th_offset_count;
2446        vdso_th->th_counter_mask = th->th_counter->tc_counter_mask;
2447        vdso_th->th_offset = th->th_offset;
[b48aeaf]2448        vdso_th->th_boottime = th->th_boottime;
[74887157]2449        if (th->th_counter->tc_fill_vdso_timehands != NULL) {
2450                enabled = th->th_counter->tc_fill_vdso_timehands(vdso_th,
2451                    th->th_counter);
2452        } else
2453                enabled = 0;
[4117cd1]2454        if (!vdso_th_enable)
2455                enabled = 0;
2456        return (enabled);
2457}
[31be416]2458#endif /* __rtems__ */
[4117cd1]2459
2460#ifdef COMPAT_FREEBSD32
2461uint32_t
2462tc_fill_vdso_timehands32(struct vdso_timehands32 *vdso_th32)
2463{
2464        struct timehands *th;
2465        uint32_t enabled;
2466
2467        th = timehands;
2468        *(uint64_t *)&vdso_th32->th_scale[0] = th->th_scale;
2469        vdso_th32->th_offset_count = th->th_offset_count;
2470        vdso_th32->th_counter_mask = th->th_counter->tc_counter_mask;
2471        vdso_th32->th_offset.sec = th->th_offset.sec;
2472        *(uint64_t *)&vdso_th32->th_offset.frac[0] = th->th_offset.frac;
[b48aeaf]2473        vdso_th32->th_boottime.sec = th->th_boottime.sec;
2474        *(uint64_t *)&vdso_th32->th_boottime.frac[0] = th->th_boottime.frac;
[74887157]2475        if (th->th_counter->tc_fill_vdso_timehands32 != NULL) {
2476                enabled = th->th_counter->tc_fill_vdso_timehands32(vdso_th32,
2477                    th->th_counter);
2478        } else
2479                enabled = 0;
[4117cd1]2480        if (!vdso_th_enable)
2481                enabled = 0;
2482        return (enabled);
2483}
2484#endif