source: rtems-libbsd/freebsd/kern/kern_tc.c @ 562783d

Last change on this file since 562783d was 562783d, checked in by Joel Sherrill <joel.sherrill@…>, on 03/09/12 at 15:19:06

Add time support files from FreeBSD to build to resolve more symbols

  • Property mode set to 100644
File size: 24.6 KB
#include <freebsd/machine/rtems-bsd-config.h>

/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 */

#include <freebsd/sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <freebsd/local/opt_ntp.h>

#include <freebsd/sys/param.h>
#include <freebsd/sys/kernel.h>
#include <freebsd/sys/sysctl.h>
#include <freebsd/sys/syslog.h>
#include <freebsd/sys/systm.h>
#include <freebsd/sys/timepps.h>
#include <freebsd/sys/timetc.h>
#include <freebsd/sys/timex.h>

/*
 * A large step happens on boot.  This constant detects such steps.
 * It is relatively small so that ntp_update_second gets called enough
 * in the typical 'missed a couple of seconds' case, but doesn't loop
 * forever when the time step is large.
 */
#define LARGE_STEP      200

/*
 * Implement a dummy timecounter which we can use until we get a real one
 * in the air.  This allows the console and other early stuff to use
 * time services.
 */

static u_int
dummy_get_timecount(struct timecounter *tc)
{
        static u_int now;

        return (++now);
}

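/*
 * Positional initializer; the fields are, in struct timecounter order,
 * tc_get_timecount, tc_poll_pps, tc_counter_mask, tc_frequency, tc_name
 * and tc_quality.  The very low quality guarantees that any real
 * timecounter registered later will replace the dummy.
 */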
static struct timecounter dummy_timecounter = {
        dummy_get_timecount, 0, ~0u, 1000000, "dummy", -1000000
};

struct timehands {
        /* These fields must be initialized by the driver. */
        struct timecounter      *th_counter;
        int64_t                 th_adjustment;
        u_int64_t               th_scale;
        u_int                   th_offset_count;
        struct bintime          th_offset;
        struct timeval          th_microtime;
        struct timespec         th_nanotime;
        /* Fields not to be copied in tc_windup start with th_generation. */
        volatile u_int          th_generation;
        struct timehands        *th_next;
};

static struct timehands th0;
static struct timehands th9 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th0};
static struct timehands th8 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th9};
static struct timehands th7 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th8};
static struct timehands th6 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th7};
static struct timehands th5 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th6};
static struct timehands th4 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th5};
static struct timehands th3 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th4};
static struct timehands th2 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th3};
static struct timehands th1 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th2};
static struct timehands th0 = {
        &dummy_timecounter,
        0,
        (uint64_t)-1 / 1000000,
        0,
        {1, 0},
        {0, 0},
        {0, 0},
        1,
        &th1
};

static struct timehands *volatile timehands = &th0;
struct timecounter *timecounter = &dummy_timecounter;
static struct timecounter *timecounters = &dummy_timecounter;

time_t time_second = 1;
time_t time_uptime = 1;

static struct bintime boottimebin;
struct timeval boottime;
static int sysctl_kern_boottime(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime, CTLTYPE_STRUCT|CTLFLAG_RD,
    NULL, 0, sysctl_kern_boottime, "S,timeval", "System boottime");

SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, "");
SYSCTL_NODE(_kern_timecounter, OID_AUTO, tc, CTLFLAG_RW, 0, "");

static int timestepwarnings;
SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RW,
    &timestepwarnings, 0, "Log time steps");

static void tc_windup(void);
static void cpu_tick_calibrate(int);

static int
sysctl_kern_boottime(SYSCTL_HANDLER_ARGS)
{
#ifdef SCTL_MASK32
        int tv[2];

        if (req->flags & SCTL_MASK32) {
                tv[0] = boottime.tv_sec;
                tv[1] = boottime.tv_usec;
                return SYSCTL_OUT(req, tv, sizeof(tv));
        } else
#endif
                return SYSCTL_OUT(req, &boottime, sizeof(boottime));
}

static int
sysctl_kern_timecounter_get(SYSCTL_HANDLER_ARGS)
{
        u_int ncount;
        struct timecounter *tc = arg1;

        ncount = tc->tc_get_timecount(tc);
        return sysctl_handle_int(oidp, &ncount, 0, req);
}

static int
sysctl_kern_timecounter_freq(SYSCTL_HANDLER_ARGS)
{
        u_int64_t freq;
        struct timecounter *tc = arg1;

        freq = tc->tc_frequency;
        return sysctl_handle_quad(oidp, &freq, 0, req);
}

/*
 * Return the difference between the timehands' counter value now and what
 * was when we copied it to the timehands' offset_count.
 */
static __inline u_int
tc_delta(struct timehands *th)
{
        struct timecounter *tc;

        tc = th->th_counter;
        return ((tc->tc_get_timecount(tc) - th->th_offset_count) &
            tc->tc_counter_mask);
}
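
/*
 * Note that the subtraction above is modular, so a counter that wrapped
 * since the last tc_windup() still yields the correct delta.  For example
 * (illustrative values), with a 16-bit counter (tc_counter_mask == 0xffff),
 * th_offset_count == 0xfff0 and a current reading of 0x0010,
 * (0x0010 - 0xfff0) & 0xffff == 0x0020 elapsed ticks.
 */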

/*
 * Functions for reading the time.  We have to loop until we are sure that
 * the timehands that we operated on was not updated under our feet.  See
 * the comment in <sys/time.h> for a description of these 12 functions.
 */

void
binuptime(struct bintime *bt)
{
        struct timehands *th;
        u_int gen;

        do {
                th = timehands;
                gen = th->th_generation;
                *bt = th->th_offset;
                bintime_addx(bt, th->th_scale * tc_delta(th));
        } while (gen == 0 || gen != th->th_generation);
}
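
/*
 * The loop above is the reader side of the lock-free timehands scheme:
 * tc_windup() sets th_generation to zero while it rewrites a slot, so a
 * reader that raced with an update simply retries.  The value computed is
 *
 *      uptime = th_offset + (th_scale * tc_delta(th)) / 2^64
 *
 * where th_scale is roughly 2^64 / tc_frequency, i.e. the number of
 * 64-bit binary fractions of a second per hardware tick.
 */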

void
nanouptime(struct timespec *tsp)
{
        struct bintime bt;

        binuptime(&bt);
        bintime2timespec(&bt, tsp);
}

void
microuptime(struct timeval *tvp)
{
        struct bintime bt;

        binuptime(&bt);
        bintime2timeval(&bt, tvp);
}

void
bintime(struct bintime *bt)
{

        binuptime(bt);
        bintime_add(bt, &boottimebin);
}

void
nanotime(struct timespec *tsp)
{
        struct bintime bt;

        bintime(&bt);
        bintime2timespec(&bt, tsp);
}

void
microtime(struct timeval *tvp)
{
        struct bintime bt;

        bintime(&bt);
        bintime2timeval(&bt, tvp);
}

void
getbinuptime(struct bintime *bt)
{
        struct timehands *th;
        u_int gen;

        do {
                th = timehands;
                gen = th->th_generation;
                *bt = th->th_offset;
        } while (gen == 0 || gen != th->th_generation);
}

void
getnanouptime(struct timespec *tsp)
{
        struct timehands *th;
        u_int gen;

        do {
                th = timehands;
                gen = th->th_generation;
                bintime2timespec(&th->th_offset, tsp);
        } while (gen == 0 || gen != th->th_generation);
}

void
getmicrouptime(struct timeval *tvp)
{
        struct timehands *th;
        u_int gen;

        do {
                th = timehands;
                gen = th->th_generation;
                bintime2timeval(&th->th_offset, tvp);
        } while (gen == 0 || gen != th->th_generation);
}

void
getbintime(struct bintime *bt)
{
        struct timehands *th;
        u_int gen;

        do {
                th = timehands;
                gen = th->th_generation;
                *bt = th->th_offset;
        } while (gen == 0 || gen != th->th_generation);
        bintime_add(bt, &boottimebin);
}

void
getnanotime(struct timespec *tsp)
{
        struct timehands *th;
        u_int gen;

        do {
                th = timehands;
                gen = th->th_generation;
                *tsp = th->th_nanotime;
        } while (gen == 0 || gen != th->th_generation);
}

void
getmicrotime(struct timeval *tvp)
{
        struct timehands *th;
        u_int gen;

        do {
                th = timehands;
                gen = th->th_generation;
                *tvp = th->th_microtime;
        } while (gen == 0 || gen != th->th_generation);
}

/*
 * Initialize a new timecounter and possibly use it.
 */
void
tc_init(struct timecounter *tc)
{
        u_int u;
        struct sysctl_oid *tc_root;

        u = tc->tc_frequency / tc->tc_counter_mask;
        /* XXX: We need some margin here, 10% is a guess */
        u *= 11;
        u /= 10;
        if (u > hz && tc->tc_quality >= 0) {
                tc->tc_quality = -2000;
                if (bootverbose) {
                        printf("Timecounter \"%s\" frequency %ju Hz",
                            tc->tc_name, (uintmax_t)tc->tc_frequency);
                        printf(" -- Insufficient hz, needs at least %u\n", u);
                }
        } else if (tc->tc_quality >= 0 || bootverbose) {
                printf("Timecounter \"%s\" frequency %ju Hz quality %d\n",
                    tc->tc_name, (uintmax_t)tc->tc_frequency,
                    tc->tc_quality);
        }

        tc->tc_next = timecounters;
        timecounters = tc;
        /*
         * Set up sysctl tree for this counter.
         */
        tc_root = SYSCTL_ADD_NODE(NULL,
            SYSCTL_STATIC_CHILDREN(_kern_timecounter_tc), OID_AUTO, tc->tc_name,
            CTLFLAG_RW, 0, "timecounter description");
        SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
            "mask", CTLFLAG_RD, &(tc->tc_counter_mask), 0,
            "mask for implemented bits");
        SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
            "counter", CTLTYPE_UINT | CTLFLAG_RD, tc, sizeof(*tc),
            sysctl_kern_timecounter_get, "IU", "current timecounter value");
        SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
            "frequency", CTLTYPE_QUAD | CTLFLAG_RD, tc, sizeof(*tc),
             sysctl_kern_timecounter_freq, "QU", "timecounter frequency");
        SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
            "quality", CTLFLAG_RD, &(tc->tc_quality), 0,
            "goodness of time counter");
        /*
         * Never automatically use a timecounter with negative quality.
         * Even though we run on the dummy counter, switching here may be
         * worse since this timecounter may not be monotonic.
         */
        if (tc->tc_quality < 0)
                return;
        if (tc->tc_quality < timecounter->tc_quality)
                return;
        if (tc->tc_quality == timecounter->tc_quality &&
            tc->tc_frequency < timecounter->tc_frequency)
                return;
        (void)tc->tc_get_timecount(tc);
        (void)tc->tc_get_timecount(tc);
        timecounter = tc;
}
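
/*
 * A hardware driver typically fills in a statically allocated struct
 * timecounter and hands it to tc_init() once the counter is running.
 * Illustrative sketch only; the names below are invented:
 *
 *      static u_int foo_get_timecount(struct timecounter *tc);
 *
 *      static struct timecounter foo_timecounter = {
 *              foo_get_timecount,      (tc_get_timecount)
 *              NULL,                   (tc_poll_pps)
 *              0xffffffffu,            (tc_counter_mask)
 *              0,                      (tc_frequency, set at attach)
 *              "foo",                  (tc_name)
 *              800                     (tc_quality)
 *      };
 *
 *      foo_timecounter.tc_frequency = measured_frequency;
 *      tc_init(&foo_timecounter);
 */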

/* Report the frequency of the current timecounter. */
u_int64_t
tc_getfrequency(void)
{

        return (timehands->th_counter->tc_frequency);
}

/*
 * Step our concept of UTC.  This is done by modifying our estimate of
 * when we booted.
 * XXX: not locked.
 */
void
tc_setclock(struct timespec *ts)
{
        struct timespec tbef, taft;
        struct bintime bt, bt2;

        cpu_tick_calibrate(1);
        nanotime(&tbef);
        timespec2bintime(ts, &bt);
        binuptime(&bt2);
        bintime_sub(&bt, &bt2);
        bintime_add(&bt2, &boottimebin);
        boottimebin = bt;
        bintime2timeval(&bt, &boottime);

        /* XXX fiddle all the little crinkly bits around the fiords... */
        tc_windup();
        nanotime(&taft);
        if (timestepwarnings) {
                log(LOG_INFO,
                    "Time stepped from %jd.%09ld to %jd.%09ld (%jd.%09ld)\n",
                    (intmax_t)tbef.tv_sec, tbef.tv_nsec,
                    (intmax_t)taft.tv_sec, taft.tv_nsec,
                    (intmax_t)ts->tv_sec, ts->tv_nsec);
        }
        cpu_tick_calibrate(1);
}
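
/*
 * In effect the whole step is absorbed by the boot time estimate:
 *
 *      boottimebin = new_UTC - current_uptime
 *
 * so the uptime clocks stay monotonic and only the UTC-derived clocks
 * jump.  As a rough example, stepping to UTC 1000000 s while uptime is
 * 100 s leaves boottimebin at 999900 s.
 */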

/*
 * Initialize the next struct timehands in the ring and make
 * it the active timehands.  Along the way we might switch to a different
 * timecounter and/or do seconds processing in NTP.  Slightly magic.
 */
static void
tc_windup(void)
{
        struct bintime bt;
        struct timehands *th, *tho;
        u_int64_t scale;
        u_int delta, ncount, ogen;
        int i;
        time_t t;

        /*
         * Make the next timehands a copy of the current one, but do not
         * overwrite the generation or next pointer.  While we update
         * the contents, the generation must be zero.
         */
        tho = timehands;
        th = tho->th_next;
        ogen = th->th_generation;
        th->th_generation = 0;
        bcopy(tho, th, offsetof(struct timehands, th_generation));

        /*
         * Capture a timecounter delta on the current timecounter and if
         * changing timecounters, a counter value from the new timecounter.
         * Update the offset fields accordingly.
         */
        delta = tc_delta(th);
        if (th->th_counter != timecounter)
                ncount = timecounter->tc_get_timecount(timecounter);
        else
                ncount = 0;
        th->th_offset_count += delta;
        th->th_offset_count &= th->th_counter->tc_counter_mask;
        while (delta > th->th_counter->tc_frequency) {
                /* Eat complete unadjusted seconds. */
                delta -= th->th_counter->tc_frequency;
                th->th_offset.sec++;
        }
        if ((delta > th->th_counter->tc_frequency / 2) &&
            (th->th_scale * delta < ((uint64_t)1 << 63))) {
                /* The product th_scale * delta just barely overflows. */
                th->th_offset.sec++;
        }
        bintime_addx(&th->th_offset, th->th_scale * delta);

        /*
         * Hardware latching timecounters may not generate interrupts on
         * PPS events, so instead we poll them.  There is a finite risk that
         * the hardware might capture a count which is later than the one we
         * got above, and therefore possibly in the next NTP second which might
         * have a different rate than the current NTP second.  It doesn't
         * matter in practice.
         */
        if (tho->th_counter->tc_poll_pps)
                tho->th_counter->tc_poll_pps(tho->th_counter);

        /*
         * Deal with NTP second processing.  The for loop normally
         * iterates at most once, but in extreme situations it might
         * keep NTP sane if timeouts are not run for several seconds.
         * At boot, the time step can be large when the TOD hardware
         * has been read, so on really large steps, we call
         * ntp_update_second only twice.  We need to call it twice in
         * case we missed a leap second.
         */
        bt = th->th_offset;
        bintime_add(&bt, &boottimebin);
        i = bt.sec - tho->th_microtime.tv_sec;
        if (i > LARGE_STEP)
                i = 2;
        for (; i > 0; i--) {
                t = bt.sec;
                ntp_update_second(&th->th_adjustment, &bt.sec);
                if (bt.sec != t)
                        boottimebin.sec += bt.sec - t;
        }
        /* Update the UTC timestamps used by the get*() functions. */
        /* XXX shouldn't do this here.  Should force non-`get' versions. */
        bintime2timeval(&bt, &th->th_microtime);
        bintime2timespec(&bt, &th->th_nanotime);

        /* Now is a good time to change timecounters. */
        if (th->th_counter != timecounter) {
                th->th_counter = timecounter;
                th->th_offset_count = ncount;
        }

        /*-
         * Recalculate the scaling factor.  We want the number of 1/2^64
         * fractions of a second per period of the hardware counter, taking
         * into account the th_adjustment factor which the NTP PLL/adjtime(2)
         * processing provides us with.
         *
         * The th_adjustment is nanoseconds per second with 32 bit binary
         * fraction and we want 64 bit binary fraction of second:
         *
         *       x = a * 2^32 / 10^9 = a * 4.294967296
         *
         * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
         * we can only multiply by about 850 without overflowing, that
         * leaves no suitably precise fractions for multiply before divide.
         *
         * Divide before multiply with a fraction of 2199/512 results in a
         * systematic undercompensation of 10PPM of th_adjustment.  On a
         * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
         *
         * We happily sacrifice the lowest of the 64 bits of our result
         * to the goddess of code clarity.
         *
         */
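        /*
         * Concretely: 2^32 / 10^9 = 4.294967296 while 2199 / 512 =
         * 4.294921875, about 10.6PPM low, which is the undercompensation
         * mentioned above.  The code below applies 2199/512 as
         * (adjustment / 1024) * 2199 followed by the final "scale * 2",
         * so the division happens first and the product cannot overflow.
         */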
        scale = (u_int64_t)1 << 63;
        scale += (th->th_adjustment / 1024) * 2199;
        scale /= th->th_counter->tc_frequency;
        th->th_scale = scale * 2;

        /*
         * Now that the struct timehands is again consistent, set the new
         * generation number, making sure to not make it zero.
         */
        if (++ogen == 0)
                ogen = 1;
        th->th_generation = ogen;

        /* Go live with the new struct timehands. */
        time_second = th->th_microtime.tv_sec;
        time_uptime = th->th_offset.sec;
        timehands = th;
}

/* Report or change the active timecounter hardware. */
static int
sysctl_kern_timecounter_hardware(SYSCTL_HANDLER_ARGS)
{
        char newname[32];
        struct timecounter *newtc, *tc;
        int error;

        tc = timecounter;
        strlcpy(newname, tc->tc_name, sizeof(newname));

        error = sysctl_handle_string(oidp, &newname[0], sizeof(newname), req);
        if (error != 0 || req->newptr == NULL ||
            strcmp(newname, tc->tc_name) == 0)
                return (error);
        for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) {
                if (strcmp(newname, newtc->tc_name) != 0)
                        continue;

                /* Warm up new timecounter. */
                (void)newtc->tc_get_timecount(newtc);
                (void)newtc->tc_get_timecount(newtc);

                timecounter = newtc;
                return (0);
        }
        return (EINVAL);
}

SYSCTL_PROC(_kern_timecounter, OID_AUTO, hardware, CTLTYPE_STRING | CTLFLAG_RW,
    0, 0, sysctl_kern_timecounter_hardware, "A",
    "Timecounter hardware selected");


/* Report the available timecounter hardware. */
static int
sysctl_kern_timecounter_choice(SYSCTL_HANDLER_ARGS)
{
        char buf[32], *spc;
        struct timecounter *tc;
        int error;

        spc = "";
        error = 0;
        for (tc = timecounters; error == 0 && tc != NULL; tc = tc->tc_next) {
                sprintf(buf, "%s%s(%d)",
                    spc, tc->tc_name, tc->tc_quality);
                error = SYSCTL_OUT(req, buf, strlen(buf));
                spc = " ";
        }
        return (error);
}

SYSCTL_PROC(_kern_timecounter, OID_AUTO, choice, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, sysctl_kern_timecounter_choice, "A", "Timecounter hardware detected");

/*
 * RFC 2783 PPS-API implementation.
 */

int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
        pps_params_t *app;
        struct pps_fetch_args *fapi;
#ifdef PPS_SYNC
        struct pps_kcbind_args *kapi;
#endif

        KASSERT(pps != NULL, ("NULL pps pointer in pps_ioctl"));
        switch (cmd) {
        case PPS_IOC_CREATE:
                return (0);
        case PPS_IOC_DESTROY:
                return (0);
        case PPS_IOC_SETPARAMS:
                app = (pps_params_t *)data;
                if (app->mode & ~pps->ppscap)
                        return (EINVAL);
                pps->ppsparam = *app;
                return (0);
        case PPS_IOC_GETPARAMS:
                app = (pps_params_t *)data;
                *app = pps->ppsparam;
                app->api_version = PPS_API_VERS_1;
                return (0);
        case PPS_IOC_GETCAP:
                *(int*)data = pps->ppscap;
                return (0);
        case PPS_IOC_FETCH:
                fapi = (struct pps_fetch_args *)data;
                if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
                        return (EINVAL);
                if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
                        return (EOPNOTSUPP);
                pps->ppsinfo.current_mode = pps->ppsparam.mode;
                fapi->pps_info_buf = pps->ppsinfo;
                return (0);
        case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
                kapi = (struct pps_kcbind_args *)data;
                /* XXX Only root should be able to do this */
                if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
                        return (EINVAL);
                if (kapi->kernel_consumer != PPS_KC_HARDPPS)
                        return (EINVAL);
                if (kapi->edge & ~pps->ppscap)
                        return (EINVAL);
                pps->kcmode = kapi->edge;
                return (0);
#else
                return (EOPNOTSUPP);
#endif
        default:
                return (ENOIOCTL);
        }
}

void
pps_init(struct pps_state *pps)
{
        pps->ppscap |= PPS_TSFMT_TSPEC;
        if (pps->ppscap & PPS_CAPTUREASSERT)
                pps->ppscap |= PPS_OFFSETASSERT;
        if (pps->ppscap & PPS_CAPTURECLEAR)
                pps->ppscap |= PPS_OFFSETCLEAR;
}
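
/*
 * Typical driver usage (an illustrative sketch; the softc and names are
 * invented): set the PPS_CAPTURE* bits in pps->ppscap and call pps_init()
 * at attach time, then latch the counter as early as possible in the
 * interrupt handler and do the conversion afterwards:
 *
 *      sc->pps.ppscap = PPS_CAPTUREASSERT;
 *      pps_init(&sc->pps);
 *
 *      (in the interrupt handler)
 *      pps_capture(&sc->pps);
 *      pps_event(&sc->pps, PPS_CAPTUREASSERT);
 *
 * pps_ioctl() is then called from the device's ioctl routine to expose
 * the RFC 2783 API to userland.
 */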

void
pps_capture(struct pps_state *pps)
{
        struct timehands *th;

        KASSERT(pps != NULL, ("NULL pps pointer in pps_capture"));
        th = timehands;
        pps->capgen = th->th_generation;
        pps->capth = th;
        pps->capcount = th->th_counter->tc_get_timecount(th->th_counter);
        if (pps->capgen != th->th_generation)
                pps->capgen = 0;
}

void
pps_event(struct pps_state *pps, int event)
{
        struct bintime bt;
        struct timespec ts, *tsp, *osp;
        u_int tcount, *pcount;
        int foff, fhard;
        pps_seq_t *pseq;

        KASSERT(pps != NULL, ("NULL pps pointer in pps_event"));
        /* If the timecounter was wound up underneath us, bail out. */
        if (pps->capgen == 0 || pps->capgen != pps->capth->th_generation)
                return;

        /* Things would be easier with arrays. */
        if (event == PPS_CAPTUREASSERT) {
                tsp = &pps->ppsinfo.assert_timestamp;
                osp = &pps->ppsparam.assert_offset;
                foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
                fhard = pps->kcmode & PPS_CAPTUREASSERT;
                pcount = &pps->ppscount[0];
                pseq = &pps->ppsinfo.assert_sequence;
        } else {
                tsp = &pps->ppsinfo.clear_timestamp;
                osp = &pps->ppsparam.clear_offset;
                foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
                fhard = pps->kcmode & PPS_CAPTURECLEAR;
                pcount = &pps->ppscount[1];
                pseq = &pps->ppsinfo.clear_sequence;
        }

        /*
         * If the timecounter changed, we cannot compare the count values, so
         * we have to drop the rest of the PPS-stuff until the next event.
         */
        if (pps->ppstc != pps->capth->th_counter) {
                pps->ppstc = pps->capth->th_counter;
                *pcount = pps->capcount;
                pps->ppscount[2] = pps->capcount;
                return;
        }

        /* Convert the count to a timespec. */
        tcount = pps->capcount - pps->capth->th_offset_count;
        tcount &= pps->capth->th_counter->tc_counter_mask;
        bt = pps->capth->th_offset;
        bintime_addx(&bt, pps->capth->th_scale * tcount);
        bintime_add(&bt, &boottimebin);
        bintime2timespec(&bt, &ts);

        /* If the timecounter was wound up underneath us, bail out. */
        if (pps->capgen != pps->capth->th_generation)
                return;

        *pcount = pps->capcount;
        (*pseq)++;
        *tsp = ts;

        if (foff) {
                timespecadd(tsp, osp);
                if (tsp->tv_nsec < 0) {
                        tsp->tv_nsec += 1000000000;
                        tsp->tv_sec -= 1;
                }
        }
#ifdef PPS_SYNC
        if (fhard) {
                u_int64_t scale;

                /*
                 * Feed the NTP PLL/FLL.
                 * The FLL wants to know how many (hardware) nanoseconds
                 * elapsed since the previous event.
                 */
                tcount = pps->capcount - pps->ppscount[2];
                pps->ppscount[2] = pps->capcount;
                tcount &= pps->capth->th_counter->tc_counter_mask;
                scale = (u_int64_t)1 << 63;
                scale /= pps->capth->th_counter->tc_frequency;
                scale *= 2;
                bt.sec = 0;
                bt.frac = 0;
                bintime_addx(&bt, scale * tcount);
                bintime2timespec(&bt, &ts);
                hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec);
        }
#endif
}

/*
 * Timecounters need to be updated every so often to prevent the hardware
 * counter from overflowing.  Updating also recalculates the cached values
 * used by the get*() family of functions, so their precision depends on
 * the update frequency.
 */

static int tc_tick;
SYSCTL_INT(_kern_timecounter, OID_AUTO, tick, CTLFLAG_RD, &tc_tick, 0,
    "Approximate number of hardclock ticks in a millisecond");

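/*
 * Called from the periodic clock tick.  Winds up the timehands every
 * tc_tick ticks and, once every 16 seconds of uptime, triggers the CPU
 * tick calibration below.
 */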
void
tc_ticktock(void)
{
        static int count;
        static time_t last_calib;

        if (++count < tc_tick)
                return;
        count = 0;
        tc_windup();
        if (time_uptime != last_calib && !(time_uptime & 0xf)) {
                cpu_tick_calibrate(0);
                last_calib = time_uptime;
        }
}

static void
inittimecounter(void *dummy)
{
        u_int p;

        /*
         * Set the initial timeout to
         * max(1, <approx. number of hardclock ticks in a millisecond>).
         * People should probably not use the sysctl to set the timeout
         * to smaller than its initial value, since that value is the
         * smallest reasonable one.  If they want better timestamps they
         * should use the non-"get"* functions.
         */
        if (hz > 1000)
                tc_tick = (hz + 500) / 1000;
        else
                tc_tick = 1;
        p = (tc_tick * 1000000) / hz;
        printf("Timecounters tick every %d.%03u msec\n", p / 1000, p % 1000);

        /* warm up new timecounter (again) and get rolling. */
        (void)timecounter->tc_get_timecount(timecounter);
        (void)timecounter->tc_get_timecount(timecounter);
}

SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL);

/* Cpu tick handling -------------------------------------------------*/

static int cpu_tick_variable;
static uint64_t cpu_tick_frequency;

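/*
 * Default cpu_ticks implementation: synthesize a monotonically increasing
 * 64-bit tick count from the current timecounter by accumulating a base
 * offset every time the (masked) hardware counter wraps around.
 */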
static uint64_t
tc_cpu_ticks(void)
{
        static uint64_t base;
        static unsigned last;
        unsigned u;
        struct timecounter *tc;

        tc = timehands->th_counter;
        u = tc->tc_get_timecount(tc) & tc->tc_counter_mask;
        if (u < last)
                base += (uint64_t)tc->tc_counter_mask + 1;
        last = u;
        return (u + base);
}

/*
 * This function gets called every 16 seconds on only one designated
 * CPU in the system from hardclock() via tc_ticktock().
 *
 * Whenever the real time clock is stepped we get called with reset=1
 * to make sure we handle suspend/resume and similar events correctly.
 */

static void
cpu_tick_calibrate(int reset)
{
        static uint64_t c_last;
        uint64_t c_this, c_delta;
        static struct bintime  t_last;
        struct bintime t_this, t_delta;
        uint32_t divi;

        if (reset) {
                /* The clock was stepped, abort & reset */
                t_last.sec = 0;
                return;
        }

        /* we don't calibrate fixed rate cputicks */
        if (!cpu_tick_variable)
                return;

        getbinuptime(&t_this);
        c_this = cpu_ticks();
        if (t_last.sec != 0) {
                c_delta = c_this - c_last;
                t_delta = t_this;
                bintime_sub(&t_delta, &t_last);
                /*
                 * Validate that 16 +/- 1/256 seconds passed.
                 * After division by 16 this gives us a precision of
                 * roughly 250PPM which is sufficient
                 */
                if (t_delta.sec > 16 || (
                    t_delta.sec == 16 && t_delta.frac >= (0x01LL << 56))) {
                        /* too long */
                        if (bootverbose)
                                printf("t_delta %ju.%016jx too long\n",
                                    (uintmax_t)t_delta.sec,
                                    (uintmax_t)t_delta.frac);
                } else if (t_delta.sec < 15 ||
                    (t_delta.sec == 15 && t_delta.frac <= (0xffLL << 56))) {
                        /* too short */
                        if (bootverbose)
                                printf("t_delta %ju.%016jx too short\n",
                                    (uintmax_t)t_delta.sec,
                                    (uintmax_t)t_delta.frac);
                } else {
                        /* just right */
                        /*
                         * Headroom:
                         *      2^(64-20) / 16[s] =
                         *      2^(44) / 16[s] =
                         *      17.592.186.044.416 / 16 =
                         *      1.099.511.627.776 [Hz]
                         */
                        divi = t_delta.sec << 20;
                        divi |= t_delta.frac >> (64 - 20);
                        c_delta <<= 20;
                        c_delta /= divi;
                        if (c_delta  > cpu_tick_frequency) {
                                if (0 && bootverbose)
                                        printf("cpu_tick increased to %ju Hz\n",
                                            c_delta);
                                cpu_tick_frequency = c_delta;
                        }
                }
        }
        c_last = c_this;
        t_last = t_this;
}

void
set_cputicker(cpu_tick_f *func, uint64_t freq, unsigned var)
{

        if (func == NULL) {
                cpu_ticks = tc_cpu_ticks;
        } else {
                cpu_tick_frequency = freq;
                cpu_tick_variable = var;
                cpu_ticks = func;
        }
}

uint64_t
cpu_tickrate(void)
{

        if (cpu_ticks == tc_cpu_ticks)
                return (tc_getfrequency());
        return (cpu_tick_frequency);
}

/*
 * We need to be slightly careful converting cputicks to microseconds.
 * There is plenty of margin in 64 bits of microseconds (half a million
 * years) and in 64 bits at 4 GHz (146 years), but if we do a multiply
 * before divide conversion (to retain precision) we find that the
 * margin shrinks to 1.5 hours (one millionth of 146y).
 * With a three prong approach we never lose significant bits, no
 * matter what the cputick rate and length of timeinterval is.
 */

uint64_t
cputick2usec(uint64_t tick)
{

        if (tick > 18446744073709551LL)         /* floor(2^64 / 1000) */
                return (tick / (cpu_tickrate() / 1000000LL));
        else if (tick > 18446744073709LL)       /* floor(2^64 / 1000000) */
                return ((tick * 1000LL) / (cpu_tickrate() / 1000LL));
        else
                return ((tick * 1000000LL) / cpu_tickrate());
}
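
/*
 * Worked example of the three prongs above at a 4 GHz tick rate: counts
 * up to floor(2^64 / 10^6) ticks (roughly 1.5 hours) take the full
 * precision path, tick * 1000000 / rate; counts up to floor(2^64 / 10^3)
 * ticks (roughly 53 days) use tick * 1000 / (rate / 1000); anything
 * larger falls back to tick / (rate / 1000000).  Each prong gives up just
 * enough precision to keep the multiplication from overflowing 64 bits.
 */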

cpu_tick_f      *cpu_ticks = tc_cpu_ticks;