source: rtems-libbsd/freebsd/sys/kern/kern_timeout.c @ 6d9d7b1

Last change on this file since 6d9d7b1 was 6d9d7b1, checked in by Sebastian Huber <sebastian.huber@…>, on 07/26/18 at 12:12:46

Critical bug fix for callouts

FreeBSD has two callout executors, one in software and one in hardware
interrupt context. In libbsd, all callouts are executed by the timer
server. Entirely remove the different execution contexts for libbsd.
Previously, this was not done properly, which could result in an invalid
callout_drain() sequence leading to system memory corruption.
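
For context, the sequence this fix protects is the usual stop-then-drain teardown of a callout: callout_drain() must not return while the callback can still be running, otherwise freeing the object that owns the callout corrupts memory. Below is a minimal sketch of that consumer pattern, assuming a hypothetical driver (names such as my_softc, my_attach, my_detach and my_timeout are illustrative and not part of this file):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/callout.h>
#include <sys/lock.h>
#include <sys/mutex.h>

struct my_softc {
        struct mtx      sc_lock;
        struct callout  sc_timer;
};

static void
my_timeout(void *arg)
{
        struct my_softc *sc = arg;

        /* Runs with sc_lock held because of callout_init_mtx(). */
        callout_schedule(&sc->sc_timer, hz);    /* re-arm about one second later */
}

static void
my_attach(struct my_softc *sc)
{
        mtx_init(&sc->sc_lock, "my_softc", NULL, MTX_DEF);
        callout_init_mtx(&sc->sc_timer, &sc->sc_lock, 0);
        mtx_lock(&sc->sc_lock);
        callout_reset(&sc->sc_timer, hz, my_timeout, sc);
        mtx_unlock(&sc->sc_lock);
}

static void
my_detach(struct my_softc *sc)
{
        /*
         * callout_drain() may only return once a pending or currently
         * running my_timeout() has completed; an inconsistent executor
         * state here is what previously allowed a use-after-free.
         */
        callout_drain(&sc->sc_timer);
        mtx_destroy(&sc->sc_lock);
}

With callout_init_mtx(), the callout subsystem acquires sc_lock around my_timeout(), so a callout_stop() issued while holding sc_lock cancels it safely; callout_drain() additionally waits for a callback that is already running, which is the path affected by the change described above.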

  • Property mode set to 100644
File size: 50.5 KB
Line 
1#include <machine/rtems-bsd-kernel-space.h>
2
3/*-
4 * Copyright (c) 1982, 1986, 1991, 1993
5 *      The Regents of the University of California.  All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 *    notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 *    notice, this list of conditions and the following disclaimer in the
19 *    documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 *    may be used to endorse or promote products derived from this software
22 *    without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 *      From: @(#)kern_clock.c  8.5 (Berkeley) 1/21/94
37 */
38
39#include <sys/cdefs.h>
40__FBSDID("$FreeBSD$");
41
42#include <rtems/bsd/local/opt_callout_profiling.h>
43#include <rtems/bsd/local/opt_ddb.h>
44#if defined(__arm__) || defined(__rtems__)
45#include <rtems/bsd/local/opt_timer.h>
46#endif
47#include <rtems/bsd/local/opt_rss.h>
48
49#include <sys/param.h>
50#include <sys/systm.h>
51#include <sys/bus.h>
52#include <sys/callout.h>
53#include <sys/file.h>
54#include <sys/interrupt.h>
55#include <sys/kernel.h>
56#include <sys/ktr.h>
57#include <sys/lock.h>
58#include <sys/malloc.h>
59#include <sys/mutex.h>
60#include <sys/proc.h>
61#include <sys/sdt.h>
62#include <sys/sleepqueue.h>
63#include <sys/sysctl.h>
64#include <sys/smp.h>
65
66#ifdef DDB
67#include <ddb/ddb.h>
68#include <machine/_inttypes.h>
69#endif
70
71#ifdef SMP
72#include <machine/cpu.h>
73#endif
74
75#ifndef NO_EVENTTIMERS
76DPCPU_DECLARE(sbintime_t, hardclocktime);
77#endif
78
79SDT_PROVIDER_DEFINE(callout_execute);
80SDT_PROBE_DEFINE1(callout_execute, , , callout__start, "struct callout *");
81SDT_PROBE_DEFINE1(callout_execute, , , callout__end, "struct callout *");
82
83#ifdef CALLOUT_PROFILING
84static int avg_depth;
85SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
86    "Average number of items examined per softclock call. Units = 1/1000");
87static int avg_gcalls;
88SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
89    "Average number of Giant callouts made per softclock call. Units = 1/1000");
90static int avg_lockcalls;
91SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
92    "Average number of lock callouts made per softclock call. Units = 1/1000");
93static int avg_mpcalls;
94SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
95    "Average number of MP callouts made per softclock call. Units = 1/1000");
96static int avg_depth_dir;
97SYSCTL_INT(_debug, OID_AUTO, to_avg_depth_dir, CTLFLAG_RD, &avg_depth_dir, 0,
98    "Average number of direct callouts examined per callout_process call. "
99    "Units = 1/1000");
100static int avg_lockcalls_dir;
101SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls_dir, CTLFLAG_RD,
102    &avg_lockcalls_dir, 0, "Average number of lock direct callouts made per "
103    "callout_process call. Units = 1/1000");
104static int avg_mpcalls_dir;
105SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls_dir, CTLFLAG_RD, &avg_mpcalls_dir,
106    0, "Average number of MP direct callouts made per callout_process call. "
107    "Units = 1/1000");
108#endif
109
110#ifndef __rtems__
111static int ncallout;
112SYSCTL_INT(_kern, OID_AUTO, ncallout, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &ncallout, 0,
113    "Number of entries in callwheel and size of timeout() preallocation");
114#else /* __rtems__ */
115#define ncallout 16
116#endif /* __rtems__ */
117
118#ifdef  RSS
119static int pin_default_swi = 1;
120static int pin_pcpu_swi = 1;
121#else
122static int pin_default_swi = 0;
123static int pin_pcpu_swi = 0;
124#endif
125
126SYSCTL_INT(_kern, OID_AUTO, pin_default_swi, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pin_default_swi,
127    0, "Pin the default (non-per-cpu) swi (shared with PCPU 0 swi)");
128SYSCTL_INT(_kern, OID_AUTO, pin_pcpu_swi, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pin_pcpu_swi,
129    0, "Pin the per-CPU swis (except PCPU 0, which is also default)");
130
131/*
132 * TODO:
133 *      allocate more timeout table slots when table overflows.
134 */
135u_int callwheelsize, callwheelmask;
136
137/*
138 * The callout cpu exec entities represent the information necessary to
139 * describe the state of callouts currently running on the CPU and the
140 * information necessary to migrate callouts to a new callout cpu.  In
141 * particular, the first entry of the array cc_exec_entity holds information
142 * for a callout running in SWI thread context, while the second one holds
143 * information for a callout running directly from hardware interrupt context.
144 * The cached information is very important for deferring migration when
145 * the migrating callout is already running.
146 */
147struct cc_exec {
148        struct callout          *cc_curr;
149        void                    (*cc_drain)(void *);
150#ifdef SMP
151        void                    (*ce_migration_func)(void *);
152        void                    *ce_migration_arg;
153        int                     ce_migration_cpu;
154        sbintime_t              ce_migration_time;
155        sbintime_t              ce_migration_prec;
156#endif
157        bool                    cc_cancel;
158        bool                    cc_waiting;
159};
160
161/*
162 * There is one struct callout_cpu per cpu, holding all relevant
163 * state for the callout processing thread on the individual CPU.
164 */
165struct callout_cpu {
166        struct mtx_padalign     cc_lock;
167#ifndef __rtems__
168        struct cc_exec          cc_exec_entity[2];
169#else /* __rtems__ */
170        struct cc_exec          cc_exec_entity;
171#endif /* __rtems__ */
172        struct callout          *cc_next;
173        struct callout          *cc_callout;
174        struct callout_list     *cc_callwheel;
175#ifndef __rtems__
176        struct callout_tailq    cc_expireq;
177#endif /* __rtems__ */
178        struct callout_slist    cc_callfree;
179        sbintime_t              cc_firstevent;
180        sbintime_t              cc_lastscan;
181        void                    *cc_cookie;
182        u_int                   cc_bucket;
183        u_int                   cc_inited;
184        char                    cc_ktr_event_name[20];
185};
186
187#define callout_migrating(c)    ((c)->c_iflags & CALLOUT_DFRMIGRATION)
188
189#ifndef __rtems__
190#define cc_exec_curr(cc, dir)           cc->cc_exec_entity[dir].cc_curr
191#define cc_exec_drain(cc, dir)          cc->cc_exec_entity[dir].cc_drain
192#else /* __rtems__ */
193#define cc_exec_curr(cc, dir)           cc->cc_exec_entity.cc_curr
194#define cc_exec_drain(cc, dir)          cc->cc_exec_entity.cc_drain
195#endif /* __rtems__ */
196#define cc_exec_next(cc)                cc->cc_next
197#ifndef __rtems__
198#define cc_exec_cancel(cc, dir)         cc->cc_exec_entity[dir].cc_cancel
199#define cc_exec_waiting(cc, dir)        cc->cc_exec_entity[dir].cc_waiting
200#else /* __rtems__ */
201#define cc_exec_cancel(cc, dir)         cc->cc_exec_entity.cc_cancel
202#define cc_exec_waiting(cc, dir)        cc->cc_exec_entity.cc_waiting
203#endif /* __rtems__ */
204#ifdef SMP
205#define cc_migration_func(cc, dir)      cc->cc_exec_entity[dir].ce_migration_func
206#define cc_migration_arg(cc, dir)       cc->cc_exec_entity[dir].ce_migration_arg
207#define cc_migration_cpu(cc, dir)       cc->cc_exec_entity[dir].ce_migration_cpu
208#define cc_migration_time(cc, dir)      cc->cc_exec_entity[dir].ce_migration_time
209#define cc_migration_prec(cc, dir)      cc->cc_exec_entity[dir].ce_migration_prec
210
211struct callout_cpu cc_cpu[MAXCPU];
212#define CPUBLOCK        MAXCPU
213#define CC_CPU(cpu)     (&cc_cpu[(cpu)])
214#define CC_SELF()       CC_CPU(PCPU_GET(cpuid))
215#else
216struct callout_cpu cc_cpu;
217#define CC_CPU(cpu)     &cc_cpu
218#define CC_SELF()       &cc_cpu
219#endif
220#define CC_LOCK(cc)     mtx_lock_spin(&(cc)->cc_lock)
221#define CC_UNLOCK(cc)   mtx_unlock_spin(&(cc)->cc_lock)
222#define CC_LOCK_ASSERT(cc)      mtx_assert(&(cc)->cc_lock, MA_OWNED)
223
224static int timeout_cpu;
225
226static void     callout_cpu_init(struct callout_cpu *cc, int cpu);
227static void     softclock_call_cc(struct callout *c, struct callout_cpu *cc,
228#ifdef CALLOUT_PROFILING
229                    int *mpcalls, int *lockcalls, int *gcalls,
230#endif
231                    int direct);
232
233static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");
234
235/**
236 * Locked by cc_lock:
237 *   cc_curr         - If a callout is in progress, it is cc_curr.
238 *                     If cc_curr is non-NULL, threads waiting in
239 *                     callout_drain() will be woken up as soon as the
240 *                     relevant callout completes.
241 *   cc_cancel       - Changing to 1 with both the callout's lock and
242 *                     cc_lock held guarantees that the current callout
243 *                     will not run.  The softclock() function sets this
244 *                     to 0 before it drops cc_lock to acquire c_lock,
245 *                     and it calls the handler only if cc_cancel is
246 *                     still 0 after cc_lock is successfully acquired.
247 *   cc_waiting      - If a thread is waiting in callout_drain(), then
248 *                     cc_waiting is true.  Set only when
249 *                     cc_curr is non-NULL.
250 */
251
252/*
253 * Resets the execution entity tied to a specific callout cpu.
254 */
255static void
256cc_cce_cleanup(struct callout_cpu *cc, int direct)
257{
258
259        cc_exec_curr(cc, direct) = NULL;
260        cc_exec_cancel(cc, direct) = false;
261        cc_exec_waiting(cc, direct) = false;
262#ifdef SMP
263        cc_migration_cpu(cc, direct) = CPUBLOCK;
264        cc_migration_time(cc, direct) = 0;
265        cc_migration_prec(cc, direct) = 0;
266        cc_migration_func(cc, direct) = NULL;
267        cc_migration_arg(cc, direct) = NULL;
268#endif
269}
270
271/*
272 * Checks if migration is requested by a specific callout cpu.
273 */
274static int
275cc_cce_migrating(struct callout_cpu *cc, int direct)
276{
277
278#ifdef SMP
279        return (cc_migration_cpu(cc, direct) != CPUBLOCK);
280#else
281        return (0);
282#endif
283}
284
285/*
286 * Kernel low level callwheel initialization
287 * called on cpu0 during kernel startup.
288 */
289#ifdef __rtems__
290static void rtems_bsd_timeout_init_early(void *);
291
292static void
293rtems_bsd_callout_timer(rtems_id id, void *arg)
294{
295        rtems_status_code sc;
296
297        (void) arg;
298
299        sc = rtems_timer_reset(id);
300        BSD_ASSERT(sc == RTEMS_SUCCESSFUL);
301
302        callout_process(sbinuptime());
303}
304
305static void
306rtems_bsd_timeout_init_late(void *unused)
307{
308        rtems_status_code sc;
309        rtems_id id;
310
311        (void) unused;
312
313        sc = rtems_timer_create(rtems_build_name('_', 'C', 'L', 'O'), &id);
314        BSD_ASSERT(sc == RTEMS_SUCCESSFUL);
315
316        sc = rtems_timer_server_fire_after(id, 1, rtems_bsd_callout_timer, NULL);
317        BSD_ASSERT(sc == RTEMS_SUCCESSFUL);
318}
319
320SYSINIT(rtems_bsd_timeout_early, SI_SUB_VM, SI_ORDER_FIRST,
321    rtems_bsd_timeout_init_early, NULL);
322
323SYSINIT(rtems_bsd_timeout_late, SI_SUB_LAST, SI_ORDER_FIRST,
324    rtems_bsd_timeout_init_late, NULL);
325
326static void
327rtems_bsd_timeout_init_early(void *dummy)
328#else /* __rtems__ */
329static void
330callout_callwheel_init(void *dummy)
331#endif /* __rtems__ */
332{
333        struct callout_cpu *cc;
334#ifdef __rtems__
335        (void) dummy;
336#endif /* __rtems__ */
337
338        /*
339         * Calculate the size of the callout wheel and the preallocated
340         * timeout() structures.
341         * XXX: Clip callout to result of previous function of maxusers
342         * maximum 384.  This is still huge, but acceptable.
343         */
344        memset(CC_CPU(0), 0, sizeof(cc_cpu));
345#ifndef __rtems__
346        ncallout = imin(16 + maxproc + maxfiles, 18508);
347        TUNABLE_INT_FETCH("kern.ncallout", &ncallout);
348#endif /* __rtems__ */
349
350        /*
351         * Calculate callout wheel size, should be next power of two higher
352         * than 'ncallout'.
353         */
354        callwheelsize = 1 << fls(ncallout);
355        callwheelmask = callwheelsize - 1;
356
357#ifndef __rtems__
358        /*
359         * Fetch whether we're pinning the swi's or not.
360         */
361        TUNABLE_INT_FETCH("kern.pin_default_swi", &pin_default_swi);
362        TUNABLE_INT_FETCH("kern.pin_pcpu_swi", &pin_pcpu_swi);
363#endif /* __rtems__ */
364
365        /*
366         * Only cpu0 handles timeout(9) and receives a preallocation.
367         *
368         * XXX: Once all timeout(9) consumers are converted this can
369         * be removed.
370         */
371        timeout_cpu = PCPU_GET(cpuid);
372        cc = CC_CPU(timeout_cpu);
373        cc->cc_callout = malloc(ncallout * sizeof(struct callout),
374            M_CALLOUT, M_WAITOK);
375        callout_cpu_init(cc, timeout_cpu);
376}
377#ifndef __rtems__
378SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL);
379#endif /* __rtems__ */
380
381/*
382 * Initialize the per-cpu callout structures.
383 */
384static void
385callout_cpu_init(struct callout_cpu *cc, int cpu)
386{
387        struct callout *c;
388        int i;
389
390        mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
391        SLIST_INIT(&cc->cc_callfree);
392        cc->cc_inited = 1;
393        cc->cc_callwheel = malloc(sizeof(struct callout_list) * callwheelsize,
394            M_CALLOUT, M_WAITOK);
395        for (i = 0; i < callwheelsize; i++)
396                LIST_INIT(&cc->cc_callwheel[i]);
397#ifndef __rtems__
398        TAILQ_INIT(&cc->cc_expireq);
399#endif /* __rtems__ */
400        cc->cc_firstevent = SBT_MAX;
401        for (i = 0; i < 2; i++)
402                cc_cce_cleanup(cc, i);
403        snprintf(cc->cc_ktr_event_name, sizeof(cc->cc_ktr_event_name),
404            "callwheel cpu %d", cpu);
405        if (cc->cc_callout == NULL)     /* Only cpu0 handles timeout(9) */
406                return;
407        for (i = 0; i < ncallout; i++) {
408                c = &cc->cc_callout[i];
409                callout_init(c, 0);
410                c->c_iflags = CALLOUT_LOCAL_ALLOC;
411                SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
412        }
413}
414
415#ifdef SMP
416/*
417 * Switches the cpu tied to a specific callout.
418 * The function expects the incoming callout cpu to be locked and
419 * returns with the outgoing callout cpu locked.
420 */
421static struct callout_cpu *
422callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
423{
424        struct callout_cpu *new_cc;
425
426        MPASS(c != NULL && cc != NULL);
427        CC_LOCK_ASSERT(cc);
428
429        /*
430         * Avoid interrupts and preemption firing after the callout cpu
431         * is blocked in order to avoid deadlocks as the new thread
432         * may be willing to acquire the callout cpu lock.
433         */
434        c->c_cpu = CPUBLOCK;
435        spinlock_enter();
436        CC_UNLOCK(cc);
437        new_cc = CC_CPU(new_cpu);
438        CC_LOCK(new_cc);
439        spinlock_exit();
440        c->c_cpu = new_cpu;
441        return (new_cc);
442}
443#endif
444
445#ifndef __rtems__
446/*
447 * Start standard softclock thread.
448 */
449static void
450start_softclock(void *dummy)
451{
452        struct callout_cpu *cc;
453        char name[MAXCOMLEN];
454#ifdef SMP
455        int cpu;
456        struct intr_event *ie;
457#endif
458
459        cc = CC_CPU(timeout_cpu);
460        snprintf(name, sizeof(name), "clock (%d)", timeout_cpu);
461        if (swi_add(&clk_intr_event, name, softclock, cc, SWI_CLOCK,
462            INTR_MPSAFE, &cc->cc_cookie))
463                panic("died while creating standard software ithreads");
464        if (pin_default_swi &&
465            (intr_event_bind(clk_intr_event, timeout_cpu) != 0)) {
466                printf("%s: timeout clock couldn't be pinned to cpu %d\n",
467                    __func__,
468                    timeout_cpu);
469        }
470
471#ifdef SMP
472        CPU_FOREACH(cpu) {
473                if (cpu == timeout_cpu)
474                        continue;
475                cc = CC_CPU(cpu);
476                cc->cc_callout = NULL;  /* Only cpu0 handles timeout(9). */
477                callout_cpu_init(cc, cpu);
478                snprintf(name, sizeof(name), "clock (%d)", cpu);
479                ie = NULL;
480                if (swi_add(&ie, name, softclock, cc, SWI_CLOCK,
481                    INTR_MPSAFE, &cc->cc_cookie))
482                        panic("died while creating standard software ithreads");
483                if (pin_pcpu_swi && (intr_event_bind(ie, cpu) != 0)) {
484                        printf("%s: per-cpu clock couldn't be pinned to "
485                            "cpu %d\n",
486                            __func__,
487                            cpu);
488                }
489        }
490#endif
491}
492SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);
493#endif /* __rtems__ */
494
495#define CC_HASH_SHIFT   8
496
497static inline u_int
498callout_hash(sbintime_t sbt)
499{
500
501        return (sbt >> (32 - CC_HASH_SHIFT));
502}
503
504static inline u_int
505callout_get_bucket(sbintime_t sbt)
506{
507
508        return (callout_hash(sbt) & callwheelmask);
509}
510
511void
512callout_process(sbintime_t now)
513{
514#ifndef __rtems__
515        struct callout *tmp, *tmpn;
516#else /* __rtems__ */
517        struct callout *tmp;
518#endif /* __rtems__ */
519        struct callout_cpu *cc;
520        struct callout_list *sc;
521        sbintime_t first, last, max, tmp_max;
522        uint32_t lookahead;
523        u_int firstb, lastb, nowb;
524#ifdef CALLOUT_PROFILING
525        int depth_dir = 0, mpcalls_dir = 0, lockcalls_dir = 0;
526#endif
527
528        cc = CC_SELF();
529        mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
530
531        /* Compute the buckets of the last scan and present times. */
532        firstb = callout_hash(cc->cc_lastscan);
533        cc->cc_lastscan = now;
534        nowb = callout_hash(now);
535
536        /* Compute the last bucket and minimum time of the bucket after it. */
537        if (nowb == firstb)
538                lookahead = (SBT_1S / 16);
539        else if (nowb - firstb == 1)
540                lookahead = (SBT_1S / 8);
541        else
542                lookahead = (SBT_1S / 2);
543        first = last = now;
544        first += (lookahead / 2);
545        last += lookahead;
546        last &= (0xffffffffffffffffLLU << (32 - CC_HASH_SHIFT));
547        lastb = callout_hash(last) - 1;
548        max = last;
549
550        /*
551         * Check if we wrapped around the entire wheel from the last scan.
552         * If so, we need to scan the entire wheel for pending callouts.
553         */
554        if (lastb - firstb >= callwheelsize) {
555                lastb = firstb + callwheelsize - 1;
556                if (nowb - firstb >= callwheelsize)
557                        nowb = lastb;
558        }
559
560        /* Iterate callwheel from firstb to nowb and then up to lastb. */
561        do {
562                sc = &cc->cc_callwheel[firstb & callwheelmask];
563                tmp = LIST_FIRST(sc);
564                while (tmp != NULL) {
565                        /* Run the callout if its scheduled time has been reached. */
566                        if (tmp->c_time <= now) {
567#ifndef __rtems__
568                                /*
569                                 * Consumer told us the callout may be run
570                                 * directly from hardware interrupt context.
571                                 */
572                                if (tmp->c_iflags & CALLOUT_DIRECT) {
573#endif /* __rtems__ */
574#ifdef CALLOUT_PROFILING
575                                        ++depth_dir;
576#endif
577                                        cc_exec_next(cc) =
578                                            LIST_NEXT(tmp, c_links.le);
579                                        cc->cc_bucket = firstb & callwheelmask;
580                                        LIST_REMOVE(tmp, c_links.le);
581                                        softclock_call_cc(tmp, cc,
582#ifdef CALLOUT_PROFILING
583                                            &mpcalls_dir, &lockcalls_dir, NULL,
584#endif
585                                            1);
586                                        tmp = cc_exec_next(cc);
587                                        cc_exec_next(cc) = NULL;
588#ifndef __rtems__
589                                } else {
590                                        tmpn = LIST_NEXT(tmp, c_links.le);
591                                        LIST_REMOVE(tmp, c_links.le);
592                                        TAILQ_INSERT_TAIL(&cc->cc_expireq,
593                                            tmp, c_links.tqe);
594                                        tmp->c_iflags |= CALLOUT_PROCESSED;
595                                        tmp = tmpn;
596                                }
597#endif /* __rtems__ */
598                                continue;
599                        }
600                        /* Skip events from distant future. */
601                        if (tmp->c_time >= max)
602                                goto next;
603                        /*
604                         * The event's minimal time is later than the present
605                         * maximal time, so it cannot be aggregated.
606                         */
607                        if (tmp->c_time > last) {
608                                lastb = nowb;
609                                goto next;
610                        }
611                        /* Update first and last time, respecting this event. */
612                        if (tmp->c_time < first)
613                                first = tmp->c_time;
614                        tmp_max = tmp->c_time + tmp->c_precision;
615                        if (tmp_max < last)
616                                last = tmp_max;
617next:
618                        tmp = LIST_NEXT(tmp, c_links.le);
619                }
620                /* Proceed with the next bucket. */
621                firstb++;
622                /*
623                 * Stop if we looked past the present time and found
624                 * some event that we cannot execute now.
625                 * Stop if we looked far enough into the future.
626                 */
627        } while (((int)(firstb - lastb)) <= 0);
628        cc->cc_firstevent = last;
629#ifndef NO_EVENTTIMERS
630        cpu_new_callout(curcpu, last, first);
631#endif
632#ifdef CALLOUT_PROFILING
633        avg_depth_dir += (depth_dir * 1000 - avg_depth_dir) >> 8;
634        avg_mpcalls_dir += (mpcalls_dir * 1000 - avg_mpcalls_dir) >> 8;
635        avg_lockcalls_dir += (lockcalls_dir * 1000 - avg_lockcalls_dir) >> 8;
636#endif
637        mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
638#ifndef __rtems__
639        /*
640         * swi_sched acquires the thread lock, so we don't want to call it
641         * with cc_lock held; incorrect locking order.
642         */
643        if (!TAILQ_EMPTY(&cc->cc_expireq))
644                swi_sched(cc->cc_cookie, 0);
645#endif /* __rtems__ */
646}
647
648static struct callout_cpu *
649callout_lock(struct callout *c)
650{
651        struct callout_cpu *cc;
652        int cpu;
653
654        for (;;) {
655                cpu = c->c_cpu;
656#ifdef SMP
657                if (cpu == CPUBLOCK) {
658                        while (c->c_cpu == CPUBLOCK)
659                                cpu_spinwait();
660                        continue;
661                }
662#endif
663                cc = CC_CPU(cpu);
664                CC_LOCK(cc);
665                if (cpu == c->c_cpu)
666                        break;
667                CC_UNLOCK(cc);
668        }
669        return (cc);
670}
671
672static void
673callout_cc_add(struct callout *c, struct callout_cpu *cc,
674    sbintime_t sbt, sbintime_t precision, void (*func)(void *),
675    void *arg, int cpu, int flags)
676{
677        int bucket;
678
679        CC_LOCK_ASSERT(cc);
680        if (sbt < cc->cc_lastscan)
681                sbt = cc->cc_lastscan;
682        c->c_arg = arg;
683        c->c_iflags |= CALLOUT_PENDING;
684        c->c_iflags &= ~CALLOUT_PROCESSED;
685        c->c_flags |= CALLOUT_ACTIVE;
686#ifndef __rtems__
687        if (flags & C_DIRECT_EXEC)
688                c->c_iflags |= CALLOUT_DIRECT;
689#endif /* __rtems__ */
690        c->c_func = func;
691        c->c_time = sbt;
692        c->c_precision = precision;
693        bucket = callout_get_bucket(c->c_time);
694        CTR3(KTR_CALLOUT, "precision set for %p: %d.%08x",
695            c, (int)(c->c_precision >> 32),
696            (u_int)(c->c_precision & 0xffffffff));
697        LIST_INSERT_HEAD(&cc->cc_callwheel[bucket], c, c_links.le);
698        if (cc->cc_bucket == bucket)
699                cc_exec_next(cc) = c;
700#ifndef NO_EVENTTIMERS
701        /*
702         * Inform the eventtimers(4) subsystem there's a new callout
703         * that has been inserted, but only if really required.
704         */
705        if (SBT_MAX - c->c_time < c->c_precision)
706                c->c_precision = SBT_MAX - c->c_time;
707        sbt = c->c_time + c->c_precision;
708        if (sbt < cc->cc_firstevent) {
709                cc->cc_firstevent = sbt;
710                cpu_new_callout(cpu, sbt, c->c_time);
711        }
712#endif
713}
714
715static void
716callout_cc_del(struct callout *c, struct callout_cpu *cc)
717{
718
719        if ((c->c_iflags & CALLOUT_LOCAL_ALLOC) == 0)
720                return;
721        c->c_func = NULL;
722        SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
723}
724
725static void
726softclock_call_cc(struct callout *c, struct callout_cpu *cc,
727#ifdef CALLOUT_PROFILING
728    int *mpcalls, int *lockcalls, int *gcalls,
729#endif
730    int direct)
731{
732#ifndef __rtems__
733        struct rm_priotracker tracker;
734#endif /* __rtems__ */
735        void (*c_func)(void *);
736        void *c_arg;
737        struct lock_class *class;
738        struct lock_object *c_lock;
739        uintptr_t lock_status;
740        int c_iflags;
741#ifdef SMP
742        struct callout_cpu *new_cc;
743        void (*new_func)(void *);
744        void *new_arg;
745        int flags, new_cpu;
746        sbintime_t new_prec, new_time;
747#endif
748#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
749        sbintime_t sbt1, sbt2;
750        struct timespec ts2;
751        static sbintime_t maxdt = 2 * SBT_1MS;  /* 2 msec */
752        static timeout_t *lastfunc;
753#endif
754
755        KASSERT((c->c_iflags & CALLOUT_PENDING) == CALLOUT_PENDING,
756            ("softclock_call_cc: pend %p %x", c, c->c_iflags));
757        KASSERT((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE,
758            ("softclock_call_cc: act %p %x", c, c->c_flags));
759        class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
760        lock_status = 0;
761        if (c->c_flags & CALLOUT_SHAREDLOCK) {
762#ifndef __rtems__
763                if (class == &lock_class_rm)
764                        lock_status = (uintptr_t)&tracker;
765                else
766#endif /* __rtems__ */
767                        lock_status = 1;
768        }
769        c_lock = c->c_lock;
770        c_func = c->c_func;
771        c_arg = c->c_arg;
772        c_iflags = c->c_iflags;
773        if (c->c_iflags & CALLOUT_LOCAL_ALLOC)
774                c->c_iflags = CALLOUT_LOCAL_ALLOC;
775        else
776                c->c_iflags &= ~CALLOUT_PENDING;
777       
778        cc_exec_curr(cc, direct) = c;
779        cc_exec_cancel(cc, direct) = false;
780        cc_exec_drain(cc, direct) = NULL;
781        CC_UNLOCK(cc);
782        if (c_lock != NULL) {
783                class->lc_lock(c_lock, lock_status);
784                /*
785                 * The callout may have been cancelled
786                 * while we switched locks.
787                 */
788                if (cc_exec_cancel(cc, direct)) {
789                        class->lc_unlock(c_lock);
790                        goto skip;
791                }
792                /* The callout cannot be stopped now. */
793                cc_exec_cancel(cc, direct) = true;
794                if (c_lock == &Giant.lock_object) {
795#ifdef CALLOUT_PROFILING
796                        (*gcalls)++;
797#endif
798                        CTR3(KTR_CALLOUT, "callout giant %p func %p arg %p",
799                            c, c_func, c_arg);
800                } else {
801#ifdef CALLOUT_PROFILING
802                        (*lockcalls)++;
803#endif
804                        CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p",
805                            c, c_func, c_arg);
806                }
807        } else {
808#ifdef CALLOUT_PROFILING
809                (*mpcalls)++;
810#endif
811                CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
812                    c, c_func, c_arg);
813        }
814        KTR_STATE3(KTR_SCHED, "callout", cc->cc_ktr_event_name, "running",
815            "func:%p", c_func, "arg:%p", c_arg, "direct:%d", direct);
816#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
817        sbt1 = sbinuptime();
818#endif
819#ifndef __rtems__
820        THREAD_NO_SLEEPING();
821        SDT_PROBE1(callout_execute, , , callout__start, c);
822#endif /* __rtems__ */
823        c_func(c_arg);
824#ifndef __rtems__
825        SDT_PROBE1(callout_execute, , , callout__end, c);
826        THREAD_SLEEPING_OK();
827#endif /* __rtems__ */
828#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
829        sbt2 = sbinuptime();
830        sbt2 -= sbt1;
831        if (sbt2 > maxdt) {
832                if (lastfunc != c_func || sbt2 > maxdt * 2) {
833                        ts2 = sbttots(sbt2);
834                        printf(
835                "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
836                            c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
837                }
838                maxdt = sbt2;
839                lastfunc = c_func;
840        }
841#endif
842        KTR_STATE0(KTR_SCHED, "callout", cc->cc_ktr_event_name, "idle");
843        CTR1(KTR_CALLOUT, "callout %p finished", c);
844        if ((c_iflags & CALLOUT_RETURNUNLOCKED) == 0)
845                class->lc_unlock(c_lock);
846skip:
847        CC_LOCK(cc);
848        KASSERT(cc_exec_curr(cc, direct) == c, ("mishandled cc_curr"));
849        cc_exec_curr(cc, direct) = NULL;
850        if (cc_exec_drain(cc, direct)) {
851                void (*drain)(void *);
852               
853                drain = cc_exec_drain(cc, direct);
854                cc_exec_drain(cc, direct) = NULL;
855                CC_UNLOCK(cc);
856                drain(c_arg);
857                CC_LOCK(cc);
858        }
859        if (cc_exec_waiting(cc, direct)) {
860                /*
861                 * There is someone waiting for the
862                 * callout to complete.
863                 * If the callout was scheduled for
864                 * migration just cancel it.
865                 */
866                if (cc_cce_migrating(cc, direct)) {
867                        cc_cce_cleanup(cc, direct);
868
869                        /*
870                         * It should be asserted here that the callout is not
871                         * destroyed, but that is not easy.
872                         */
873                        c->c_iflags &= ~CALLOUT_DFRMIGRATION;
874                }
875                cc_exec_waiting(cc, direct) = false;
876                CC_UNLOCK(cc);
877                wakeup(&cc_exec_waiting(cc, direct));
878                CC_LOCK(cc);
879        } else if (cc_cce_migrating(cc, direct)) {
880                KASSERT((c_iflags & CALLOUT_LOCAL_ALLOC) == 0,
881                    ("Migrating legacy callout %p", c));
882#ifdef SMP
883                /*
884                 * If the callout was scheduled for
885                 * migration just perform it now.
886                 */
887                new_cpu = cc_migration_cpu(cc, direct);
888                new_time = cc_migration_time(cc, direct);
889                new_prec = cc_migration_prec(cc, direct);
890                new_func = cc_migration_func(cc, direct);
891                new_arg = cc_migration_arg(cc, direct);
892                cc_cce_cleanup(cc, direct);
893
894                /*
895                 * It should be asserted here that the callout is not
896                 * destroyed, but that is not easy.
897                 *
898                 * First, handle deferred callout stops.
899                 */
900                if (!callout_migrating(c)) {
901                        CTR3(KTR_CALLOUT,
902                             "deferred cancelled %p func %p arg %p",
903                             c, new_func, new_arg);
904                        callout_cc_del(c, cc);
905                        return;
906                }
907                c->c_iflags &= ~CALLOUT_DFRMIGRATION;
908
909                new_cc = callout_cpu_switch(c, cc, new_cpu);
910                flags = (direct) ? C_DIRECT_EXEC : 0;
911                callout_cc_add(c, new_cc, new_time, new_prec, new_func,
912                    new_arg, new_cpu, flags);
913                CC_UNLOCK(new_cc);
914                CC_LOCK(cc);
915#else
916                panic("migration should not happen");
917#endif
918        }
919        /*
920         * If the current callout is locally allocated (from
921         * timeout(9)) then put it on the freelist.
922         *
923         * Note: we need to check the cached copy of c_iflags because
924         * if it was not local, then it's not safe to deref the
925         * callout pointer.
926         */
927        KASSERT((c_iflags & CALLOUT_LOCAL_ALLOC) == 0 ||
928            c->c_iflags == CALLOUT_LOCAL_ALLOC,
929            ("corrupted callout"));
930        if (c_iflags & CALLOUT_LOCAL_ALLOC)
931                callout_cc_del(c, cc);
932}
933
934/*
935 * The callout mechanism is based on the work of Adam M. Costello and
936 * George Varghese, published in a technical report entitled "Redesigning
937 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
938 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
939 * used in this implementation was published by G. Varghese and T. Lauck in
940 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
941 * the Efficient Implementation of a Timer Facility" in the Proceedings of
942 * the 11th ACM Annual Symposium on Operating Systems Principles,
943 * Austin, Texas Nov 1987.
944 */
945
946#ifndef __rtems__
947/*
948 * Software (low priority) clock interrupt.
949 * Run periodic events from timeout queue.
950 */
951void
952softclock(void *arg)
953{
954        struct callout_cpu *cc;
955        struct callout *c;
956#ifdef CALLOUT_PROFILING
957        int depth = 0, gcalls = 0, lockcalls = 0, mpcalls = 0;
958#endif
959
960        cc = (struct callout_cpu *)arg;
961        CC_LOCK(cc);
962        while ((c = TAILQ_FIRST(&cc->cc_expireq)) != NULL) {
963                TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
964                softclock_call_cc(c, cc,
965#ifdef CALLOUT_PROFILING
966                    &mpcalls, &lockcalls, &gcalls,
967#endif
968                    0);
969#ifdef CALLOUT_PROFILING
970                ++depth;
971#endif
972        }
973#ifdef CALLOUT_PROFILING
974        avg_depth += (depth * 1000 - avg_depth) >> 8;
975        avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
976        avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
977        avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
978#endif
979        CC_UNLOCK(cc);
980}
981#endif /* __rtems__ */
982
983/*
984 * timeout --
985 *      Execute a function after a specified length of time.
986 *
987 * untimeout --
988 *      Cancel previous timeout function call.
989 *
990 * callout_handle_init --
991 *      Initialize a handle so that using it with untimeout is benign.
992 *
993 *      See AT&T BCI Driver Reference Manual for specification.  This
994 *      implementation differs from that one in that although an
995 *      identification value is returned from timeout, the original
996 *      arguments to timeout as well as the identifier are used to
997 *      identify entries for untimeout.
998 */
999struct callout_handle
1000timeout(timeout_t *ftn, void *arg, int to_ticks)
1001{
1002        struct callout_cpu *cc;
1003        struct callout *new;
1004        struct callout_handle handle;
1005
1006        cc = CC_CPU(timeout_cpu);
1007        CC_LOCK(cc);
1008        /* Fill in the next free callout structure. */
1009        new = SLIST_FIRST(&cc->cc_callfree);
1010        if (new == NULL)
1011                /* XXX Attempt to malloc first */
1012                panic("timeout table full");
1013        SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
1014        callout_reset(new, to_ticks, ftn, arg);
1015        handle.callout = new;
1016        CC_UNLOCK(cc);
1017
1018        return (handle);
1019}
1020
1021void
1022untimeout(timeout_t *ftn, void *arg, struct callout_handle handle)
1023{
1024        struct callout_cpu *cc;
1025
1026        /*
1027         * Check for a handle that was initialized
1028         * by callout_handle_init, but never used
1029         * for a real timeout.
1030         */
1031        if (handle.callout == NULL)
1032                return;
1033
1034        cc = callout_lock(handle.callout);
1035        if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
1036                callout_stop(handle.callout);
1037        CC_UNLOCK(cc);
1038}
1039
1040void
1041callout_handle_init(struct callout_handle *handle)
1042{
1043        handle->callout = NULL;
1044}
1045
1046void
1047callout_when(sbintime_t sbt, sbintime_t precision, int flags,
1048    sbintime_t *res, sbintime_t *prec_res)
1049{
1050        sbintime_t to_sbt, to_pr;
1051
1052        if ((flags & (C_ABSOLUTE | C_PRECALC)) != 0) {
1053                *res = sbt;
1054                *prec_res = precision;
1055                return;
1056        }
1057        if ((flags & C_HARDCLOCK) != 0 && sbt < tick_sbt)
1058                sbt = tick_sbt;
1059        if ((flags & C_HARDCLOCK) != 0 ||
1060#ifdef NO_EVENTTIMERS
1061            sbt >= sbt_timethreshold) {
1062                to_sbt = getsbinuptime();
1063
1064                /* Add safety belt for the case of hz > 1000. */
1065                to_sbt += tc_tick_sbt - tick_sbt;
1066#else
1067            sbt >= sbt_tickthreshold) {
1068                /*
1069                 * Obtain the time of the last hardclock() call on
1070                 * this CPU directly from kern_clocksource.c.
1071                 * This value is per-CPU, but it is equal for all
1072                 * active ones.
1073                 */
1074#ifdef __LP64__
1075                to_sbt = DPCPU_GET(hardclocktime);
1076#else
1077                spinlock_enter();
1078                to_sbt = DPCPU_GET(hardclocktime);
1079                spinlock_exit();
1080#endif
1081#endif
1082                if (cold && to_sbt == 0)
1083                        to_sbt = sbinuptime();
1084                if ((flags & C_HARDCLOCK) == 0)
1085                        to_sbt += tick_sbt;
1086        } else
1087                to_sbt = sbinuptime();
1088        if (SBT_MAX - to_sbt < sbt)
1089                to_sbt = SBT_MAX;
1090        else
1091                to_sbt += sbt;
1092        *res = to_sbt;
1093        to_pr = ((C_PRELGET(flags) < 0) ? sbt >> tc_precexp :
1094            sbt >> C_PRELGET(flags));
1095        *prec_res = to_pr > precision ? to_pr : precision;
1096}
1097
1098/*
1099 * New interface; clients allocate their own callout structures.
1100 *
1101 * callout_reset() - establish or change a timeout
1102 * callout_stop() - disestablish a timeout
1103 * callout_init() - initialize a callout structure so that it can
1104 *      safely be passed to callout_reset() and callout_stop()
1105 *
1106 * <sys/callout.h> defines three convenience macros:
1107 *
1108 * callout_active() - returns truth if callout has not been stopped,
1109 *      drained, or deactivated since the last time the callout was
1110 *      reset.
1111 * callout_pending() - returns truth if callout is still waiting for timeout
1112 * callout_deactivate() - marks the callout as having been serviced
1113 */
1114int
1115callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t prec,
1116    void (*ftn)(void *), void *arg, int cpu, int flags)
1117{
1118        sbintime_t to_sbt, precision;
1119        struct callout_cpu *cc;
1120#ifndef __rtems__
1121        int cancelled, direct;
1122#else /* __rtems__ */
1123        int cancelled;
1124#endif /* __rtems__ */
1125        int ignore_cpu=0;
1126
1127        cancelled = 0;
1128        if (cpu == -1) {
1129                ignore_cpu = 1;
1130        } else if ((cpu >= MAXCPU) ||
1131                   ((CC_CPU(cpu))->cc_inited == 0)) {
1132                /* Invalid CPU spec */
1133                panic("Invalid CPU in callout %d", cpu);
1134        }
1135        callout_when(sbt, prec, flags, &to_sbt, &precision);
1136
1137#ifndef __rtems__
1138        /*
1139         * This flag used to be added by callout_cc_add, but the
1140         * first time you call this we could end up with the
1141         * wrong direct flag if we don't do it before we add.
1142         */
1143        if (flags & C_DIRECT_EXEC) {
1144                direct = 1;
1145        } else {
1146                direct = 0;
1147        }
1148        KASSERT(!direct || c->c_lock == NULL,
1149            ("%s: direct callout %p has lock", __func__, c));
1150#endif /* __rtems__ */
1151        cc = callout_lock(c);
1152        /*
1153         * Don't allow migration of pre-allocated callouts lest they
1154         * become unbalanced; also handle the case where the user does
1155         * not care which CPU is used.
1156         */
1157        if ((c->c_iflags & CALLOUT_LOCAL_ALLOC) ||
1158            ignore_cpu) {
1159                cpu = c->c_cpu;
1160        }
1161
1162        if (cc_exec_curr(cc, direct) == c) {
1163                /*
1164                 * We're being asked to reschedule a callout which is
1165                 * currently in progress.  If there is a lock then we
1166                 * can cancel the callout if it has not really started.
1167                 */
1168                if (c->c_lock != NULL && !cc_exec_cancel(cc, direct))
1169                        cancelled = cc_exec_cancel(cc, direct) = true;
1170                if (cc_exec_waiting(cc, direct) || cc_exec_drain(cc, direct)) {
1171                        /*
1172                         * Someone has called callout_drain to kill this
1173                         * callout.  Don't reschedule.
1174                         */
1175                        CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
1176                            cancelled ? "cancelled" : "failed to cancel",
1177                            c, c->c_func, c->c_arg);
1178                        CC_UNLOCK(cc);
1179                        return (cancelled);
1180                }
1181#ifdef SMP
1182                if (callout_migrating(c)) {
1183                        /*
1184                         * This only occurs when a second callout_reset_sbt_on
1185                         * is made after a previous one moved it into
1186                         * deferred migration (below). Note we do *not* change
1187                         * the prev_cpu even though the previous target may
1188                         * be different.
1189                         */
1190                        cc_migration_cpu(cc, direct) = cpu;
1191                        cc_migration_time(cc, direct) = to_sbt;
1192                        cc_migration_prec(cc, direct) = precision;
1193                        cc_migration_func(cc, direct) = ftn;
1194                        cc_migration_arg(cc, direct) = arg;
1195                        cancelled = 1;
1196                        CC_UNLOCK(cc);
1197                        return (cancelled);
1198                }
1199#endif
1200        }
1201        if (c->c_iflags & CALLOUT_PENDING) {
1202#ifndef __rtems__
1203                if ((c->c_iflags & CALLOUT_PROCESSED) == 0) {
1204#endif /* __rtems__ */
1205                        if (cc_exec_next(cc) == c)
1206                                cc_exec_next(cc) = LIST_NEXT(c, c_links.le);
1207                        LIST_REMOVE(c, c_links.le);
1208#ifndef __rtems__
1209                } else {
1210                        TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
1211                }
1212#endif /* __rtems__ */
1213                cancelled = 1;
1214                c->c_iflags &= ~ CALLOUT_PENDING;
1215                c->c_flags &= ~ CALLOUT_ACTIVE;
1216        }
1217
1218#ifdef SMP
1219        /*
1220         * If the callout must migrate try to perform it immediately.
1221         * If the callout is currently running, just defer the migration
1222         * to a more appropriate moment.
1223         */
1224        if (c->c_cpu != cpu) {
1225                if (cc_exec_curr(cc, direct) == c) {
1226                        /*
1227                         * Pending will have been removed since we are
1228                         * actually executing the callout on another
1229                         * CPU. That callout should be waiting on the
1230                         * lock the caller holds. If we set both
1231                         * active and pending flags after we return and the
1232                         * lock on the executing callout proceeds, it
1233                         * will then see pending is true and return.
1234                         * At the return from the actual callout execution
1235                         * the migration will occur in softclock_call_cc
1236                         * and this new callout will be placed on the
1237                         * new CPU via a call to callout_cpu_switch() which
1238                         * will get the lock on the right CPU followed
1239                         * by a call to callout_cc_add(), which will add it there.
1240                         * (see above in softclock_call_cc()).
1241                         */
1242                        cc_migration_cpu(cc, direct) = cpu;
1243                        cc_migration_time(cc, direct) = to_sbt;
1244                        cc_migration_prec(cc, direct) = precision;
1245                        cc_migration_func(cc, direct) = ftn;
1246                        cc_migration_arg(cc, direct) = arg;
1247                        c->c_iflags |= (CALLOUT_DFRMIGRATION | CALLOUT_PENDING);
1248                        c->c_flags |= CALLOUT_ACTIVE;
1249                        CTR6(KTR_CALLOUT,
1250                    "migration of %p func %p arg %p in %d.%08x to %u deferred",
1251                            c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
1252                            (u_int)(to_sbt & 0xffffffff), cpu);
1253                        CC_UNLOCK(cc);
1254                        return (cancelled);
1255                }
1256                cc = callout_cpu_switch(c, cc, cpu);
1257        }
1258#endif
1259
1260        callout_cc_add(c, cc, to_sbt, precision, ftn, arg, cpu, flags);
1261        CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x",
1262            cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
1263            (u_int)(to_sbt & 0xffffffff));
1264        CC_UNLOCK(cc);
1265
1266        return (cancelled);
1267}
1268
1269/*
1270 * Common idioms that can be optimized in the future.
1271 */
1272int
1273callout_schedule_on(struct callout *c, int to_ticks, int cpu)
1274{
1275        return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu);
1276}
1277
1278int
1279callout_schedule(struct callout *c, int to_ticks)
1280{
1281        return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
1282}
1283
1284int
1285_callout_stop_safe(struct callout *c, int flags, void (*drain)(void *))
1286{
1287        struct callout_cpu *cc, *old_cc;
1288        struct lock_class *class;
1289#ifndef __rtems__
1290        int direct, sq_locked, use_lock;
1291#else /* __rtems__ */
1292        int sq_locked, use_lock;
1293#endif /* __rtems__ */
1294        int cancelled, not_on_a_list;
1295#ifdef __rtems__
1296        (void)old_cc;
1297        (void)sq_locked;
1298#endif /* __rtems__ */
1299
1300        if ((flags & CS_DRAIN) != 0)
1301                WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, c->c_lock,
1302                    "calling %s", __func__);
1303
1304        /*
1305         * Some old subsystems don't hold Giant while running a callout_stop(),
1306         * so just discard this check for the moment.
1307         */
1308        if ((flags & CS_DRAIN) == 0 && c->c_lock != NULL) {
1309                if (c->c_lock == &Giant.lock_object)
1310                        use_lock = mtx_owned(&Giant);
1311                else {
1312                        use_lock = 1;
1313                        class = LOCK_CLASS(c->c_lock);
1314                        class->lc_assert(c->c_lock, LA_XLOCKED);
1315                }
1316        } else
1317                use_lock = 0;
1318#ifndef __rtems__
1319        if (c->c_iflags & CALLOUT_DIRECT) {
1320                direct = 1;
1321        } else {
1322                direct = 0;
1323        }
1324
1325        sq_locked = 0;
1326        old_cc = NULL;
1327again:
1328#endif /* __rtems__ */
1329        cc = callout_lock(c);
1330
1331        if ((c->c_iflags & (CALLOUT_DFRMIGRATION | CALLOUT_PENDING)) ==
1332            (CALLOUT_DFRMIGRATION | CALLOUT_PENDING) &&
1333            ((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE)) {
1334                /*
1335                 * Special case where this slipped in while we
1336                 * were migrating *as* the callout is about to
1337                 * execute. The caller probably holds the lock
1338                 * the callout wants.
1339                 *
1340                 * Get rid of the migration first. Then set
1341                 * the flag that tells this code *not* to
1342                 * try to remove it from any lists (it's not
1343                 * on one yet). When the callout wheel runs,
1344                 * it will ignore this callout.
1345                 */
1346                c->c_iflags &= ~CALLOUT_PENDING;
1347                c->c_flags &= ~CALLOUT_ACTIVE;
1348                not_on_a_list = 1;
1349        } else {
1350                not_on_a_list = 0;
1351        }
1352
1353#ifndef __rtems__
1354        /*
1355         * If the callout was migrating while the callout cpu lock was
1356         * dropped,  just drop the sleepqueue lock and check the states
1357         * again.
1358         */
1359        if (sq_locked != 0 && cc != old_cc) {
1360#ifdef SMP
1361                CC_UNLOCK(cc);
1362                sleepq_release(&cc_exec_waiting(old_cc, direct));
1363                sq_locked = 0;
1364                old_cc = NULL;
1365                goto again;
1366#else
1367                panic("migration should not happen");
1368#endif
1369        }
1370#endif /* __rtems__ */
1371
1372        /*
1373         * If the callout is running, try to stop it or drain it.
1374         */
1375        if (cc_exec_curr(cc, direct) == c) {
1376                /*
1377                 * Whether we succeed in stopping it or not, we must clear the
1378                 * active flag - this is what API users expect.  If we're
1379                 * draining and the callout is currently executing, first wait
1380                 * until it finishes.
1381                 */
1382                if ((flags & CS_DRAIN) == 0)
1383                        c->c_flags &= ~CALLOUT_ACTIVE;
1384
1385                if ((flags & CS_DRAIN) != 0) {
1386                        /*
1387                         * The current callout is running (or just
1388                         * about to run) and blocking is allowed, so
1389                         * just wait for the current invocation to
1390                         * finish.
1391                         */
1392                        while (cc_exec_curr(cc, direct) == c) {
1393#ifndef __rtems__
1394
1395                                /*
1396                                 * Use direct calls to sleepqueue interface
1397                                 * instead of cv/msleep in order to avoid
1398                                 * a LOR between cc_lock and sleepqueue
1399                                 * chain spinlocks.  This piece of code
1400                                 * emulates a msleep_spin() call actually.
1401                                 *
1402                                 * If we already have the sleepqueue chain
1403                                 * locked, then we can safely block.  If we
1404                                 * don't already have it locked, however,
1405                                 * we have to drop the cc_lock to lock
1406                                 * it.  This opens several races, so we
1407                                 * restart at the beginning once we have
1408                                 * both locks.  If nothing has changed, then
1409                                 * we will end up back here with sq_locked
1410                                 * set.
1411                                 */
1412                                if (!sq_locked) {
1413                                        CC_UNLOCK(cc);
1414                                        sleepq_lock(
1415                                            &cc_exec_waiting(cc, direct));
1416                                        sq_locked = 1;
1417                                        old_cc = cc;
1418                                        goto again;
1419                                }
1420
1421                                /*
1422                                 * Migration could be cancelled here, but
1423                                 * as long as it is still not sure when it
1424                                 * will be packed up, just let softclock()
1425                                 * take care of it.
1426                                 */
1427                                cc_exec_waiting(cc, direct) = true;
1428                                DROP_GIANT();
1429                                CC_UNLOCK(cc);
1430                                sleepq_add(
1431                                    &cc_exec_waiting(cc, direct),
1432                                    &cc->cc_lock.lock_object, "codrain",
1433                                    SLEEPQ_SLEEP, 0);
1434                                sleepq_wait(
1435                                    &cc_exec_waiting(cc, direct),
1436                                    0);
1437                                sq_locked = 0;
1438                                old_cc = NULL;
1439
1440                                /* Reacquire locks previously released. */
1441                                PICKUP_GIANT();
1442                                CC_LOCK(cc);
1443#else /* __rtems__ */
1444                                /*
1445                                 * On RTEMS the LOR problem above does not
1446                                 * exist since here we do not use
1447                                 * sleepq_set_timeout() and instead use the
1448                                 * RTEMS watchdog.
1449                                 */
1450                                cc_exec_waiting(cc, direct) = true;
1451                                msleep_spin(&cc_exec_waiting(cc, direct),
1452                                    &cc->cc_lock, "codrain", 0);
1453#endif /* __rtems__ */
1454                        }
1455                        c->c_flags &= ~CALLOUT_ACTIVE;
1456                } else if (use_lock &&
1457                           !cc_exec_cancel(cc, direct) && (drain == NULL)) {
1458
1459                        /*
1460                         * The current callout is waiting for its
1461                         * lock which we hold.  Cancel the callout
1462                         * and return.  After our caller drops the
1463                         * lock, the callout will be skipped in
1464                         * softclock(). This *only* works with a
1465                         * callout_stop() *not* callout_drain() or
1466                         * callout_async_drain().
1467                         */
1468                        cc_exec_cancel(cc, direct) = true;
1469                        CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
1470                            c, c->c_func, c->c_arg);
1471                        KASSERT(!cc_cce_migrating(cc, direct),
1472                            ("callout wrongly scheduled for migration"));
1473                        if (callout_migrating(c)) {
1474                                c->c_iflags &= ~CALLOUT_DFRMIGRATION;
1475#ifdef SMP
1476                                cc_migration_cpu(cc, direct) = CPUBLOCK;
1477                                cc_migration_time(cc, direct) = 0;
1478                                cc_migration_prec(cc, direct) = 0;
1479                                cc_migration_func(cc, direct) = NULL;
1480                                cc_migration_arg(cc, direct) = NULL;
1481#endif
1482                        }
1483                        CC_UNLOCK(cc);
1484#ifndef __rtems__
1485                        KASSERT(!sq_locked, ("sleepqueue chain locked"));
1486#endif /* __rtems__ */
1487                        return (1);
1488                } else if (callout_migrating(c)) {
1489                        /*
1490                         * The callout is currently being serviced
1491                         * and the "next" callout is scheduled at
1492                         * its completion with a migration. We remove
1493                         * the migration flag so it *won't* get rescheduled,
1494                         * but we can't stop the one that's running so
1495                         * we return 0.
1496                         */
1497                        c->c_iflags &= ~CALLOUT_DFRMIGRATION;
1498#ifdef SMP
1499                        /*
1500                         * We can't call cc_cce_cleanup() here, since it
1501                         * would clear .ce_curr while the callout is
1502                         * still running.  Clearing the migration state
1503                         * below is what prevents a reschedule of the
1504                         * callout when the execution completes.
1505                         */
1506                        cc_migration_cpu(cc, direct) = CPUBLOCK;
1507                        cc_migration_time(cc, direct) = 0;
1508                        cc_migration_prec(cc, direct) = 0;
1509                        cc_migration_func(cc, direct) = NULL;
1510                        cc_migration_arg(cc, direct) = NULL;
1511#endif
1512                        CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
1513                            c, c->c_func, c->c_arg);
1514                        if (drain) {
1515                                cc_exec_drain(cc, direct) = drain;
1516                        }
1517                        CC_UNLOCK(cc);
1518                        return ((flags & CS_EXECUTING) != 0);
1519                }
1520                CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
1521                    c, c->c_func, c->c_arg);
1522                if (drain) {
1523                        cc_exec_drain(cc, direct) = drain;
1524                }
1525#ifndef __rtems__
1526                KASSERT(!sq_locked, ("sleepqueue chain still locked"));
1527#endif /* __rtems__ */
1528                cancelled = ((flags & CS_EXECUTING) != 0);
1529        } else
1530                cancelled = 1;
1531
1532#ifndef __rtems__
1533        if (sq_locked)
1534                sleepq_release(&cc_exec_waiting(cc, direct));
1535#endif /* __rtems__ */
1536
1537        if ((c->c_iflags & CALLOUT_PENDING) == 0) {
1538                CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
1539                    c, c->c_func, c->c_arg);
1540                /*
1541                 * Return a negative value for a callout that is neither
1542                 * scheduled nor executing.
1543                 */
1544                if (cc_exec_curr(cc, direct) != c)
1545                        cancelled = -1;
1546                CC_UNLOCK(cc);
1547                return (cancelled);
1548        }
1549
1550        c->c_iflags &= ~CALLOUT_PENDING;
1551        c->c_flags &= ~CALLOUT_ACTIVE;
1552
1553        CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
1554            c, c->c_func, c->c_arg);
1555        if (not_on_a_list == 0) {
1556#ifndef __rtems__
1557                if ((c->c_iflags & CALLOUT_PROCESSED) == 0) {
1558#endif /* __rtems__ */
1559                        if (cc_exec_next(cc) == c)
1560                                cc_exec_next(cc) = LIST_NEXT(c, c_links.le);
1561                        LIST_REMOVE(c, c_links.le);
1562#ifndef __rtems__
1563                } else {
1564                        TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
1565                }
1566#endif /* __rtems__ */
1567        }
1568        callout_cc_del(c, cc);
1569        CC_UNLOCK(cc);
1570        return (cancelled);
1571}
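
/*
 * Taken together, the paths above give the usual callout_stop() and
 * callout_drain() return convention: a positive value when the callout
 * was successfully cancelled, 0 when it could not be stopped (typically
 * because it is currently executing), and -1 when it was neither
 * scheduled nor executing.
 */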
1572
1573void
1574callout_init(struct callout *c, int mpsafe)
1575{
1576        bzero(c, sizeof *c);
1577        if (mpsafe) {
1578                c->c_lock = NULL;
1579                c->c_iflags = CALLOUT_RETURNUNLOCKED;
1580        } else {
1581                c->c_lock = &Giant.lock_object;
1582                c->c_iflags = 0;
1583        }
1584        c->c_cpu = timeout_cpu;
1585}
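
/*
 * A minimal usage sketch for an MPSAFE callout; the handler and argument
 * names are placeholders and do not appear elsewhere in this file:
 *
 *	static struct callout my_co;
 *
 *	callout_init(&my_co, 1);
 *	callout_reset(&my_co, hz, my_handler, my_arg);	(fire in ~1 second)
 *	...
 *	callout_drain(&my_co);	(cancel and wait for any running handler)
 */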
1586
1587void
1588_callout_init_lock(struct callout *c, struct lock_object *lock, int flags)
1589{
1590        bzero(c, sizeof *c);
1591        c->c_lock = lock;
1592        KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
1593            ("callout_init_lock: bad flags %d", flags));
1594        KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
1595            ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
1596        KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
1597            (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
1598            __func__));
1599        c->c_iflags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
1600        c->c_cpu = timeout_cpu;
1601}
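
/*
 * This is normally reached through callout_init_mtx() and the related
 * wrapper macros in <sys/callout.h>.  The assertions above reject spin
 * locks and sleepable locks, so only a regular (non-spin, non-sleepable)
 * lock may back a callout.
 */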
1602
1603#ifdef APM_FIXUP_CALLTODO
1604/*
1605 * Adjust the kernel calltodo timeout list.  This routine is used after
1606 * an APM resume to recalculate the calltodo timer list values with the
1607 * number of hz's we have been sleeping.  The next hardclock() will detect
1608 * that there are fired timers and run softclock() to execute them.
1609 *
1610 * Please note, I have not done an exhaustive analysis of what code this
1611 * might break.  I am motivated to have my select()'s and alarm()'s that
1612 * expired during suspend fire upon resume so that the applications
1613 * which set the timer can do the maintenance the timer was for as close
1614 * as possible to the originally intended time.  Testing this code for a
1615 * week showed that resuming from a suspend resulted in 22 to 25 timers
1616 * firing, which seemed independent of whether the suspend was 2 hours or
1617 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
1618 */
1619void
1620adjust_timeout_calltodo(struct timeval *time_change)
1621{
1622        register struct callout *p;
1623        unsigned long delta_ticks;
1624
1625        /*
1626         * How many ticks were we asleep?
1627         * (stolen from tvtohz()).
1628         */
1629
1630        /* Don't do anything */
1631        if (time_change->tv_sec < 0)
1632                return;
1633        else if (time_change->tv_sec <= LONG_MAX / 1000000)
1634                delta_ticks = howmany(time_change->tv_sec * 1000000 +
1635                    time_change->tv_usec, tick) + 1;
1636        else if (time_change->tv_sec <= LONG_MAX / hz)
1637                delta_ticks = time_change->tv_sec * hz +
1638                    howmany(time_change->tv_usec, tick) + 1;
1639        else
1640                delta_ticks = LONG_MAX;
1641
1642        if (delta_ticks > INT_MAX)
1643                delta_ticks = INT_MAX;
1644
1645        /*
1646         * Now rip through the timer calltodo list looking for timers
1647         * to expire.
1648         */
1649
1650        /* don't collide with softclock() */
1651        CC_LOCK(cc);
1652        for (p = calltodo.c_next; p != NULL; p = p->c_next) {
1653                p->c_time -= delta_ticks;
1654
1655                /* Break if the timer had more time on it than delta_ticks */
1656                if (p->c_time > 0)
1657                        break;
1658
1659                /* take back the ticks the timer didn't use (p->c_time <= 0) */
1660                delta_ticks = -p->c_time;
1661        }
1662        CC_UNLOCK(cc);
1663
1664        return;
1665}
1666#endif /* APM_FIXUP_CALLTODO */
1667
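/*
 * Return the index of the most significant set bit of sbt after scaling
 * it by roughly 1.5; the sysctl handler below uses this to sort callout
 * times and precisions into power-of-two histogram buckets.
 */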
1668static int
1669flssbt(sbintime_t sbt)
1670{
1671
1672        sbt += (uint64_t)sbt >> 1;
1673        if (sizeof(long) >= sizeof(sbintime_t))
1674                return (flsl(sbt));
1675        if (sbt >= SBT_1S)
1676                return (flsl(((uint64_t)sbt) >> 32) + 32);
1677        return (flsl(sbt));
1678}
1679
1680/*
1681 * Dump an immediate statistics snapshot of the scheduled callouts.
1682 */
1683static int
1684sysctl_kern_callout_stat(SYSCTL_HANDLER_ARGS)
1685{
1686        struct callout *tmp;
1687        struct callout_cpu *cc;
1688        struct callout_list *sc;
1689        sbintime_t maxpr, maxt, medpr, medt, now, spr, st, t;
1690        int ct[64], cpr[64], ccpbk[32];
1691        int error, val, i, count, tcum, pcum, maxc, c, medc;
1692#ifdef SMP
1693        int cpu;
1694#endif
1695
1696        val = 0;
1697        error = sysctl_handle_int(oidp, &val, 0, req);
1698        if (error != 0 || req->newptr == NULL)
1699                return (error);
1700        count = maxc = 0;
1701        st = spr = maxt = maxpr = 0;
1702        bzero(ccpbk, sizeof(ccpbk));
1703        bzero(ct, sizeof(ct));
1704        bzero(cpr, sizeof(cpr));
1705        now = sbinuptime();
1706#ifdef SMP
1707        CPU_FOREACH(cpu) {
1708                cc = CC_CPU(cpu);
1709#else
1710                cc = CC_CPU(timeout_cpu);
1711#endif
1712                CC_LOCK(cc);
1713                for (i = 0; i < callwheelsize; i++) {
1714                        sc = &cc->cc_callwheel[i];
1715                        c = 0;
1716                        LIST_FOREACH(tmp, sc, c_links.le) {
1717                                c++;
1718                                t = tmp->c_time - now;
1719                                if (t < 0)
1720                                        t = 0;
1721                                st += t / SBT_1US;
1722                                spr += tmp->c_precision / SBT_1US;
1723                                if (t > maxt)
1724                                        maxt = t;
1725                                if (tmp->c_precision > maxpr)
1726                                        maxpr = tmp->c_precision;
1727                                ct[flssbt(t)]++;
1728                                cpr[flssbt(tmp->c_precision)]++;
1729                        }
1730                        if (c > maxc)
1731                                maxc = c;
1732                        ccpbk[fls(c + c / 2)]++;
1733                        count += c;
1734                }
1735                CC_UNLOCK(cc);
1736#ifdef SMP
1737        }
1738#endif
1739
1740        for (i = 0, tcum = 0; i < 64 && tcum < count / 2; i++)
1741                tcum += ct[i];
1742        medt = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
1743        for (i = 0, pcum = 0; i < 64 && pcum < count / 2; i++)
1744                pcum += cpr[i];
1745        medpr = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
1746        for (i = 0, c = 0; i < 32 && c < count / 2; i++)
1747                c += ccpbk[i];
1748        medc = (i >= 2) ? (1 << (i - 2)) : 0;
1749
1750        printf("Scheduled callouts statistic snapshot:\n");
1751        printf("  Callouts: %6d  Buckets: %6d*%-3d  Bucket size: 0.%06ds\n",
1752            count, callwheelsize, mp_ncpus, 1000000 >> CC_HASH_SHIFT);
1753        printf("  C/Bk: med %5d         avg %6d.%06jd  max %6d\n",
1754            medc,
1755            count / callwheelsize / mp_ncpus,
1756            (uint64_t)count * 1000000 / callwheelsize / mp_ncpus % 1000000,
1757            maxc);
1758        printf("  Time: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
1759            medt / SBT_1S, (medt & 0xffffffff) * 1000000 >> 32,
1760            (st / count) / 1000000, (st / count) % 1000000,
1761            maxt / SBT_1S, (maxt & 0xffffffff) * 1000000 >> 32);
1762        printf("  Prec: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
1763            medpr / SBT_1S, (medpr & 0xffffffff) * 1000000 >> 32,
1764            (spr / count) / 1000000, (spr / count) % 1000000,
1765            maxpr / SBT_1S, (maxpr & 0xffffffff) * 1000000 >> 32);
1766        printf("  Distribution:       \tbuckets\t   time\t   tcum\t"
1767            "   prec\t   pcum\n");
1768        for (i = 0, tcum = pcum = 0; i < 64; i++) {
1769                if (ct[i] == 0 && cpr[i] == 0)
1770                        continue;
1771                t = (i != 0) ? (((sbintime_t)1) << (i - 1)) : 0;
1772                tcum += ct[i];
1773                pcum += cpr[i];
1774                printf("  %10jd.%06jds\t 2**%d\t%7d\t%7d\t%7d\t%7d\n",
1775                    t / SBT_1S, (t & 0xffffffff) * 1000000 >> 32,
1776                    i - 1 - (32 - CC_HASH_SHIFT),
1777                    ct[i], tcum, cpr[i], pcum);
1778        }
1779        return (error);
1780}
1781SYSCTL_PROC(_kern, OID_AUTO, callout_stat,
1782    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
1783    0, 0, sysctl_kern_callout_stat, "I",
1784    "Dump immediate statistic snapshot of the scheduled callouts");
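
/*
 * The snapshot is printed only when the node is written to (for example
 * "sysctl kern.callout_stat=1" on a stock FreeBSD system); a plain read
 * returns without dumping anything.
 */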
1785
1786#ifdef DDB
1787static void
1788_show_callout(struct callout *c)
1789{
1790
1791        db_printf("callout %p\n", c);
1792#define C_DB_PRINTF(f, e)       db_printf("   %s = " f "\n", #e, c->e);
1793        db_printf("   &c_links = %p\n", &(c->c_links));
1794        C_DB_PRINTF("%" PRId64, c_time);
1795        C_DB_PRINTF("%" PRId64, c_precision);
1796        C_DB_PRINTF("%p",       c_arg);
1797        C_DB_PRINTF("%p",       c_func);
1798        C_DB_PRINTF("%p",       c_lock);
1799        C_DB_PRINTF("%#x",      c_flags);
1800        C_DB_PRINTF("%#x",      c_iflags);
1801        C_DB_PRINTF("%d",       c_cpu);
1802#undef  C_DB_PRINTF
1803}
1804
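/*
 * DDB command: "show callout <struct callout *>" prints the fields of the
 * given callout via _show_callout() above.
 */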
1805DB_SHOW_COMMAND(callout, db_show_callout)
1806{
1807
1808        if (!have_addr) {
1809                db_printf("usage: show callout <struct callout *>\n");
1810                return;
1811        }
1812
1813        _show_callout((struct callout *)addr);
1814}
1815#endif /* DDB */