#include <machine/rtems-bsd-kernel-space.h>

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <rtems/bsd/local/opt_callout_profiling.h>
#include <rtems/bsd/local/opt_ddb.h>
#if defined(__arm__) || defined(__rtems__)
#include <rtems/bsd/local/opt_timer.h>
#endif
#include <rtems/bsd/local/opt_rss.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/file.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>
#include <sys/smp.h>

#ifdef DDB
#include <ddb/ddb.h>
#include <machine/_inttypes.h>
#endif

#ifdef SMP
#include <machine/cpu.h>
#endif

#ifndef NO_EVENTTIMERS
DPCPU_DECLARE(sbintime_t, hardclocktime);
#endif

SDT_PROVIDER_DEFINE(callout_execute);
SDT_PROBE_DEFINE1(callout_execute, , , callout__start, "struct callout *");
SDT_PROBE_DEFINE1(callout_execute, , , callout__end, "struct callout *");

#ifdef CALLOUT_PROFILING
static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_lockcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
    "Average number of lock callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
static int avg_depth_dir;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth_dir, CTLFLAG_RD, &avg_depth_dir, 0,
    "Average number of direct callouts examined per callout_process call. "
    "Units = 1/1000");
static int avg_lockcalls_dir;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls_dir, CTLFLAG_RD,
    &avg_lockcalls_dir, 0, "Average number of lock direct callouts made per "
    "callout_process call. Units = 1/1000");
static int avg_mpcalls_dir;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls_dir, CTLFLAG_RD, &avg_mpcalls_dir,
    0, "Average number of MP direct callouts made per callout_process call. "
    "Units = 1/1000");
#endif

#ifndef __rtems__
static int ncallout;
SYSCTL_INT(_kern, OID_AUTO, ncallout, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &ncallout, 0,
    "Number of entries in callwheel and size of timeout() preallocation");
#else /* __rtems__ */
#define	ncallout 16
#endif /* __rtems__ */

#ifdef RSS
static int pin_default_swi = 1;
static int pin_pcpu_swi = 1;
#else
static int pin_default_swi = 0;
static int pin_pcpu_swi = 0;
#endif

SYSCTL_INT(_kern, OID_AUTO, pin_default_swi, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pin_default_swi,
    0, "Pin the default (non-per-cpu) swi (shared with PCPU 0 swi)");
SYSCTL_INT(_kern, OID_AUTO, pin_pcpu_swi, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pin_pcpu_swi,
    0, "Pin the per-CPU swis (except PCPU 0, which is also default)");

/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */
u_int callwheelsize, callwheelmask;

/*
 * The callout cpu exec entities represent the information necessary to
 * describe the state of callouts currently running on the CPU and the
 * information required for migrating callouts to a new callout cpu.  In
 * particular, the first entry of the array cc_exec_entity holds the
 * information for callouts running in SWI thread context, while the
 * second one holds the information for callouts running directly from
 * hardware interrupt context.  The cached information is very important
 * for deferring migration when the migrating callout is already running.
 */
struct cc_exec {
	struct callout		*cc_curr;
	void			(*cc_drain)(void *);
#ifdef SMP
	void			(*ce_migration_func)(void *);
	void			*ce_migration_arg;
	int			ce_migration_cpu;
	sbintime_t		ce_migration_time;
	sbintime_t		ce_migration_prec;
#endif
	bool			cc_cancel;
	bool			cc_waiting;
};

/*
 * There is one struct callout_cpu per cpu, holding all relevant
 * state for the callout processing thread on the individual CPU.
 */
struct callout_cpu {
	struct mtx_padalign	cc_lock;
	struct cc_exec		cc_exec_entity[2];
	struct callout		*cc_next;
	struct callout		*cc_callout;
	struct callout_list	*cc_callwheel;
#ifndef __rtems__
	struct callout_tailq	cc_expireq;
#endif /* __rtems__ */
	struct callout_slist	cc_callfree;
	sbintime_t		cc_firstevent;
	sbintime_t		cc_lastscan;
	void			*cc_cookie;
	u_int			cc_bucket;
	u_int			cc_inited;
	char			cc_ktr_event_name[20];
};

#define	callout_migrating(c)	((c)->c_iflags & CALLOUT_DFRMIGRATION)

#define	cc_exec_curr(cc, dir)		cc->cc_exec_entity[dir].cc_curr
#define	cc_exec_drain(cc, dir)		cc->cc_exec_entity[dir].cc_drain
#define	cc_exec_next(cc)		cc->cc_next
#define	cc_exec_cancel(cc, dir)		cc->cc_exec_entity[dir].cc_cancel
#define	cc_exec_waiting(cc, dir)	cc->cc_exec_entity[dir].cc_waiting
#ifdef SMP
#define	cc_migration_func(cc, dir)	cc->cc_exec_entity[dir].ce_migration_func
#define	cc_migration_arg(cc, dir)	cc->cc_exec_entity[dir].ce_migration_arg
#define	cc_migration_cpu(cc, dir)	cc->cc_exec_entity[dir].ce_migration_cpu
#define	cc_migration_time(cc, dir)	cc->cc_exec_entity[dir].ce_migration_time
#define	cc_migration_prec(cc, dir)	cc->cc_exec_entity[dir].ce_migration_prec

struct callout_cpu cc_cpu[MAXCPU];
#define	CPUBLOCK	MAXCPU
#define	CC_CPU(cpu)	(&cc_cpu[(cpu)])
#define	CC_SELF()	CC_CPU(PCPU_GET(cpuid))
#else
struct callout_cpu cc_cpu;
#define	CC_CPU(cpu)	&cc_cpu
#define	CC_SELF()	&cc_cpu
#endif
#define	CC_LOCK(cc)	mtx_lock_spin(&(cc)->cc_lock)
#define	CC_UNLOCK(cc)	mtx_unlock_spin(&(cc)->cc_lock)
#define	CC_LOCK_ASSERT(cc)	mtx_assert(&(cc)->cc_lock, MA_OWNED)

static int timeout_cpu;

static void	callout_cpu_init(struct callout_cpu *cc, int cpu);
static void	softclock_call_cc(struct callout *c, struct callout_cpu *cc,
#ifdef CALLOUT_PROFILING
		    int *mpcalls, int *lockcalls, int *gcalls,
#endif
		    int direct);

static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");

/**
 * Locked by cc_lock:
 *   cc_curr         - If a callout is in progress, it is cc_curr.
 *                     If cc_curr is non-NULL, threads waiting in
 *                     callout_drain() will be woken up as soon as the
 *                     relevant callout completes.
 *   cc_cancel       - Changing to 1 with both callout_lock and cc_lock held
 *                     guarantees that the current callout will not run.
 *                     The softclock() function sets this to 0 before it
 *                     drops callout_lock to acquire c_lock, and it calls
 *                     the handler only if cc_cancel is still 0 after
 *                     cc_lock is successfully acquired.
 *   cc_waiting      - If a thread is waiting in callout_drain(), then
 *                     cc_waiting is true.  Set only when
 *                     cc_curr is non-NULL.
 */
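
/*
 * Illustrative handshake (a sketch of the invariants above, not extra
 * executable code): softclock_call_cc() clears cc_cancel, drops cc_lock
 * and then acquires the callout's c_lock; a concurrent callout_stop(),
 * entered with c_lock held, takes cc_lock and sets cc_cancel.  When
 * softclock_call_cc() finally owns c_lock and finds cc_cancel set, it
 * skips the handler, so a successfully stopped callout never runs.
 */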

/*
 * Resets the execution entity tied to a specific callout cpu.
 */
static void
cc_cce_cleanup(struct callout_cpu *cc, int direct)
{

	cc_exec_curr(cc, direct) = NULL;
	cc_exec_cancel(cc, direct) = false;
	cc_exec_waiting(cc, direct) = false;
#ifdef SMP
	cc_migration_cpu(cc, direct) = CPUBLOCK;
	cc_migration_time(cc, direct) = 0;
	cc_migration_prec(cc, direct) = 0;
	cc_migration_func(cc, direct) = NULL;
	cc_migration_arg(cc, direct) = NULL;
#endif
}

/*
 * Checks if migration is requested by a specific callout cpu.
 */
static int
cc_cce_migrating(struct callout_cpu *cc, int direct)
{

#ifdef SMP
	return (cc_migration_cpu(cc, direct) != CPUBLOCK);
#else
	return (0);
#endif
}

/*
 * Kernel low level callwheel initialization
 * called on cpu0 during kernel startup.
 */
#ifdef __rtems__
static void rtems_bsd_timeout_init_early(void *);

static void
rtems_bsd_callout_timer(rtems_id id, void *arg)
{
	rtems_status_code sc;

	(void) arg;

	sc = rtems_timer_reset(id);
	BSD_ASSERT(sc == RTEMS_SUCCESSFUL);

	callout_process(sbinuptime());
}

static void
rtems_bsd_timeout_init_late(void *unused)
{
	rtems_status_code sc;
	rtems_id id;

	(void) unused;

	sc = rtems_timer_create(rtems_build_name('_', 'C', 'L', 'O'), &id);
	BSD_ASSERT(sc == RTEMS_SUCCESSFUL);

	sc = rtems_timer_server_fire_after(id, 1, rtems_bsd_callout_timer, NULL);
	BSD_ASSERT(sc == RTEMS_SUCCESSFUL);
}

SYSINIT(rtems_bsd_timeout_early, SI_SUB_VM, SI_ORDER_FIRST,
    rtems_bsd_timeout_init_early, NULL);

SYSINIT(rtems_bsd_timeout_late, SI_SUB_LAST, SI_ORDER_FIRST,
    rtems_bsd_timeout_init_late, NULL);

static void
rtems_bsd_timeout_init_early(void *dummy)
#else /* __rtems__ */
static void
callout_callwheel_init(void *dummy)
#endif /* __rtems__ */
{
	struct callout_cpu *cc;
#ifdef __rtems__
	(void) dummy;
#endif /* __rtems__ */

	/*
	 * Calculate the size of the callout wheel and the preallocated
	 * timeout() structures.
	 * XXX: Clip callout to result of previous function of maxusers
	 * maximum 384.  This is still huge, but acceptable.
	 */
	memset(CC_CPU(0), 0, sizeof(cc_cpu));
#ifndef __rtems__
	ncallout = imin(16 + maxproc + maxfiles, 18508);
	TUNABLE_INT_FETCH("kern.ncallout", &ncallout);
#endif /* __rtems__ */

	/*
	 * Calculate callout wheel size, should be next power of two higher
	 * than 'ncallout'.
	 */
	callwheelsize = 1 << fls(ncallout);
	callwheelmask = callwheelsize - 1;

#ifndef __rtems__
	/*
	 * Fetch whether we're pinning the swi's or not.
	 */
	TUNABLE_INT_FETCH("kern.pin_default_swi", &pin_default_swi);
	TUNABLE_INT_FETCH("kern.pin_pcpu_swi", &pin_pcpu_swi);
#endif /* __rtems__ */

	/*
	 * Only cpu0 handles timeout(9) and receives a preallocation.
	 *
	 * XXX: Once all timeout(9) consumers are converted this can
	 * be removed.
	 */
	timeout_cpu = PCPU_GET(cpuid);
	cc = CC_CPU(timeout_cpu);
	cc->cc_callout = malloc(ncallout * sizeof(struct callout),
	    M_CALLOUT, M_WAITOK);
	callout_cpu_init(cc, timeout_cpu);
}
#ifndef __rtems__
SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL);
#endif /* __rtems__ */

/*
 * Initialize the per-cpu callout structures.
 */
static void
callout_cpu_init(struct callout_cpu *cc, int cpu)
{
	struct callout *c;
	int i;

	mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
	SLIST_INIT(&cc->cc_callfree);
	cc->cc_inited = 1;
	cc->cc_callwheel = malloc(sizeof(struct callout_list) * callwheelsize,
	    M_CALLOUT, M_WAITOK);
	for (i = 0; i < callwheelsize; i++)
		LIST_INIT(&cc->cc_callwheel[i]);
#ifndef __rtems__
	TAILQ_INIT(&cc->cc_expireq);
#endif /* __rtems__ */
	cc->cc_firstevent = SBT_MAX;
	for (i = 0; i < 2; i++)
		cc_cce_cleanup(cc, i);
	snprintf(cc->cc_ktr_event_name, sizeof(cc->cc_ktr_event_name),
	    "callwheel cpu %d", cpu);
	if (cc->cc_callout == NULL)	/* Only cpu0 handles timeout(9) */
		return;
	for (i = 0; i < ncallout; i++) {
		c = &cc->cc_callout[i];
		callout_init(c, 0);
		c->c_iflags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
}

#ifdef SMP
/*
 * Switches the cpu tied to a specific callout.
 * The function expects the incoming callout cpu to be locked and returns
 * with the outgoing callout cpu locked.
 */
static struct callout_cpu *
callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
{
	struct callout_cpu *new_cc;

	MPASS(c != NULL && cc != NULL);
	CC_LOCK_ASSERT(cc);

	/*
	 * Avoid interrupts and preemption firing after the callout cpu
	 * is blocked in order to avoid deadlocks as the new thread
	 * may be willing to acquire the callout cpu lock.
	 */
	c->c_cpu = CPUBLOCK;
	spinlock_enter();
	CC_UNLOCK(cc);
	new_cc = CC_CPU(new_cpu);
	CC_LOCK(new_cc);
	spinlock_exit();
	c->c_cpu = new_cpu;
	return (new_cc);
}
#endif

#ifndef __rtems__
/*
 * Start standard softclock thread.
 */
static void
start_softclock(void *dummy)
{
	struct callout_cpu *cc;
	char name[MAXCOMLEN];
#ifdef SMP
	int cpu;
	struct intr_event *ie;
#endif

	cc = CC_CPU(timeout_cpu);
	snprintf(name, sizeof(name), "clock (%d)", timeout_cpu);
	if (swi_add(&clk_intr_event, name, softclock, cc, SWI_CLOCK,
	    INTR_MPSAFE, &cc->cc_cookie))
		panic("died while creating standard software ithreads");
	if (pin_default_swi &&
	    (intr_event_bind(clk_intr_event, timeout_cpu) != 0)) {
		printf("%s: timeout clock couldn't be pinned to cpu %d\n",
		    __func__,
		    timeout_cpu);
	}

#ifdef SMP
	CPU_FOREACH(cpu) {
		if (cpu == timeout_cpu)
			continue;
		cc = CC_CPU(cpu);
		cc->cc_callout = NULL;	/* Only cpu0 handles timeout(9). */
		callout_cpu_init(cc, cpu);
		snprintf(name, sizeof(name), "clock (%d)", cpu);
		ie = NULL;
		if (swi_add(&ie, name, softclock, cc, SWI_CLOCK,
		    INTR_MPSAFE, &cc->cc_cookie))
			panic("died while creating standard software ithreads");
		if (pin_pcpu_swi && (intr_event_bind(ie, cpu) != 0)) {
			printf("%s: per-cpu clock couldn't be pinned to "
			    "cpu %d\n",
			    __func__,
			    cpu);
		}
	}
#endif
}
SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);
#endif /* __rtems__ */

#define	CC_HASH_SHIFT	8

static inline u_int
callout_hash(sbintime_t sbt)
{

	return (sbt >> (32 - CC_HASH_SHIFT));
}

static inline u_int
callout_get_bucket(sbintime_t sbt)
{

	return (callout_hash(sbt) & callwheelmask);
}
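
/*
 * Worked example (a sketch, using the constants above): sbintime_t is a
 * 32.32 fixed-point value, so with CC_HASH_SHIFT == 8 the hash is
 * sbt >> 24.  One second (SBT_1S == 1 << 32) therefore spans
 * 2^8 == 256 buckets, i.e. each bucket covers 2^24 sbt units, or about
 * 3.9 ms.  callout_get_bucket() simply wraps the hash onto the wheel
 * with callwheelmask.
 */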

void
callout_process(sbintime_t now)
{
	struct callout *tmp, *tmpn;
	struct callout_cpu *cc;
	struct callout_list *sc;
	sbintime_t first, last, max, tmp_max;
	uint32_t lookahead;
	u_int firstb, lastb, nowb;
#ifdef CALLOUT_PROFILING
	int depth_dir = 0, mpcalls_dir = 0, lockcalls_dir = 0;
#endif

	cc = CC_SELF();
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);

	/* Compute the buckets of the last scan and present times. */
	firstb = callout_hash(cc->cc_lastscan);
	cc->cc_lastscan = now;
	nowb = callout_hash(now);

	/* Compute the last bucket and minimum time of the bucket after it. */
	if (nowb == firstb)
		lookahead = (SBT_1S / 16);
	else if (nowb - firstb == 1)
		lookahead = (SBT_1S / 8);
	else
		lookahead = (SBT_1S / 2);
	first = last = now;
	first += (lookahead / 2);
	last += lookahead;
	last &= (0xffffffffffffffffLLU << (32 - CC_HASH_SHIFT));
	lastb = callout_hash(last) - 1;
	max = last;
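	/*
	 * Worked example (assuming the constants above): if the last scan
	 * was in the current bucket (nowb == firstb), lookahead is
	 * SBT_1S / 16, so events up to ~62.5 ms ahead are inspected and
	 * may be aggregated into the window starting at now + ~31.25 ms.
	 */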

	/*
	 * Check if we wrapped around the entire wheel from the last scan.
	 * If so, we need to scan the entire wheel for pending callouts.
	 */
	if (lastb - firstb >= callwheelsize) {
		lastb = firstb + callwheelsize - 1;
		if (nowb - firstb >= callwheelsize)
			nowb = lastb;
	}

	/* Iterate callwheel from firstb to nowb and then up to lastb. */
	do {
		sc = &cc->cc_callwheel[firstb & callwheelmask];
		tmp = LIST_FIRST(sc);
		while (tmp != NULL) {
			/* Run the callout if the present time is within its window. */
			if (tmp->c_time <= now) {
#ifndef __rtems__
				/*
				 * Consumer told us the callout may be run
				 * directly from hardware interrupt context.
				 */
				if (tmp->c_iflags & CALLOUT_DIRECT) {
#endif /* __rtems__ */
#ifdef CALLOUT_PROFILING
					++depth_dir;
#endif
					cc_exec_next(cc) =
					    LIST_NEXT(tmp, c_links.le);
					cc->cc_bucket = firstb & callwheelmask;
					LIST_REMOVE(tmp, c_links.le);
					softclock_call_cc(tmp, cc,
#ifdef CALLOUT_PROFILING
					    &mpcalls_dir, &lockcalls_dir, NULL,
#endif
					    1);
					tmp = cc_exec_next(cc);
					cc_exec_next(cc) = NULL;
#ifndef __rtems__
				} else {
					tmpn = LIST_NEXT(tmp, c_links.le);
					LIST_REMOVE(tmp, c_links.le);
					TAILQ_INSERT_TAIL(&cc->cc_expireq,
					    tmp, c_links.tqe);
					tmp->c_iflags |= CALLOUT_PROCESSED;
					tmp = tmpn;
				}
#endif /* __rtems__ */
				continue;
			}
			/* Skip events from distant future. */
			if (tmp->c_time >= max)
				goto next;
			/*
			 * The event's earliest time is later than our
			 * maximal scan time, so it cannot be aggregated.
			 */
			if (tmp->c_time > last) {
				lastb = nowb;
				goto next;
			}
			/* Update first and last time, respecting this event. */
			if (tmp->c_time < first)
				first = tmp->c_time;
			tmp_max = tmp->c_time + tmp->c_precision;
			if (tmp_max < last)
				last = tmp_max;
next:
			tmp = LIST_NEXT(tmp, c_links.le);
		}
		/* Proceed with the next bucket. */
		firstb++;
		/*
		 * Stop if we looked past the present time and found some
		 * event we cannot execute yet.
		 * Stop if we looked far enough into the future.
		 */
	} while (((int)(firstb - lastb)) <= 0);
	cc->cc_firstevent = last;
#ifndef NO_EVENTTIMERS
	cpu_new_callout(curcpu, last, first);
#endif
#ifdef CALLOUT_PROFILING
	avg_depth_dir += (depth_dir * 1000 - avg_depth_dir) >> 8;
	avg_mpcalls_dir += (mpcalls_dir * 1000 - avg_mpcalls_dir) >> 8;
	avg_lockcalls_dir += (lockcalls_dir * 1000 - avg_lockcalls_dir) >> 8;
#endif
	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
#ifndef __rtems__
	/*
	 * swi_sched acquires the thread lock, so we don't want to call it
	 * with cc_lock held; incorrect locking order.
	 */
	if (!TAILQ_EMPTY(&cc->cc_expireq))
		swi_sched(cc->cc_cookie, 0);
#endif /* __rtems__ */
}

static struct callout_cpu *
callout_lock(struct callout *c)
{
	struct callout_cpu *cc;
	int cpu;

	for (;;) {
		cpu = c->c_cpu;
#ifdef SMP
		if (cpu == CPUBLOCK) {
			while (c->c_cpu == CPUBLOCK)
				cpu_spinwait();
			continue;
		}
#endif
		cc = CC_CPU(cpu);
		CC_LOCK(cc);
		if (cpu == c->c_cpu)
			break;
		CC_UNLOCK(cc);
	}
	return (cc);
}

static void
callout_cc_add(struct callout *c, struct callout_cpu *cc,
    sbintime_t sbt, sbintime_t precision, void (*func)(void *),
    void *arg, int cpu, int flags)
{
	int bucket;

	CC_LOCK_ASSERT(cc);
	if (sbt < cc->cc_lastscan)
		sbt = cc->cc_lastscan;
	c->c_arg = arg;
	c->c_iflags |= CALLOUT_PENDING;
	c->c_iflags &= ~CALLOUT_PROCESSED;
	c->c_flags |= CALLOUT_ACTIVE;
	if (flags & C_DIRECT_EXEC)
		c->c_iflags |= CALLOUT_DIRECT;
	c->c_func = func;
	c->c_time = sbt;
	c->c_precision = precision;
	bucket = callout_get_bucket(c->c_time);
	CTR3(KTR_CALLOUT, "precision set for %p: %d.%08x",
	    c, (int)(c->c_precision >> 32),
	    (u_int)(c->c_precision & 0xffffffff));
	LIST_INSERT_HEAD(&cc->cc_callwheel[bucket], c, c_links.le);
	if (cc->cc_bucket == bucket)
		cc_exec_next(cc) = c;
#ifndef NO_EVENTTIMERS
	/*
	 * Inform the eventtimers(4) subsystem there's a new callout
	 * that has been inserted, but only if really required.
	 */
	if (SBT_MAX - c->c_time < c->c_precision)
		c->c_precision = SBT_MAX - c->c_time;
	sbt = c->c_time + c->c_precision;
	if (sbt < cc->cc_firstevent) {
		cc->cc_firstevent = sbt;
		cpu_new_callout(cpu, sbt, c->c_time);
	}
#endif
}

static void
callout_cc_del(struct callout *c, struct callout_cpu *cc)
{

	if ((c->c_iflags & CALLOUT_LOCAL_ALLOC) == 0)
		return;
	c->c_func = NULL;
	SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
}

static void
softclock_call_cc(struct callout *c, struct callout_cpu *cc,
#ifdef CALLOUT_PROFILING
    int *mpcalls, int *lockcalls, int *gcalls,
#endif
    int direct)
{
#ifndef __rtems__
	struct rm_priotracker tracker;
#endif /* __rtems__ */
	void (*c_func)(void *);
	void *c_arg;
	struct lock_class *class;
	struct lock_object *c_lock;
	uintptr_t lock_status;
	int c_iflags;
#ifdef SMP
	struct callout_cpu *new_cc;
	void (*new_func)(void *);
	void *new_arg;
	int flags, new_cpu;
	sbintime_t new_prec, new_time;
#endif
#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
	sbintime_t sbt1, sbt2;
	struct timespec ts2;
	static sbintime_t maxdt = 2 * SBT_1MS;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

	KASSERT((c->c_iflags & CALLOUT_PENDING) == CALLOUT_PENDING,
	    ("softclock_call_cc: pend %p %x", c, c->c_iflags));
	KASSERT((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE,
	    ("softclock_call_cc: act %p %x", c, c->c_flags));
	class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
	lock_status = 0;
	if (c->c_flags & CALLOUT_SHAREDLOCK) {
#ifndef __rtems__
		if (class == &lock_class_rm)
			lock_status = (uintptr_t)&tracker;
		else
#endif /* __rtems__ */
			lock_status = 1;
	}
	c_lock = c->c_lock;
	c_func = c->c_func;
	c_arg = c->c_arg;
	c_iflags = c->c_iflags;
	if (c->c_iflags & CALLOUT_LOCAL_ALLOC)
		c->c_iflags = CALLOUT_LOCAL_ALLOC;
	else
		c->c_iflags &= ~CALLOUT_PENDING;

	cc_exec_curr(cc, direct) = c;
	cc_exec_cancel(cc, direct) = false;
	cc_exec_drain(cc, direct) = NULL;
	CC_UNLOCK(cc);
	if (c_lock != NULL) {
		class->lc_lock(c_lock, lock_status);
		/*
		 * The callout may have been cancelled
		 * while we switched locks.
		 */
		if (cc_exec_cancel(cc, direct)) {
			class->lc_unlock(c_lock);
			goto skip;
		}
		/* The callout cannot be stopped now. */
		cc_exec_cancel(cc, direct) = true;
		if (c_lock == &Giant.lock_object) {
#ifdef CALLOUT_PROFILING
			(*gcalls)++;
#endif
			CTR3(KTR_CALLOUT, "callout giant %p func %p arg %p",
			    c, c_func, c_arg);
		} else {
#ifdef CALLOUT_PROFILING
			(*lockcalls)++;
#endif
			CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p",
			    c, c_func, c_arg);
		}
	} else {
#ifdef CALLOUT_PROFILING
		(*mpcalls)++;
#endif
		CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
		    c, c_func, c_arg);
	}
	KTR_STATE3(KTR_SCHED, "callout", cc->cc_ktr_event_name, "running",
	    "func:%p", c_func, "arg:%p", c_arg, "direct:%d", direct);
#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
	sbt1 = sbinuptime();
#endif
#ifndef __rtems__
	THREAD_NO_SLEEPING();
	SDT_PROBE1(callout_execute, , , callout__start, c);
#endif /* __rtems__ */
	c_func(c_arg);
#ifndef __rtems__
	SDT_PROBE1(callout_execute, , , callout__end, c);
	THREAD_SLEEPING_OK();
#endif /* __rtems__ */
#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
	sbt2 = sbinuptime();
	sbt2 -= sbt1;
	if (sbt2 > maxdt) {
		if (lastfunc != c_func || sbt2 > maxdt * 2) {
			ts2 = sbttots(sbt2);
			printf(
		"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
			    c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
		}
		maxdt = sbt2;
		lastfunc = c_func;
	}
#endif
	KTR_STATE0(KTR_SCHED, "callout", cc->cc_ktr_event_name, "idle");
	CTR1(KTR_CALLOUT, "callout %p finished", c);
	if ((c_iflags & CALLOUT_RETURNUNLOCKED) == 0)
		class->lc_unlock(c_lock);
skip:
	CC_LOCK(cc);
	KASSERT(cc_exec_curr(cc, direct) == c, ("mishandled cc_curr"));
	cc_exec_curr(cc, direct) = NULL;
	if (cc_exec_drain(cc, direct)) {
		void (*drain)(void *);

		drain = cc_exec_drain(cc, direct);
		cc_exec_drain(cc, direct) = NULL;
		CC_UNLOCK(cc);
		drain(c_arg);
		CC_LOCK(cc);
	}
	if (cc_exec_waiting(cc, direct)) {
		/*
		 * There is someone waiting for the
		 * callout to complete.
		 * If the callout was scheduled for
		 * migration just cancel it.
		 */
		if (cc_cce_migrating(cc, direct)) {
			cc_cce_cleanup(cc, direct);

			/*
			 * It should be asserted here that the callout is
			 * not destroyed, but that is not easy to do.
			 */
			c->c_iflags &= ~CALLOUT_DFRMIGRATION;
		}
		cc_exec_waiting(cc, direct) = false;
		CC_UNLOCK(cc);
		wakeup(&cc_exec_waiting(cc, direct));
		CC_LOCK(cc);
	} else if (cc_cce_migrating(cc, direct)) {
		KASSERT((c_iflags & CALLOUT_LOCAL_ALLOC) == 0,
		    ("Migrating legacy callout %p", c));
#ifdef SMP
		/*
		 * If the callout was scheduled for
		 * migration just perform it now.
		 */
		new_cpu = cc_migration_cpu(cc, direct);
		new_time = cc_migration_time(cc, direct);
		new_prec = cc_migration_prec(cc, direct);
		new_func = cc_migration_func(cc, direct);
		new_arg = cc_migration_arg(cc, direct);
		cc_cce_cleanup(cc, direct);

		/*
		 * It should be asserted here that the callout is not
		 * destroyed, but that is not easy to do.
		 *
		 * First, handle deferred callout stops.
		 */
		if (!callout_migrating(c)) {
			CTR3(KTR_CALLOUT,
			    "deferred cancelled %p func %p arg %p",
			    c, new_func, new_arg);
			callout_cc_del(c, cc);
			return;
		}
		c->c_iflags &= ~CALLOUT_DFRMIGRATION;

		new_cc = callout_cpu_switch(c, cc, new_cpu);
		flags = (direct) ? C_DIRECT_EXEC : 0;
		callout_cc_add(c, new_cc, new_time, new_prec, new_func,
		    new_arg, new_cpu, flags);
		CC_UNLOCK(new_cc);
		CC_LOCK(cc);
#else
		panic("migration should not happen");
#endif
	}
	/*
	 * If the current callout is locally allocated (from
	 * timeout(9)) then put it on the freelist.
	 *
	 * Note: we need to check the cached copy of c_iflags because
	 * if it was not local, then it's not safe to deref the
	 * callout pointer.
	 */
	KASSERT((c_iflags & CALLOUT_LOCAL_ALLOC) == 0 ||
	    c->c_iflags == CALLOUT_LOCAL_ALLOC,
	    ("corrupted callout"));
	if (c_iflags & CALLOUT_LOCAL_ALLOC)
		callout_cc_del(c, cc);
}

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */
926 | #ifndef __rtems__ |
---|
927 | /* |
---|
928 | * Software (low priority) clock interrupt. |
---|
929 | * Run periodic events from timeout queue. |
---|
930 | */ |
---|
931 | void |
---|
932 | softclock(void *arg) |
---|
933 | { |
---|
934 | struct callout_cpu *cc; |
---|
935 | struct callout *c; |
---|
936 | #ifdef CALLOUT_PROFILING |
---|
937 | int depth = 0, gcalls = 0, lockcalls = 0, mpcalls = 0; |
---|
938 | #endif |
---|
939 | |
---|
940 | cc = (struct callout_cpu *)arg; |
---|
941 | CC_LOCK(cc); |
---|
942 | while ((c = TAILQ_FIRST(&cc->cc_expireq)) != NULL) { |
---|
943 | TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe); |
---|
944 | softclock_call_cc(c, cc, |
---|
945 | #ifdef CALLOUT_PROFILING |
---|
946 | &mpcalls, &lockcalls, &gcalls, |
---|
947 | #endif |
---|
948 | 0); |
---|
949 | #ifdef CALLOUT_PROFILING |
---|
950 | ++depth; |
---|
951 | #endif |
---|
952 | } |
---|
953 | #ifdef CALLOUT_PROFILING |
---|
954 | avg_depth += (depth * 1000 - avg_depth) >> 8; |
---|
955 | avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8; |
---|
956 | avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8; |
---|
957 | avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8; |
---|
958 | #endif |
---|
959 | CC_UNLOCK(cc); |
---|
960 | } |
---|
961 | #endif /* __rtems__ */ |
---|
962 | |
---|
963 | /* |
---|
964 | * timeout -- |
---|
965 | * Execute a function after a specified length of time. |
---|
966 | * |
---|
967 | * untimeout -- |
---|
968 | * Cancel previous timeout function call. |
---|
969 | * |
---|
970 | * callout_handle_init -- |
---|
971 | * Initialize a handle so that using it with untimeout is benign. |
---|
972 | * |
---|
973 | * See AT&T BCI Driver Reference Manual for specification. This |
---|
974 | * implementation differs from that one in that although an |
---|
975 | * identification value is returned from timeout, the original |
---|
976 | * arguments to timeout as well as the identifier are used to |
---|
977 | * identify entries for untimeout. |
---|
978 | */ |
---|
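
/*
 * Example (a minimal sketch of the legacy interface; "foo_tick" and "sc"
 * are hypothetical consumer names):
 *
 *	struct callout_handle h;
 *
 *	callout_handle_init(&h);
 *	h = timeout(foo_tick, sc, hz);		// call foo_tick(sc) in ~1 s
 *	...
 *	untimeout(foo_tick, sc, h);		// cancel it if still pending
 */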
struct callout_handle
timeout(timeout_t *ftn, void *arg, int to_ticks)
{
	struct callout_cpu *cc;
	struct callout *new;
	struct callout_handle handle;

	cc = CC_CPU(timeout_cpu);
	CC_LOCK(cc);
	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&cc->cc_callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
	callout_reset(new, to_ticks, ftn, arg);
	handle.callout = new;
	CC_UNLOCK(cc);

	return (handle);
}

void
untimeout(timeout_t *ftn, void *arg, struct callout_handle handle)
{
	struct callout_cpu *cc;

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	cc = callout_lock(handle.callout);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	CC_UNLOCK(cc);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}

void
callout_when(sbintime_t sbt, sbintime_t precision, int flags,
    sbintime_t *res, sbintime_t *prec_res)
{
	sbintime_t to_sbt, to_pr;

	if ((flags & (C_ABSOLUTE | C_PRECALC)) != 0) {
		*res = sbt;
		*prec_res = precision;
		return;
	}
	if ((flags & C_HARDCLOCK) != 0 && sbt < tick_sbt)
		sbt = tick_sbt;
	if ((flags & C_HARDCLOCK) != 0 ||
#ifdef NO_EVENTTIMERS
	    sbt >= sbt_timethreshold) {
		to_sbt = getsbinuptime();

		/* Add safety belt for the case of hz > 1000. */
		to_sbt += tc_tick_sbt - tick_sbt;
#else
	    sbt >= sbt_tickthreshold) {
		/*
		 * Obtain the time of the last hardclock() call on
		 * this CPU directly from kern_clocksource.c.
		 * This value is per-CPU, but it is equal for all
		 * active CPUs.
		 */
#ifdef __LP64__
		to_sbt = DPCPU_GET(hardclocktime);
#else
		spinlock_enter();
		to_sbt = DPCPU_GET(hardclocktime);
		spinlock_exit();
#endif
#endif
		if (cold && to_sbt == 0)
			to_sbt = sbinuptime();
		if ((flags & C_HARDCLOCK) == 0)
			to_sbt += tick_sbt;
	} else
		to_sbt = sbinuptime();
	if (SBT_MAX - to_sbt < sbt)
		to_sbt = SBT_MAX;
	else
		to_sbt += sbt;
	*res = to_sbt;
	to_pr = ((C_PRELGET(flags) < 0) ? sbt >> tc_precexp :
	    sbt >> C_PRELGET(flags));
	*prec_res = to_pr > precision ? to_pr : precision;
}
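
/*
 * Worked example for the precision computation above (a sketch): a
 * caller passing a relative sbt of 100 ms with C_PREL(5) in flags gets
 * to_pr = 100 ms >> 5, i.e. about 3.1 ms of allowed lateness; the
 * larger of that and the caller-supplied precision is returned.
 * Without a C_PREL() value, the system default scaling by tc_precexp
 * applies instead.
 */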

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
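
/*
 * Example (a minimal sketch; "sc", "sc->mtx", "sc->ch" and "foo_timer"
 * are hypothetical consumer names):
 *
 *	callout_init_mtx(&sc->ch, &sc->mtx, 0);
 *	...
 *	mtx_lock(&sc->mtx);
 *	callout_reset(&sc->ch, hz / 10, foo_timer, sc);	// fire in ~100 ms
 *	mtx_unlock(&sc->mtx);
 *	...
 *	callout_drain(&sc->ch);		// before freeing sc
 */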
1095 | callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t prec, |
---|
1096 | void (*ftn)(void *), void *arg, int cpu, int flags) |
---|
1097 | { |
---|
1098 | sbintime_t to_sbt, precision; |
---|
1099 | struct callout_cpu *cc; |
---|
1100 | int cancelled, direct; |
---|
1101 | int ignore_cpu=0; |
---|
1102 | |
---|
1103 | cancelled = 0; |
---|
1104 | if (cpu == -1) { |
---|
1105 | ignore_cpu = 1; |
---|
1106 | } else if ((cpu >= MAXCPU) || |
---|
1107 | ((CC_CPU(cpu))->cc_inited == 0)) { |
---|
1108 | /* Invalid CPU spec */ |
---|
1109 | panic("Invalid CPU in callout %d", cpu); |
---|
1110 | } |
---|
1111 | callout_when(sbt, prec, flags, &to_sbt, &precision); |
---|
1112 | |
---|
1113 | /* |
---|
1114 | * This flag used to be added by callout_cc_add, but the |
---|
1115 | * first time you call this we could end up with the |
---|
1116 | * wrong direct flag if we don't do it before we add. |
---|
1117 | */ |
---|
1118 | if (flags & C_DIRECT_EXEC) { |
---|
1119 | direct = 1; |
---|
1120 | } else { |
---|
1121 | direct = 0; |
---|
1122 | } |
---|
1123 | KASSERT(!direct || c->c_lock == NULL, |
---|
1124 | ("%s: direct callout %p has lock", __func__, c)); |
---|
1125 | cc = callout_lock(c); |
---|
	/*
	 * Don't allow migration of pre-allocated callouts lest they
	 * become unbalanced; also handle the case where the user does
	 * not care which CPU the callout runs on.
	 */
	if ((c->c_iflags & CALLOUT_LOCAL_ALLOC) ||
	    ignore_cpu) {
		cpu = c->c_cpu;
	}

	if (cc_exec_curr(cc, direct) == c) {
		/*
		 * We're being asked to reschedule a callout which is
		 * currently in progress.  If there is a lock then we
		 * can cancel the callout if it has not really started.
		 */
		if (c->c_lock != NULL && !cc_exec_cancel(cc, direct))
			cancelled = cc_exec_cancel(cc, direct) = true;
		if (cc_exec_waiting(cc, direct) || cc_exec_drain(cc, direct)) {
			/*
			 * Someone has called callout_drain to kill this
			 * callout.  Don't reschedule.
			 */
			CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
			    cancelled ? "cancelled" : "failed to cancel",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			return (cancelled);
		}
#ifdef SMP
		if (callout_migrating(c)) {
			/*
			 * This only occurs when a second callout_reset_sbt_on
			 * is made after a previous one moved it into
			 * deferred migration (below).  Note we do *not* change
			 * the prev_cpu even though the previous target may
			 * be different.
			 */
			cc_migration_cpu(cc, direct) = cpu;
			cc_migration_time(cc, direct) = to_sbt;
			cc_migration_prec(cc, direct) = precision;
			cc_migration_func(cc, direct) = ftn;
			cc_migration_arg(cc, direct) = arg;
			cancelled = 1;
			CC_UNLOCK(cc);
			return (cancelled);
		}
#endif
	}
	if (c->c_iflags & CALLOUT_PENDING) {
#ifndef __rtems__
		if ((c->c_iflags & CALLOUT_PROCESSED) == 0) {
#endif /* __rtems__ */
			if (cc_exec_next(cc) == c)
				cc_exec_next(cc) = LIST_NEXT(c, c_links.le);
			LIST_REMOVE(c, c_links.le);
#ifndef __rtems__
		} else {
			TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
		}
#endif /* __rtems__ */
		cancelled = 1;
		c->c_iflags &= ~CALLOUT_PENDING;
		c->c_flags &= ~CALLOUT_ACTIVE;
	}

#ifdef SMP
	/*
	 * If the callout must migrate try to perform it immediately.
	 * If the callout is currently running, just defer the migration
	 * to a more appropriate moment.
	 */
	if (c->c_cpu != cpu) {
		if (cc_exec_curr(cc, direct) == c) {
			/*
			 * Pending will have been removed since we are
			 * actually executing the callout on another
			 * CPU.  That callout should be waiting on the
			 * lock the caller holds.  If we set both
			 * active and pending after we return and the
			 * lock on the executing callout proceeds, it
			 * will then see pending is true and return.
			 * At the return from the actual callout execution
			 * the migration will occur in softclock_call_cc
			 * and this new callout will be placed on the
			 * new CPU via a call to callout_cpu_switch() which
			 * will get the lock on the right CPU followed
			 * by a call to callout_cc_add() which will add it
			 * there.  (See above in softclock_call_cc().)
			 */
			cc_migration_cpu(cc, direct) = cpu;
			cc_migration_time(cc, direct) = to_sbt;
			cc_migration_prec(cc, direct) = precision;
			cc_migration_func(cc, direct) = ftn;
			cc_migration_arg(cc, direct) = arg;
			c->c_iflags |= (CALLOUT_DFRMIGRATION | CALLOUT_PENDING);
			c->c_flags |= CALLOUT_ACTIVE;
			CTR6(KTR_CALLOUT,
		    "migration of %p func %p arg %p in %d.%08x to %u deferred",
			    c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
			    (u_int)(to_sbt & 0xffffffff), cpu);
			CC_UNLOCK(cc);
			return (cancelled);
		}
		cc = callout_cpu_switch(c, cc, cpu);
	}
#endif

	callout_cc_add(c, cc, to_sbt, precision, ftn, arg, cpu, flags);
	CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x",
	    cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
	    (u_int)(to_sbt & 0xffffffff));
	CC_UNLOCK(cc);

	return (cancelled);
}

/*
 * Common idioms that can be optimized in the future.
 */
int
callout_schedule_on(struct callout *c, int to_ticks, int cpu)
{
	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu);
}

int
callout_schedule(struct callout *c, int to_ticks)
{
	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
}

int
_callout_stop_safe(struct callout *c, int flags, void (*drain)(void *))
{
	struct callout_cpu *cc, *old_cc;
	struct lock_class *class;
	int direct, sq_locked, use_lock;
	int cancelled, not_on_a_list;
#ifdef __rtems__
	(void)old_cc;
	(void)sq_locked;
#endif /* __rtems__ */

	if ((flags & CS_DRAIN) != 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, c->c_lock,
		    "calling %s", __func__);

	/*
	 * Some old subsystems don't hold Giant while running a callout_stop(),
	 * so just discard this check for the moment.
	 */
	if ((flags & CS_DRAIN) == 0 && c->c_lock != NULL) {
		if (c->c_lock == &Giant.lock_object)
			use_lock = mtx_owned(&Giant);
		else {
			use_lock = 1;
			class = LOCK_CLASS(c->c_lock);
			class->lc_assert(c->c_lock, LA_XLOCKED);
		}
	} else
		use_lock = 0;
	if (c->c_iflags & CALLOUT_DIRECT) {
		direct = 1;
	} else {
		direct = 0;
	}

#ifndef __rtems__
	sq_locked = 0;
	old_cc = NULL;
again:
#endif /* __rtems__ */
	cc = callout_lock(c);

	if ((c->c_iflags & (CALLOUT_DFRMIGRATION | CALLOUT_PENDING)) ==
	    (CALLOUT_DFRMIGRATION | CALLOUT_PENDING) &&
	    ((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE)) {
		/*
		 * Special case where this slipped in while we
		 * were migrating *as* the callout is about to
		 * execute.  The caller probably holds the lock
		 * the callout wants.
		 *
		 * Get rid of the migration first.  Then set
		 * the flag that tells this code *not* to
		 * try to remove it from any lists (it's not
		 * on one yet).  When the callout wheel runs,
		 * it will ignore this callout.
		 */
		c->c_iflags &= ~CALLOUT_PENDING;
		c->c_flags &= ~CALLOUT_ACTIVE;
		not_on_a_list = 1;
	} else {
		not_on_a_list = 0;
	}

#ifndef __rtems__
	/*
	 * If the callout was migrating while the callout cpu lock was
	 * dropped, just drop the sleepqueue lock and check the states
	 * again.
	 */
	if (sq_locked != 0 && cc != old_cc) {
#ifdef SMP
		CC_UNLOCK(cc);
		sleepq_release(&cc_exec_waiting(old_cc, direct));
		sq_locked = 0;
		old_cc = NULL;
		goto again;
#else
		panic("migration should not happen");
#endif
	}
#endif /* __rtems__ */

	/*
	 * If the callout is running, try to stop it or drain it.
	 */
	if (cc_exec_curr(cc, direct) == c) {
		/*
		 * Whether we succeed in stopping it or not, we must clear
		 * the active flag - this is what API users expect.  If we
		 * are draining and the callout is currently executing,
		 * first wait until it finishes.
		 */
		if ((flags & CS_DRAIN) == 0)
			c->c_flags &= ~CALLOUT_ACTIVE;

		if ((flags & CS_DRAIN) != 0) {
			/*
			 * The current callout is running (or just
			 * about to run) and blocking is allowed, so
			 * just wait for the current invocation to
			 * finish.
			 */
			while (cc_exec_curr(cc, direct) == c) {
#ifndef __rtems__

				/*
				 * Use direct calls to sleepqueue interface
				 * instead of cv/msleep in order to avoid
				 * a LOR between cc_lock and sleepqueue
				 * chain spinlocks.  This piece of code
				 * emulates a msleep_spin() call actually.
				 *
				 * If we already have the sleepqueue chain
				 * locked, then we can safely block.  If we
				 * don't already have it locked, however,
				 * we have to drop the cc_lock to lock
				 * it.  This opens several races, so we
				 * restart at the beginning once we have
				 * both locks.  If nothing has changed, then
				 * we will end up back here with sq_locked
				 * set.
				 */
				if (!sq_locked) {
					CC_UNLOCK(cc);
					sleepq_lock(
					    &cc_exec_waiting(cc, direct));
					sq_locked = 1;
					old_cc = cc;
					goto again;
				}

				/*
				 * Migration could be cancelled here, but
				 * as long as it is still not sure when it
				 * will be packed up, just let softclock()
				 * take care of it.
				 */
				cc_exec_waiting(cc, direct) = true;
				DROP_GIANT();
				CC_UNLOCK(cc);
				sleepq_add(
				    &cc_exec_waiting(cc, direct),
				    &cc->cc_lock.lock_object, "codrain",
				    SLEEPQ_SLEEP, 0);
				sleepq_wait(
				    &cc_exec_waiting(cc, direct),
				    0);
				sq_locked = 0;
				old_cc = NULL;

				/* Reacquire locks previously released. */
				PICKUP_GIANT();
				CC_LOCK(cc);
#else /* __rtems__ */
				/*
				 * On RTEMS the LOR problem above does not
				 * exist since here we do not use
				 * sleepq_set_timeout() and instead use the
				 * RTEMS watchdog.
				 */
				cc_exec_waiting(cc, direct) = true;
				msleep_spin(&cc_exec_waiting(cc, direct),
				    &cc->cc_lock, "codrain", 0);
#endif /* __rtems__ */
			}
			c->c_flags &= ~CALLOUT_ACTIVE;
		} else if (use_lock &&
		    !cc_exec_cancel(cc, direct) && (drain == NULL)) {

			/*
			 * The current callout is waiting for its
			 * lock which we hold.  Cancel the callout
			 * and return.  After our caller drops the
			 * lock, the callout will be skipped in
			 * softclock().  This *only* works with a
			 * callout_stop() *not* callout_drain() or
			 * callout_async_drain().
			 */
			cc_exec_cancel(cc, direct) = true;
			CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			KASSERT(!cc_cce_migrating(cc, direct),
			    ("callout wrongly scheduled for migration"));
			if (callout_migrating(c)) {
				c->c_iflags &= ~CALLOUT_DFRMIGRATION;
#ifdef SMP
				cc_migration_cpu(cc, direct) = CPUBLOCK;
				cc_migration_time(cc, direct) = 0;
				cc_migration_prec(cc, direct) = 0;
				cc_migration_func(cc, direct) = NULL;
				cc_migration_arg(cc, direct) = NULL;
#endif
			}
			CC_UNLOCK(cc);
#ifndef __rtems__
			KASSERT(!sq_locked, ("sleepqueue chain locked"));
#endif /* __rtems__ */
			return (1);
		} else if (callout_migrating(c)) {
1459 | /* |
---|
1460 | * The callout is currently being serviced |
---|
1461 | * and the "next" callout is scheduled at |
---|
1462 | * its completion with a migration. We remove |
---|
1463 | * the migration flag so it *won't* get rescheduled, |
---|
1464 | * but we can't stop the one thats running so |
---|
1465 | * we return 0. |
---|
1466 | */ |
---|
1467 | c->c_iflags &= ~CALLOUT_DFRMIGRATION; |
---|
1468 | #ifdef SMP |
---|
1469 | /* |
---|
1470 | * We can't call cc_cce_cleanup here since |
---|
1471 | * if we do it will remove .ce_curr and |
---|
1472 | * its still running. This will prevent a |
---|
1473 | * reschedule of the callout when the |
---|
1474 | * execution completes. |
---|
1475 | */ |
---|
1476 | cc_migration_cpu(cc, direct) = CPUBLOCK; |
---|
1477 | cc_migration_time(cc, direct) = 0; |
---|
1478 | cc_migration_prec(cc, direct) = 0; |
---|
1479 | cc_migration_func(cc, direct) = NULL; |
---|
1480 | cc_migration_arg(cc, direct) = NULL; |
---|
1481 | #endif |
---|
1482 | CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p", |
---|
1483 | c, c->c_func, c->c_arg); |
---|
1484 | if (drain) { |
---|
1485 | cc_exec_drain(cc, direct) = drain; |
---|
1486 | } |
---|
1487 | CC_UNLOCK(cc); |
---|
1488 | return ((flags & CS_EXECUTING) != 0); |
---|
1489 | } |
---|
1490 | CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p", |
---|
1491 | c, c->c_func, c->c_arg); |
---|
1492 | if (drain) { |
---|
1493 | cc_exec_drain(cc, direct) = drain; |
---|
1494 | } |
---|
1495 | #ifndef __rtems__ |
---|
1496 | KASSERT(!sq_locked, ("sleepqueue chain still locked")); |
---|
1497 | #endif /* __rtems__ */ |
---|
1498 | cancelled = ((flags & CS_EXECUTING) != 0); |
---|
1499 | } else |
---|
1500 | cancelled = 1; |
---|
1501 | |
---|
1502 | #ifndef __rtems__ |
---|
1503 | if (sq_locked) |
---|
1504 | sleepq_release(&cc_exec_waiting(cc, direct)); |
---|
1505 | #endif /* __rtems__ */ |
---|
1506 | |
---|
1507 | if ((c->c_iflags & CALLOUT_PENDING) == 0) { |
---|
1508 | CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p", |
---|
1509 | c, c->c_func, c->c_arg); |
---|
1510 | /* |
---|
1511 | * For not scheduled and not executing callout return |
---|
1512 | * negative value. |
---|
1513 | */ |
---|
1514 | if (cc_exec_curr(cc, direct) != c) |
---|
1515 | cancelled = -1; |
---|
1516 | CC_UNLOCK(cc); |
---|
1517 | return (cancelled); |
---|
1518 | } |
---|
1519 | |
---|
1520 | c->c_iflags &= ~CALLOUT_PENDING; |
---|
1521 | c->c_flags &= ~CALLOUT_ACTIVE; |
---|
1522 | |
---|
1523 | CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p", |
---|
1524 | c, c->c_func, c->c_arg); |
---|
1525 | if (not_on_a_list == 0) { |
---|
1526 | #ifndef __rtems__ |
---|
1527 | if ((c->c_iflags & CALLOUT_PROCESSED) == 0) { |
---|
1528 | #endif /* __rtems__ */ |
---|
1529 | if (cc_exec_next(cc) == c) |
---|
1530 | cc_exec_next(cc) = LIST_NEXT(c, c_links.le); |
---|
1531 | LIST_REMOVE(c, c_links.le); |
---|
1532 | #ifndef __rtems__ |
---|
1533 | } else { |
---|
1534 | TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe); |
---|
1535 | } |
---|
1536 | #endif /* __rtems__ */ |
---|
1537 | } |
---|
1538 | callout_cc_del(c, cc); |
---|
1539 | CC_UNLOCK(cc); |
---|
1540 | return (cancelled); |
---|
1541 | } |
---|
1542 | |
---|
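/*
 * Illustrative sketch (hypothetical consumer code, not part of this
 * file): how the result of callout_stop()/callout_drain() - both thin
 * wrappers around _callout_stop_safe() above - is typically checked.
 * The names example_stop_co and example_teardown are made up.
 */
#if 0
static struct callout example_stop_co;

static void
example_teardown(void)
{
	int ret;

	/*
	 * Unlike callout_stop(), callout_drain() also waits for a
	 * currently executing handler to finish before returning.
	 */
	ret = callout_drain(&example_stop_co);
	if (ret > 0)
		printf("cancelled while pending\n");
	else if (ret == 0)
		printf("could not cancel; drained instead\n");
	else
		printf("was neither scheduled nor running\n");
}
#endif
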
void
callout_init(struct callout *c, int mpsafe)
{
	bzero(c, sizeof *c);
	if (mpsafe) {
		c->c_lock = NULL;
		c->c_iflags = CALLOUT_RETURNUNLOCKED;
	} else {
		c->c_lock = &Giant.lock_object;
		c->c_iflags = 0;
	}
	c->c_cpu = timeout_cpu;
}

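/*
 * Usage sketch (hypothetical names, not part of this file): initialize
 * an MP-safe callout and arm it to fire once per second.
 */
#if 0
static struct callout example_co;

static void
example_timeout(void *arg)
{

	/* Runs from the callout context without any lock held (mpsafe). */
	callout_reset(&example_co, hz, example_timeout, arg);	/* re-arm */
}

static void
example_start(void *arg)
{

	callout_init(&example_co, 1);	/* 1 = MP-safe, no Giant */
	callout_reset(&example_co, hz, example_timeout, arg);
}
#endif
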
void
_callout_init_lock(struct callout *c, struct lock_object *lock, int flags)
{
	bzero(c, sizeof *c);
	c->c_lock = lock;
	KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
	    ("callout_init_lock: bad flags %d", flags));
	KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
	    ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
	KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
	    (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
	    __func__));
	c->c_iflags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
	c->c_cpu = timeout_cpu;
}

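/*
 * Usage sketch (hypothetical names, not part of this file): consumers
 * normally reach _callout_init_lock() through the callout_init_mtx()
 * macro, which associates a mutex with the callout so the handler is
 * invoked with that mutex held.
 */
#if 0
static struct mtx example_mtx;
static struct callout example_mtx_co;

static void
example_locked_timeout(void *arg)
{

	/* The callout code acquired example_mtx before calling us. */
	mtx_assert(&example_mtx, MA_OWNED);
}

static void
example_mtx_start(void *arg)
{

	mtx_init(&example_mtx, "example", NULL, MTX_DEF);
	callout_init_mtx(&example_mtx_co, &example_mtx, 0);
	callout_reset(&example_mtx_co, hz / 10, example_locked_timeout, arg);
}
#endif
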
#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend fire upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(struct timeval *time_change)
{
	struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = howmany(time_change->tv_sec * 1000000 +
		    time_change->tv_usec, tick) + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
		    howmany(time_change->tv_usec, tick) + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	CC_LOCK(cc);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	CC_UNLOCK(cc);
}
#endif /* APM_FIXUP_CALLTODO */

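/*
 * Worked example for the conversion in adjust_timeout_calltodo() above
 * (illustrative numbers only): with hz = 1000, i.e. tick = 1000 us, a
 * suspend of time_change = { .tv_sec = 2, .tv_usec = 500000 } takes the
 * first branch and yields
 *
 *   delta_ticks = howmany(2 * 1000000 + 500000, 1000) + 1 = 2501
 *
 * ticks, which are then subtracted from the head of the calltodo list.
 */
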
static int
flssbt(sbintime_t sbt)
{

	sbt += (uint64_t)sbt >> 1;
	if (sizeof(long) >= sizeof(sbintime_t))
		return (flsl(sbt));
	if (sbt >= SBT_1S)
		return (flsl(((uint64_t)sbt) >> 32) + 32);
	return (flsl(sbt));
}

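/*
 * Worked example (illustrative): flssbt() adds half of its argument
 * before taking fls(), so a value is binned to its *nearest* power of
 * two instead of being truncated down.  For instance flssbt(SBT_1S)
 * returns 33, since 1.5 * 2^32 still has its most significant bit at
 * 2^32, while values at or above (4/3) * 2^32 spill into bucket 34.
 */
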
/*
 * Dump an immediate statistics snapshot of the scheduled callouts.
 */
static int
sysctl_kern_callout_stat(SYSCTL_HANDLER_ARGS)
{
	struct callout *tmp;
	struct callout_cpu *cc;
	struct callout_list *sc;
	sbintime_t maxpr, maxt, medpr, medt, now, spr, st, t;
	int ct[64], cpr[64], ccpbk[32];
	int error, val, i, count, tcum, pcum, maxc, c, medc;
#ifdef SMP
	int cpu;
#endif

	val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	count = maxc = 0;
	st = spr = maxt = maxpr = 0;
	bzero(ccpbk, sizeof(ccpbk));
	bzero(ct, sizeof(ct));
	bzero(cpr, sizeof(cpr));
	now = sbinuptime();
#ifdef SMP
	CPU_FOREACH(cpu) {
		cc = CC_CPU(cpu);
#else
		cc = CC_CPU(timeout_cpu);
#endif
		CC_LOCK(cc);
		for (i = 0; i < callwheelsize; i++) {
			sc = &cc->cc_callwheel[i];
			c = 0;
			LIST_FOREACH(tmp, sc, c_links.le) {
				c++;
				t = tmp->c_time - now;
				if (t < 0)
					t = 0;
				st += t / SBT_1US;
				spr += tmp->c_precision / SBT_1US;
				if (t > maxt)
					maxt = t;
				if (tmp->c_precision > maxpr)
					maxpr = tmp->c_precision;
				ct[flssbt(t)]++;
				cpr[flssbt(tmp->c_precision)]++;
			}
			if (c > maxc)
				maxc = c;
			ccpbk[fls(c + c / 2)]++;
			count += c;
		}
		CC_UNLOCK(cc);
#ifdef SMP
	}
#endif

	for (i = 0, tcum = 0; i < 64 && tcum < count / 2; i++)
		tcum += ct[i];
	medt = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
	for (i = 0, pcum = 0; i < 64 && pcum < count / 2; i++)
		pcum += cpr[i];
	medpr = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
	for (i = 0, c = 0; i < 32 && c < count / 2; i++)
		c += ccpbk[i];
	medc = (i >= 2) ? (1 << (i - 2)) : 0;

	printf("Scheduled callouts statistics snapshot:\n");
	printf("  Callouts: %6d  Buckets: %6d*%-3d  Bucket size: 0.%06ds\n",
	    count, callwheelsize, mp_ncpus, 1000000 >> CC_HASH_SHIFT);
	printf("  C/Bk: med %5d         avg %6d.%06jd  max %6d\n",
	    medc,
	    count / callwheelsize / mp_ncpus,
	    (uint64_t)count * 1000000 / callwheelsize / mp_ncpus % 1000000,
	    maxc);
	printf("  Time: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
	    medt / SBT_1S, (medt & 0xffffffff) * 1000000 >> 32,
	    (st / count) / 1000000, (st / count) % 1000000,
	    maxt / SBT_1S, (maxt & 0xffffffff) * 1000000 >> 32);
	printf("  Prec: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
	    medpr / SBT_1S, (medpr & 0xffffffff) * 1000000 >> 32,
	    (spr / count) / 1000000, (spr / count) % 1000000,
	    maxpr / SBT_1S, (maxpr & 0xffffffff) * 1000000 >> 32);
	printf("  Distribution:       \tbuckets\t   time\t   tcum\t"
	    "   prec\t   pcum\n");
	for (i = 0, tcum = pcum = 0; i < 64; i++) {
		if (ct[i] == 0 && cpr[i] == 0)
			continue;
		t = (i != 0) ? (((sbintime_t)1) << (i - 1)) : 0;
		tcum += ct[i];
		pcum += cpr[i];
		printf("  %10jd.%06jds\t 2**%d\t%7d\t%7d\t%7d\t%7d\n",
		    t / SBT_1S, (t & 0xffffffff) * 1000000 >> 32,
		    i - 1 - (32 - CC_HASH_SHIFT),
		    ct[i], tcum, cpr[i], pcum);
	}
	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, callout_stat,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_callout_stat, "I",
    "Dump an immediate statistics snapshot of the scheduled callouts");

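/*
 * Usage sketch: the dump above is triggered by writing any value to the
 * OID, e.g. from a shell on a system with the sysctl(8) utility:
 *
 *   sysctl kern.callout_stat=1
 *
 * Reading the OID alone prints nothing, because the handler returns
 * early when req->newptr is NULL.
 */
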
#ifdef DDB
static void
_show_callout(struct callout *c)
{

	db_printf("callout %p\n", c);
#define	C_DB_PRINTF(f, e)	db_printf("   %s = " f "\n", #e, c->e);
	db_printf("   &c_links = %p\n", &(c->c_links));
	C_DB_PRINTF("%" PRId64, c_time);
	C_DB_PRINTF("%" PRId64, c_precision);
	C_DB_PRINTF("%p", c_arg);
	C_DB_PRINTF("%p", c_func);
	C_DB_PRINTF("%p", c_lock);
	C_DB_PRINTF("%#x", c_flags);
	C_DB_PRINTF("%#x", c_iflags);
	C_DB_PRINTF("%d", c_cpu);
#undef	C_DB_PRINTF
}

DB_SHOW_COMMAND(callout, db_show_callout)
{

	if (!have_addr) {
		db_printf("usage: show callout <struct callout *>\n");
		return;
	}

	_show_callout((struct callout *)addr);
}
#endif /* DDB */
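
/*
 * Usage sketch for the DDB command above (the address is made up):
 *
 *   db> show callout 0xdeadbeef
 *
 * dumps the fields of the given struct callout via _show_callout().
 */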