#include <machine/rtems-bsd-kernel-space.h>

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <rtems/bsd/local/opt_kdtrace.h>

#include <rtems/bsd/sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <rtems/bsd/sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>
#include <sys/smp.h>

#ifdef SMP
#include <machine/cpu.h>
#endif

#ifdef __rtems__
#define ncallout 16
#endif /* __rtems__ */
SDT_PROVIDER_DEFINE(callout_execute);
SDT_PROBE_DEFINE(callout_execute, kernel, , callout_start, callout-start);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_start, 0,
    "struct callout *");
SDT_PROBE_DEFINE(callout_execute, kernel, , callout_end, callout-end);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_end, 0,
    "struct callout *");

static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_lockcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
    "Average number of lock callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */
int callwheelsize, callwheelbits, callwheelmask;

/*
 * The callout cpu migration entity represents the information necessary
 * to describe a callout that is migrating to a new callout cpu.
 * This cached information is essential for deferring the migration when
 * the migrating callout is already running.
 */
struct cc_mig_ent {
#ifdef SMP
	void	(*ce_migration_func)(void *);
	void	*ce_migration_arg;
	int	ce_migration_cpu;
	int	ce_migration_ticks;
#endif
};

/*
 * There is one struct callout_cpu per cpu, holding all relevant
 * state for the callout processing thread on the individual CPU.
 * In particular:
 *	cc_ticks is incremented once per tick in callout_cpu().
 *	It tracks the global 'ticks' but in a way that the individual
 *	threads should not worry about races in the order in which
 *	hardclock() and hardclock_cpu() run on the various CPUs.
 *	cc_softclock is advanced in callout_cpu() to point to the
 *	first entry in cc_callwheel that may need handling. In turn,
 *	a softclock() is scheduled so it can serve the various entries i
 *	such that cc_softclock <= i <= cc_ticks.
 *	XXX maybe cc_softclock and cc_ticks should be volatile ?
 *
 *	cc_ticks is also used in callout_reset_cpu() to determine
 *	when the callout should be served.
 */
struct callout_cpu {
	struct cc_mig_ent	cc_migrating_entity;
	struct mtx		cc_lock;
	struct callout		*cc_callout;
	struct callout_tailq	*cc_callwheel;
	struct callout_list	cc_callfree;
	struct callout		*cc_next;
	struct callout		*cc_curr;
	void			*cc_cookie;
	int			cc_ticks;
	int			cc_softticks;
	int			cc_cancel;
	int			cc_waiting;
	int			cc_firsttick;
};

#ifdef SMP
#define	cc_migration_func	cc_migrating_entity.ce_migration_func
#define	cc_migration_arg	cc_migrating_entity.ce_migration_arg
#define	cc_migration_cpu	cc_migrating_entity.ce_migration_cpu
#define	cc_migration_ticks	cc_migrating_entity.ce_migration_ticks

struct callout_cpu cc_cpu[MAXCPU];
#define	CPUBLOCK	MAXCPU
#define	CC_CPU(cpu)	(&cc_cpu[(cpu)])
#define	CC_SELF()	CC_CPU(PCPU_GET(cpuid))
#else
struct callout_cpu cc_cpu;
#define	CC_CPU(cpu)	&cc_cpu
#define	CC_SELF()	&cc_cpu
#endif
#define	CC_LOCK(cc)	mtx_lock_spin(&(cc)->cc_lock)
#define	CC_UNLOCK(cc)	mtx_unlock_spin(&(cc)->cc_lock)
#define	CC_LOCK_ASSERT(cc)	mtx_assert(&(cc)->cc_lock, MA_OWNED)

static int timeout_cpu;
void (*callout_new_inserted)(int cpu, int ticks) = NULL;

static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");

/**
 * Locked by cc_lock:
 *   cc_curr         - If a callout is in progress, it is curr_callout.
 *                     If curr_callout is non-NULL, threads waiting in
 *                     callout_drain() will be woken up as soon as the
 *                     relevant callout completes.
 *   cc_cancel       - Changing to 1 with both callout_lock and c_lock held
 *                     guarantees that the current callout will not run.
 *                     The softclock() function sets this to 0 before it
 *                     drops callout_lock to acquire c_lock, and it calls
 *                     the handler only if curr_cancelled is still 0 after
 *                     c_lock is successfully acquired.
 *   cc_waiting      - If a thread is waiting in callout_drain(), then
 *                     callout_wait is nonzero.  Set only when
 *                     curr_callout is non-NULL.
 */

/*
 * Resets the migration entity tied to a specific callout cpu.
 */
static void
cc_cme_cleanup(struct callout_cpu *cc)
{

#ifdef SMP
	cc->cc_migration_cpu = CPUBLOCK;
	cc->cc_migration_ticks = 0;
	cc->cc_migration_func = NULL;
	cc->cc_migration_arg = NULL;
#endif
}

/*
 * Checks if migration is requested by a specific callout cpu.
 */
static int
cc_cme_migrating(struct callout_cpu *cc)
{

#ifdef SMP
	return (cc->cc_migration_cpu != CPUBLOCK);
#else
	return (0);
#endif
}

/*
 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
 *
 * This code is called very early in the kernel initialization sequence,
 * and may be called more than once.
 */
#ifdef __rtems__
static void rtems_bsd_timeout_init_early(void *);

static void callout_cpu_init(struct callout_cpu *);

static void
rtems_bsd_callout_timer(rtems_id id, void *arg)
{
	rtems_status_code sc;

	(void) arg;

	sc = rtems_timer_reset(id);
	BSD_ASSERT(sc == RTEMS_SUCCESSFUL);

	callout_tick();
}

static void
rtems_bsd_timeout_init_late(void *unused)
{
	rtems_status_code sc;
	rtems_id id;

	(void) unused;

	sc = rtems_timer_create(rtems_build_name('_', 'C', 'L', 'O'), &id);
	BSD_ASSERT(sc == RTEMS_SUCCESSFUL);

	sc = rtems_timer_server_fire_after(id, 1, rtems_bsd_callout_timer, NULL);
	BSD_ASSERT(sc == RTEMS_SUCCESSFUL);
}

SYSINIT(rtems_bsd_timeout_early, SI_SUB_VM, SI_ORDER_FIRST,
    rtems_bsd_timeout_init_early, NULL);

SYSINIT(rtems_bsd_timeout_late, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST,
    rtems_bsd_timeout_init_late, NULL);

static void
rtems_bsd_timeout_init_early(void *unused)
#else /* __rtems__ */
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
#endif /* __rtems__ */
{
	struct callout_cpu *cc;
#ifdef __rtems__
	caddr_t v;

	(void) unused;
#endif /* __rtems__ */

	timeout_cpu = PCPU_GET(cpuid);
	cc = CC_CPU(timeout_cpu);
	/*
	 * Calculate callout wheel size
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;
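	/*
	 * Sizing sketch (illustrative only): with the RTEMS default of
	 * ncallout = 16 the loop above leaves callwheelsize = 16,
	 * callwheelbits = 4 and callwheelmask = 0xf, so a callout expiring
	 * at tick t lands in bucket (t & 0xf), which is how callout_cc_add()
	 * and softclock() index cc_callwheel below.
	 */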

#ifdef __rtems__
	v = malloc(ncallout * sizeof(*cc->cc_callout) + callwheelsize
	    * sizeof(*cc->cc_callwheel), M_CALLOUT, M_ZERO | M_WAITOK);
#endif /* __rtems__ */
	cc->cc_callout = (struct callout *)v;
	v = (caddr_t)(cc->cc_callout + ncallout);
	cc->cc_callwheel = (struct callout_tailq *)v;
	v = (caddr_t)(cc->cc_callwheel + callwheelsize);
#ifndef __rtems__
	return(v);
#else /* __rtems__ */
	callout_cpu_init(cc);
#endif /* __rtems__ */
}

static void
callout_cpu_init(struct callout_cpu *cc)
{
	struct callout *c;
	int i;

	mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
	SLIST_INIT(&cc->cc_callfree);
	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&cc->cc_callwheel[i]);
	}
	cc_cme_cleanup(cc);
	if (cc->cc_callout == NULL)
		return;
	for (i = 0; i < ncallout; i++) {
		c = &cc->cc_callout[i];
		callout_init(c, 0);
		c->c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
}

#ifdef SMP
/*
 * Switches the cpu tied to a specific callout.
 * The function expects a locked incoming callout cpu and returns with the
 * new callout cpu locked.
 */
static struct callout_cpu *
callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
{
	struct callout_cpu *new_cc;

	MPASS(c != NULL && cc != NULL);
	CC_LOCK_ASSERT(cc);

	/*
	 * Avoid interrupts and preemption firing after the callout cpu
	 * is blocked in order to avoid deadlocks as the new thread
	 * may be willing to acquire the callout cpu lock.
	 */
	c->c_cpu = CPUBLOCK;
	spinlock_enter();
	CC_UNLOCK(cc);
	new_cc = CC_CPU(new_cpu);
	CC_LOCK(new_cc);
	spinlock_exit();
	c->c_cpu = new_cpu;
	return (new_cc);
}
#endif

#ifndef __rtems__
/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 * space.
 *
 * This code is called just once, after the space reserved for the
 * callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
	callout_cpu_init(CC_CPU(timeout_cpu));
}
#endif /* __rtems__ */

/*
 * Start standard softclock thread.
 */
static void
start_softclock(void *dummy)
{
	struct callout_cpu *cc;
#ifdef SMP
	int cpu;
#endif

	cc = CC_CPU(timeout_cpu);
	if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK,
	    INTR_MPSAFE, &cc->cc_cookie))
		panic("died while creating standard software ithreads");
#ifdef SMP
	CPU_FOREACH(cpu) {
		if (cpu == timeout_cpu)
			continue;
		cc = CC_CPU(cpu);
		if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
		    INTR_MPSAFE, &cc->cc_cookie))
			panic("died while creating standard software ithreads");
		cc->cc_callout = NULL;	/* Only cpu0 handles timeout(). */
		cc->cc_callwheel = malloc(
		    sizeof(struct callout_tailq) * callwheelsize, M_CALLOUT,
		    M_WAITOK);
		callout_cpu_init(cc);
	}
#endif
}

SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);

void
callout_tick(void)
{
	struct callout_cpu *cc;
	int need_softclock;
	int bucket;

	/*
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	need_softclock = 0;
	cc = CC_SELF();
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
	cc->cc_firsttick = cc->cc_ticks = ticks;
	for (; (cc->cc_softticks - cc->cc_ticks) <= 0; cc->cc_softticks++) {
		bucket = cc->cc_softticks & callwheelmask;
		if (!TAILQ_EMPTY(&cc->cc_callwheel[bucket])) {
			need_softclock = 1;
			break;
		}
	}
	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
	/*
	 * swi_sched acquires the thread lock, so we don't want to call it
	 * with cc_lock held; incorrect locking order.
	 */
	if (need_softclock)
		swi_sched(cc->cc_cookie, 0);
}

int
callout_tickstofirst(int limit)
{
	struct callout_cpu *cc;
	struct callout *c;
	struct callout_tailq *sc;
	int curticks;
	int skip = 1;

	cc = CC_SELF();
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
	curticks = cc->cc_ticks;
	while( skip < ncallout && skip < limit ) {
		sc = &cc->cc_callwheel[ (curticks+skip) & callwheelmask ];
		/* search scanning ticks */
		TAILQ_FOREACH( c, sc, c_links.tqe ){
			if (c->c_time - curticks <= ncallout)
				goto out;
		}
		skip++;
	}
out:
	cc->cc_firsttick = curticks + skip;
	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
	return (skip);
}

static struct callout_cpu *
callout_lock(struct callout *c)
{
	struct callout_cpu *cc;
	int cpu;

	for (;;) {
		cpu = c->c_cpu;
#ifdef SMP
		if (cpu == CPUBLOCK) {
			while (c->c_cpu == CPUBLOCK)
				cpu_spinwait();
			continue;
		}
#endif
		cc = CC_CPU(cpu);
		CC_LOCK(cc);
		if (cpu == c->c_cpu)
			break;
		CC_UNLOCK(cc);
	}
	return (cc);
}

static void
callout_cc_add(struct callout *c, struct callout_cpu *cc, int to_ticks,
    void (*func)(void *), void *arg, int cpu)
{

	CC_LOCK_ASSERT(cc);

	if (to_ticks <= 0)
		to_ticks = 1;
	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = func;
	c->c_time = ticks + to_ticks;
	TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask],
	    c, c_links.tqe);
	if ((c->c_time - cc->cc_firsttick) < 0 &&
	    callout_new_inserted != NULL) {
		cc->cc_firsttick = c->c_time;
		(*callout_new_inserted)(cpu,
		    to_ticks + (ticks - cc->cc_ticks));
	}
}

static void
callout_cc_del(struct callout *c, struct callout_cpu *cc)
{

	if ((c->c_flags & CALLOUT_LOCAL_ALLOC) == 0)
		return;
	c->c_func = NULL;
	SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
}

static void
softclock_call_cc(struct callout *c, struct callout_cpu *cc, int *mpcalls,
    int *lockcalls, int *gcalls)
{
	void (*c_func)(void *);
	void *c_arg;
	struct lock_class *class;
	struct lock_object *c_lock;
	int c_flags, sharedlock;
#ifdef SMP
	struct callout_cpu *new_cc;
	void (*new_func)(void *);
	void *new_arg;
	int new_cpu, new_ticks;
#endif
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

	KASSERT((c->c_flags & (CALLOUT_PENDING | CALLOUT_ACTIVE)) ==
	    (CALLOUT_PENDING | CALLOUT_ACTIVE),
	    ("softclock_call_cc: pend|act %p %x", c, c->c_flags));
	class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
	sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ? 0 : 1;
	c_lock = c->c_lock;
	c_func = c->c_func;
	c_arg = c->c_arg;
	c_flags = c->c_flags;
	if (c->c_flags & CALLOUT_LOCAL_ALLOC)
		c->c_flags = CALLOUT_LOCAL_ALLOC;
	else
		c->c_flags &= ~CALLOUT_PENDING;
	cc->cc_curr = c;
	cc->cc_cancel = 0;
	CC_UNLOCK(cc);
	if (c_lock != NULL) {
		class->lc_lock(c_lock, sharedlock);
		/*
		 * The callout may have been cancelled
		 * while we switched locks.
		 */
		if (cc->cc_cancel) {
			class->lc_unlock(c_lock);
			goto skip;
		}
		/* The callout cannot be stopped now. */
		cc->cc_cancel = 1;

		if (c_lock == &Giant.lock_object) {
			(*gcalls)++;
			CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
			    c, c_func, c_arg);
		} else {
			(*lockcalls)++;
			CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p",
			    c, c_func, c_arg);
		}
	} else {
		(*mpcalls)++;
		CTR3(KTR_CALLOUT, "callout mpsafe %p func %p arg %p",
		    c, c_func, c_arg);
	}
#ifdef DIAGNOSTIC
	binuptime(&bt1);
#endif
#ifndef __rtems__
	THREAD_NO_SLEEPING();
	SDT_PROBE(callout_execute, kernel, , callout_start, c, 0, 0, 0, 0);
#endif /* __rtems__ */
	c_func(c_arg);
#ifndef __rtems__
	SDT_PROBE(callout_execute, kernel, , callout_end, c, 0, 0, 0, 0);
	THREAD_SLEEPING_OK();
#endif /* __rtems__ */
#ifdef DIAGNOSTIC
	binuptime(&bt2);
	bintime_sub(&bt2, &bt1);
	if (bt2.frac > maxdt) {
		if (lastfunc != c_func || bt2.frac > maxdt * 2) {
			bintime2timespec(&bt2, &ts2);
			printf(
		"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
			    c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
		}
		maxdt = bt2.frac;
		lastfunc = c_func;
	}
#endif
	CTR1(KTR_CALLOUT, "callout %p finished", c);
	if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
		class->lc_unlock(c_lock);
skip:
	CC_LOCK(cc);
	KASSERT(cc->cc_curr == c, ("mishandled cc_curr"));
	cc->cc_curr = NULL;
	if (cc->cc_waiting) {
		/*
		 * There is someone waiting for the
		 * callout to complete.
		 * If the callout was scheduled for
		 * migration just cancel it.
		 */
		if (cc_cme_migrating(cc)) {
			cc_cme_cleanup(cc);

			/*
			 * It should be asserted here that the callout is not
			 * destroyed, but that is not easy.
			 */
			c->c_flags &= ~CALLOUT_DFRMIGRATION;
		}
		cc->cc_waiting = 0;
		CC_UNLOCK(cc);
		wakeup(&cc->cc_waiting);
		CC_LOCK(cc);
	} else if (cc_cme_migrating(cc)) {
		KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0,
		    ("Migrating legacy callout %p", c));
#ifdef SMP
		/*
		 * If the callout was scheduled for
		 * migration just perform it now.
		 */
		new_cpu = cc->cc_migration_cpu;
		new_ticks = cc->cc_migration_ticks;
		new_func = cc->cc_migration_func;
		new_arg = cc->cc_migration_arg;
		cc_cme_cleanup(cc);

		/*
		 * It should be asserted here that the callout is not
		 * destroyed, but that is not easy.
		 *
		 * As first thing, handle deferred callout stops.
		 */
		if ((c->c_flags & CALLOUT_DFRMIGRATION) == 0) {
			CTR3(KTR_CALLOUT,
			    "deferred cancelled %p func %p arg %p",
			    c, new_func, new_arg);
			callout_cc_del(c, cc);
			return;
		}
		c->c_flags &= ~CALLOUT_DFRMIGRATION;

		new_cc = callout_cpu_switch(c, cc, new_cpu);
		callout_cc_add(c, new_cc, new_ticks, new_func, new_arg,
		    new_cpu);
		CC_UNLOCK(new_cc);
		CC_LOCK(cc);
#else
		panic("migration should not happen");
#endif
	}
	/*
	 * If the current callout is locally allocated (from
	 * timeout(9)) then put it on the freelist.
	 *
	 * Note: we need to check the cached copy of c_flags because
	 * if it was not local, then it's not safe to deref the
	 * callout pointer.
	 */
	KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0 ||
	    c->c_flags == CALLOUT_LOCAL_ALLOC,
	    ("corrupted callout"));
	if (c_flags & CALLOUT_LOCAL_ALLOC)
		callout_cc_del(c, cc);
}

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *arg)
{
	struct callout_cpu *cc;
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int lockcalls;
	int gcalls;

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	lockcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	cc = (struct callout_cpu *)arg;
	CC_LOCK(cc);
	while (cc->cc_softticks - 1 != cc->cc_ticks) {
		/*
		 * cc_softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = cc->cc_softticks;
		cc->cc_softticks++;
		bucket = &cc->cc_callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c != NULL) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					cc->cc_next = c;
					/* Give interrupts a chance. */
					CC_UNLOCK(cc);
					;	/* nothing */
					CC_LOCK(cc);
					c = cc->cc_next;
					steps = 0;
				}
			} else {
				cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				softclock_call_cc(c, cc, &mpcalls,
				    &lockcalls, &gcalls);
				steps = 0;
				c = cc->cc_next;
			}
		}
	}
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	cc->cc_next = NULL;
	CC_UNLOCK(cc);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
struct callout_handle
timeout(ftn, arg, to_ticks)
	timeout_t *ftn;
	void *arg;
	int to_ticks;
{
	struct callout_cpu *cc;
	struct callout *new;
	struct callout_handle handle;

	cc = CC_CPU(timeout_cpu);
	CC_LOCK(cc);
	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&cc->cc_callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
	callout_reset(new, to_ticks, ftn, arg);
	handle.callout = new;
	CC_UNLOCK(cc);

	return (handle);
}

void
untimeout(ftn, arg, handle)
	timeout_t *ftn;
	void *arg;
	struct callout_handle handle;
{
	struct callout_cpu *cc;

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	cc = callout_lock(handle.callout);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	CC_UNLOCK(cc);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}
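
/*
 * Legacy timeout(9) usage sketch (illustrative only; "foo_tick" and "sc"
 * are hypothetical names, not defined in this file):
 *
 *	static struct callout_handle h;
 *
 *	callout_handle_init(&h);
 *	h = timeout(foo_tick, sc, hz);	(run foo_tick(sc) after about 1 s)
 *	untimeout(foo_tick, sc, h);	(cancel it while it is still pending)
 *
 * New code should prefer the callout_*() interface below, where clients
 * allocate their own callout structures instead of drawing from the fixed
 * table managed here.
 */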

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
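/*
 * Typical client usage sketch (illustrative only; "foo_softc", "foo_timeout"
 * and "foo_detach" are hypothetical, and callout_init_mtx(), callout_reset()
 * and callout_drain() are the <sys/callout.h> wrappers):
 *
 *	struct foo_softc {
 *		struct mtx	foo_mtx;
 *		struct callout	foo_ch;
 *	};
 *
 *	In attach:
 *		mtx_init(&sc->foo_mtx, "foo", NULL, MTX_DEF);
 *		callout_init_mtx(&sc->foo_ch, &sc->foo_mtx, 0);
 *		callout_reset(&sc->foo_ch, hz, foo_timeout, sc);
 *
 *	In foo_timeout(void *arg) the mutex is already held; call
 *	callout_deactivate(&sc->foo_ch) once the event has been serviced,
 *	or callout_reset() again to rearm it.
 *
 *	In foo_detach, callout_drain(&sc->foo_ch) waits for a running
 *	instance to finish before the softc and mutex are destroyed.
 */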
int
callout_reset_on(struct callout *c, int to_ticks, void (*ftn)(void *),
    void *arg, int cpu)
{
	struct callout_cpu *cc;
	int cancelled = 0;

	/*
	 * Don't allow migration of pre-allocated callouts lest they
	 * become unbalanced.
	 */
	if (c->c_flags & CALLOUT_LOCAL_ALLOC)
		cpu = c->c_cpu;
	cc = callout_lock(c);
	if (cc->cc_curr == c) {
		/*
		 * We're being asked to reschedule a callout which is
		 * currently in progress.  If there is a lock then we
		 * can cancel the callout if it has not really started.
		 */
		if (c->c_lock != NULL && !cc->cc_cancel)
			cancelled = cc->cc_cancel = 1;
		if (cc->cc_waiting) {
			/*
			 * Someone has called callout_drain to kill this
			 * callout.  Don't reschedule.
			 */
			CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
			    cancelled ? "cancelled" : "failed to cancel",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			return (cancelled);
		}
	}
	if (c->c_flags & CALLOUT_PENDING) {
		if (cc->cc_next == c) {
			cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
		}
		TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
		    c_links.tqe);

		cancelled = 1;
		c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
	}

#ifdef SMP
	/*
	 * If the callout must migrate try to perform it immediately.
	 * If the callout is currently running, just defer the migration
	 * to a more appropriate moment.
	 */
	if (c->c_cpu != cpu) {
		if (cc->cc_curr == c) {
			cc->cc_migration_cpu = cpu;
			cc->cc_migration_ticks = to_ticks;
			cc->cc_migration_func = ftn;
			cc->cc_migration_arg = arg;
			c->c_flags |= CALLOUT_DFRMIGRATION;
			CTR5(KTR_CALLOUT,
			    "migration of %p func %p arg %p in %d to %u deferred",
			    c, c->c_func, c->c_arg, to_ticks, cpu);
			CC_UNLOCK(cc);
			return (cancelled);
		}
		cc = callout_cpu_switch(c, cc, cpu);
	}
#endif

	callout_cc_add(c, cc, to_ticks, ftn, arg, cpu);
	CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
	    cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
	CC_UNLOCK(cc);

	return (cancelled);
}

/*
 * Common idioms that can be optimized in the future.
 */
int
callout_schedule_on(struct callout *c, int to_ticks, int cpu)
{
	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu);
}

int
callout_schedule(struct callout *c, int to_ticks)
{
	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
}

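/*
 * _callout_stop_safe() backs both callout_stop() and callout_drain(): in
 * <sys/callout.h> they expand to _callout_stop_safe(c, 0) and
 * _callout_stop_safe(c, 1) respectively, so "safe" selects whether the
 * caller is willing to sleep until a currently running handler finishes.
 */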
int
_callout_stop_safe(c, safe)
	struct callout *c;
	int safe;
{
	struct callout_cpu *cc, *old_cc;
	struct lock_class *class;
#ifndef __rtems__
	int use_lock, sq_locked;
#else /* __rtems__ */
	int use_lock;
#endif /* __rtems__ */

	/*
	 * Some old subsystems don't hold Giant while running a callout_stop(),
	 * so just discard this check for the moment.
	 */
	if (!safe && c->c_lock != NULL) {
		if (c->c_lock == &Giant.lock_object)
			use_lock = mtx_owned(&Giant);
		else {
			use_lock = 1;
			class = LOCK_CLASS(c->c_lock);
			class->lc_assert(c->c_lock, LA_XLOCKED);
		}
	} else
		use_lock = 0;

#ifndef __rtems__
	sq_locked = 0;
	old_cc = NULL;
again:
#endif /* __rtems__ */
	cc = callout_lock(c);

#ifndef __rtems__
	/*
	 * If the callout was migrating while the callout cpu lock was
	 * dropped, just drop the sleepqueue lock and check the states
	 * again.
	 */
	if (sq_locked != 0 && cc != old_cc) {
#ifdef SMP
		CC_UNLOCK(cc);
		sleepq_release(&old_cc->cc_waiting);
		sq_locked = 0;
		old_cc = NULL;
		goto again;
#else
		panic("migration should not happen");
#endif
	}
#endif /* __rtems__ */

	/*
	 * If the callout isn't pending, it's not on the queue, so
	 * don't attempt to remove it from the queue.  We can try to
	 * stop it by other means however.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;

		/*
		 * If it wasn't on the queue and it isn't the current
		 * callout, then we can't stop it, so just bail.
		 */
		if (cc->cc_curr != c) {
			CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
#ifndef __rtems__
			if (sq_locked)
				sleepq_release(&cc->cc_waiting);
#endif /* __rtems__ */
			return (0);
		}

		if (safe) {
			/*
			 * The current callout is running (or just
			 * about to run) and blocking is allowed, so
			 * just wait for the current invocation to
			 * finish.
			 */
			while (cc->cc_curr == c) {
#ifndef __rtems__

				/*
				 * Use direct calls to sleepqueue interface
				 * instead of cv/msleep in order to avoid
				 * a LOR between cc_lock and sleepqueue
				 * chain spinlocks.  This piece of code
				 * emulates a msleep_spin() call actually.
				 *
				 * If we already have the sleepqueue chain
				 * locked, then we can safely block.  If we
				 * don't already have it locked, however,
				 * we have to drop the cc_lock to lock
				 * it.  This opens several races, so we
				 * restart at the beginning once we have
				 * both locks.  If nothing has changed, then
				 * we will end up back here with sq_locked
				 * set.
				 */
				if (!sq_locked) {
					CC_UNLOCK(cc);
					sleepq_lock(&cc->cc_waiting);
					sq_locked = 1;
					old_cc = cc;
					goto again;
				}

				/*
				 * Migration could be cancelled here, but
				 * as long as it is still not sure when it
				 * will be packed up, just let softclock()
				 * take care of it.
				 */
				cc->cc_waiting = 1;
				DROP_GIANT();
				CC_UNLOCK(cc);
				sleepq_add(&cc->cc_waiting,
				    &cc->cc_lock.lock_object, "codrain",
				    SLEEPQ_SLEEP, 0);
				sleepq_wait(&cc->cc_waiting, 0);
				sq_locked = 0;
				old_cc = NULL;

				/* Reacquire locks previously released. */
				PICKUP_GIANT();
				CC_LOCK(cc);
#else /* __rtems__ */
				/*
				 * On RTEMS the LOR problem above does not
				 * exist since here we do not use
				 * sleepq_set_timeout() and instead use the
				 * RTEMS watchdog.
				 */
				cc->cc_waiting = 1;
				msleep_spin(&cc->cc_waiting, &cc->cc_lock,
				    "codrain", 0);
#endif /* __rtems__ */
			}
		} else if (use_lock && !cc->cc_cancel) {
			/*
			 * The current callout is waiting for its
			 * lock which we hold.  Cancel the callout
			 * and return.  After our caller drops the
			 * lock, the callout will be skipped in
			 * softclock().
			 */
			cc->cc_cancel = 1;
			CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			KASSERT(!cc_cme_migrating(cc),
			    ("callout wrongly scheduled for migration"));
			CC_UNLOCK(cc);
			KASSERT(!sq_locked, ("sleepqueue chain locked"));
			return (1);
		} else if ((c->c_flags & CALLOUT_DFRMIGRATION) != 0) {
			c->c_flags &= ~CALLOUT_DFRMIGRATION;
			CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			return (1);
		}
		CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
		    c, c->c_func, c->c_arg);
		CC_UNLOCK(cc);
		KASSERT(!sq_locked, ("sleepqueue chain still locked"));
		return (0);
	}
#ifndef __rtems__
	if (sq_locked)
		sleepq_release(&cc->cc_waiting);
#endif /* __rtems__ */

	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
	    c, c->c_func, c->c_arg);
	if (cc->cc_next == c)
		cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
	TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
	    c_links.tqe);
	callout_cc_del(c, cc);

	CC_UNLOCK(cc);
	return (1);
}

void
callout_init(c, mpsafe)
	struct callout *c;
	int mpsafe;
{
	bzero(c, sizeof *c);
	if (mpsafe) {
		c->c_lock = NULL;
		c->c_flags = CALLOUT_RETURNUNLOCKED;
	} else {
		c->c_lock = &Giant.lock_object;
		c->c_flags = 0;
	}
	c->c_cpu = timeout_cpu;
}

void
_callout_init_lock(c, lock, flags)
	struct callout *c;
	struct lock_object *lock;
	int flags;
{
	bzero(c, sizeof *c);
	c->c_lock = lock;
	KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
	    ("callout_init_lock: bad flags %d", flags));
	KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
	    ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
	KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
	    (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
	    __func__));
	c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
	c->c_cpu = timeout_cpu;
}

#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(time_change)
	struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	CC_LOCK(cc);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	CC_UNLOCK(cc);

	return;
}
#endif /* APM_FIXUP_CALLTODO */