1 | #include <machine/rtems-bsd-kernel-space.h> |
---|
2 | |
---|
3 | /*- |
---|
4 | * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org> |
---|
5 | * Copyright (c) 2015 embedded brains GmbH <rtems@embedded-brains.de> |
---|
6 | * All rights reserved. |
---|
7 | * |
---|
8 | * Redistribution and use in source and binary forms, with or without |
---|
9 | * modification, are permitted provided that the following conditions |
---|
10 | * are met: |
---|
11 | * 1. Redistributions of source code must retain the above copyright |
---|
12 | * notice, this list of conditions and the following disclaimer. |
---|
13 | * 2. Redistributions in binary form must reproduce the above copyright |
---|
14 | * notice, this list of conditions and the following disclaimer in the |
---|
15 | * documentation and/or other materials provided with the distribution. |
---|
16 | * |
---|
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
---|
18 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
---|
19 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
---|
20 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE |
---|
21 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
---|
22 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
---|
23 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
---|
24 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
---|
25 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
---|
26 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
---|
27 | * SUCH DAMAGE. |
---|
28 | */ |
---|
29 | |
---|
30 | /* |
---|
31 | * Implementation of sleep queues used to hold queue of threads blocked on |
---|
 * a wait channel.  Sleep queues differ from turnstiles in that wait
---|
33 | * channels are not owned by anyone, so there is no priority propagation. |
---|
34 | * Sleep queues can also provide a timeout and can also be interrupted by |
---|
35 | * signals. That said, there are several similarities between the turnstile |
---|
36 | * and sleep queue implementations. (Note: turnstiles were implemented |
---|
37 | * first.) For example, both use a hash table of the same size where each |
---|
38 | * bucket is referred to as a "chain" that contains both a spin lock and |
---|
39 | * a linked list of queues. An individual queue is located by using a hash |
---|
40 | * to pick a chain, locking the chain, and then walking the chain searching |
---|
41 | * for the queue. This means that a wait channel object does not need to |
---|
 * embed its queue head just as locks do not embed their turnstile queue
---|
43 | * head. Threads also carry around a sleep queue that they lend to the |
---|
44 | * wait channel when blocking. Just as in turnstiles, the queue includes |
---|
45 | * a free list of the sleep queues of other threads blocked on the same |
---|
46 | * wait channel in the case of multiple waiters. |
---|
47 | * |
---|
48 | * Some additional functionality provided by sleep queues include the |
---|
49 | * ability to set a timeout. The timeout is managed using a per-thread |
---|
50 | * callout that resumes a thread if it is asleep. A thread may also |
---|
51 | * catch signals while it is asleep (aka an interruptible sleep). The |
---|
52 | * signal code uses sleepq_abort() to interrupt a sleeping thread. Finally, |
---|
53 | * sleep queues also provide some extra assertions. One is not allowed to |
---|
54 | * mix the sleep/wakeup and cv APIs for a given wait channel. Also, one |
---|
55 | * must consistently use the same lock to synchronize with a wait channel, |
---|
56 | * though this check is currently only a warning for sleep/wakeup due to |
---|
57 | * pre-existing abuse of that API. The same lock must also be held when |
---|
58 | * awakening threads, though that is currently only enforced for condition |
---|
59 | * variables. |
---|
60 | */ |
---|
61 | |
---|
62 | #include <sys/cdefs.h> |
---|
63 | __FBSDID("$FreeBSD$"); |
---|
64 | |
---|
65 | #include <rtems/bsd/local/opt_sleepqueue_profiling.h> |
---|
66 | #include <rtems/bsd/local/opt_ddb.h> |
---|
67 | #include <rtems/bsd/local/opt_sched.h> |
---|
68 | #include <rtems/bsd/local/opt_stack.h> |
---|
69 | |
---|
70 | #include <rtems/bsd/sys/param.h> |
---|
71 | #include <sys/systm.h> |
---|
72 | #include <rtems/bsd/sys/lock.h> |
---|
73 | #include <sys/kernel.h> |
---|
74 | #include <sys/ktr.h> |
---|
75 | #include <sys/mutex.h> |
---|
76 | #include <sys/proc.h> |
---|
77 | #include <sys/sbuf.h> |
---|
78 | #include <sys/sched.h> |
---|
79 | #include <sys/sdt.h> |
---|
80 | #include <sys/signalvar.h> |
---|
81 | #include <sys/sleepqueue.h> |
---|
82 | #include <sys/stack.h> |
---|
83 | #include <sys/sysctl.h> |
---|
84 | |
---|
85 | #include <vm/uma.h> |
---|
86 | |
---|
87 | #ifdef DDB |
---|
88 | #include <ddb/ddb.h> |
---|
89 | #endif |
---|
90 | #ifdef __rtems__ |
---|
91 | #include <machine/rtems-bsd-thread.h> |
---|
92 | #undef ticks |
---|
93 | #include <rtems/score/threadimpl.h> |
---|
94 | #include <rtems/score/watchdogimpl.h> |
---|
95 | #endif /* __rtems__ */ |
---|
96 | |
---|
97 | |
---|
98 | /* |
---|
99 | * Constants for the hash table of sleep queue chains. |
---|
100 | * SC_TABLESIZE must be a power of two for SC_MASK to work properly. |
---|
101 | */ |
---|
102 | #define SC_TABLESIZE 256 /* Must be power of 2. */ |
---|
103 | #define SC_MASK (SC_TABLESIZE - 1) |
---|
104 | #define SC_SHIFT 8 |
---|
105 | #define SC_HASH(wc) ((((uintptr_t)(wc) >> SC_SHIFT) ^ (uintptr_t)(wc)) & \ |
---|
106 | SC_MASK) |
---|
107 | #define SC_LOOKUP(wc) &sleepq_chains[SC_HASH(wc)] |
---|
108 | #define NR_SLEEPQS 2 |
---|
109 | /* |
---|
 * There are two different lists of sleep queues.  Both lists are connected
---|
111 | * via the sq_hash entries. The first list is the sleep queue chain list |
---|
112 | * that a sleep queue is on when it is attached to a wait channel. The |
---|
113 | * second list is the free list hung off of a sleep queue that is attached |
---|
114 | * to a wait channel. |
---|
115 | * |
---|
116 | * Each sleep queue also contains the wait channel it is attached to, the |
---|
117 | * list of threads blocked on that wait channel, flags specific to the |
---|
118 | * wait channel, and the lock used to synchronize with a wait channel. |
---|
119 | * The flags are used to catch mismatches between the various consumers |
---|
120 | * of the sleep queue API (e.g. sleep/wakeup and condition variables). |
---|
121 | * The lock pointer is only used when invariants are enabled for various |
---|
122 | * debugging checks. |
---|
123 | * |
---|
124 | * Locking key: |
---|
125 | * c - sleep queue chain lock |
---|
126 | */ |
---|
127 | struct sleepqueue { |
---|
128 | TAILQ_HEAD(, thread) sq_blocked[NR_SLEEPQS]; /* (c) Blocked threads. */ |
---|
129 | u_int sq_blockedcnt[NR_SLEEPQS]; /* (c) N. of blocked threads. */ |
---|
130 | LIST_ENTRY(sleepqueue) sq_hash; /* (c) Chain and free list. */ |
---|
131 | LIST_HEAD(, sleepqueue) sq_free; /* (c) Free queues. */ |
---|
132 | void *sq_wchan; /* (c) Wait channel. */ |
---|
133 | int sq_type; /* (c) Queue type. */ |
---|
134 | #ifdef INVARIANTS |
---|
135 | struct lock_object *sq_lock; /* (c) Associated lock. */ |
---|
136 | #endif |
---|
137 | }; |
---|
138 | |
---|
139 | struct sleepqueue_chain { |
---|
140 | LIST_HEAD(, sleepqueue) sc_queues; /* List of sleep queues. */ |
---|
141 | struct mtx sc_lock; /* Spin lock for this chain. */ |
---|
142 | #ifdef SLEEPQUEUE_PROFILING |
---|
143 | u_int sc_depth; /* Length of sc_queues. */ |
---|
144 | u_int sc_max_depth; /* Max length of sc_queues. */ |
---|
145 | #endif |
---|
146 | }; |
---|
147 | |
---|
148 | #ifdef SLEEPQUEUE_PROFILING |
---|
149 | u_int sleepq_max_depth; |
---|
150 | static SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD, 0, "sleepq profiling"); |
---|
151 | static SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains, CTLFLAG_RD, 0, |
---|
152 | "sleepq chain stats"); |
---|
153 | SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth, |
---|
154 | 0, "maxmimum depth achieved of a single chain"); |
---|
155 | |
---|
156 | static void sleepq_profile(const char *wmesg); |
---|
157 | static int prof_enabled; |
---|
158 | #endif |
---|
159 | static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE]; |
---|
160 | static uma_zone_t sleepq_zone; |
---|
161 | |
---|
162 | /* |
---|
163 | * Prototypes for non-exported routines. |
---|
164 | */ |
---|
165 | #ifndef __rtems__ |
---|
166 | static int sleepq_catch_signals(void *wchan, int pri); |
---|
167 | static int sleepq_check_signals(void); |
---|
168 | static int sleepq_check_timeout(void); |
---|
169 | #endif /* __rtems__ */ |
---|
170 | #ifdef INVARIANTS |
---|
171 | static void sleepq_dtor(void *mem, int size, void *arg); |
---|
172 | #endif |
---|
173 | static int sleepq_init(void *mem, int size, int flags); |
---|
174 | static int sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, |
---|
175 | int pri); |
---|
176 | static void sleepq_switch(void *wchan, int pri); |
---|
177 | #ifndef __rtems__ |
---|
178 | static void sleepq_timeout(void *arg); |
---|
179 | #else /* __rtems__ */ |
---|
180 | static void sleepq_timeout(Watchdog_Control *watchdog); |
---|
181 | #endif /* __rtems__ */ |
---|
182 | |
---|
183 | SDT_PROBE_DECLARE(sched, , , sleep); |
---|
184 | SDT_PROBE_DECLARE(sched, , , wakeup); |
---|
185 | |
---|
186 | /* |
---|
187 | * Initialize SLEEPQUEUE_PROFILING specific sysctl nodes. |
---|
188 | * Note that it must happen after sleepinit() has been fully executed, so |
---|
189 | * it must happen after SI_SUB_KMEM SYSINIT() subsystem setup. |
---|
190 | */ |
---|
191 | #ifdef SLEEPQUEUE_PROFILING |
---|
192 | static void |
---|
193 | init_sleepqueue_profiling(void) |
---|
194 | { |
---|
195 | char chain_name[10]; |
---|
196 | struct sysctl_oid *chain_oid; |
---|
197 | u_int i; |
---|
198 | |
---|
199 | for (i = 0; i < SC_TABLESIZE; i++) { |
---|
200 | snprintf(chain_name, sizeof(chain_name), "%u", i); |
---|
201 | chain_oid = SYSCTL_ADD_NODE(NULL, |
---|
202 | SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO, |
---|
203 | chain_name, CTLFLAG_RD, NULL, "sleepq chain stats"); |
---|
204 | SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO, |
---|
205 | "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL); |
---|
206 | SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO, |
---|
207 | "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0, |
---|
208 | NULL); |
---|
209 | } |
---|
210 | } |
---|
211 | |
---|
212 | SYSINIT(sleepqueue_profiling, SI_SUB_LOCK, SI_ORDER_ANY, |
---|
213 | init_sleepqueue_profiling, NULL); |
---|
214 | #endif |
---|
215 | |
---|
216 | /* |
---|
217 | * Early initialization of sleep queues that is called from the sleepinit() |
---|
218 | * SYSINIT. |
---|
219 | */ |
---|
220 | void |
---|
221 | init_sleepqueues(void) |
---|
222 | { |
---|
223 | int i; |
---|
224 | |
---|
225 | for (i = 0; i < SC_TABLESIZE; i++) { |
---|
226 | LIST_INIT(&sleepq_chains[i].sc_queues); |
---|
227 | mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL, |
---|
228 | MTX_SPIN | MTX_RECURSE); |
---|
229 | } |
---|
230 | sleepq_zone = uma_zcreate("SLEEPQUEUE", sizeof(struct sleepqueue), |
---|
231 | #ifdef INVARIANTS |
---|
232 | NULL, sleepq_dtor, sleepq_init, NULL, UMA_ALIGN_CACHE, 0); |
---|
233 | #else |
---|
234 | NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0); |
---|
235 | #endif |
---|
236 | |
---|
237 | #ifndef __rtems__ |
---|
238 | thread0.td_sleepqueue = sleepq_alloc(); |
---|
239 | #endif /* __rtems__ */ |
---|
240 | } |
---|
241 | |
---|
242 | /* |
---|
243 | * Get a sleep queue for a new thread. |
---|
244 | */ |
---|
245 | struct sleepqueue * |
---|
246 | sleepq_alloc(void) |
---|
247 | { |
---|
248 | |
---|
249 | return (uma_zalloc(sleepq_zone, M_WAITOK)); |
---|
250 | } |
---|
251 | |
---|
252 | /* |
---|
253 | * Free a sleep queue when a thread is destroyed. |
---|
254 | */ |
---|
255 | void |
---|
256 | sleepq_free(struct sleepqueue *sq) |
---|
257 | { |
---|
258 | |
---|
259 | uma_zfree(sleepq_zone, sq); |
---|
260 | } |
---|
261 | |
---|
262 | /* |
---|
263 | * Lock the sleep queue chain associated with the specified wait channel. |
---|
264 | */ |
---|
265 | void |
---|
266 | sleepq_lock(void *wchan) |
---|
267 | { |
---|
268 | struct sleepqueue_chain *sc; |
---|
269 | |
---|
270 | sc = SC_LOOKUP(wchan); |
---|
271 | mtx_lock_spin(&sc->sc_lock); |
---|
272 | } |
---|
273 | |
---|
274 | /* |
---|
275 | * Look up the sleep queue associated with a given wait channel in the hash |
---|
276 | * table locking the associated sleep queue chain. If no queue is found in |
---|
277 | * the table, NULL is returned. |
---|
278 | */ |
---|
279 | struct sleepqueue * |
---|
280 | sleepq_lookup(void *wchan) |
---|
281 | { |
---|
282 | struct sleepqueue_chain *sc; |
---|
283 | struct sleepqueue *sq; |
---|
284 | |
---|
285 | KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__)); |
---|
286 | sc = SC_LOOKUP(wchan); |
---|
287 | mtx_assert(&sc->sc_lock, MA_OWNED); |
---|
288 | LIST_FOREACH(sq, &sc->sc_queues, sq_hash) |
---|
289 | if (sq->sq_wchan == wchan) |
---|
290 | return (sq); |
---|
291 | return (NULL); |
---|
292 | } |
---|
293 | |
---|
294 | /* |
---|
295 | * Unlock the sleep queue chain associated with a given wait channel. |
---|
296 | */ |
---|
297 | void |
---|
298 | sleepq_release(void *wchan) |
---|
299 | { |
---|
300 | struct sleepqueue_chain *sc; |
---|
301 | |
---|
302 | sc = SC_LOOKUP(wchan); |
---|
303 | mtx_unlock_spin(&sc->sc_lock); |
---|
304 | } |
---|
305 | |
---|
306 | /* |
---|
307 | * Places the current thread on the sleep queue for the specified wait |
---|
308 | * channel. If INVARIANTS is enabled, then it associates the passed in |
---|
309 | * lock with the sleepq to make sure it is held when that sleep queue is |
---|
310 | * woken up. |
---|
311 | */ |
---|
312 | void |
---|
313 | sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags, |
---|
314 | int queue) |
---|
315 | { |
---|
316 | struct sleepqueue_chain *sc; |
---|
317 | struct sleepqueue *sq; |
---|
318 | struct thread *td; |
---|
319 | #ifdef __rtems__ |
---|
320 | ISR_lock_Context lock_context; |
---|
321 | Thread_Control *executing; |
---|
322 | struct thread *succ; |
---|
323 | #endif /* __rtems__ */ |
---|
324 | |
---|
325 | td = curthread; |
---|
326 | sc = SC_LOOKUP(wchan); |
---|
327 | mtx_assert(&sc->sc_lock, MA_OWNED); |
---|
328 | MPASS(td->td_sleepqueue != NULL); |
---|
329 | MPASS(wchan != NULL); |
---|
330 | MPASS((queue >= 0) && (queue < NR_SLEEPQS)); |
---|
331 | |
---|
332 | /* If this thread is not allowed to sleep, die a horrible death. */ |
---|
333 | #ifndef __rtems__ |
---|
334 | KASSERT(td->td_no_sleeping == 0, |
---|
335 | ("%s: td %p to sleep on wchan %p with sleeping prohibited", |
---|
336 | __func__, td, wchan)); |
---|
337 | #endif /* __rtems__ */ |
---|
338 | |
---|
339 | /* Look up the sleep queue associated with the wait channel 'wchan'. */ |
---|
340 | sq = sleepq_lookup(wchan); |
---|
341 | |
---|
342 | /* |
---|
343 | * If the wait channel does not already have a sleep queue, use |
---|
344 | * this thread's sleep queue. Otherwise, insert the current thread |
---|
345 | * into the sleep queue already in use by this wait channel. |
---|
346 | */ |
---|
347 | if (sq == NULL) { |
---|
348 | #ifdef INVARIANTS |
---|
349 | int i; |
---|
350 | |
---|
351 | sq = td->td_sleepqueue; |
---|
352 | for (i = 0; i < NR_SLEEPQS; i++) { |
---|
353 | KASSERT(TAILQ_EMPTY(&sq->sq_blocked[i]), |
---|
354 | ("thread's sleep queue %d is not empty", i)); |
---|
355 | KASSERT(sq->sq_blockedcnt[i] == 0, |
---|
356 | ("thread's sleep queue %d count mismatches", i)); |
---|
357 | } |
---|
358 | KASSERT(LIST_EMPTY(&sq->sq_free), |
---|
359 | ("thread's sleep queue has a non-empty free list")); |
---|
360 | KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer")); |
---|
361 | sq->sq_lock = lock; |
---|
362 | #endif |
---|
363 | #ifdef SLEEPQUEUE_PROFILING |
---|
364 | sc->sc_depth++; |
---|
365 | if (sc->sc_depth > sc->sc_max_depth) { |
---|
366 | sc->sc_max_depth = sc->sc_depth; |
---|
367 | if (sc->sc_max_depth > sleepq_max_depth) |
---|
368 | sleepq_max_depth = sc->sc_max_depth; |
---|
369 | } |
---|
370 | #endif |
---|
371 | sq = td->td_sleepqueue; |
---|
372 | LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash); |
---|
373 | sq->sq_wchan = wchan; |
---|
374 | sq->sq_type = flags & SLEEPQ_TYPE; |
---|
375 | } else { |
---|
376 | MPASS(wchan == sq->sq_wchan); |
---|
377 | MPASS(lock == sq->sq_lock); |
---|
378 | MPASS((flags & SLEEPQ_TYPE) == sq->sq_type); |
---|
379 | LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash); |
---|
380 | } |
---|
381 | thread_lock(td); |
---|
382 | #ifndef __rtems__ |
---|
383 | TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq); |
---|
384 | #else /* __rtems__ */ |
---|
385 | /* FIXME: This is broken with clustered scheduling */ |
---|
386 | succ = NULL; |
---|
387 | TAILQ_FOREACH(succ, &sq->sq_blocked[queue], td_slpq) { |
---|
388 | if (_Thread_Get_priority(td->td_thread) < |
---|
389 | _Thread_Get_priority(succ->td_thread)) |
---|
390 | break; |
---|
391 | } |
---|
392 | if (succ == NULL) |
---|
393 | TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq); |
---|
394 | else |
---|
395 | TAILQ_INSERT_BEFORE(succ, td, td_slpq); |
---|
396 | #endif /* __rtems__ */ |
---|
397 | sq->sq_blockedcnt[queue]++; |
---|
398 | #ifdef __rtems__ |
---|
399 | executing = td->td_thread; |
---|
400 | _Thread_Wait_acquire_default(executing, &lock_context); |
---|
401 | td->td_sq_state = TD_SQ_TIRED; |
---|
402 | #endif /* __rtems__ */ |
---|
403 | td->td_sleepqueue = NULL; |
---|
404 | td->td_sqqueue = queue; |
---|
405 | td->td_wchan = wchan; |
---|
406 | td->td_wmesg = wmesg; |
---|
407 | #ifndef __rtems__ |
---|
408 | if (flags & SLEEPQ_INTERRUPTIBLE) { |
---|
409 | td->td_flags |= TDF_SINTR; |
---|
410 | td->td_flags &= ~TDF_SLEEPABORT; |
---|
411 | } |
---|
412 | thread_unlock(td); |
---|
413 | #else /* __rtems__ */ |
---|
414 | _Thread_Wait_release_default(executing, &lock_context); |
---|
415 | #endif /* __rtems__ */ |
---|
416 | } |
---|
417 | |
---|
418 | /* |
---|
419 | * Sets a timeout that will remove the current thread from the specified |
---|
420 | * sleep queue after timo ticks if the thread has not already been awakened. |
---|
421 | */ |
---|
422 | void |
---|
423 | sleepq_set_timeout_sbt(void *wchan, sbintime_t sbt, sbintime_t pr, |
---|
424 | int flags) |
---|
425 | { |
---|
426 | #ifndef __rtems__ |
---|
427 | struct sleepqueue_chain *sc; |
---|
428 | struct thread *td; |
---|
429 | sbintime_t pr1; |
---|
430 | |
---|
431 | td = curthread; |
---|
432 | sc = SC_LOOKUP(wchan); |
---|
433 | mtx_assert(&sc->sc_lock, MA_OWNED); |
---|
434 | MPASS(TD_ON_SLEEPQ(td)); |
---|
435 | MPASS(td->td_sleepqueue == NULL); |
---|
436 | MPASS(wchan != NULL); |
---|
437 | if (cold && td == &thread0) |
---|
438 | panic("timed sleep before timers are working"); |
---|
439 | KASSERT(td->td_sleeptimo == 0, ("td %d %p td_sleeptimo %jx", |
---|
440 | td->td_tid, td, (uintmax_t)td->td_sleeptimo)); |
---|
441 | thread_lock(td); |
---|
442 | callout_when(sbt, pr, flags, &td->td_sleeptimo, &pr1); |
---|
443 | thread_unlock(td); |
---|
444 | callout_reset_sbt_on(&td->td_slpcallout, td->td_sleeptimo, pr1, |
---|
445 | sleepq_timeout, td, PCPU_GET(cpuid), flags | C_PRECALC | |
---|
446 | C_DIRECT_EXEC); |
---|
447 | #else /* __rtems__ */ |
---|
448 | Per_CPU_Control *cpu_self; |
---|
449 | Thread_Control *executing; |
---|
450 | |
---|
451 | cpu_self = _Thread_Dispatch_disable(); |
---|
452 | executing = _Per_CPU_Get_executing(cpu_self); |
---|
453 | BSD_ASSERT(_Watchdog_Get_state(&executing->Timer.Watchdog) == |
---|
454 | WATCHDOG_INACTIVE); |
---|
455 | |
---|
456 | if ((flags & C_ABSOLUTE) == 0) { |
---|
457 | _Thread_Timer_insert_relative(executing, cpu_self, sleepq_timeout, |
---|
458 | (Watchdog_Interval)((sbt + tick_sbt - 1) / tick_sbt)); |
---|
459 | } else { |
---|
460 | _Thread_Timer_insert_absolute(executing, cpu_self, sleepq_timeout, |
---|
461 | _Watchdog_Ticks_from_sbintime(sbt)); |
---|
462 | } |
---|
463 | |
---|
464 | _Thread_Dispatch_direct(cpu_self); |
---|
465 | #endif /* __rtems__ */ |
---|
466 | } |
---|
467 | |
---|
468 | /* |
---|
469 | * Return the number of actual sleepers for the specified queue. |
---|
470 | */ |
---|
471 | u_int |
---|
472 | sleepq_sleepcnt(void *wchan, int queue) |
---|
473 | { |
---|
474 | struct sleepqueue *sq; |
---|
475 | |
---|
476 | KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__)); |
---|
477 | MPASS((queue >= 0) && (queue < NR_SLEEPQS)); |
---|
478 | sq = sleepq_lookup(wchan); |
---|
479 | if (sq == NULL) |
---|
480 | return (0); |
---|
481 | return (sq->sq_blockedcnt[queue]); |
---|
482 | } |
---|
483 | |
---|
484 | #ifndef __rtems__ |
---|
485 | /* |
---|
486 | * Marks the pending sleep of the current thread as interruptible and |
---|
487 | * makes an initial check for pending signals before putting a thread |
---|
488 | * to sleep. Enters and exits with the thread lock held. Thread lock |
---|
489 | * may have transitioned from the sleepq lock to a run lock. |
---|
490 | */ |
---|
491 | static int |
---|
492 | sleepq_catch_signals(void *wchan, int pri) |
---|
493 | { |
---|
494 | struct sleepqueue_chain *sc; |
---|
495 | struct sleepqueue *sq; |
---|
496 | struct thread *td; |
---|
497 | struct proc *p; |
---|
498 | struct sigacts *ps; |
---|
499 | int sig, ret; |
---|
500 | |
---|
501 | td = curthread; |
---|
502 | p = curproc; |
---|
503 | sc = SC_LOOKUP(wchan); |
---|
504 | mtx_assert(&sc->sc_lock, MA_OWNED); |
---|
505 | MPASS(wchan != NULL); |
---|
506 | if ((td->td_pflags & TDP_WAKEUP) != 0) { |
---|
507 | td->td_pflags &= ~TDP_WAKEUP; |
---|
508 | ret = EINTR; |
---|
509 | thread_lock(td); |
---|
510 | goto out; |
---|
511 | } |
---|
512 | |
---|
513 | /* |
---|
514 | * See if there are any pending signals for this thread. If not |
---|
515 | * we can switch immediately. Otherwise do the signal processing |
---|
516 | * directly. |
---|
517 | */ |
---|
518 | thread_lock(td); |
---|
519 | if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) == 0) { |
---|
520 | sleepq_switch(wchan, pri); |
---|
521 | return (0); |
---|
522 | } |
---|
523 | thread_unlock(td); |
---|
524 | mtx_unlock_spin(&sc->sc_lock); |
---|
525 | CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)", |
---|
526 | (void *)td, (long)p->p_pid, td->td_name); |
---|
527 | PROC_LOCK(p); |
---|
528 | ps = p->p_sigacts; |
---|
529 | mtx_lock(&ps->ps_mtx); |
---|
530 | sig = cursig(td); |
---|
531 | if (sig == -1) { |
---|
532 | mtx_unlock(&ps->ps_mtx); |
---|
533 | KASSERT((td->td_flags & TDF_SBDRY) != 0, ("lost TDF_SBDRY")); |
---|
534 | KASSERT(TD_SBDRY_INTR(td), |
---|
535 | ("lost TDF_SERESTART of TDF_SEINTR")); |
---|
536 | KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) != |
---|
537 | (TDF_SEINTR | TDF_SERESTART), |
---|
538 | ("both TDF_SEINTR and TDF_SERESTART")); |
---|
539 | ret = TD_SBDRY_ERRNO(td); |
---|
540 | } else if (sig == 0) { |
---|
541 | mtx_unlock(&ps->ps_mtx); |
---|
542 | ret = thread_suspend_check(1); |
---|
543 | MPASS(ret == 0 || ret == EINTR || ret == ERESTART); |
---|
544 | } else { |
---|
545 | if (SIGISMEMBER(ps->ps_sigintr, sig)) |
---|
546 | ret = EINTR; |
---|
547 | else |
---|
548 | ret = ERESTART; |
---|
549 | mtx_unlock(&ps->ps_mtx); |
---|
550 | } |
---|
551 | /* |
---|
552 | * Lock the per-process spinlock prior to dropping the PROC_LOCK |
---|
553 | * to avoid a signal delivery race. PROC_LOCK, PROC_SLOCK, and |
---|
554 | * thread_lock() are currently held in tdsendsignal(). |
---|
555 | */ |
---|
556 | PROC_SLOCK(p); |
---|
557 | mtx_lock_spin(&sc->sc_lock); |
---|
558 | PROC_UNLOCK(p); |
---|
559 | thread_lock(td); |
---|
560 | PROC_SUNLOCK(p); |
---|
561 | if (ret == 0) { |
---|
562 | sleepq_switch(wchan, pri); |
---|
563 | return (0); |
---|
564 | } |
---|
565 | out: |
---|
566 | /* |
---|
567 | * There were pending signals and this thread is still |
---|
568 | * on the sleep queue, remove it from the sleep queue. |
---|
569 | */ |
---|
570 | if (TD_ON_SLEEPQ(td)) { |
---|
571 | sq = sleepq_lookup(wchan); |
---|
572 | if (sleepq_resume_thread(sq, td, 0)) { |
---|
573 | #ifdef INVARIANTS |
---|
574 | /* |
---|
575 | * This thread hasn't gone to sleep yet, so it |
---|
576 | * should not be swapped out. |
---|
577 | */ |
---|
578 | panic("not waking up swapper"); |
---|
579 | #endif |
---|
580 | } |
---|
581 | } |
---|
582 | mtx_unlock_spin(&sc->sc_lock); |
---|
583 | MPASS(td->td_lock != &sc->sc_lock); |
---|
584 | return (ret); |
---|
585 | } |
---|
586 | #endif /* __rtems__ */ |
---|
587 | |
---|
588 | /* |
---|
589 | * Switches to another thread if we are still asleep on a sleep queue. |
---|
590 | * Returns with thread lock. |
---|
591 | */ |
---|
592 | static void |
---|
593 | sleepq_switch(void *wchan, int pri) |
---|
594 | { |
---|
595 | #ifndef __rtems__ |
---|
596 | struct sleepqueue_chain *sc; |
---|
597 | struct sleepqueue *sq; |
---|
598 | struct thread *td; |
---|
599 | |
---|
600 | td = curthread; |
---|
601 | sc = SC_LOOKUP(wchan); |
---|
602 | mtx_assert(&sc->sc_lock, MA_OWNED); |
---|
603 | THREAD_LOCK_ASSERT(td, MA_OWNED); |
---|
604 | |
---|
605 | /* |
---|
606 | * If we have a sleep queue, then we've already been woken up, so |
---|
607 | * just return. |
---|
608 | */ |
---|
609 | if (td->td_sleepqueue != NULL) { |
---|
610 | mtx_unlock_spin(&sc->sc_lock); |
---|
611 | return; |
---|
612 | } |
---|
613 | |
---|
614 | /* |
---|
615 | * If TDF_TIMEOUT is set, then our sleep has been timed out |
---|
616 | * already but we are still on the sleep queue, so dequeue the |
---|
617 | * thread and return. |
---|
618 | */ |
---|
619 | if (td->td_flags & TDF_TIMEOUT) { |
---|
620 | MPASS(TD_ON_SLEEPQ(td)); |
---|
621 | sq = sleepq_lookup(wchan); |
---|
622 | if (sleepq_resume_thread(sq, td, 0)) { |
---|
623 | #ifdef INVARIANTS |
---|
624 | /* |
---|
625 | * This thread hasn't gone to sleep yet, so it |
---|
626 | * should not be swapped out. |
---|
627 | */ |
---|
628 | panic("not waking up swapper"); |
---|
629 | #endif |
---|
630 | } |
---|
631 | mtx_unlock_spin(&sc->sc_lock); |
---|
632 | return; |
---|
633 | } |
---|
634 | #ifdef SLEEPQUEUE_PROFILING |
---|
635 | if (prof_enabled) |
---|
636 | sleepq_profile(td->td_wmesg); |
---|
637 | #endif |
---|
638 | MPASS(td->td_sleepqueue == NULL); |
---|
639 | sched_sleep(td, pri); |
---|
640 | thread_lock_set(td, &sc->sc_lock); |
---|
641 | SDT_PROBE0(sched, , , sleep); |
---|
642 | TD_SET_SLEEPING(td); |
---|
643 | mi_switch(SW_VOL | SWT_SLEEPQ, NULL); |
---|
644 | KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING")); |
---|
645 | CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)", |
---|
646 | (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name); |
---|
647 | #else /* __rtems__ */ |
---|
648 | Thread_Control *executing; |
---|
649 | ISR_lock_Context lock_context; |
---|
650 | struct thread *td; |
---|
651 | bool block; |
---|
652 | bool remove; |
---|
653 | |
---|
654 | sleepq_release(wchan); |
---|
655 | |
---|
656 | executing = _Thread_Wait_acquire_default_for_executing(&lock_context); |
---|
657 | td = rtems_bsd_get_thread(executing); |
---|
658 | BSD_ASSERT(td != NULL); |
---|
659 | |
---|
660 | block = false; |
---|
661 | remove = false; |
---|
662 | switch (td->td_sq_state) { |
---|
663 | case TD_SQ_TIRED: |
---|
664 | BSD_ASSERT(td->td_wchan == wchan); |
---|
665 | td->td_sq_state = TD_SQ_SLEEPY; |
---|
666 | block = true; |
---|
667 | break; |
---|
668 | case TD_SQ_NIGHTMARE: |
---|
669 | BSD_ASSERT(td->td_wchan == wchan); |
---|
670 | td->td_sq_state = TD_SQ_PANIC; |
---|
671 | remove = true; |
---|
672 | break; |
---|
673 | default: |
---|
674 | BSD_ASSERT(td->td_wchan == NULL); |
---|
675 | BSD_ASSERT(td->td_sq_state == TD_SQ_WAKEUP); |
---|
676 | break; |
---|
677 | } |
---|
678 | |
---|
679 | if (block) { |
---|
680 | Per_CPU_Control *cpu_self; |
---|
681 | bool unblock; |
---|
682 | |
---|
683 | cpu_self = _Thread_Dispatch_disable_critical(&lock_context); |
---|
684 | _Thread_Wait_release_default(executing, &lock_context); |
---|
685 | |
---|
686 | _Thread_Set_state(executing, STATES_WAITING_FOR_BSD_WAKEUP); |
---|
687 | |
---|
688 | _Thread_Wait_acquire_default(executing, &lock_context); |
---|
689 | |
---|
690 | unblock = false; |
---|
691 | switch (td->td_sq_state) { |
---|
692 | case TD_SQ_NIGHTMARE: |
---|
693 | BSD_ASSERT(td->td_wchan == wchan); |
---|
694 | td->td_sq_state = TD_SQ_PANIC; |
---|
695 | unblock = true; |
---|
696 | remove = true; |
---|
697 | break; |
---|
698 | case TD_SQ_WAKEUP: |
---|
699 | BSD_ASSERT(td->td_wchan == NULL); |
---|
700 | unblock = true; |
---|
701 | break; |
---|
702 | default: |
---|
703 | BSD_ASSERT(td->td_wchan == wchan); |
---|
704 | BSD_ASSERT(td->td_sq_state == TD_SQ_SLEEPY); |
---|
705 | td->td_sq_state = TD_SQ_SLEEPING; |
---|
706 | break; |
---|
707 | } |
---|
708 | |
---|
709 | _Thread_Wait_release_default(executing, &lock_context); |
---|
710 | |
---|
711 | if (unblock) { |
---|
712 | _Thread_Clear_state(executing, STATES_WAITING_FOR_BSD_WAKEUP); |
---|
713 | } |
---|
714 | |
---|
715 | _Thread_Dispatch_direct(cpu_self); |
---|
716 | _Thread_Wait_acquire_default(executing, &lock_context); |
---|
717 | |
---|
718 | switch (td->td_sq_state) { |
---|
719 | case TD_SQ_NIGHTMARE: |
---|
720 | BSD_ASSERT(td->td_wchan == wchan); |
---|
721 | td->td_sq_state = TD_SQ_PANIC; |
---|
722 | remove = true; |
---|
723 | break; |
---|
724 | default: |
---|
725 | BSD_ASSERT(td->td_sq_state == TD_SQ_WAKEUP || |
---|
726 | td->td_sq_state == TD_SQ_PANIC); |
---|
727 | break; |
---|
728 | } |
---|
729 | } |
---|
730 | |
---|
731 | _Thread_Wait_release_default(executing, &lock_context); |
---|
732 | _Thread_Timer_remove(executing); |
---|
733 | |
---|
734 | if (remove) { |
---|
735 | sleepq_remove(td, wchan); |
---|
736 | } |
---|
737 | #endif /* __rtems__ */ |
---|
738 | } |
---|
739 | |
---|
740 | /* |
---|
741 | * Check to see if we timed out. |
---|
742 | */ |
---|
743 | static int |
---|
744 | sleepq_check_timeout(void) |
---|
745 | { |
---|
746 | struct thread *td; |
---|
747 | int res; |
---|
748 | |
---|
749 | td = curthread; |
---|
750 | #ifndef __rtems__ |
---|
751 | THREAD_LOCK_ASSERT(td, MA_OWNED); |
---|
752 | |
---|
753 | /* |
---|
754 | * If TDF_TIMEOUT is set, we timed out. But recheck |
---|
755 | * td_sleeptimo anyway. |
---|
756 | */ |
---|
757 | res = 0; |
---|
758 | if (td->td_sleeptimo != 0) { |
---|
759 | if (td->td_sleeptimo <= sbinuptime()) |
---|
760 | res = EWOULDBLOCK; |
---|
761 | td->td_sleeptimo = 0; |
---|
762 | } |
---|
763 | if (td->td_flags & TDF_TIMEOUT) |
---|
764 | td->td_flags &= ~TDF_TIMEOUT; |
---|
765 | else |
---|
766 | /* |
---|
767 | * We ignore the situation where timeout subsystem was |
---|
768 | * unable to stop our callout. The struct thread is |
---|
769 | * type-stable, the callout will use the correct |
---|
770 | * memory when running. The checks of the |
---|
771 | * td_sleeptimo value in this function and in |
---|
772 | * sleepq_timeout() ensure that the thread does not |
---|
773 | * get spurious wakeups, even if the callout was reset |
---|
774 | * or thread reused. |
---|
775 | */ |
---|
776 | callout_stop(&td->td_slpcallout); |
---|
777 | return (res); |
---|
778 | #else /* __rtems__ */ |
---|
779 | (void)res; |
---|
780 | return (td->td_sq_state); |
---|
781 | #endif /* __rtems__ */ |
---|
782 | } |
---|
783 | |
---|
784 | #ifndef __rtems__ |
---|
/*
 * Check to see if we were awoken by a signal.
 *
 * Returns the interrupt value recorded by sleepq_abort() (EINTR or
 * ERESTART) if the sleep was aborted, 0 otherwise.  Clears the
 * interruptible-sleep and abort flags as a side effect.
 */
static int
sleepq_check_signals(void)
{
	struct thread *td;

	td = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/* We are no longer in an interruptible sleep. */
	if (td->td_flags & TDF_SINTR)
		td->td_flags &= ~TDF_SINTR;

	/* sleepq_abort() set the flag and stored its value in td_intrval. */
	if (td->td_flags & TDF_SLEEPABORT) {
		td->td_flags &= ~TDF_SLEEPABORT;
		return (td->td_intrval);
	}

	return (0);
}
---|
807 | #endif /* __rtems__ */ |
---|
808 | |
---|
/*
 * Block the current thread until it is awakened from its sleep queue.
 * This is the uninterruptible, untimed variant.
 */
void
sleepq_wait(void *wchan, int pri)
{
#ifndef __rtems__
	struct thread *td;

	td = curthread;
	/* Uninterruptible sleep: TDF_SINTR must not be set. */
	MPASS(!(td->td_flags & TDF_SINTR));
	thread_lock(td);
#endif /* __rtems__ */
	/* On RTEMS, sleepq_switch() handles the locking itself. */
	sleepq_switch(wchan, pri);
#ifndef __rtems__
	thread_unlock(td);
#endif /* __rtems__ */
}
---|
827 | |
---|
828 | #ifndef __rtems__ |
---|
/*
 * Block the current thread until it is awakened from its sleep queue
 * or it is interrupted by a signal.
 *
 * Returns 0 on a normal wakeup, otherwise the non-zero status from the
 * signal catch or the post-sleep abort check; the catch status takes
 * precedence.
 */
int
sleepq_wait_sig(void *wchan, int pri)
{
	int rcatch;	/* result of sleeping with signal catching */
	int rval;	/* result of the post-sleep abort/signal check */

	rcatch = sleepq_catch_signals(wchan, pri);
	rval = sleepq_check_signals();
	thread_unlock(curthread);
	if (rcatch)
		return (rcatch);
	return (rval);
}
---|
846 | #endif /* __rtems__ */ |
---|
847 | |
---|
/*
 * Block the current thread until it is awakened from its sleep queue
 * or it times out while waiting.
 *
 * Returns the timeout status from sleepq_check_timeout():
 * EWOULDBLOCK on timeout, 0 on a normal wakeup (on RTEMS, the
 * sleep-queue state value).
 */
int
sleepq_timedwait(void *wchan, int pri)
{
#ifndef __rtems__
	struct thread *td;
#endif /* __rtems__ */
	int rval;

#ifndef __rtems__
	td = curthread;
	/* Uninterruptible sleep: TDF_SINTR must not be set. */
	MPASS(!(td->td_flags & TDF_SINTR));
	thread_lock(td);
#endif /* __rtems__ */
	sleepq_switch(wchan, pri);
	rval = sleepq_check_timeout();
#ifndef __rtems__
	thread_unlock(td);
#endif /* __rtems__ */

	return (rval);
}
---|
873 | |
---|
874 | #ifndef __rtems__ |
---|
/*
 * Block the current thread until it is awakened from its sleep queue,
 * it is interrupted by a signal, or it times out waiting to be awakened.
 *
 * Result precedence: signal-catch status, then abort/signal status,
 * then timeout status.
 */
int
sleepq_timedwait_sig(void *wchan, int pri)
{
	int rcatch, rvalt, rvals;

	rcatch = sleepq_catch_signals(wchan, pri);
	rvalt = sleepq_check_timeout();
	rvals = sleepq_check_signals();
	thread_unlock(curthread);
	if (rcatch)
		return (rcatch);
	if (rvals)
		return (rvals);
	return (rvalt);
}
---|
894 | #endif /* __rtems__ */ |
---|
895 | |
---|
896 | /* |
---|
897 | * Returns the type of sleepqueue given a waitchannel. |
---|
898 | */ |
---|
899 | int |
---|
900 | sleepq_type(void *wchan) |
---|
901 | { |
---|
902 | struct sleepqueue *sq; |
---|
903 | int type; |
---|
904 | |
---|
905 | MPASS(wchan != NULL); |
---|
906 | |
---|
907 | sleepq_lock(wchan); |
---|
908 | sq = sleepq_lookup(wchan); |
---|
909 | if (sq == NULL) { |
---|
910 | sleepq_release(wchan); |
---|
911 | return (-1); |
---|
912 | } |
---|
913 | type = sq->sq_type; |
---|
914 | sleepq_release(wchan); |
---|
915 | return (type); |
---|
916 | } |
---|
917 | |
---|
/*
 * Removes a thread from a sleep queue and makes it
 * runnable.
 *
 * Called with the sleep-queue chain lock and the thread lock held.
 * Returns the setrunnable() result (non-zero means the swapper must
 * be kicked); on RTEMS always returns 0 and unblocks the underlying
 * RTEMS thread directly instead.
 */
static int
sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
{
	struct sleepqueue_chain *sc;
#ifdef __rtems__
	Thread_Control *thread;
	ISR_lock_Context lock_context;
	bool unblock;

	BSD_ASSERT(sq != NULL);
#endif /* __rtems__ */

	MPASS(td != NULL);
	MPASS(sq->sq_wchan != NULL);
	MPASS(td->td_wchan == sq->sq_wchan);
	MPASS(td->td_sqqueue < NR_SLEEPQS && td->td_sqqueue >= 0);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	sc = SC_LOOKUP(sq->sq_wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	SDT_PROBE2(sched, , , wakeup, td, td->td_proc);

	/* Remove the thread from the queue. */
	sq->sq_blockedcnt[td->td_sqqueue]--;
	TAILQ_REMOVE(&sq->sq_blocked[td->td_sqqueue], td, td_slpq);

	/*
	 * Get a sleep queue for this thread.  If this is the last waiter,
	 * use the queue itself and take it out of the chain, otherwise,
	 * remove a queue from the free list.
	 */
	if (LIST_EMPTY(&sq->sq_free)) {
		td->td_sleepqueue = sq;
#ifdef INVARIANTS
		sq->sq_wchan = NULL;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth--;
#endif
	} else
		td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
	LIST_REMOVE(td->td_sleepqueue, sq_hash);
#ifdef __rtems__
	(void)sc;
	thread = td->td_thread;
	/* Updates of the wait state must be done under the thread's
	 * default wait lock with interrupts disabled. */
	_ISR_lock_ISR_disable(&lock_context);
	_Thread_Wait_acquire_default_critical(thread, &lock_context);
#endif /* __rtems__ */

	td->td_wmesg = NULL;
	td->td_wchan = NULL;
#ifndef __rtems__
	td->td_flags &= ~TDF_SINTR;

	CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, td->td_name);

	/* Adjust priority if requested. */
	MPASS(pri == 0 || (pri >= PRI_MIN && pri <= PRI_MAX));
	if (pri != 0 && td->td_priority > pri &&
	    PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
		sched_prio(td, pri);

	/*
	 * Note that thread td might not be sleeping if it is running
	 * sleepq_catch_signals() on another CPU or is blocked on its
	 * proc lock to check signals.  There's no need to mark the
	 * thread runnable in that case.
	 */
	if (TD_IS_SLEEPING(td)) {
		TD_CLR_SLEEPING(td);
		return (setrunnable(td));
	}
#else /* __rtems__ */
	/*
	 * Only a thread in TD_SQ_SLEEPING is actually blocked in the
	 * executive; for all other pre-sleep states it suffices to flip
	 * the state to TD_SQ_WAKEUP so the sleeper notices on its own.
	 */
	unblock = false;
	switch (td->td_sq_state) {
	case TD_SQ_SLEEPING:
		unblock = true;
		/* FALLTHROUGH */
	case TD_SQ_TIRED:
	case TD_SQ_SLEEPY:
	case TD_SQ_NIGHTMARE:
		td->td_sq_state = TD_SQ_WAKEUP;
		break;
	default:
		BSD_ASSERT(td->td_sq_state == TD_SQ_PANIC);
		break;
	}

	if (unblock) {
		Per_CPU_Control *cpu_self;

		cpu_self = _Thread_Dispatch_disable_critical(&lock_context);
		_Thread_Wait_release_default(thread, &lock_context);
		_Thread_Clear_state(thread, STATES_WAITING_FOR_BSD_WAKEUP);
		_Thread_Dispatch_direct(cpu_self);
	} else {
		_Thread_Wait_release_default(thread, &lock_context);
	}
#endif /* __rtems__ */
	return (0);
}
---|
1024 | |
---|
1025 | #ifdef INVARIANTS |
---|
1026 | /* |
---|
1027 | * UMA zone item deallocator. |
---|
1028 | */ |
---|
1029 | static void |
---|
1030 | sleepq_dtor(void *mem, int size, void *arg) |
---|
1031 | { |
---|
1032 | struct sleepqueue *sq; |
---|
1033 | int i; |
---|
1034 | |
---|
1035 | sq = mem; |
---|
1036 | for (i = 0; i < NR_SLEEPQS; i++) { |
---|
1037 | MPASS(TAILQ_EMPTY(&sq->sq_blocked[i])); |
---|
1038 | MPASS(sq->sq_blockedcnt[i] == 0); |
---|
1039 | } |
---|
1040 | } |
---|
1041 | #endif |
---|
1042 | |
---|
1043 | /* |
---|
1044 | * UMA zone item initializer. |
---|
1045 | */ |
---|
1046 | static int |
---|
1047 | sleepq_init(void *mem, int size, int flags) |
---|
1048 | { |
---|
1049 | struct sleepqueue *sq; |
---|
1050 | int i; |
---|
1051 | |
---|
1052 | bzero(mem, size); |
---|
1053 | sq = mem; |
---|
1054 | for (i = 0; i < NR_SLEEPQS; i++) { |
---|
1055 | TAILQ_INIT(&sq->sq_blocked[i]); |
---|
1056 | sq->sq_blockedcnt[i] = 0; |
---|
1057 | } |
---|
1058 | LIST_INIT(&sq->sq_free); |
---|
1059 | return (0); |
---|
1060 | } |
---|
1061 | |
---|
/*
 * Find the highest priority thread sleeping on a wait channel and resume it.
 *
 * Returns non-zero if the swapper must be woken (propagated from
 * sleepq_resume_thread()), 0 if nothing was sleeping on the channel.
 */
int
sleepq_signal(void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue *sq;
#ifndef __rtems__
	struct thread *td, *besttd;
#else /* __rtems__ */
	struct thread *besttd;
#endif /* __rtems__ */
	int wakeup_swapper;

	CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

#ifndef __rtems__
	/*
	 * Find the highest priority thread on the queue.  If there is a
	 * tie, use the thread that first appears in the queue as it has
	 * been sleeping the longest since threads are always added to
	 * the tail of sleep queues.
	 */
	besttd = TAILQ_FIRST(&sq->sq_blocked[queue]);
	TAILQ_FOREACH(td, &sq->sq_blocked[queue], td_slpq) {
		if (td->td_priority < besttd->td_priority)
			besttd = td;
	}
#else /* __rtems__ */
	/* On RTEMS, skip the priority scan and wake the queue head. */
	besttd = TAILQ_FIRST(&sq->sq_blocked[queue]);
#endif /* __rtems__ */
	MPASS(besttd != NULL);
	thread_lock(besttd);
	wakeup_swapper = sleepq_resume_thread(sq, besttd, pri);
	thread_unlock(besttd);
	return (wakeup_swapper);
}
---|
1106 | |
---|
/*
 * Resume all threads sleeping on a specified wait channel.
 *
 * Returns non-zero if the swapper must be woken (OR of the
 * sleepq_resume_thread() results).
 */
int
sleepq_broadcast(void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue *sq;
	struct thread *td, *tdn;
	int wakeup_swapper;

	CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	/*
	 * Resume all blocked threads on the sleep queue.  The last thread will
	 * be given ownership of sq and may re-enqueue itself before
	 * sleepq_resume_thread() returns, so we must cache the "next" queue
	 * item at the beginning of the final iteration.
	 */
	wakeup_swapper = 0;
	TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) {
		thread_lock(td);
		wakeup_swapper |= sleepq_resume_thread(sq, td, pri);
		thread_unlock(td);
	}
	return (wakeup_swapper);
}
---|
1140 | |
---|
1141 | #ifndef __rtems__ |
---|
/*
 * Time sleeping threads out.  When the timeout expires, the thread is
 * removed from the sleep queue and made runnable if it is still asleep.
 *
 * Invoked as a callout handler with the sleeping struct thread * as
 * the argument (see the td_slpcallout handling in
 * sleepq_check_timeout()).
 */
static void
sleepq_timeout(void *arg)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	void *wchan;
	int wakeup_swapper;

	td = arg;
	wakeup_swapper = 0;
	CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);

	thread_lock(td);

	if (td->td_sleeptimo > sbinuptime() || td->td_sleeptimo == 0) {
		/*
		 * The thread does not want a timeout (yet).
		 */
	} else if (TD_IS_SLEEPING(td) && TD_ON_SLEEPQ(td)) {
		/*
		 * See if the thread is asleep and get the wait
		 * channel if it is.
		 */
		wchan = td->td_wchan;
		sc = SC_LOOKUP(wchan);
		THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock);
		sq = sleepq_lookup(wchan);
		MPASS(sq != NULL);
		td->td_flags |= TDF_TIMEOUT;
		wakeup_swapper = sleepq_resume_thread(sq, td, 0);
	} else if (TD_ON_SLEEPQ(td)) {
		/*
		 * If the thread is on the SLEEPQ but isn't sleeping
		 * yet, it can either be on another CPU in between
		 * sleepq_add() and one of the sleepq_*wait*()
		 * routines or it can be in sleepq_catch_signals().
		 */
		td->td_flags |= TDF_TIMEOUT;
	}

	thread_unlock(td);
	if (wakeup_swapper)
		kick_proc0();
}
---|
1192 | #else /* __rtems__ */ |
---|
/*
 * RTEMS variant of the sleep timeout handler, invoked from the
 * thread's timer watchdog: move the sleep-queue state to
 * TD_SQ_NIGHTMARE and, if the thread was actually blocked
 * (TD_SQ_SLEEPING), clear its wait state so it runs again.
 */
static void
sleepq_timeout(Watchdog_Control *watchdog)
{
	Thread_Control *thread;
	struct thread *td;
	ISR_lock_Context lock_context;
	bool unblock;

	thread = RTEMS_CONTAINER_OF(watchdog, Thread_Control, Timer.Watchdog);
	td = rtems_bsd_get_thread(thread);
	BSD_ASSERT(td != NULL);

	/* State changes require the default thread wait lock. */
	_ISR_lock_ISR_disable(&lock_context);
	_Thread_Wait_acquire_default_critical(thread, &lock_context);

	unblock = false;
	switch (td->td_sq_state) {
	case TD_SQ_SLEEPING:
		/* The thread is blocked in the executive; wake it below. */
		unblock = true;
		/* Fall through */
	case TD_SQ_TIRED:
	case TD_SQ_SLEEPY:
		td->td_sq_state = TD_SQ_NIGHTMARE;
		break;
	default:
		/* A wakeup already happened; leave the state untouched. */
		BSD_ASSERT(td->td_sq_state == TD_SQ_WAKEUP);
		break;
	}

	if (unblock) {
		Per_CPU_Control *cpu_self;

		cpu_self = _Thread_Dispatch_disable_critical(&lock_context);
		_Thread_Wait_release_default(thread, &lock_context);

		_Thread_Clear_state(thread, STATES_WAITING_FOR_BSD_WAKEUP);

		_Thread_Dispatch_enable(cpu_self);
	} else {
		_Thread_Wait_release_default(thread, &lock_context);
	}
}
---|
1235 | #endif /* __rtems__ */ |
---|
1236 | |
---|
/*
 * Resumes a specific thread from the sleep queue associated with a specific
 * wait channel if it is on that queue.  Kicks the swapper when
 * sleepq_resume_thread() requests it.
 */
void
sleepq_remove(struct thread *td, void *wchan)
{
	struct sleepqueue *sq;
	int wakeup_swapper;

	/*
	 * Look up the sleep queue for this wait channel, then re-check
	 * that the thread is asleep on that channel, if it is not, then
	 * bail.
	 */
	MPASS(wchan != NULL);
	sleepq_lock(wchan);
	sq = sleepq_lookup(wchan);
	/*
	 * We can not lock the thread here as it may be sleeping on a
	 * different sleepq.  However, holding the sleepq lock for this
	 * wchan can guarantee that we do not miss a wakeup for this
	 * channel.  The asserts below will catch any false positives.
	 */
	if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
		sleepq_release(wchan);
		return;
	}
	/* Thread is asleep on sleep queue sq, so wake it up. */
	thread_lock(td);
	MPASS(sq != NULL);
	MPASS(td->td_wchan == wchan);
	wakeup_swapper = sleepq_resume_thread(sq, td, 0);
	thread_unlock(td);
	sleepq_release(wchan);
	if (wakeup_swapper)
		kick_proc0();
}
---|
1275 | |
---|
1276 | #ifndef __rtems__ |
---|
/*
 * Abort a thread as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 *
 * Returns non-zero if the swapper must be woken.  intrval (EINTR or
 * ERESTART) becomes the return value of the interrupted sleep via
 * sleepq_check_signals().
 */
int
sleepq_abort(struct thread *td, int intrval)
{
	struct sleepqueue *sq;
	void *wchan;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_flags & TDF_SINTR);
	MPASS(intrval == EINTR || intrval == ERESTART);

	/*
	 * If the TDF_TIMEOUT flag is set, just leave. A
	 * timeout is scheduled anyhow.
	 */
	if (td->td_flags & TDF_TIMEOUT)
		return (0);

	CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
	td->td_intrval = intrval;
	td->td_flags |= TDF_SLEEPABORT;
	/*
	 * If the thread has not slept yet it will find the signal in
	 * sleepq_catch_signals() and call sleepq_resume_thread. Otherwise
	 * we have to do it here.
	 */
	if (!TD_IS_SLEEPING(td))
		return (0);
	wchan = td->td_wchan;
	MPASS(wchan != NULL);
	sq = sleepq_lookup(wchan);
	MPASS(sq != NULL);

	/* Thread is asleep on sleep queue sq, so wake it up. */
	return (sleepq_resume_thread(sq, td, 0));
}
---|
1318 | #endif /* __rtems__ */ |
---|
1319 | |
---|
1320 | /* |
---|
1321 | * Prints the stacks of all threads presently sleeping on wchan/queue to |
---|
1322 | * the sbuf sb. Sets count_stacks_printed to the number of stacks actually |
---|
1323 | * printed. Typically, this will equal the number of threads sleeping on the |
---|
1324 | * queue, but may be less if sb overflowed before all stacks were printed. |
---|
1325 | */ |
---|
1326 | #ifdef STACK |
---|
int
sleepq_sbuf_print_stacks(struct sbuf *sb, void *wchan, int queue,
    int *count_stacks_printed)
{
	struct thread *td, *td_next;
	struct sleepqueue *sq;
	struct stack **st;	/* captured stacks, one per thread */
	struct sbuf **td_infos;	/* per-thread "tid: name ptr" labels */
	int i, stack_idx, error, stacks_to_allocate;
	bool finished, partial_print;

	error = 0;
	finished = false;
	partial_print = false;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));

	/* Up to 3 attempts, growing the capture arrays 10x each time. */
	stacks_to_allocate = 10;
	for (i = 0; i < 3 && !finished ; i++) {
		/* We cannot malloc while holding the queue's spinlock, so
		 * we do our mallocs now, and hope it is enough.  If it
		 * isn't, we will free these, drop the lock, malloc more,
		 * and try again, up to a point.  After that point we will
		 * give up and report ENOMEM.  We also cannot write to sb
		 * during this time since the client may have set the
		 * SBUF_AUTOEXTEND flag on their sbuf, which could cause a
		 * malloc as we print to it.  So we defer actually printing
		 * to sb until after we drop the spinlock.
		 */

		/* Where we will store the stacks. */
		st = malloc(sizeof(struct stack *) * stacks_to_allocate,
		    M_TEMP, M_WAITOK);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			st[stack_idx] = stack_create();

		/* Where we will store the td name, tid, etc. */
		td_infos = malloc(sizeof(struct sbuf *) * stacks_to_allocate,
		    M_TEMP, M_WAITOK);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			td_infos[stack_idx] = sbuf_new(NULL, NULL,
			    MAXCOMLEN + sizeof(struct thread *) * 2 + 40,
			    SBUF_FIXEDLEN);

		sleepq_lock(wchan);
		sq = sleepq_lookup(wchan);
		if (sq == NULL) {
			/* This sleepq does not exist; exit and return ENOENT. */
			error = ENOENT;
			finished = true;
			sleepq_release(wchan);
			goto loop_end;
		}

		stack_idx = 0;
		/* Save thread info */
		TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq,
		    td_next) {
			/* Ran out of capture slots: retry with more. */
			if (stack_idx >= stacks_to_allocate)
				goto loop_end;

			/* Note the td_lock is equal to the sleepq_lock here. */
			stack_save_td(st[stack_idx], td);

			sbuf_printf(td_infos[stack_idx], "%d: %s %p",
			    td->td_tid, td->td_name, td);

			++stack_idx;
		}

		finished = true;
		sleepq_release(wchan);

		/*
		 * Print the stacks.
		 * NOTE(review): this reuses the outer retry counter 'i';
		 * harmless only because 'finished' terminates the outer
		 * loop at this point — fragile if that changes.
		 */
		for (i = 0; i < stack_idx; i++) {
			sbuf_finish(td_infos[i]);
			sbuf_printf(sb, "--- thread %s: ---\n", sbuf_data(td_infos[i]));
			stack_sbuf_print(sb, st[i]);
			sbuf_printf(sb, "\n");

			error = sbuf_error(sb);
			if (error == 0)
				*count_stacks_printed = stack_idx;
		}

loop_end:
		if (!finished)
			sleepq_release(wchan);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			stack_destroy(st[stack_idx]);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			sbuf_delete(td_infos[stack_idx]);
		free(st, M_TEMP);
		free(td_infos, M_TEMP);
		stacks_to_allocate *= 10;
	}

	/* All attempts exhausted without capturing every thread. */
	if (!finished && error == 0)
		error = ENOMEM;

	return (error);
}
---|
1434 | #endif |
---|
1435 | |
---|
1436 | #ifdef SLEEPQUEUE_PROFILING |
---|
/* Maximum number of distinct sleep locations tracked. */
#define SLEEPQ_PROF_LOCATIONS	1024
/* Initial sbuf size used when dumping the statistics. */
#define SLEEPQ_SBUFSIZE		512
/* One profiling record per wait message (sleep location). */
struct sleepq_prof {
	LIST_ENTRY(sleepq_prof) sp_link;	/* hash or free list linkage */
	const char	*sp_wmesg;		/* wait message identifying the location */
	long		sp_count;		/* number of recorded sleeps */
};

LIST_HEAD(sqphead, sleepq_prof);

struct sqphead sleepq_prof_free;		/* unused records */
struct sqphead sleepq_hash[SC_TABLESIZE];	/* records hashed by wmesg */
static struct sleepq_prof sleepq_profent[SLEEPQ_PROF_LOCATIONS];
static struct mtx sleepq_prof_lock;		/* protects the lists above */
MTX_SYSINIT(sleepq_prof_lock, &sleepq_prof_lock, "sleepq_prof", MTX_SPIN);
---|
1452 | |
---|
/*
 * Record one sleep for the given wait message, allocating a profiling
 * record from the free list on first use of that wmesg.  Samples are
 * silently dropped when profiling is disabled or no records are left.
 */
static void
sleepq_profile(const char *wmesg)
{
	struct sleepq_prof *sp;

	mtx_lock_spin(&sleepq_prof_lock);
	if (prof_enabled == 0)
		goto unlock;
	/* Wait messages are matched by pointer, not by string contents. */
	LIST_FOREACH(sp, &sleepq_hash[SC_HASH(wmesg)], sp_link)
		if (sp->sp_wmesg == wmesg)
			goto done;
	sp = LIST_FIRST(&sleepq_prof_free);
	if (sp == NULL)
		goto unlock;
	sp->sp_wmesg = wmesg;
	LIST_REMOVE(sp, sp_link);
	LIST_INSERT_HEAD(&sleepq_hash[SC_HASH(wmesg)], sp, sp_link);
done:
	sp->sp_count++;
unlock:
	mtx_unlock_spin(&sleepq_prof_lock);
	return;
}
---|
1476 | |
---|
/*
 * Reset all profiling statistics: empty the hash table and return
 * every record to the free list.  Profiling is temporarily disabled
 * during the rebuild and restored afterwards.
 */
static void
sleepq_prof_reset(void)
{
	struct sleepq_prof *sp;
	int enabled;
	int i;

	mtx_lock_spin(&sleepq_prof_lock);
	enabled = prof_enabled;
	prof_enabled = 0;
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_INIT(&sleepq_hash[i]);
	LIST_INIT(&sleepq_prof_free);
	for (i = 0; i < SLEEPQ_PROF_LOCATIONS; i++) {
		sp = &sleepq_profent[i];
		sp->sp_wmesg = NULL;
		sp->sp_count = 0;
		LIST_INSERT_HEAD(&sleepq_prof_free, sp, sp_link);
	}
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);
}
---|
1499 | |
---|
/*
 * Sysctl handler for debug.sleepq.enable: toggle profiling on or off.
 * Turning it on resets the statistics first.
 */
static int
enable_sleepq_prof(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = prof_enabled;
	error = sysctl_handle_int(oidp, &v, v, req);
	if (error)
		return (error);
	/* Read-only access: nothing to change. */
	if (req->newptr == NULL)
		return (error);
	if (v == prof_enabled)
		return (0);
	if (v == 1)
		sleepq_prof_reset();
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = !!v;
	mtx_unlock_spin(&sleepq_prof_lock);

	return (0);
}
---|
1521 | |
---|
/*
 * Sysctl handler for debug.sleepq.reset: writing any non-zero value
 * resets the profiling statistics.
 */
static int
reset_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	/* Read-only access: nothing to do. */
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);
	sleepq_prof_reset();

	return (0);
}
---|
1539 | |
---|
/*
 * Sysctl handler for debug.sleepq.stats: dump "wmesg count" pairs for
 * all recorded sleep locations.  Profiling is disabled while the hash
 * table is walked (so the lists stay stable) and re-enabled after.
 */
static int
dump_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sleepq_prof *sp;
	struct sbuf *sb;
	int enabled;
	int error;
	int i;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sb = sbuf_new_for_sysctl(NULL, NULL, SLEEPQ_SBUFSIZE, req);
	sbuf_printf(sb, "\nwmesg\tcount\n");
	enabled = prof_enabled;
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = 0;
	mtx_unlock_spin(&sleepq_prof_lock);
	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_FOREACH(sp, &sleepq_hash[i], sp_link) {
			sbuf_printf(sb, "%s\t%ld\n",
			    sp->sp_wmesg, sp->sp_count);
		}
	}
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);

	error = sbuf_finish(sb);
	sbuf_delete(sb);
	return (error);
}
---|
1572 | |
---|
/* Sysctl knobs under debug.sleepq for the profiler above. */
SYSCTL_PROC(_debug_sleepq, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_sleepq_prof_stats, "A", "Sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_sleepq_prof_stats, "I",
    "Reset sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, enable_sleepq_prof, "I", "Enable sleepqueue profiling");
---|
1580 | #endif |
---|
1581 | |
---|
1582 | #ifdef DDB |
---|
1583 | DB_SHOW_COMMAND(sleepq, db_show_sleepqueue) |
---|
1584 | { |
---|
1585 | struct sleepqueue_chain *sc; |
---|
1586 | struct sleepqueue *sq; |
---|
1587 | #ifdef INVARIANTS |
---|
1588 | struct lock_object *lock; |
---|
1589 | #endif |
---|
1590 | struct thread *td; |
---|
1591 | void *wchan; |
---|
1592 | int i; |
---|
1593 | |
---|
1594 | if (!have_addr) |
---|
1595 | return; |
---|
1596 | |
---|
1597 | /* |
---|
1598 | * First, see if there is an active sleep queue for the wait channel |
---|
1599 | * indicated by the address. |
---|
1600 | */ |
---|
1601 | wchan = (void *)addr; |
---|
1602 | sc = SC_LOOKUP(wchan); |
---|
1603 | LIST_FOREACH(sq, &sc->sc_queues, sq_hash) |
---|
1604 | if (sq->sq_wchan == wchan) |
---|
1605 | goto found; |
---|
1606 | |
---|
1607 | /* |
---|
1608 | * Second, see if there is an active sleep queue at the address |
---|
1609 | * indicated. |
---|
1610 | */ |
---|
1611 | for (i = 0; i < SC_TABLESIZE; i++) |
---|
1612 | LIST_FOREACH(sq, &sleepq_chains[i].sc_queues, sq_hash) { |
---|
1613 | if (sq == (struct sleepqueue *)addr) |
---|
1614 | goto found; |
---|
1615 | } |
---|
1616 | |
---|
1617 | db_printf("Unable to locate a sleep queue via %p\n", (void *)addr); |
---|
1618 | return; |
---|
1619 | found: |
---|
1620 | db_printf("Wait channel: %p\n", sq->sq_wchan); |
---|
1621 | db_printf("Queue type: %d\n", sq->sq_type); |
---|
1622 | #ifdef INVARIANTS |
---|
1623 | if (sq->sq_lock) { |
---|
1624 | lock = sq->sq_lock; |
---|
1625 | db_printf("Associated Interlock: %p - (%s) %s\n", lock, |
---|
1626 | LOCK_CLASS(lock)->lc_name, lock->lo_name); |
---|
1627 | } |
---|
1628 | #endif |
---|
1629 | db_printf("Blocked threads:\n"); |
---|
1630 | for (i = 0; i < NR_SLEEPQS; i++) { |
---|
1631 | db_printf("\nQueue[%d]:\n", i); |
---|
1632 | if (TAILQ_EMPTY(&sq->sq_blocked[i])) |
---|
1633 | db_printf("\tempty\n"); |
---|
1634 | else |
---|
1635 | TAILQ_FOREACH(td, &sq->sq_blocked[0], |
---|
1636 | td_slpq) { |
---|
1637 | db_printf("\t%p (tid %d, pid %d, \"%s\")\n", td, |
---|
1638 | td->td_tid, td->td_proc->p_pid, |
---|
1639 | td->td_name); |
---|
1640 | } |
---|
1641 | db_printf("(expected: %u)\n", sq->sq_blockedcnt[i]); |
---|
1642 | } |
---|
1643 | } |
---|
1644 | |
---|
1645 | /* Alias 'show sleepqueue' to 'show sleepq'. */ |
---|
1646 | DB_SHOW_ALIAS(sleepqueue, db_show_sleepqueue); |
---|
1647 | #endif |
---|