source: rtems-libbsd/freebsd/sys/kern/subr_sleepqueue.c @ 1bf6529

Last change on this file was 1bf6529, checked in by Chris Johns <chrisj@…> on 07/28/16 at 02:16:09

kern/sleepqueue: Update due to API changes

#include <machine/rtems-bsd-kernel-space.h>

/*-
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 * Copyright (c) 2015 embedded brains GmbH <rtems@embedded-brains.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implementation of sleep queues used to hold a queue of threads blocked on
 * a wait channel.  Sleep queues differ from turnstiles in that wait
 * channels are not owned by anyone, so there is no priority propagation.
 * Sleep queues can also provide a timeout and can also be interrupted by
 * signals.  That said, there are several similarities between the turnstile
 * and sleep queue implementations.  (Note: turnstiles were implemented
 * first.)  For example, both use a hash table of the same size where each
 * bucket is referred to as a "chain" that contains both a spin lock and
 * a linked list of queues.  An individual queue is located by using a hash
 * to pick a chain, locking the chain, and then walking the chain searching
 * for the queue.  This means that a wait channel object does not need to
 * embed its queue head just as locks do not embed their turnstile queue
 * head.  Threads also carry around a sleep queue that they lend to the
 * wait channel when blocking.  Just as in turnstiles, the queue includes
 * a free list of the sleep queues of other threads blocked on the same
 * wait channel in the case of multiple waiters.
 *
 * Additional functionality provided by sleep queues includes the
 * ability to set a timeout.  The timeout is managed using a per-thread
 * callout that resumes a thread if it is asleep.  A thread may also
 * catch signals while it is asleep (aka an interruptible sleep).  The
 * signal code uses sleepq_abort() to interrupt a sleeping thread.  Finally,
 * sleep queues also provide some extra assertions.  One is not allowed to
 * mix the sleep/wakeup and cv APIs for a given wait channel.  Also, one
 * must consistently use the same lock to synchronize with a wait channel,
 * though this check is currently only a warning for sleep/wakeup due to
 * pre-existing abuse of that API.  The same lock must also be held when
 * awakening threads, though that is currently only enforced for condition
 * variables.
 */
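
/*
 * Illustrative wait-side sketch: roughly the sequence a SLEEPQ_SLEEP
 * consumer performs, with "chan" as a placeholder wait channel, a NULL
 * interlock, and no error handling.
 *
 *	sleepq_lock(&chan);
 *	sleepq_add(&chan, NULL, "example", SLEEPQ_SLEEP, 0);
 *	sleepq_wait(&chan, 0);	(releases the chain lock and blocks)
 */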

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <rtems/bsd/local/opt_sleepqueue_profiling.h>
#include <rtems/bsd/local/opt_ddb.h>
#include <rtems/bsd/local/opt_kdtrace.h>
#include <rtems/bsd/local/opt_sched.h>

#include <rtems/bsd/sys/param.h>
#include <sys/systm.h>
#include <rtems/bsd/sys/lock.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif
#ifdef __rtems__
#include <machine/rtems-bsd-thread.h>
#undef ticks
#include <rtems/score/threadimpl.h>
#include <rtems/score/watchdogimpl.h>
#endif /* __rtems__ */

/*
 * Constants for the hash table of sleep queue chains.  These constants are
 * the same ones that 4BSD (and possibly earlier versions of BSD) used.
 * Basically, we ignore the lower 8 bits of the address since most wait
 * channel pointers are aligned and only look at the next 7 bits for the
 * hash.  SC_TABLESIZE must be a power of two for SC_MASK to work properly.
 */
#define SC_TABLESIZE    128                     /* Must be power of 2. */
#define SC_MASK         (SC_TABLESIZE - 1)
#define SC_SHIFT        8
#define SC_HASH(wc)     (((uintptr_t)(wc) >> SC_SHIFT) & SC_MASK)
#define SC_LOOKUP(wc)   &sleepq_chains[SC_HASH(wc)]
#define NR_SLEEPQS      2
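
/*
 * Worked example (hypothetical address): with SC_SHIFT == 8 and
 * SC_MASK == 0x7f, a wait channel at 0xc0a1b2c4 hashes to
 * ((0xc0a1b2c4 >> 8) & 0x7f) == (0xc0a1b2 & 0x7f) == 0x32, so
 * SC_LOOKUP() yields &sleepq_chains[50].
 */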
/*
 * There are two different lists of sleep queues.  Both lists are connected
 * via the sq_hash entries.  The first list is the sleep queue chain list
 * that a sleep queue is on when it is attached to a wait channel.  The
 * second list is the free list hung off of a sleep queue that is attached
 * to a wait channel.
 *
 * Each sleep queue also contains the wait channel it is attached to, the
 * list of threads blocked on that wait channel, flags specific to the
 * wait channel, and the lock used to synchronize with a wait channel.
 * The flags are used to catch mismatches between the various consumers
 * of the sleep queue API (e.g. sleep/wakeup and condition variables).
 * The lock pointer is only used when invariants are enabled for various
 * debugging checks.
 *
 * Locking key:
 *  c - sleep queue chain lock
 */
struct sleepqueue {
        TAILQ_HEAD(, thread) sq_blocked[NR_SLEEPQS];    /* (c) Blocked threads. */
        u_int sq_blockedcnt[NR_SLEEPQS];        /* (c) N. of blocked threads. */
        LIST_ENTRY(sleepqueue) sq_hash;         /* (c) Chain and free list. */
        LIST_HEAD(, sleepqueue) sq_free;        /* (c) Free queues. */
        void    *sq_wchan;                      /* (c) Wait channel. */
        int     sq_type;                        /* (c) Queue type. */
#ifdef INVARIANTS
        struct lock_object *sq_lock;            /* (c) Associated lock. */
#endif
};
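
/*
 * Rough picture (sketch): a chain bucket links the queues currently
 * attached to wait channels, and each attached queue keeps the lent
 * queues of any additional waiters on its free list:
 *
 *	sc_queues -> sq (wchan A) -> sq (wchan B)
 *	              \
 *	               sq_free -> sq (2nd waiter) -> sq (3rd waiter)
 */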

struct sleepqueue_chain {
        LIST_HEAD(, sleepqueue) sc_queues;      /* List of sleep queues. */
        struct mtx sc_lock;                     /* Spin lock for this chain. */
#ifdef SLEEPQUEUE_PROFILING
        u_int   sc_depth;                       /* Length of sc_queues. */
        u_int   sc_max_depth;                   /* Max length of sc_queues. */
#endif
};

#ifdef SLEEPQUEUE_PROFILING
u_int sleepq_max_depth;
static SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD, 0, "sleepq profiling");
static SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains, CTLFLAG_RD, 0,
    "sleepq chain stats");
SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
    0, "maximum depth achieved of a single chain");

static void     sleepq_profile(const char *wmesg);
static int      prof_enabled;
#endif
static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];
static uma_zone_t sleepq_zone;

/*
 * Prototypes for non-exported routines.
 */
#ifndef __rtems__
static int      sleepq_catch_signals(void *wchan, int pri);
static int      sleepq_check_signals(void);
static int      sleepq_check_timeout(void);
#endif /* __rtems__ */
#ifdef INVARIANTS
static void     sleepq_dtor(void *mem, int size, void *arg);
#endif
static int      sleepq_init(void *mem, int size, int flags);
static int      sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
                    int pri);
static void     sleepq_switch(void *wchan, int pri);
#ifndef __rtems__
static void     sleepq_timeout(void *arg);
#else /* __rtems__ */
static void     sleepq_timeout(Watchdog_Control *watchdog);
#endif /* __rtems__ */

SDT_PROBE_DECLARE(sched, , , sleep);
SDT_PROBE_DECLARE(sched, , , wakeup);

/*
 * Early initialization of sleep queues that is called from the sleepinit()
 * SYSINIT.
 */
void
init_sleepqueues(void)
{
#ifdef SLEEPQUEUE_PROFILING
        struct sysctl_oid *chain_oid;
        char chain_name[10];
#endif
        int i;

        for (i = 0; i < SC_TABLESIZE; i++) {
                LIST_INIT(&sleepq_chains[i].sc_queues);
                mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
                    MTX_SPIN | MTX_RECURSE);
#ifdef SLEEPQUEUE_PROFILING
                snprintf(chain_name, sizeof(chain_name), "%d", i);
                chain_oid = SYSCTL_ADD_NODE(NULL,
                    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
                    chain_name, CTLFLAG_RD, NULL, "sleepq chain stats");
                SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
                    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
                SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
                    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
                    NULL);
#endif
        }
        sleepq_zone = uma_zcreate("SLEEPQUEUE", sizeof(struct sleepqueue),
#ifdef INVARIANTS
            NULL, sleepq_dtor, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#else
            NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#endif

#ifndef __rtems__
        thread0.td_sleepqueue = sleepq_alloc();
#endif /* __rtems__ */
}

/*
 * Get a sleep queue for a new thread.
 */
struct sleepqueue *
sleepq_alloc(void)
{

        return (uma_zalloc(sleepq_zone, M_WAITOK));
}

/*
 * Free a sleep queue when a thread is destroyed.
 */
void
sleepq_free(struct sleepqueue *sq)
{

        uma_zfree(sleepq_zone, sq);
}

/*
 * Lock the sleep queue chain associated with the specified wait channel.
 */
void
sleepq_lock(void *wchan)
{
        struct sleepqueue_chain *sc;

        sc = SC_LOOKUP(wchan);
        mtx_lock_spin(&sc->sc_lock);
}

/*
 * Look up the sleep queue associated with a given wait channel in the hash
 * table locking the associated sleep queue chain.  If no queue is found in
 * the table, NULL is returned.
 */
struct sleepqueue *
sleepq_lookup(void *wchan)
{
        struct sleepqueue_chain *sc;
        struct sleepqueue *sq;

        KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
        sc = SC_LOOKUP(wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);
        LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
                if (sq->sq_wchan == wchan)
                        return (sq);
        return (NULL);
}
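
/*
 * Usage sketch: the chain lock must be held across the lookup and any
 * use of the returned queue.
 *
 *	sleepq_lock(wchan);
 *	sq = sleepq_lookup(wchan);
 *	if (sq != NULL)
 *		... inspect sq under the chain lock ...
 *	sleepq_release(wchan);
 */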

/*
 * Unlock the sleep queue chain associated with a given wait channel.
 */
void
sleepq_release(void *wchan)
{
        struct sleepqueue_chain *sc;

        sc = SC_LOOKUP(wchan);
        mtx_unlock_spin(&sc->sc_lock);
}

/*
 * Places the current thread on the sleep queue for the specified wait
 * channel.  If INVARIANTS is enabled, then it associates the passed in
 * lock with the sleepq to make sure it is held when that sleep queue is
 * woken up.
 */
void
sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
    int queue)
{
        struct sleepqueue_chain *sc;
        struct sleepqueue *sq;
        struct thread *td;
#ifdef __rtems__
        ISR_lock_Context lock_context;
        Thread_Control *executing;
        struct thread *succ;
#endif /* __rtems__ */

        td = curthread;
        sc = SC_LOOKUP(wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);
        MPASS(td->td_sleepqueue != NULL);
        MPASS(wchan != NULL);
        MPASS((queue >= 0) && (queue < NR_SLEEPQS));

        /* If this thread is not allowed to sleep, die a horrible death. */
#ifndef __rtems__
        KASSERT(!(td->td_pflags & TDP_NOSLEEPING),
            ("Trying sleep, but thread marked as sleeping prohibited"));
#endif /* __rtems__ */

        /* Look up the sleep queue associated with the wait channel 'wchan'. */
        sq = sleepq_lookup(wchan);

        /*
         * If the wait channel does not already have a sleep queue, use
         * this thread's sleep queue.  Otherwise, insert the current thread
         * into the sleep queue already in use by this wait channel.
         */
        if (sq == NULL) {
#ifdef INVARIANTS
                int i;

                sq = td->td_sleepqueue;
                for (i = 0; i < NR_SLEEPQS; i++) {
                        KASSERT(TAILQ_EMPTY(&sq->sq_blocked[i]),
                            ("thread's sleep queue %d is not empty", i));
                        KASSERT(sq->sq_blockedcnt[i] == 0,
                            ("thread's sleep queue %d count mismatches", i));
                }
                KASSERT(LIST_EMPTY(&sq->sq_free),
                    ("thread's sleep queue has a non-empty free list"));
                KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
                sq->sq_lock = lock;
#endif
#ifdef SLEEPQUEUE_PROFILING
                sc->sc_depth++;
                if (sc->sc_depth > sc->sc_max_depth) {
                        sc->sc_max_depth = sc->sc_depth;
                        if (sc->sc_max_depth > sleepq_max_depth)
                                sleepq_max_depth = sc->sc_max_depth;
                }
#endif
                sq = td->td_sleepqueue;
                LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
                sq->sq_wchan = wchan;
                sq->sq_type = flags & SLEEPQ_TYPE;
        } else {
                MPASS(wchan == sq->sq_wchan);
                MPASS(lock == sq->sq_lock);
                MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
                LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
        }
        thread_lock(td);
#ifndef __rtems__
        TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
#else /* __rtems__ */
        /* FIXME: This is broken with clustered scheduling */
        succ = NULL;
        TAILQ_FOREACH(succ, &sq->sq_blocked[queue], td_slpq) {
                if (td->td_thread->current_priority <
                    succ->td_thread->current_priority)
                        break;
        }
        if (succ == NULL)
                TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
        else
                TAILQ_INSERT_BEFORE(succ, td, td_slpq);
#endif /* __rtems__ */
        sq->sq_blockedcnt[queue]++;
#ifdef __rtems__
        executing = td->td_thread;
        _Thread_Wait_acquire_default(executing, &lock_context);
        td->td_sq_state = TD_SQ_TIRED;
#endif /* __rtems__ */
        td->td_sleepqueue = NULL;
        td->td_sqqueue = queue;
        td->td_wchan = wchan;
        td->td_wmesg = wmesg;
#ifndef __rtems__
        if (flags & SLEEPQ_INTERRUPTIBLE) {
                td->td_flags |= TDF_SINTR;
                td->td_flags &= ~TDF_SLEEPABORT;
        }
        thread_unlock(td);
#else /* __rtems__ */
        _Thread_Wait_release_default(executing, &lock_context);
#endif /* __rtems__ */
}

/*
 * Sets a timeout that will remove the current thread from the specified
 * sleep queue after timo ticks if the thread has not already been awakened.
 */
void
sleepq_set_timeout(void *wchan, int timo)
{
#ifndef __rtems__
        struct sleepqueue_chain *sc;
        struct thread *td;

        td = curthread;
        sc = SC_LOOKUP(wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);
        MPASS(TD_ON_SLEEPQ(td));
        MPASS(td->td_sleepqueue == NULL);
        MPASS(wchan != NULL);
        callout_reset_curcpu(&td->td_slpcallout, timo, sleepq_timeout, td);
#else /* __rtems__ */
        Per_CPU_Control *cpu_self;
        Thread_Control *executing;

        cpu_self = _Thread_Dispatch_disable();
        executing = _Per_CPU_Get_executing(cpu_self);
        BSD_ASSERT(_Watchdog_Get_state(&executing->Timer.Watchdog) ==
            WATCHDOG_INACTIVE);
        _Thread_Timer_insert_relative(executing, cpu_self, sleepq_timeout,
            (Watchdog_Interval)timo);
        _Thread_Dispatch_enable(cpu_self);
#endif /* __rtems__ */
}
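
/*
 * Timed-sleep sketch, with "wchan", "lock", and the "tmowait" wmesg as
 * placeholders; in the FreeBSD path, sleepq_timedwait() returns 0 on a
 * normal wakeup and EWOULDBLOCK if the timeout fired first.
 *
 *	sleepq_lock(wchan);
 *	sleepq_add(wchan, lock, "tmowait", SLEEPQ_SLEEP, 0);
 *	sleepq_set_timeout(wchan, timo);
 *	error = sleepq_timedwait(wchan, 0);
 */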

/*
 * Return the number of actual sleepers for the specified queue.
 */
u_int
sleepq_sleepcnt(void *wchan, int queue)
{
        struct sleepqueue *sq;

        KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
        MPASS((queue >= 0) && (queue < NR_SLEEPQS));
        sq = sleepq_lookup(wchan);
        if (sq == NULL)
                return (0);
        return (sq->sq_blockedcnt[queue]);
}

#ifndef __rtems__
/*
 * Marks the pending sleep of the current thread as interruptible and
 * makes an initial check for pending signals before putting a thread
 * to sleep. Enters and exits with the thread lock held.  Thread lock
 * may have transitioned from the sleepq lock to a run lock.
 */
static int
sleepq_catch_signals(void *wchan, int pri)
{
        struct sleepqueue_chain *sc;
        struct sleepqueue *sq;
        struct thread *td;
        struct proc *p;
        struct sigacts *ps;
        int sig, ret, stop_allowed;

        td = curthread;
        p = curproc;
        sc = SC_LOOKUP(wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);
        MPASS(wchan != NULL);
        if ((td->td_pflags & TDP_WAKEUP) != 0) {
                td->td_pflags &= ~TDP_WAKEUP;
                ret = EINTR;
                thread_lock(td);
                goto out;
        }

        /*
         * See if there are any pending signals for this thread.  If not
         * we can switch immediately.  Otherwise do the signal processing
         * directly.
         */
        thread_lock(td);
        if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) == 0) {
                sleepq_switch(wchan, pri);
                return (0);
        }
        stop_allowed = (td->td_flags & TDF_SBDRY) ? SIG_STOP_NOT_ALLOWED :
            SIG_STOP_ALLOWED;
        thread_unlock(td);
        mtx_unlock_spin(&sc->sc_lock);
        CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
                (void *)td, (long)p->p_pid, td->td_name);
        PROC_LOCK(p);
        ps = p->p_sigacts;
        mtx_lock(&ps->ps_mtx);
        sig = cursig(td, stop_allowed);
        if (sig == 0) {
                mtx_unlock(&ps->ps_mtx);
                ret = thread_suspend_check(1);
                MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
        } else {
                if (SIGISMEMBER(ps->ps_sigintr, sig))
                        ret = EINTR;
                else
                        ret = ERESTART;
                mtx_unlock(&ps->ps_mtx);
        }
        /*
         * Lock the per-process spinlock prior to dropping the PROC_LOCK
         * to avoid a signal delivery race.  PROC_LOCK, PROC_SLOCK, and
         * thread_lock() are currently held in tdsendsignal().
         */
        PROC_SLOCK(p);
        mtx_lock_spin(&sc->sc_lock);
        PROC_UNLOCK(p);
        thread_lock(td);
        PROC_SUNLOCK(p);
        if (ret == 0) {
                sleepq_switch(wchan, pri);
                return (0);
        }
out:
        /*
         * There were pending signals and this thread is still
         * on the sleep queue, remove it from the sleep queue.
         */
        if (TD_ON_SLEEPQ(td)) {
                sq = sleepq_lookup(wchan);
                if (sleepq_resume_thread(sq, td, 0)) {
#ifdef INVARIANTS
                        /*
                         * This thread hasn't gone to sleep yet, so it
                         * should not be swapped out.
                         */
                        panic("not waking up swapper");
#endif
                }
        }
        mtx_unlock_spin(&sc->sc_lock);
        MPASS(td->td_lock != &sc->sc_lock);
        return (ret);
}
#endif /* __rtems__ */

/*
 * Switches to another thread if we are still asleep on a sleep queue.
 * Returns with thread lock.
 */
static void
sleepq_switch(void *wchan, int pri)
{
#ifndef __rtems__
        struct sleepqueue_chain *sc;
        struct sleepqueue *sq;
        struct thread *td;

        td = curthread;
        sc = SC_LOOKUP(wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);
        THREAD_LOCK_ASSERT(td, MA_OWNED);

        /*
         * If we have a sleep queue, then we've already been woken up, so
         * just return.
         */
        if (td->td_sleepqueue != NULL) {
                mtx_unlock_spin(&sc->sc_lock);
                return;
        }

        /*
         * If TDF_TIMEOUT is set, then our sleep has been timed out
         * already but we are still on the sleep queue, so dequeue the
         * thread and return.
         */
        if (td->td_flags & TDF_TIMEOUT) {
                MPASS(TD_ON_SLEEPQ(td));
                sq = sleepq_lookup(wchan);
                if (sleepq_resume_thread(sq, td, 0)) {
#ifdef INVARIANTS
                        /*
                         * This thread hasn't gone to sleep yet, so it
                         * should not be swapped out.
                         */
                        panic("not waking up swapper");
#endif
                }
                mtx_unlock_spin(&sc->sc_lock);
                return;
        }
#ifdef SLEEPQUEUE_PROFILING
        if (prof_enabled)
                sleepq_profile(td->td_wmesg);
#endif
        MPASS(td->td_sleepqueue == NULL);
        sched_sleep(td, pri);
        thread_lock_set(td, &sc->sc_lock);
        SDT_PROBE0(sched, , , sleep);
        TD_SET_SLEEPING(td);
        mi_switch(SW_VOL | SWT_SLEEPQ, NULL);
        KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
        CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
            (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
#else /* __rtems__ */
        Thread_Control *executing;
        ISR_lock_Context lock_context;
        struct thread *td;
        bool block;
        bool remove;

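        /*
         * The RTEMS port replaces the FreeBSD thread lock handoff with a
         * small state machine in td_sq_state: TD_SQ_TIRED (set in
         * sleepq_add()) -> TD_SQ_SLEEPY (decided to block) ->
         * TD_SQ_SLEEPING (blocked).  A wakeup moves the state to
         * TD_SQ_WAKEUP, while a timeout that fires first moves it to
         * TD_SQ_NIGHTMARE, which is converted below to TD_SQ_PANIC and
         * makes the thread dequeue itself via sleepq_remove().
         */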
        sleepq_release(wchan);

        executing = _Thread_Wait_acquire_default_for_executing(&lock_context);
        td = rtems_bsd_get_thread(executing);
        BSD_ASSERT(td != NULL);

        block = false;
        remove = false;
        switch (td->td_sq_state) {
        case TD_SQ_TIRED:
                BSD_ASSERT(td->td_wchan == wchan);
                td->td_sq_state = TD_SQ_SLEEPY;
                block = true;
                break;
        case TD_SQ_NIGHTMARE:
                BSD_ASSERT(td->td_wchan == wchan);
                td->td_sq_state = TD_SQ_PANIC;
                remove = true;
                break;
        default:
                BSD_ASSERT(td->td_wchan == NULL);
                BSD_ASSERT(td->td_sq_state == TD_SQ_WAKEUP);
                break;
        }

        if (block) {
                Per_CPU_Control *cpu_self;
                bool unblock;

                cpu_self = _Thread_Dispatch_disable_critical(&lock_context);
                _Thread_Wait_release_default(executing, &lock_context);

                _Thread_Set_state(executing, STATES_WAITING_FOR_BSD_WAKEUP);

                _Thread_Wait_acquire_default(executing, &lock_context);

                unblock = false;
                switch (td->td_sq_state) {
                case TD_SQ_NIGHTMARE:
                        BSD_ASSERT(td->td_wchan == wchan);
                        td->td_sq_state = TD_SQ_PANIC;
                        unblock = true;
                        remove = true;
                        break;
                case TD_SQ_WAKEUP:
                        BSD_ASSERT(td->td_wchan == NULL);
                        unblock = true;
                        break;
                default:
                        BSD_ASSERT(td->td_wchan == wchan);
                        BSD_ASSERT(td->td_sq_state == TD_SQ_SLEEPY);
                        td->td_sq_state = TD_SQ_SLEEPING;
                        break;
                }

                _Thread_Wait_release_default(executing, &lock_context);

                if (unblock) {
                        _Thread_Timer_remove(executing);
                        _Thread_Clear_state(executing, STATES_WAITING_FOR_BSD_WAKEUP);
                }

                _Thread_Dispatch_enable(cpu_self);

                _Thread_Wait_acquire_default(executing, &lock_context);

                switch (td->td_sq_state) {
                case TD_SQ_NIGHTMARE:
                        BSD_ASSERT(td->td_wchan == wchan);
                        td->td_sq_state = TD_SQ_PANIC;
                        remove = true;
                        break;
                default:
                        BSD_ASSERT(td->td_sq_state == TD_SQ_WAKEUP ||
                            td->td_sq_state == TD_SQ_PANIC);
                        break;
                }
        }

        _Thread_Wait_release_default(executing, &lock_context);

        if (remove) {
                sleepq_remove(td, wchan);
        }
#endif /* __rtems__ */
}

/*
 * Check to see if we timed out.
 */
static int
sleepq_check_timeout(void)
{
        struct thread *td;

        td = curthread;
#ifndef __rtems__
        THREAD_LOCK_ASSERT(td, MA_OWNED);

        /*
         * If TDF_TIMEOUT is set, we timed out.
         */
        if (td->td_flags & TDF_TIMEOUT) {
                td->td_flags &= ~TDF_TIMEOUT;
                return (EWOULDBLOCK);
        }

        /*
         * If TDF_TIMOFAIL is set, the timeout ran after we had
         * already been woken up.
         */
        if (td->td_flags & TDF_TIMOFAIL)
                td->td_flags &= ~TDF_TIMOFAIL;

        /*
         * If callout_stop() fails, then the timeout is running on
         * another CPU, so synchronize with it to avoid having it
         * accidentally wake up a subsequent sleep.
         */
        else if (callout_stop(&td->td_slpcallout) == 0) {
                td->td_flags |= TDF_TIMEOUT;
                TD_SET_SLEEPING(td);
                mi_switch(SW_INVOL | SWT_SLEEPQTIMO, NULL);
        }
        return (0);
#else /* __rtems__ */
        return (td->td_sq_state);
#endif /* __rtems__ */
}

#ifndef __rtems__
/*
 * Check to see if we were awoken by a signal.
 */
static int
sleepq_check_signals(void)
{
        struct thread *td;

        td = curthread;
        THREAD_LOCK_ASSERT(td, MA_OWNED);

        /* We are no longer in an interruptible sleep. */
        if (td->td_flags & TDF_SINTR)
                td->td_flags &= ~TDF_SINTR;

        if (td->td_flags & TDF_SLEEPABORT) {
                td->td_flags &= ~TDF_SLEEPABORT;
                return (td->td_intrval);
        }

        return (0);
}
#endif /* __rtems__ */

/*
 * Block the current thread until it is awakened from its sleep queue.
 */
void
sleepq_wait(void *wchan, int pri)
{
#ifndef __rtems__
        struct thread *td;

        td = curthread;
        MPASS(!(td->td_flags & TDF_SINTR));
        thread_lock(td);
#endif /* __rtems__ */
        sleepq_switch(wchan, pri);
#ifndef __rtems__
        thread_unlock(td);
#endif /* __rtems__ */
}

#ifndef __rtems__
/*
 * Block the current thread until it is awakened from its sleep queue
 * or it is interrupted by a signal.
 */
int
sleepq_wait_sig(void *wchan, int pri)
{
        int rcatch;
        int rval;

        rcatch = sleepq_catch_signals(wchan, pri);
        rval = sleepq_check_signals();
        thread_unlock(curthread);
        if (rcatch)
                return (rcatch);
        return (rval);
}
#endif /* __rtems__ */

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it times out while waiting.
 */
int
sleepq_timedwait(void *wchan, int pri)
{
#ifndef __rtems__
        struct thread *td;
#endif /* __rtems__ */
        int rval;

#ifndef __rtems__
        td = curthread;
        MPASS(!(td->td_flags & TDF_SINTR));
        thread_lock(td);
#endif /* __rtems__ */
        sleepq_switch(wchan, pri);
        rval = sleepq_check_timeout();
#ifndef __rtems__
        thread_unlock(td);
#endif /* __rtems__ */

        return (rval);
}

#ifndef __rtems__
/*
 * Block the current thread until it is awakened from its sleep queue,
 * it is interrupted by a signal, or it times out waiting to be awakened.
 */
int
sleepq_timedwait_sig(void *wchan, int pri)
{
        int rcatch, rvalt, rvals;

        rcatch = sleepq_catch_signals(wchan, pri);
        rvalt = sleepq_check_timeout();
        rvals = sleepq_check_signals();
        thread_unlock(curthread);
        if (rcatch)
                return (rcatch);
        if (rvals)
                return (rvals);
        return (rvalt);
}
#endif /* __rtems__ */

/*
 * Returns the type of sleepqueue given a wait channel.
 */
int
sleepq_type(void *wchan)
{
        struct sleepqueue *sq;
        int type;

        MPASS(wchan != NULL);

        sleepq_lock(wchan);
        sq = sleepq_lookup(wchan);
        if (sq == NULL) {
                sleepq_release(wchan);
                return (-1);
        }
        type = sq->sq_type;
        sleepq_release(wchan);
        return (type);
}

/*
 * Removes a thread from a sleep queue and makes it
 * runnable.
 */
static int
sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
{
        struct sleepqueue_chain *sc;
#ifdef __rtems__
        Thread_Control *thread;
        ISR_lock_Context lock_context;
        bool unblock;

        BSD_ASSERT(sq != NULL);
#endif /* __rtems__ */

        MPASS(td != NULL);
        MPASS(sq->sq_wchan != NULL);
        MPASS(td->td_wchan == sq->sq_wchan);
        MPASS(td->td_sqqueue < NR_SLEEPQS && td->td_sqqueue >= 0);
        THREAD_LOCK_ASSERT(td, MA_OWNED);
        sc = SC_LOOKUP(sq->sq_wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);

        SDT_PROBE2(sched, , , wakeup, td, td->td_proc);

        /* Remove the thread from the queue. */
        sq->sq_blockedcnt[td->td_sqqueue]--;
        TAILQ_REMOVE(&sq->sq_blocked[td->td_sqqueue], td, td_slpq);

        /*
         * Get a sleep queue for this thread.  If this is the last waiter,
         * use the queue itself and take it out of the chain, otherwise,
         * remove a queue from the free list.
         */
        if (LIST_EMPTY(&sq->sq_free)) {
                td->td_sleepqueue = sq;
#ifdef INVARIANTS
                sq->sq_wchan = NULL;
#endif
#ifdef SLEEPQUEUE_PROFILING
                sc->sc_depth--;
#endif
        } else
                td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
        LIST_REMOVE(td->td_sleepqueue, sq_hash);
#ifdef __rtems__
        (void)sc;
        thread = td->td_thread;
        _ISR_lock_ISR_disable(&lock_context);
        _Thread_Wait_acquire_default_critical(thread, &lock_context);
#endif /* __rtems__ */

        td->td_wmesg = NULL;
        td->td_wchan = NULL;
#ifndef __rtems__
        td->td_flags &= ~TDF_SINTR;

        CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
            (void *)td, (long)td->td_proc->p_pid, td->td_name);

        /* Adjust priority if requested. */
        MPASS(pri == 0 || (pri >= PRI_MIN && pri <= PRI_MAX));
        if (pri != 0 && td->td_priority > pri &&
            PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
                sched_prio(td, pri);

        /*
         * Note that thread td might not be sleeping if it is running
         * sleepq_catch_signals() on another CPU or is blocked on its
         * proc lock to check signals.  There's no need to mark the
         * thread runnable in that case.
         */
        if (TD_IS_SLEEPING(td)) {
                TD_CLR_SLEEPING(td);
                return (setrunnable(td));
        }
#else /* __rtems__ */
        unblock = _Watchdog_Is_scheduled(&thread->Timer.Watchdog);
        switch (td->td_sq_state) {
        case TD_SQ_SLEEPING:
                unblock = true;
                /* FALLTHROUGH */
        case TD_SQ_TIRED:
        case TD_SQ_SLEEPY:
        case TD_SQ_NIGHTMARE:
                td->td_sq_state = TD_SQ_WAKEUP;
                break;
        default:
                BSD_ASSERT(td->td_sq_state == TD_SQ_PANIC);
                break;
        }

        if (unblock) {
                Per_CPU_Control *cpu_self;

                cpu_self = _Thread_Dispatch_disable_critical(&lock_context);
                _Thread_Wait_release_default(thread, &lock_context);

                _Thread_Timer_remove(thread);
                _Thread_Clear_state(thread, STATES_WAITING_FOR_BSD_WAKEUP);

                _Thread_Dispatch_enable(cpu_self);
        } else {
                _Thread_Wait_release_default(thread, &lock_context);
        }
#endif /* __rtems__ */
        return (0);
}

#ifdef INVARIANTS
/*
 * UMA zone item deallocator.
 */
static void
sleepq_dtor(void *mem, int size, void *arg)
{
        struct sleepqueue *sq;
        int i;

        sq = mem;
        for (i = 0; i < NR_SLEEPQS; i++) {
                MPASS(TAILQ_EMPTY(&sq->sq_blocked[i]));
                MPASS(sq->sq_blockedcnt[i] == 0);
        }
}
#endif

/*
 * UMA zone item initializer.
 */
static int
sleepq_init(void *mem, int size, int flags)
{
        struct sleepqueue *sq;
        int i;

        bzero(mem, size);
        sq = mem;
        for (i = 0; i < NR_SLEEPQS; i++) {
                TAILQ_INIT(&sq->sq_blocked[i]);
                sq->sq_blockedcnt[i] = 0;
        }
        LIST_INIT(&sq->sq_free);
        return (0);
}

/*
 * Find the highest priority thread sleeping on a wait channel and resume it.
 */
int
sleepq_signal(void *wchan, int flags, int pri, int queue)
{
        struct sleepqueue *sq;
#ifndef __rtems__
        struct thread *td, *besttd;
#else /* __rtems__ */
        struct thread *besttd;
#endif /* __rtems__ */
        int wakeup_swapper;

        CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
        KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
        MPASS((queue >= 0) && (queue < NR_SLEEPQS));
        sq = sleepq_lookup(wchan);
        if (sq == NULL)
                return (0);
        KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
            ("%s: mismatch between sleep/wakeup and cv_*", __func__));

#ifndef __rtems__
        /*
         * Find the highest priority thread on the queue.  If there is a
         * tie, use the thread that first appears in the queue as it has
         * been sleeping the longest since threads are always added to
         * the tail of sleep queues.
         */
        besttd = NULL;
        TAILQ_FOREACH(td, &sq->sq_blocked[queue], td_slpq) {
                if (besttd == NULL || td->td_priority < besttd->td_priority)
                        besttd = td;
        }
#else /* __rtems__ */
        besttd = TAILQ_FIRST(&sq->sq_blocked[queue]);
#endif /* __rtems__ */
        MPASS(besttd != NULL);
        thread_lock(besttd);
        wakeup_swapper = sleepq_resume_thread(sq, besttd, pri);
        thread_unlock(besttd);
        return (wakeup_swapper);
}
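
/*
 * Wakeup-side sketch (roughly what wakeup_one() in kern_synch.c does):
 * hold the chain lock around the signal and kick the swapper if
 * sleepq_resume_thread() requested it.
 *
 *	sleepq_lock(wchan);
 *	wakeup_swapper = sleepq_signal(wchan, SLEEPQ_SLEEP, 0, 0);
 *	sleepq_release(wchan);
 *	if (wakeup_swapper)
 *		kick_proc0();
 */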

/*
 * Resume all threads sleeping on a specified wait channel.
 */
int
sleepq_broadcast(void *wchan, int flags, int pri, int queue)
{
        struct sleepqueue *sq;
        struct thread *td, *tdn;
        int wakeup_swapper;

        CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
        KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
        MPASS((queue >= 0) && (queue < NR_SLEEPQS));
        sq = sleepq_lookup(wchan);
        if (sq == NULL)
                return (0);
        KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
            ("%s: mismatch between sleep/wakeup and cv_*", __func__));

        /* Resume all blocked threads on the sleep queue. */
        wakeup_swapper = 0;
        TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) {
                thread_lock(td);
                if (sleepq_resume_thread(sq, td, pri))
                        wakeup_swapper = 1;
                thread_unlock(td);
        }
        return (wakeup_swapper);
}

#ifndef __rtems__
/*
 * Time sleeping threads out.  When the timeout expires, the thread is
 * removed from the sleep queue and made runnable if it is still asleep.
 */
static void
sleepq_timeout(void *arg)
{
        struct sleepqueue_chain *sc;
        struct sleepqueue *sq;
        struct thread *td;
        void *wchan;
        int wakeup_swapper;

        td = arg;
        wakeup_swapper = 0;
        CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
            (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);

        /*
         * First, see if the thread is asleep and get the wait channel if
         * it is.
         */
        thread_lock(td);
        if (TD_IS_SLEEPING(td) && TD_ON_SLEEPQ(td)) {
                wchan = td->td_wchan;
                sc = SC_LOOKUP(wchan);
                THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock);
                sq = sleepq_lookup(wchan);
                MPASS(sq != NULL);
                td->td_flags |= TDF_TIMEOUT;
                wakeup_swapper = sleepq_resume_thread(sq, td, 0);
                thread_unlock(td);
                if (wakeup_swapper)
                        kick_proc0();
                return;
        }

        /*
         * If the thread is on the SLEEPQ but isn't sleeping yet, it
         * can either be on another CPU in between sleepq_add() and
         * one of the sleepq_*wait*() routines or it can be in
         * sleepq_catch_signals().
         */
        if (TD_ON_SLEEPQ(td)) {
                td->td_flags |= TDF_TIMEOUT;
                thread_unlock(td);
                return;
        }

        /*
         * Now check for the edge cases.  First, if TDF_TIMEOUT is set,
         * then the other thread has already yielded to us, so clear
         * the flag and resume it.  If TDF_TIMEOUT is not set, then we
         * know that the other thread is not on a sleep queue, but it
         * hasn't resumed execution yet.  In that case, set TDF_TIMOFAIL
         * to let it know that the timeout has already run and doesn't
         * need to be canceled.
         */
        if (td->td_flags & TDF_TIMEOUT) {
                MPASS(TD_IS_SLEEPING(td));
                td->td_flags &= ~TDF_TIMEOUT;
                TD_CLR_SLEEPING(td);
                wakeup_swapper = setrunnable(td);
        } else
                td->td_flags |= TDF_TIMOFAIL;
        thread_unlock(td);
        if (wakeup_swapper)
                kick_proc0();
}
#else /* __rtems__ */
static void
sleepq_timeout(Watchdog_Control *watchdog)
{
        Thread_Control *thread;
        struct thread *td;
        ISR_lock_Context lock_context;
        bool unblock;

        thread = RTEMS_CONTAINER_OF(watchdog, Thread_Control, Timer.Watchdog);
        td = rtems_bsd_get_thread(thread);
        BSD_ASSERT(td != NULL);

        _ISR_lock_ISR_disable(&lock_context);
        _Thread_Wait_acquire_default_critical(thread, &lock_context);

        unblock = false;
        switch (td->td_sq_state) {
        case TD_SQ_SLEEPING:
                unblock = true;
                /* Fall through */
        case TD_SQ_TIRED:
        case TD_SQ_SLEEPY:
                td->td_sq_state = TD_SQ_NIGHTMARE;
                break;
        default:
                BSD_ASSERT(td->td_sq_state == TD_SQ_WAKEUP);
                break;
        }

        if (unblock) {
                Per_CPU_Control *cpu_self;

                cpu_self = _Thread_Dispatch_disable_critical(&lock_context);
                _Thread_Wait_release_default(thread, &lock_context);

                _Thread_Clear_state(thread, STATES_WAITING_FOR_BSD_WAKEUP);

                _Thread_Dispatch_enable(cpu_self);
        } else {
                _Thread_Wait_release_default(thread, &lock_context);
        }
}
#endif /* __rtems__ */

/*
 * Resumes a specific thread from the sleep queue associated with a specific
 * wait channel if it is on that queue.
 */
void
sleepq_remove(struct thread *td, void *wchan)
{
        struct sleepqueue *sq;
        int wakeup_swapper;

        /*
         * Look up the sleep queue for this wait channel, then re-check
         * that the thread is asleep on that channel, if it is not, then
         * bail.
         */
        MPASS(wchan != NULL);
        sleepq_lock(wchan);
        sq = sleepq_lookup(wchan);
        /*
         * We can not lock the thread here as it may be sleeping on a
         * different sleepq.  However, holding the sleepq lock for this
         * wchan can guarantee that we do not miss a wakeup for this
         * channel.  The asserts below will catch any false positives.
         */
        if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
                sleepq_release(wchan);
                return;
        }
        /* Thread is asleep on sleep queue sq, so wake it up. */
        thread_lock(td);
        MPASS(sq != NULL);
        MPASS(td->td_wchan == wchan);
        wakeup_swapper = sleepq_resume_thread(sq, td, 0);
        thread_unlock(td);
        sleepq_release(wchan);
        if (wakeup_swapper)
                kick_proc0();
}

#ifndef __rtems__
/*
 * Abort a thread as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 */
int
sleepq_abort(struct thread *td, int intrval)
{
        struct sleepqueue *sq;
        void *wchan;

        THREAD_LOCK_ASSERT(td, MA_OWNED);
        MPASS(TD_ON_SLEEPQ(td));
        MPASS(td->td_flags & TDF_SINTR);
        MPASS(intrval == EINTR || intrval == ERESTART);

        /*
         * If the TDF_TIMEOUT flag is set, just leave. A
         * timeout is scheduled anyhow.
         */
        if (td->td_flags & TDF_TIMEOUT)
                return (0);

        CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
            (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
        td->td_intrval = intrval;
        td->td_flags |= TDF_SLEEPABORT;
        /*
         * If the thread has not slept yet it will find the signal in
         * sleepq_catch_signals() and call sleepq_resume_thread.  Otherwise
         * we have to do it here.
         */
        if (!TD_IS_SLEEPING(td))
                return (0);
        wchan = td->td_wchan;
        MPASS(wchan != NULL);
        sq = sleepq_lookup(wchan);
        MPASS(sq != NULL);

        /* Thread is asleep on sleep queue sq, so wake it up. */
        return (sleepq_resume_thread(sq, td, 0));
}
#endif /* __rtems__ */

#ifdef SLEEPQUEUE_PROFILING
#define SLEEPQ_PROF_LOCATIONS   1024
#define SLEEPQ_SBUFSIZE         512
struct sleepq_prof {
        LIST_ENTRY(sleepq_prof) sp_link;
        const char      *sp_wmesg;
        long            sp_count;
};

LIST_HEAD(sqphead, sleepq_prof);

struct sqphead sleepq_prof_free;
struct sqphead sleepq_hash[SC_TABLESIZE];
static struct sleepq_prof sleepq_profent[SLEEPQ_PROF_LOCATIONS];
static struct mtx sleepq_prof_lock;
MTX_SYSINIT(sleepq_prof_lock, &sleepq_prof_lock, "sleepq_prof", MTX_SPIN);

static void
sleepq_profile(const char *wmesg)
{
        struct sleepq_prof *sp;

        mtx_lock_spin(&sleepq_prof_lock);
        if (prof_enabled == 0)
                goto unlock;
        LIST_FOREACH(sp, &sleepq_hash[SC_HASH(wmesg)], sp_link)
                if (sp->sp_wmesg == wmesg)
                        goto done;
        sp = LIST_FIRST(&sleepq_prof_free);
        if (sp == NULL)
                goto unlock;
        sp->sp_wmesg = wmesg;
        LIST_REMOVE(sp, sp_link);
        LIST_INSERT_HEAD(&sleepq_hash[SC_HASH(wmesg)], sp, sp_link);
done:
        sp->sp_count++;
unlock:
        mtx_unlock_spin(&sleepq_prof_lock);
        return;
}

static void
sleepq_prof_reset(void)
{
        struct sleepq_prof *sp;
        int enabled;
        int i;

        mtx_lock_spin(&sleepq_prof_lock);
        enabled = prof_enabled;
        prof_enabled = 0;
        for (i = 0; i < SC_TABLESIZE; i++)
                LIST_INIT(&sleepq_hash[i]);
        LIST_INIT(&sleepq_prof_free);
        for (i = 0; i < SLEEPQ_PROF_LOCATIONS; i++) {
                sp = &sleepq_profent[i];
                sp->sp_wmesg = NULL;
                sp->sp_count = 0;
                LIST_INSERT_HEAD(&sleepq_prof_free, sp, sp_link);
        }
        prof_enabled = enabled;
        mtx_unlock_spin(&sleepq_prof_lock);
}

static int
enable_sleepq_prof(SYSCTL_HANDLER_ARGS)
{
        int error, v;

        v = prof_enabled;
        error = sysctl_handle_int(oidp, &v, v, req);
        if (error)
                return (error);
        if (req->newptr == NULL)
                return (error);
        if (v == prof_enabled)
                return (0);
        if (v == 1)
                sleepq_prof_reset();
        mtx_lock_spin(&sleepq_prof_lock);
        prof_enabled = !!v;
        mtx_unlock_spin(&sleepq_prof_lock);

        return (0);
}

static int
reset_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
        int error, v;

        v = 0;
        error = sysctl_handle_int(oidp, &v, 0, req);
        if (error)
                return (error);
        if (req->newptr == NULL)
                return (error);
        if (v == 0)
                return (0);
        sleepq_prof_reset();

        return (0);
}

static int
dump_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
        struct sleepq_prof *sp;
        struct sbuf *sb;
        int enabled;
        int error;
        int i;

        error = sysctl_wire_old_buffer(req, 0);
        if (error != 0)
                return (error);
        sb = sbuf_new_for_sysctl(NULL, NULL, SLEEPQ_SBUFSIZE, req);
        sbuf_printf(sb, "\nwmesg\tcount\n");
        enabled = prof_enabled;
        mtx_lock_spin(&sleepq_prof_lock);
        prof_enabled = 0;
        mtx_unlock_spin(&sleepq_prof_lock);
        for (i = 0; i < SC_TABLESIZE; i++) {
                LIST_FOREACH(sp, &sleepq_hash[i], sp_link) {
                        sbuf_printf(sb, "%s\t%ld\n",
                            sp->sp_wmesg, sp->sp_count);
                }
        }
        mtx_lock_spin(&sleepq_prof_lock);
        prof_enabled = enabled;
        mtx_unlock_spin(&sleepq_prof_lock);

        error = sbuf_finish(sb);
        sbuf_delete(sb);
        return (error);
}

SYSCTL_PROC(_debug_sleepq, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_sleepq_prof_stats, "A", "Sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_sleepq_prof_stats, "I",
    "Reset sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, enable_sleepq_prof, "I", "Enable sleepqueue profiling");
#endif

#ifdef DDB
DB_SHOW_COMMAND(sleepq, db_show_sleepqueue)
{
        struct sleepqueue_chain *sc;
        struct sleepqueue *sq;
#ifdef INVARIANTS
        struct lock_object *lock;
#endif
        struct thread *td;
        void *wchan;
        int i;

        if (!have_addr)
                return;

        /*
         * First, see if there is an active sleep queue for the wait channel
         * indicated by the address.
         */
        wchan = (void *)addr;
        sc = SC_LOOKUP(wchan);
        LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
                if (sq->sq_wchan == wchan)
                        goto found;

        /*
         * Second, see if there is an active sleep queue at the address
         * indicated.
         */
        for (i = 0; i < SC_TABLESIZE; i++)
                LIST_FOREACH(sq, &sleepq_chains[i].sc_queues, sq_hash) {
                        if (sq == (struct sleepqueue *)addr)
                                goto found;
                }

        db_printf("Unable to locate a sleep queue via %p\n", (void *)addr);
        return;
found:
        db_printf("Wait channel: %p\n", sq->sq_wchan);
        db_printf("Queue type: %d\n", sq->sq_type);
#ifdef INVARIANTS
        if (sq->sq_lock) {
                lock = sq->sq_lock;
                db_printf("Associated Interlock: %p - (%s) %s\n", lock,
                    LOCK_CLASS(lock)->lc_name, lock->lo_name);
        }
#endif
        db_printf("Blocked threads:\n");
        for (i = 0; i < NR_SLEEPQS; i++) {
                db_printf("\nQueue[%d]:\n", i);
                if (TAILQ_EMPTY(&sq->sq_blocked[i]))
                        db_printf("\tempty\n");
                else
                        TAILQ_FOREACH(td, &sq->sq_blocked[i],
                                      td_slpq) {
                                db_printf("\t%p (tid %d, pid %d, \"%s\")\n", td,
                                          td->td_tid, td->td_proc->p_pid,
                                          td->td_name);
                        }
                db_printf("(expected: %u)\n", sq->sq_blockedcnt[i]);
        }
}

/* Alias 'show sleepqueue' to 'show sleepq'. */
DB_SHOW_ALIAS(sleepqueue, db_show_sleepqueue);
#endif