source: rtems-libbsd/freebsd/sys/kern/subr_sleepqueue.c @ 8475e7a

Last change to this file was 8475e7a, checked in by Sebastian Huber <sebastian.huber@…> on 03/24/15 at 09:02:45

SLEEPQUEUE(9): Port to RTEMS

#include <machine/rtems-bsd-kernel-space.h>

/*-
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 * Copyright (c) 2015 embedded brains GmbH <rtems@embedded-brains.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implementation of sleep queues used to hold a queue of threads blocked
 * on a wait channel.  Sleep queues differ from turnstiles in that wait
 * channels are not owned by anyone, so there is no priority propagation.
 * Sleep queues can also provide a timeout and can also be interrupted by
 * signals.  That said, there are several similarities between the turnstile
 * and sleep queue implementations.  (Note: turnstiles were implemented
 * first.)  For example, both use a hash table of the same size where each
 * bucket is referred to as a "chain" that contains both a spin lock and
 * a linked list of queues.  An individual queue is located by using a hash
 * to pick a chain, locking the chain, and then walking the chain searching
 * for the queue.  This means that a wait channel object does not need to
 * embed its queue head just as locks do not embed their turnstile queue
 * head.  Threads also carry around a sleep queue that they lend to the
 * wait channel when blocking.  Just as in turnstiles, the queue includes
 * a free list of the sleep queues of other threads blocked on the same
 * wait channel in the case of multiple waiters.
 *
 * Some additional functionality provided by sleep queues includes the
 * ability to set a timeout.  The timeout is managed using a per-thread
 * callout that resumes a thread if it is asleep.  A thread may also
 * catch signals while it is asleep (aka an interruptible sleep).  The
 * signal code uses sleepq_abort() to interrupt a sleeping thread.  Finally,
 * sleep queues also provide some extra assertions.  One is not allowed to
 * mix the sleep/wakeup and cv APIs for a given wait channel.  Also, one
 * must consistently use the same lock to synchronize with a wait channel,
 * though this check is currently only a warning for sleep/wakeup due to
 * pre-existing abuse of that API.  The same lock must also be held when
 * awakening threads, though that is currently only enforced for condition
 * variables.
 */
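
/*
 * Illustrative sketch (added commentary, not part of the upstream file):
 * a consumer of this API typically blocks on a wait channel roughly as
 * follows; wchan is any kernel address, and "example" and the NULL
 * interlock are placeholders.
 *
 *        sleepq_lock(wchan);
 *        sleepq_add(wchan, NULL, "example", SLEEPQ_SLEEP, 0);
 *        sleepq_wait(wchan, 0);
 *
 * The wakeup side is sketched after sleepq_broadcast() below.
 */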

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <rtems/bsd/local/opt_sleepqueue_profiling.h>
#include <rtems/bsd/local/opt_ddb.h>
#include <rtems/bsd/local/opt_kdtrace.h>
#include <rtems/bsd/local/opt_sched.h>

#include <rtems/bsd/sys/param.h>
#include <sys/systm.h>
#include <rtems/bsd/sys/lock.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif
#ifdef __rtems__
#include <machine/rtems-bsd-thread.h>
#include <rtems/score/threadimpl.h>
#endif /* __rtems__ */

/*
 * Constants for the hash table of sleep queue chains.  These constants are
 * the same ones that 4BSD (and possibly earlier versions of BSD) used.
 * Basically, we ignore the lower 8 bits of the address since most wait
 * channel pointers are aligned and only look at the next 7 bits for the
 * hash.  SC_TABLESIZE must be a power of two for SC_MASK to work properly.
 */
#define SC_TABLESIZE    128                     /* Must be power of 2. */
#define SC_MASK         (SC_TABLESIZE - 1)
#define SC_SHIFT        8
#define SC_HASH(wc)     (((uintptr_t)(wc) >> SC_SHIFT) & SC_MASK)
#define SC_LOOKUP(wc)   &sleepq_chains[SC_HASH(wc)]
#define NR_SLEEPQS      2
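
/*
 * Worked example (added commentary): for a wait channel address of
 * 0x80401234, SC_HASH(wc) = (0x80401234 >> 8) & 127 = 0x804012 & 0x7f =
 * 0x12, so SC_LOOKUP(wc) yields &sleepq_chains[18].  Any two wait
 * channels whose addresses differ only in their low 8 bits thus share a
 * chain and its spin lock.
 */
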
/*
 * There are two different lists of sleep queues.  Both lists are connected
 * via the sq_hash entries.  The first list is the sleep queue chain list
 * that a sleep queue is on when it is attached to a wait channel.  The
 * second list is the free list hung off of a sleep queue that is attached
 * to a wait channel.
 *
 * Each sleep queue also contains the wait channel it is attached to, the
 * list of threads blocked on that wait channel, flags specific to the
 * wait channel, and the lock used to synchronize with a wait channel.
 * The flags are used to catch mismatches between the various consumers
 * of the sleep queue API (e.g. sleep/wakeup and condition variables).
 * The lock pointer is only used when invariants are enabled for various
 * debugging checks.
 *
 * Locking key:
 *  c - sleep queue chain lock
 */
struct sleepqueue {
        TAILQ_HEAD(, thread) sq_blocked[NR_SLEEPQS];    /* (c) Blocked threads. */
        u_int sq_blockedcnt[NR_SLEEPQS];        /* (c) N. of blocked threads. */
        LIST_ENTRY(sleepqueue) sq_hash;         /* (c) Chain and free list. */
        LIST_HEAD(, sleepqueue) sq_free;        /* (c) Free queues. */
        void    *sq_wchan;                      /* (c) Wait channel. */
        int     sq_type;                        /* (c) Queue type. */
#ifdef INVARIANTS
        struct lock_object *sq_lock;            /* (c) Associated lock. */
#endif
};

struct sleepqueue_chain {
        LIST_HEAD(, sleepqueue) sc_queues;      /* List of sleep queues. */
        struct mtx sc_lock;                     /* Spin lock for this chain. */
#ifdef SLEEPQUEUE_PROFILING
        u_int   sc_depth;                       /* Length of sc_queues. */
        u_int   sc_max_depth;                   /* Max length of sc_queues. */
#endif
};

#ifdef SLEEPQUEUE_PROFILING
u_int sleepq_max_depth;
static SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD, 0, "sleepq profiling");
static SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains, CTLFLAG_RD, 0,
    "sleepq chain stats");
SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
    0, "maximum depth achieved of a single chain");

static void     sleepq_profile(const char *wmesg);
static int      prof_enabled;
#endif
static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];
static uma_zone_t sleepq_zone;

/*
 * Prototypes for non-exported routines.
 */
#ifndef __rtems__
static int      sleepq_catch_signals(void *wchan, int pri);
static int      sleepq_check_signals(void);
static int      sleepq_check_timeout(void);
#endif /* __rtems__ */
#ifdef INVARIANTS
static void     sleepq_dtor(void *mem, int size, void *arg);
#endif
static int      sleepq_init(void *mem, int size, int flags);
static int      sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
                    int pri);
static void     sleepq_switch(void *wchan, int pri);
#ifndef __rtems__
static void     sleepq_timeout(void *arg);
#else /* __rtems__ */
static void     sleepq_timeout(Objects_Id id, void *arg);
#endif /* __rtems__ */

SDT_PROBE_DECLARE(sched, , , sleep);
SDT_PROBE_DECLARE(sched, , , wakeup);

/*
 * Early initialization of sleep queues that is called from the sleepinit()
 * SYSINIT.
 */
void
init_sleepqueues(void)
{
#ifdef SLEEPQUEUE_PROFILING
        struct sysctl_oid *chain_oid;
        char chain_name[10];
#endif
        int i;

        for (i = 0; i < SC_TABLESIZE; i++) {
                LIST_INIT(&sleepq_chains[i].sc_queues);
                mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
                    MTX_SPIN | MTX_RECURSE);
#ifdef SLEEPQUEUE_PROFILING
                snprintf(chain_name, sizeof(chain_name), "%d", i);
                chain_oid = SYSCTL_ADD_NODE(NULL,
                    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
                    chain_name, CTLFLAG_RD, NULL, "sleepq chain stats");
                SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
                    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
                SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
                    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
                    NULL);
#endif
        }
        sleepq_zone = uma_zcreate("SLEEPQUEUE", sizeof(struct sleepqueue),
#ifdef INVARIANTS
            NULL, sleepq_dtor, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#else
            NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#endif

#ifndef __rtems__
        thread0.td_sleepqueue = sleepq_alloc();
#endif /* __rtems__ */
}

/*
 * Get a sleep queue for a new thread.
 */
struct sleepqueue *
sleepq_alloc(void)
{

        return (uma_zalloc(sleepq_zone, M_WAITOK));
}

/*
 * Free a sleep queue when a thread is destroyed.
 */
void
sleepq_free(struct sleepqueue *sq)
{

        uma_zfree(sleepq_zone, sq);
}

/*
 * Lock the sleep queue chain associated with the specified wait channel.
 */
void
sleepq_lock(void *wchan)
{
        struct sleepqueue_chain *sc;

        sc = SC_LOOKUP(wchan);
        mtx_lock_spin(&sc->sc_lock);
}

/*
 * Look up the sleep queue associated with a given wait channel in the hash
 * table locking the associated sleep queue chain.  If no queue is found in
 * the table, NULL is returned.
 */
struct sleepqueue *
sleepq_lookup(void *wchan)
{
        struct sleepqueue_chain *sc;
        struct sleepqueue *sq;

        KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
        sc = SC_LOOKUP(wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);
        LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
                if (sq->sq_wchan == wchan)
                        return (sq);
        return (NULL);
}

/*
 * Unlock the sleep queue chain associated with a given wait channel.
 */
void
sleepq_release(void *wchan)
{
        struct sleepqueue_chain *sc;

        sc = SC_LOOKUP(wchan);
        mtx_unlock_spin(&sc->sc_lock);
}

/*
 * Places the current thread on the sleep queue for the specified wait
 * channel.  If INVARIANTS is enabled, then it associates the passed in
 * lock with the sleepq to make sure it is held when that sleep queue is
 * woken up.
 */
void
sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
    int queue)
{
        struct sleepqueue_chain *sc;
        struct sleepqueue *sq;
        struct thread *td;
#ifdef __rtems__
        ISR_lock_Context lock_context;
        Thread_Control *executing;
        struct thread *succ;
#endif /* __rtems__ */

        td = curthread;
        sc = SC_LOOKUP(wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);
        MPASS(td->td_sleepqueue != NULL);
        MPASS(wchan != NULL);
        MPASS((queue >= 0) && (queue < NR_SLEEPQS));

        /* If this thread is not allowed to sleep, die a horrible death. */
        KASSERT(!(td->td_pflags & TDP_NOSLEEPING),
            ("Trying sleep, but thread marked as sleeping prohibited"));

        /* Look up the sleep queue associated with the wait channel 'wchan'. */
        sq = sleepq_lookup(wchan);

        /*
         * If the wait channel does not already have a sleep queue, use
         * this thread's sleep queue.  Otherwise, insert the current thread
         * into the sleep queue already in use by this wait channel.
         */
        if (sq == NULL) {
#ifdef INVARIANTS
                int i;

                sq = td->td_sleepqueue;
                for (i = 0; i < NR_SLEEPQS; i++) {
                        KASSERT(TAILQ_EMPTY(&sq->sq_blocked[i]),
                            ("thread's sleep queue %d is not empty", i));
                        KASSERT(sq->sq_blockedcnt[i] == 0,
                            ("thread's sleep queue %d count mismatches", i));
                }
                KASSERT(LIST_EMPTY(&sq->sq_free),
                    ("thread's sleep queue has a non-empty free list"));
                KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
                sq->sq_lock = lock;
#endif
#ifdef SLEEPQUEUE_PROFILING
                sc->sc_depth++;
                if (sc->sc_depth > sc->sc_max_depth) {
                        sc->sc_max_depth = sc->sc_depth;
                        if (sc->sc_max_depth > sleepq_max_depth)
                                sleepq_max_depth = sc->sc_max_depth;
                }
#endif
                sq = td->td_sleepqueue;
                LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
                sq->sq_wchan = wchan;
                sq->sq_type = flags & SLEEPQ_TYPE;
        } else {
                MPASS(wchan == sq->sq_wchan);
                MPASS(lock == sq->sq_lock);
                MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
                LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
        }
        thread_lock(td);
#ifndef __rtems__
        TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
#else /* __rtems__ */
        /* FIXME: This is broken with clustered scheduling */
        succ = NULL;
        TAILQ_FOREACH(succ, &sq->sq_blocked[queue], td_slpq) {
                if (td->td_thread->current_priority <
                    succ->td_thread->current_priority)
                        break;
        }
        if (succ == NULL)
                TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
        else
                TAILQ_INSERT_BEFORE(succ, td, td_slpq);
#endif /* __rtems__ */
        sq->sq_blockedcnt[queue]++;
#ifdef __rtems__
        executing = td->td_thread;
        _Objects_ISR_disable_and_acquire(&executing->Object, &lock_context);
        td->td_sq_state = TD_SQ_TIRED;
#endif /* __rtems__ */
        td->td_sleepqueue = NULL;
        td->td_sqqueue = queue;
        td->td_wchan = wchan;
        td->td_wmesg = wmesg;
#ifndef __rtems__
        if (flags & SLEEPQ_INTERRUPTIBLE) {
                td->td_flags |= TDF_SINTR;
                td->td_flags &= ~TDF_SLEEPABORT;
        }
        thread_unlock(td);
#else /* __rtems__ */
        _Objects_Release_and_ISR_enable(&executing->Object, &lock_context);
#endif /* __rtems__ */
}

/*
 * Sets a timeout that will remove the current thread from the specified
 * sleep queue after timo ticks if the thread has not already been awakened.
 */
void
sleepq_set_timeout(void *wchan, int timo)
{
#ifndef __rtems__
        struct sleepqueue_chain *sc;
        struct thread *td;

        td = curthread;
        sc = SC_LOOKUP(wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);
        MPASS(TD_ON_SLEEPQ(td));
        MPASS(td->td_sleepqueue == NULL);
        MPASS(wchan != NULL);
        callout_reset_curcpu(&td->td_slpcallout, timo, sleepq_timeout, td);
#else /* __rtems__ */
        Thread_Control *executing;

        _Thread_Disable_dispatch();
        executing = _Thread_Executing;
        BSD_ASSERT(executing->Timer.state == WATCHDOG_INACTIVE);
        _Watchdog_Initialize(&executing->Timer, sleepq_timeout,
            0, executing);
        _Watchdog_Insert_ticks(&executing->Timer, (Watchdog_Interval)timo);
        _Thread_Enable_dispatch();
#endif /* __rtems__ */
}
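
/*
 * Illustrative sketch (added commentary): a timed sleep pairs
 * sleepq_set_timeout() with sleepq_timedwait(); timo is a tick count and
 * "example" a placeholder wait message.  On FreeBSD the timed-out case
 * reports EWOULDBLOCK via sleepq_check_timeout().
 *
 *        sleepq_lock(wchan);
 *        sleepq_add(wchan, NULL, "example", SLEEPQ_SLEEP, 0);
 *        sleepq_set_timeout(wchan, timo);
 *        if (sleepq_timedwait(wchan, 0) == EWOULDBLOCK)
 *                ;        /* the sleep timed out */
 */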

/*
 * Return the number of actual sleepers for the specified queue.
 */
u_int
sleepq_sleepcnt(void *wchan, int queue)
{
        struct sleepqueue *sq;

        KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
        MPASS((queue >= 0) && (queue < NR_SLEEPQS));
        sq = sleepq_lookup(wchan);
        if (sq == NULL)
                return (0);
        return (sq->sq_blockedcnt[queue]);
}

#ifndef __rtems__
/*
 * Marks the pending sleep of the current thread as interruptible and
 * makes an initial check for pending signals before putting a thread
 * to sleep. Enters and exits with the thread lock held.  Thread lock
 * may have transitioned from the sleepq lock to a run lock.
 */
static int
sleepq_catch_signals(void *wchan, int pri)
{
        struct sleepqueue_chain *sc;
        struct sleepqueue *sq;
        struct thread *td;
        struct proc *p;
        struct sigacts *ps;
        int sig, ret, stop_allowed;

        td = curthread;
        p = curproc;
        sc = SC_LOOKUP(wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);
        MPASS(wchan != NULL);
        if ((td->td_pflags & TDP_WAKEUP) != 0) {
                td->td_pflags &= ~TDP_WAKEUP;
                ret = EINTR;
                thread_lock(td);
                goto out;
        }

        /*
         * See if there are any pending signals for this thread.  If not
         * we can switch immediately.  Otherwise do the signal processing
         * directly.
         */
        thread_lock(td);
        if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) == 0) {
                sleepq_switch(wchan, pri);
                return (0);
        }
        stop_allowed = (td->td_flags & TDF_SBDRY) ? SIG_STOP_NOT_ALLOWED :
            SIG_STOP_ALLOWED;
        thread_unlock(td);
        mtx_unlock_spin(&sc->sc_lock);
        CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
                (void *)td, (long)p->p_pid, td->td_name);
        PROC_LOCK(p);
        ps = p->p_sigacts;
        mtx_lock(&ps->ps_mtx);
        sig = cursig(td, stop_allowed);
        if (sig == 0) {
                mtx_unlock(&ps->ps_mtx);
                ret = thread_suspend_check(1);
                MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
        } else {
                if (SIGISMEMBER(ps->ps_sigintr, sig))
                        ret = EINTR;
                else
                        ret = ERESTART;
                mtx_unlock(&ps->ps_mtx);
        }
        /*
         * Lock the per-process spinlock prior to dropping the PROC_LOCK
         * to avoid a signal delivery race.  PROC_LOCK, PROC_SLOCK, and
         * thread_lock() are currently held in tdsendsignal().
         */
        PROC_SLOCK(p);
        mtx_lock_spin(&sc->sc_lock);
        PROC_UNLOCK(p);
        thread_lock(td);
        PROC_SUNLOCK(p);
        if (ret == 0) {
                sleepq_switch(wchan, pri);
                return (0);
        }
out:
        /*
         * There were pending signals and this thread is still
         * on the sleep queue, remove it from the sleep queue.
         */
        if (TD_ON_SLEEPQ(td)) {
                sq = sleepq_lookup(wchan);
                if (sleepq_resume_thread(sq, td, 0)) {
#ifdef INVARIANTS
                        /*
                         * This thread hasn't gone to sleep yet, so it
                         * should not be swapped out.
                         */
                        panic("not waking up swapper");
#endif
                }
        }
        mtx_unlock_spin(&sc->sc_lock);
        MPASS(td->td_lock != &sc->sc_lock);
        return (ret);
}
#endif /* __rtems__ */

/*
 * Switches to another thread if we are still asleep on a sleep queue.
 * Returns with thread lock.
 */
static void
sleepq_switch(void *wchan, int pri)
{
#ifndef __rtems__
        struct sleepqueue_chain *sc;
        struct sleepqueue *sq;
        struct thread *td;

        td = curthread;
        sc = SC_LOOKUP(wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);
        THREAD_LOCK_ASSERT(td, MA_OWNED);

        /*
         * If we have a sleep queue, then we've already been woken up, so
         * just return.
         */
        if (td->td_sleepqueue != NULL) {
                mtx_unlock_spin(&sc->sc_lock);
                return;
        }

        /*
         * If TDF_TIMEOUT is set, then our sleep has been timed out
         * already but we are still on the sleep queue, so dequeue the
         * thread and return.
         */
        if (td->td_flags & TDF_TIMEOUT) {
                MPASS(TD_ON_SLEEPQ(td));
                sq = sleepq_lookup(wchan);
                if (sleepq_resume_thread(sq, td, 0)) {
#ifdef INVARIANTS
                        /*
                         * This thread hasn't gone to sleep yet, so it
                         * should not be swapped out.
                         */
                        panic("not waking up swapper");
#endif
                }
                mtx_unlock_spin(&sc->sc_lock);
                return;
        }
#ifdef SLEEPQUEUE_PROFILING
        if (prof_enabled)
                sleepq_profile(td->td_wmesg);
#endif
        MPASS(td->td_sleepqueue == NULL);
        sched_sleep(td, pri);
        thread_lock_set(td, &sc->sc_lock);
        SDT_PROBE0(sched, , , sleep);
        TD_SET_SLEEPING(td);
        mi_switch(SW_VOL | SWT_SLEEPQ, NULL);
        KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
        CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
            (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
#else /* __rtems__ */
        Thread_Control *executing;
        ISR_lock_Context lock_context;
        struct thread *td;
        bool block;
        bool remove;

        sleepq_release(wchan);

        executing = _Thread_Acquire_executing(&lock_context);
        td = rtems_bsd_get_thread(executing);
        BSD_ASSERT(td != NULL);

        block = false;
        remove = false;
        switch (td->td_sq_state) {
        case TD_SQ_TIRED:
                BSD_ASSERT(td->td_wchan == wchan);
                td->td_sq_state = TD_SQ_SLEEPY;
                block = true;
                break;
        case TD_SQ_NIGHTMARE:
                BSD_ASSERT(td->td_wchan == wchan);
                td->td_sq_state = TD_SQ_PANIC;
                remove = true;
                break;
        default:
                BSD_ASSERT(td->td_wchan == NULL);
                BSD_ASSERT(td->td_sq_state == TD_SQ_WAKEUP);
                break;
        }

        if (block) {
                Per_CPU_Control *cpu_self;
                bool unblock;

                cpu_self = _Objects_Release_and_thread_dispatch_disable(
                    &executing->Object, &lock_context);

                _Giant_Acquire(cpu_self);
                _Thread_Set_state(executing, STATES_WAITING_FOR_BSD_WAKEUP);
                _Giant_Release(cpu_self);

                _Objects_ISR_disable_and_acquire(&executing->Object,
                    &lock_context);

                unblock = false;
                switch (td->td_sq_state) {
                case TD_SQ_NIGHTMARE:
                        BSD_ASSERT(td->td_wchan == wchan);
                        td->td_sq_state = TD_SQ_PANIC;
                        unblock = true;
                        remove = true;
                        break;
                case TD_SQ_WAKEUP:
                        BSD_ASSERT(td->td_wchan == NULL);
                        unblock = true;
                        break;
                default:
                        BSD_ASSERT(td->td_wchan == wchan);
                        BSD_ASSERT(td->td_sq_state == TD_SQ_SLEEPY);
                        td->td_sq_state = TD_SQ_SLEEPING;
                        break;
                }

                _Objects_Release_and_ISR_enable(&executing->Object,
                    &lock_context);

                if (unblock) {
                        _Giant_Acquire(cpu_self);
                        _Watchdog_Remove(&executing->Timer);
                        _Thread_Clear_state(executing, STATES_WAITING_FOR_BSD_WAKEUP);
                        _Giant_Release(cpu_self);
                }

                _Thread_Dispatch_enable(cpu_self);

                _Objects_ISR_disable_and_acquire(&executing->Object,
                    &lock_context);

                switch (td->td_sq_state) {
                case TD_SQ_NIGHTMARE:
                        BSD_ASSERT(td->td_wchan == wchan);
                        td->td_sq_state = TD_SQ_PANIC;
                        remove = true;
                        break;
                default:
                        BSD_ASSERT(td->td_sq_state == TD_SQ_WAKEUP ||
                            td->td_sq_state == TD_SQ_PANIC);
                        break;
                }
        }

        _Objects_Release_and_ISR_enable(&executing->Object,
            &lock_context);

        if (remove) {
                sleepq_remove(td, wchan);
        }
#endif /* __rtems__ */
}
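
/*
 * Summary of the td_sq_state machine used above (added commentary,
 * inferred from the code): sleepq_add() puts a thread into TD_SQ_TIRED;
 * sleepq_switch() advances it to TD_SQ_SLEEPY and, once the thread has
 * actually blocked, to TD_SQ_SLEEPING.  A wakeup via
 * sleepq_resume_thread() turns any of these states into TD_SQ_WAKEUP,
 * while an expired timer in sleepq_timeout() turns them into
 * TD_SQ_NIGHTMARE.  sleepq_switch() converts TD_SQ_NIGHTMARE into
 * TD_SQ_PANIC and dequeues the thread via sleepq_remove().
 */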

/*
 * Check to see if we timed out.
 */
static int
sleepq_check_timeout(void)
{
        struct thread *td;

        td = curthread;
#ifndef __rtems__
        THREAD_LOCK_ASSERT(td, MA_OWNED);

        /*
         * If TDF_TIMEOUT is set, we timed out.
         */
        if (td->td_flags & TDF_TIMEOUT) {
                td->td_flags &= ~TDF_TIMEOUT;
                return (EWOULDBLOCK);
        }

        /*
         * If TDF_TIMOFAIL is set, the timeout ran after we had
         * already been woken up.
         */
        if (td->td_flags & TDF_TIMOFAIL)
                td->td_flags &= ~TDF_TIMOFAIL;

        /*
         * If callout_stop() fails, then the timeout is running on
         * another CPU, so synchronize with it to avoid having it
         * accidentally wake up a subsequent sleep.
         */
        else if (callout_stop(&td->td_slpcallout) == 0) {
                td->td_flags |= TDF_TIMEOUT;
                TD_SET_SLEEPING(td);
                mi_switch(SW_INVOL | SWT_SLEEPQTIMO, NULL);
        }
        return (0);
#else /* __rtems__ */
        return (td->td_sq_state);
#endif /* __rtems__ */
}

#ifndef __rtems__
/*
 * Check to see if we were awoken by a signal.
 */
static int
sleepq_check_signals(void)
{
        struct thread *td;

        td = curthread;
        THREAD_LOCK_ASSERT(td, MA_OWNED);

        /* We are no longer in an interruptible sleep. */
        if (td->td_flags & TDF_SINTR)
                td->td_flags &= ~TDF_SINTR;

        if (td->td_flags & TDF_SLEEPABORT) {
                td->td_flags &= ~TDF_SLEEPABORT;
                return (td->td_intrval);
        }

        return (0);
}
#endif /* __rtems__ */

/*
 * Block the current thread until it is awakened from its sleep queue.
 */
void
sleepq_wait(void *wchan, int pri)
{
#ifndef __rtems__
        struct thread *td;

        td = curthread;
        MPASS(!(td->td_flags & TDF_SINTR));
        thread_lock(td);
#endif /* __rtems__ */
        sleepq_switch(wchan, pri);
#ifndef __rtems__
        thread_unlock(td);
#endif /* __rtems__ */
}

#ifndef __rtems__
/*
 * Block the current thread until it is awakened from its sleep queue
 * or it is interrupted by a signal.
 */
int
sleepq_wait_sig(void *wchan, int pri)
{
        int rcatch;
        int rval;

        rcatch = sleepq_catch_signals(wchan, pri);
        rval = sleepq_check_signals();
        thread_unlock(curthread);
        if (rcatch)
                return (rcatch);
        return (rval);
}
#endif /* __rtems__ */

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it times out while waiting.
 */
int
sleepq_timedwait(void *wchan, int pri)
{
#ifndef __rtems__
        struct thread *td;
#endif /* __rtems__ */
        int rval;

#ifndef __rtems__
        td = curthread;
        MPASS(!(td->td_flags & TDF_SINTR));
        thread_lock(td);
#endif /* __rtems__ */
        sleepq_switch(wchan, pri);
        rval = sleepq_check_timeout();
#ifndef __rtems__
        thread_unlock(td);
#endif /* __rtems__ */

        return (rval);
}

#ifndef __rtems__
/*
 * Block the current thread until it is awakened from its sleep queue,
 * it is interrupted by a signal, or it times out waiting to be awakened.
 */
int
sleepq_timedwait_sig(void *wchan, int pri)
{
        int rcatch, rvalt, rvals;

        rcatch = sleepq_catch_signals(wchan, pri);
        rvalt = sleepq_check_timeout();
        rvals = sleepq_check_signals();
        thread_unlock(curthread);
        if (rcatch)
                return (rcatch);
        if (rvals)
                return (rvals);
        return (rvalt);
}
#endif /* __rtems__ */

/*
 * Returns the type of sleep queue given a wait channel.
 */
int
sleepq_type(void *wchan)
{
        struct sleepqueue *sq;
        int type;

        MPASS(wchan != NULL);

        sleepq_lock(wchan);
        sq = sleepq_lookup(wchan);
        if (sq == NULL) {
                sleepq_release(wchan);
                return (-1);
        }
        type = sq->sq_type;
        sleepq_release(wchan);
        return (type);
}

/*
 * Removes a thread from a sleep queue and makes it
 * runnable.
 */
static int
sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
{
        struct sleepqueue_chain *sc;
#ifdef __rtems__
        Thread_Control *thread;
        ISR_lock_Context lock_context;
        bool unblock;

        BSD_ASSERT(sq != NULL);
#endif /* __rtems__ */

        MPASS(td != NULL);
        MPASS(sq->sq_wchan != NULL);
        MPASS(td->td_wchan == sq->sq_wchan);
        MPASS(td->td_sqqueue < NR_SLEEPQS && td->td_sqqueue >= 0);
        THREAD_LOCK_ASSERT(td, MA_OWNED);
        sc = SC_LOOKUP(sq->sq_wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);

        SDT_PROBE2(sched, , , wakeup, td, td->td_proc);

        /* Remove the thread from the queue. */
        sq->sq_blockedcnt[td->td_sqqueue]--;
        TAILQ_REMOVE(&sq->sq_blocked[td->td_sqqueue], td, td_slpq);

        /*
         * Get a sleep queue for this thread.  If this is the last waiter,
         * use the queue itself and take it out of the chain, otherwise,
         * remove a queue from the free list.
         */
        if (LIST_EMPTY(&sq->sq_free)) {
                td->td_sleepqueue = sq;
#ifdef INVARIANTS
                sq->sq_wchan = NULL;
#endif
#ifdef SLEEPQUEUE_PROFILING
                sc->sc_depth--;
#endif
        } else
                td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
        LIST_REMOVE(td->td_sleepqueue, sq_hash);
#ifdef __rtems__
        (void)sc;
        thread = td->td_thread;
        _Objects_ISR_disable_and_acquire(&thread->Object, &lock_context);
#endif /* __rtems__ */

        td->td_wmesg = NULL;
        td->td_wchan = NULL;
#ifndef __rtems__
        td->td_flags &= ~TDF_SINTR;

        CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
            (void *)td, (long)td->td_proc->p_pid, td->td_name);

        /* Adjust priority if requested. */
        MPASS(pri == 0 || (pri >= PRI_MIN && pri <= PRI_MAX));
        if (pri != 0 && td->td_priority > pri &&
            PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
                sched_prio(td, pri);

        /*
         * Note that thread td might not be sleeping if it is running
         * sleepq_catch_signals() on another CPU or is blocked on its
         * proc lock to check signals.  There's no need to mark the
         * thread runnable in that case.
         */
        if (TD_IS_SLEEPING(td)) {
                TD_CLR_SLEEPING(td);
                return (setrunnable(td));
        }
#else /* __rtems__ */
        unblock = _Watchdog_Is_active(&thread->Timer);
        switch (td->td_sq_state) {
        case TD_SQ_SLEEPING:
                unblock = true;
                /* FALLTHROUGH */
        case TD_SQ_TIRED:
        case TD_SQ_SLEEPY:
        case TD_SQ_NIGHTMARE:
                td->td_sq_state = TD_SQ_WAKEUP;
                break;
        default:
                BSD_ASSERT(td->td_sq_state == TD_SQ_PANIC);
                break;
        }

        if (unblock) {
                Per_CPU_Control *cpu_self;

                cpu_self = _Objects_Release_and_thread_dispatch_disable(
                    &thread->Object, &lock_context);
                _Giant_Acquire(cpu_self);

                _Watchdog_Remove(&thread->Timer);
                _Thread_Clear_state(thread, STATES_WAITING_FOR_BSD_WAKEUP);

                _Giant_Release(cpu_self);
                _Thread_Dispatch_enable(cpu_self);
        } else {
                _Objects_Release_and_ISR_enable(&thread->Object,
                    &lock_context);
        }
#endif /* __rtems__ */
        return (0);
}

#ifdef INVARIANTS
/*
 * UMA zone item deallocator.
 */
static void
sleepq_dtor(void *mem, int size, void *arg)
{
        struct sleepqueue *sq;
        int i;

        sq = mem;
        for (i = 0; i < NR_SLEEPQS; i++) {
                MPASS(TAILQ_EMPTY(&sq->sq_blocked[i]));
                MPASS(sq->sq_blockedcnt[i] == 0);
        }
}
#endif

/*
 * UMA zone item initializer.
 */
static int
sleepq_init(void *mem, int size, int flags)
{
        struct sleepqueue *sq;
        int i;

        bzero(mem, size);
        sq = mem;
        for (i = 0; i < NR_SLEEPQS; i++) {
                TAILQ_INIT(&sq->sq_blocked[i]);
                sq->sq_blockedcnt[i] = 0;
        }
        LIST_INIT(&sq->sq_free);
        return (0);
}

/*
 * Find the highest priority thread sleeping on a wait channel and resume it.
 */
int
sleepq_signal(void *wchan, int flags, int pri, int queue)
{
        struct sleepqueue *sq;
#ifndef __rtems__
        struct thread *td, *besttd;
#else /* __rtems__ */
        struct thread *besttd;
#endif /* __rtems__ */
        int wakeup_swapper;

        CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
        KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
        MPASS((queue >= 0) && (queue < NR_SLEEPQS));
        sq = sleepq_lookup(wchan);
        if (sq == NULL)
                return (0);
        KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
            ("%s: mismatch between sleep/wakeup and cv_*", __func__));

#ifndef __rtems__
        /*
         * Find the highest priority thread on the queue.  If there is a
         * tie, use the thread that first appears in the queue as it has
         * been sleeping the longest since threads are always added to
         * the tail of sleep queues.
         */
        besttd = NULL;
        TAILQ_FOREACH(td, &sq->sq_blocked[queue], td_slpq) {
                if (besttd == NULL || td->td_priority < besttd->td_priority)
                        besttd = td;
        }
#else /* __rtems__ */
        besttd = TAILQ_FIRST(&sq->sq_blocked[queue]);
#endif /* __rtems__ */
        MPASS(besttd != NULL);
        thread_lock(besttd);
        wakeup_swapper = sleepq_resume_thread(sq, besttd, pri);
        thread_unlock(besttd);
        return (wakeup_swapper);
}

/*
 * Resume all threads sleeping on a specified wait channel.
 */
int
sleepq_broadcast(void *wchan, int flags, int pri, int queue)
{
        struct sleepqueue *sq;
        struct thread *td, *tdn;
        int wakeup_swapper;

        CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
        KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
        MPASS((queue >= 0) && (queue < NR_SLEEPQS));
        sq = sleepq_lookup(wchan);
        if (sq == NULL)
                return (0);
        KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
            ("%s: mismatch between sleep/wakeup and cv_*", __func__));

        /* Resume all blocked threads on the sleep queue. */
        wakeup_swapper = 0;
        TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) {
                thread_lock(td);
                if (sleepq_resume_thread(sq, td, pri))
                        wakeup_swapper = 1;
                thread_unlock(td);
        }
        return (wakeup_swapper);
}
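
/*
 * Illustrative sketch (added commentary): the wakeup side holds the chain
 * lock around the signal or broadcast; a pri of 0 requests no priority
 * adjustment, and a nonzero return value asks the caller to kick the
 * swapper.
 *
 *        sleepq_lock(wchan);
 *        wakeup_swapper = sleepq_signal(wchan, SLEEPQ_SLEEP, 0, 0);
 *        sleepq_release(wchan);
 *        if (wakeup_swapper)
 *                kick_proc0();
 */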

#ifndef __rtems__
/*
 * Time sleeping threads out.  When the timeout expires, the thread is
 * removed from the sleep queue and made runnable if it is still asleep.
 */
static void
sleepq_timeout(void *arg)
{
        struct sleepqueue_chain *sc;
        struct sleepqueue *sq;
        struct thread *td;
        void *wchan;
        int wakeup_swapper;

        td = arg;
        wakeup_swapper = 0;
        CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
            (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);

        /*
         * First, see if the thread is asleep and get the wait channel if
         * it is.
         */
        thread_lock(td);
        if (TD_IS_SLEEPING(td) && TD_ON_SLEEPQ(td)) {
                wchan = td->td_wchan;
                sc = SC_LOOKUP(wchan);
                THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock);
                sq = sleepq_lookup(wchan);
                MPASS(sq != NULL);
                td->td_flags |= TDF_TIMEOUT;
                wakeup_swapper = sleepq_resume_thread(sq, td, 0);
                thread_unlock(td);
                if (wakeup_swapper)
                        kick_proc0();
                return;
        }

        /*
         * If the thread is on the SLEEPQ but isn't sleeping yet, it
         * can either be on another CPU in between sleepq_add() and
         * one of the sleepq_*wait*() routines or it can be in
         * sleepq_catch_signals().
         */
        if (TD_ON_SLEEPQ(td)) {
                td->td_flags |= TDF_TIMEOUT;
                thread_unlock(td);
                return;
        }

        /*
         * Now check for the edge cases.  First, if TDF_TIMEOUT is set,
         * then the other thread has already yielded to us, so clear
         * the flag and resume it.  If TDF_TIMEOUT is not set, then we
         * know that the other thread is not on a sleep queue, but it
         * hasn't resumed execution yet.  In that case, set TDF_TIMOFAIL
         * to let it know that the timeout has already run and doesn't
         * need to be canceled.
         */
        if (td->td_flags & TDF_TIMEOUT) {
                MPASS(TD_IS_SLEEPING(td));
                td->td_flags &= ~TDF_TIMEOUT;
                TD_CLR_SLEEPING(td);
                wakeup_swapper = setrunnable(td);
        } else
                td->td_flags |= TDF_TIMOFAIL;
        thread_unlock(td);
        if (wakeup_swapper)
                kick_proc0();
}
#else /* __rtems__ */
static void
sleepq_timeout(Objects_Id id, void *arg)
{
        Thread_Control *thread;
        struct thread *td;
        ISR_lock_Context lock_context;
        bool unblock;

        thread = arg;
        td = rtems_bsd_get_thread(thread);
        BSD_ASSERT(td != NULL);

        _Objects_ISR_disable_and_acquire(&thread->Object, &lock_context);

        unblock = false;
        switch (td->td_sq_state) {
        case TD_SQ_SLEEPING:
                unblock = true;
                /* Fall through */
        case TD_SQ_TIRED:
        case TD_SQ_SLEEPY:
                td->td_sq_state = TD_SQ_NIGHTMARE;
                break;
        default:
                BSD_ASSERT(td->td_sq_state == TD_SQ_WAKEUP);
                break;
        }

        if (unblock) {
                Per_CPU_Control *cpu_self;

                cpu_self = _Objects_Release_and_thread_dispatch_disable(
                    &thread->Object, &lock_context);
                _Giant_Acquire(cpu_self);

                _Thread_Clear_state(thread, STATES_WAITING_FOR_BSD_WAKEUP);

                _Giant_Release(cpu_self);
                _Thread_Dispatch_enable(cpu_self);
        } else {
                _Objects_Release_and_ISR_enable(&thread->Object,
                    &lock_context);
        }
}
#endif /* __rtems__ */

/*
 * Resumes a specific thread from the sleep queue associated with a specific
 * wait channel if it is on that queue.
 */
void
sleepq_remove(struct thread *td, void *wchan)
{
        struct sleepqueue *sq;
        int wakeup_swapper;

        /*
         * Look up the sleep queue for this wait channel, then re-check
         * that the thread is asleep on that channel; if it is not, then
         * bail.
         */
        MPASS(wchan != NULL);
        sleepq_lock(wchan);
        sq = sleepq_lookup(wchan);
        /*
         * We can not lock the thread here as it may be sleeping on a
         * different sleepq.  However, holding the sleepq lock for this
         * wchan can guarantee that we do not miss a wakeup for this
         * channel.  The asserts below will catch any false positives.
         */
        if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
                sleepq_release(wchan);
                return;
        }
        /* Thread is asleep on sleep queue sq, so wake it up. */
        thread_lock(td);
        MPASS(sq != NULL);
        MPASS(td->td_wchan == wchan);
        wakeup_swapper = sleepq_resume_thread(sq, td, 0);
        thread_unlock(td);
        sleepq_release(wchan);
        if (wakeup_swapper)
                kick_proc0();
}

#ifndef __rtems__
/*
 * Abort a thread as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 */
int
sleepq_abort(struct thread *td, int intrval)
{
        struct sleepqueue *sq;
        void *wchan;

        THREAD_LOCK_ASSERT(td, MA_OWNED);
        MPASS(TD_ON_SLEEPQ(td));
        MPASS(td->td_flags & TDF_SINTR);
        MPASS(intrval == EINTR || intrval == ERESTART);

        /*
         * If the TDF_TIMEOUT flag is set, just leave. A
         * timeout is scheduled anyhow.
         */
        if (td->td_flags & TDF_TIMEOUT)
                return (0);

        CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
            (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
        td->td_intrval = intrval;
        td->td_flags |= TDF_SLEEPABORT;
        /*
         * If the thread has not slept yet it will find the signal in
         * sleepq_catch_signals() and call sleepq_resume_thread.  Otherwise
         * we have to do it here.
         */
        if (!TD_IS_SLEEPING(td))
                return (0);
        wchan = td->td_wchan;
        MPASS(wchan != NULL);
        sq = sleepq_lookup(wchan);
        MPASS(sq != NULL);

        /* Thread is asleep on sleep queue sq, so wake it up. */
        return (sleepq_resume_thread(sq, td, 0));
}
#endif /* __rtems__ */

#ifdef SLEEPQUEUE_PROFILING
#define SLEEPQ_PROF_LOCATIONS   1024
#define SLEEPQ_SBUFSIZE         512
struct sleepq_prof {
        LIST_ENTRY(sleepq_prof) sp_link;
        const char      *sp_wmesg;
        long            sp_count;
};

LIST_HEAD(sqphead, sleepq_prof);

struct sqphead sleepq_prof_free;
struct sqphead sleepq_hash[SC_TABLESIZE];
static struct sleepq_prof sleepq_profent[SLEEPQ_PROF_LOCATIONS];
static struct mtx sleepq_prof_lock;
MTX_SYSINIT(sleepq_prof_lock, &sleepq_prof_lock, "sleepq_prof", MTX_SPIN);

static void
sleepq_profile(const char *wmesg)
{
        struct sleepq_prof *sp;

        mtx_lock_spin(&sleepq_prof_lock);
        if (prof_enabled == 0)
                goto unlock;
        LIST_FOREACH(sp, &sleepq_hash[SC_HASH(wmesg)], sp_link)
                if (sp->sp_wmesg == wmesg)
                        goto done;
        sp = LIST_FIRST(&sleepq_prof_free);
        if (sp == NULL)
                goto unlock;
        sp->sp_wmesg = wmesg;
        LIST_REMOVE(sp, sp_link);
        LIST_INSERT_HEAD(&sleepq_hash[SC_HASH(wmesg)], sp, sp_link);
done:
        sp->sp_count++;
unlock:
        mtx_unlock_spin(&sleepq_prof_lock);
        return;
}

static void
sleepq_prof_reset(void)
{
        struct sleepq_prof *sp;
        int enabled;
        int i;

        mtx_lock_spin(&sleepq_prof_lock);
        enabled = prof_enabled;
        prof_enabled = 0;
        for (i = 0; i < SC_TABLESIZE; i++)
                LIST_INIT(&sleepq_hash[i]);
        LIST_INIT(&sleepq_prof_free);
        for (i = 0; i < SLEEPQ_PROF_LOCATIONS; i++) {
                sp = &sleepq_profent[i];
                sp->sp_wmesg = NULL;
                sp->sp_count = 0;
                LIST_INSERT_HEAD(&sleepq_prof_free, sp, sp_link);
        }
        prof_enabled = enabled;
        mtx_unlock_spin(&sleepq_prof_lock);
}

static int
enable_sleepq_prof(SYSCTL_HANDLER_ARGS)
{
        int error, v;

        v = prof_enabled;
        error = sysctl_handle_int(oidp, &v, v, req);
        if (error)
                return (error);
        if (req->newptr == NULL)
                return (error);
        if (v == prof_enabled)
                return (0);
        if (v == 1)
                sleepq_prof_reset();
        mtx_lock_spin(&sleepq_prof_lock);
        prof_enabled = !!v;
        mtx_unlock_spin(&sleepq_prof_lock);

        return (0);
}

static int
reset_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
        int error, v;

        v = 0;
        error = sysctl_handle_int(oidp, &v, 0, req);
        if (error)
                return (error);
        if (req->newptr == NULL)
                return (error);
        if (v == 0)
                return (0);
        sleepq_prof_reset();

        return (0);
}

static int
dump_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
        struct sleepq_prof *sp;
        struct sbuf *sb;
        int enabled;
        int error;
        int i;

        error = sysctl_wire_old_buffer(req, 0);
        if (error != 0)
                return (error);
        sb = sbuf_new_for_sysctl(NULL, NULL, SLEEPQ_SBUFSIZE, req);
        sbuf_printf(sb, "\nwmesg\tcount\n");
        enabled = prof_enabled;
        mtx_lock_spin(&sleepq_prof_lock);
        prof_enabled = 0;
        mtx_unlock_spin(&sleepq_prof_lock);
        for (i = 0; i < SC_TABLESIZE; i++) {
                LIST_FOREACH(sp, &sleepq_hash[i], sp_link) {
                        sbuf_printf(sb, "%s\t%ld\n",
                            sp->sp_wmesg, sp->sp_count);
                }
        }
        mtx_lock_spin(&sleepq_prof_lock);
        prof_enabled = enabled;
        mtx_unlock_spin(&sleepq_prof_lock);

        error = sbuf_finish(sb);
        sbuf_delete(sb);
        return (error);
}

SYSCTL_PROC(_debug_sleepq, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_sleepq_prof_stats, "A", "Sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_sleepq_prof_stats, "I",
    "Reset sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, enable_sleepq_prof, "I", "Enable sleepqueue profiling");
#endif
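
/*
 * Usage note (added commentary): with SLEEPQUEUE_PROFILING compiled in,
 * the handlers above surface as debug.sleepq.* sysctls, e.g. on FreeBSD:
 *
 *        sysctl debug.sleepq.enable=1        # start profiling
 *        sysctl debug.sleepq.stats           # dump wmesg and count pairs
 *        sysctl debug.sleepq.reset=1         # clear the counters
 */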

#ifdef DDB
DB_SHOW_COMMAND(sleepq, db_show_sleepqueue)
{
        struct sleepqueue_chain *sc;
        struct sleepqueue *sq;
#ifdef INVARIANTS
        struct lock_object *lock;
#endif
        struct thread *td;
        void *wchan;
        int i;

        if (!have_addr)
                return;

        /*
         * First, see if there is an active sleep queue for the wait channel
         * indicated by the address.
         */
        wchan = (void *)addr;
        sc = SC_LOOKUP(wchan);
        LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
                if (sq->sq_wchan == wchan)
                        goto found;

        /*
         * Second, see if there is an active sleep queue at the address
         * indicated.
         */
        for (i = 0; i < SC_TABLESIZE; i++)
                LIST_FOREACH(sq, &sleepq_chains[i].sc_queues, sq_hash) {
                        if (sq == (struct sleepqueue *)addr)
                                goto found;
                }

        db_printf("Unable to locate a sleep queue via %p\n", (void *)addr);
        return;
found:
        db_printf("Wait channel: %p\n", sq->sq_wchan);
        db_printf("Queue type: %d\n", sq->sq_type);
#ifdef INVARIANTS
        if (sq->sq_lock) {
                lock = sq->sq_lock;
                db_printf("Associated Interlock: %p - (%s) %s\n", lock,
                    LOCK_CLASS(lock)->lc_name, lock->lo_name);
        }
#endif
        db_printf("Blocked threads:\n");
        for (i = 0; i < NR_SLEEPQS; i++) {
                db_printf("\nQueue[%d]:\n", i);
                if (TAILQ_EMPTY(&sq->sq_blocked[i]))
                        db_printf("\tempty\n");
                else
                        TAILQ_FOREACH(td, &sq->sq_blocked[i],
                                      td_slpq) {
                                db_printf("\t%p (tid %d, pid %d, \"%s\")\n", td,
                                          td->td_tid, td->td_proc->p_pid,
                                          td->td_name);
                        }
                db_printf("(expected: %u)\n", sq->sq_blockedcnt[i]);
        }
}

/* Alias 'show sleepqueue' to 'show sleepq'. */
DB_SHOW_ALIAS(sleepqueue, db_show_sleepqueue);
#endif