source: rtems-libbsd/freebsd/sys/kern/subr_sleepqueue.c @ 1ee85a5

Last change on this file since 1ee85a5 was 1ee85a5, checked in by Sebastian Huber <sebastian.huber@…>, on Apr 30, 2015 at 9:00:02 AM

SLEEPQUEUE(9): Update due to API changes

1#include <machine/rtems-bsd-kernel-space.h>
2
3/*-
4 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
5 * Copyright (c) 2015 embedded brains GmbH <rtems@embedded-brains.de>
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30/*
31 * Implementation of sleep queues used to hold queues of threads blocked on
32 * a wait channel.  Sleep queues differ from turnstiles in that wait
33 * channels are not owned by anyone, so there is no priority propagation.
34 * Sleep queues can also provide a timeout and can also be interrupted by
35 * signals.  That said, there are several similarities between the turnstile
36 * and sleep queue implementations.  (Note: turnstiles were implemented
37 * first.)  For example, both use a hash table of the same size where each
38 * bucket is referred to as a "chain" that contains both a spin lock and
39 * a linked list of queues.  An individual queue is located by using a hash
40 * to pick a chain, locking the chain, and then walking the chain searching
41 * for the queue.  This means that a wait channel object does not need to
42 * embed its queue head just as locks do not embed their turnstile queue
43 * head.  Threads also carry around a sleep queue that they lend to the
44 * wait channel when blocking.  Just as in turnstiles, the queue includes
45 * a free list of the sleep queues of other threads blocked on the same
46 * wait channel in the case of multiple waiters.
47 *
48 * Some additional functionality provided by sleep queues includes the
49 * ability to set a timeout.  The timeout is managed using a per-thread
50 * callout that resumes a thread if it is asleep.  A thread may also
51 * catch signals while it is asleep (aka an interruptible sleep).  The
52 * signal code uses sleepq_abort() to interrupt a sleeping thread.  Finally,
53 * sleep queues also provide some extra assertions.  One is not allowed to
54 * mix the sleep/wakeup and cv APIs for a given wait channel.  Also, one
55 * must consistently use the same lock to synchronize with a wait channel,
56 * though this check is currently only a warning for sleep/wakeup due to
57 * pre-existing abuse of that API.  The same lock must also be held when
58 * awakening threads, though that is currently only enforced for condition
59 * variables.
60 */
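/*
 * Editor's note: the sketch below is illustrative only and is not part of
 * the original file.  It shows how a consumer typically drives this API, in
 * the style documented in SLEEPQUEUE(9), using a hypothetical wait channel
 * "example_chan", interlock "example_lock" and condition "example_ready".
 * It relies on the headers this file already includes further down.
 */
static struct mtx example_lock;		/* hypothetical interlock */
static int example_ready;		/* hypothetical condition */
static int example_chan;		/* its address serves as the wait channel */

static void
example_wait(void)
{

	mtx_lock(&example_lock);
	while (!example_ready) {
		/* Lock the chain for the wait channel, then drop the interlock. */
		sleepq_lock(&example_chan);
		mtx_unlock(&example_lock);
		sleepq_add(&example_chan, &example_lock.lock_object, "examp",
		    SLEEPQ_SLEEP, 0);
		sleepq_wait(&example_chan, 0);	/* blocks until a wakeup */
		mtx_lock(&example_lock);
	}
	mtx_unlock(&example_lock);
}

static void
example_wakeup_one(void)
{
	int wakeup_swapper;

	mtx_lock(&example_lock);
	example_ready = 1;
	sleepq_lock(&example_chan);
	wakeup_swapper = sleepq_signal(&example_chan, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(&example_chan);
	mtx_unlock(&example_lock);
	if (wakeup_swapper)
		kick_proc0();	/* a resumed thread was swapped out */
}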
61
62#include <sys/cdefs.h>
63__FBSDID("$FreeBSD$");
64
65#include <rtems/bsd/local/opt_sleepqueue_profiling.h>
66#include <rtems/bsd/local/opt_ddb.h>
67#include <rtems/bsd/local/opt_kdtrace.h>
68#include <rtems/bsd/local/opt_sched.h>
69
70#include <rtems/bsd/sys/param.h>
71#include <sys/systm.h>
72#include <rtems/bsd/sys/lock.h>
73#include <sys/kernel.h>
74#include <sys/ktr.h>
75#include <sys/mutex.h>
76#include <sys/proc.h>
77#include <sys/sbuf.h>
78#include <sys/sched.h>
79#include <sys/sdt.h>
80#include <sys/signalvar.h>
81#include <sys/sleepqueue.h>
82#include <sys/sysctl.h>
83
84#include <vm/uma.h>
85
86#ifdef DDB
87#include <ddb/ddb.h>
88#endif
89#ifdef __rtems__
90#include <machine/rtems-bsd-thread.h>
91#include <rtems/score/threadimpl.h>
92#include <rtems/score/watchdogimpl.h>
93#endif /* __rtems__ */
94
95/*
96 * Constants for the hash table of sleep queue chains.  These constants are
97 * the same ones that 4BSD (and possibly earlier versions of BSD) used.
98 * Basically, we ignore the lower 8 bits of the address since most wait
99 * channel pointers are aligned and only look at the next 7 bits for the
100 * hash.  SC_TABLESIZE must be a power of two for SC_MASK to work properly.
101 */
102#define SC_TABLESIZE    128                     /* Must be power of 2. */
103#define SC_MASK         (SC_TABLESIZE - 1)
104#define SC_SHIFT        8
105#define SC_HASH(wc)     (((uintptr_t)(wc) >> SC_SHIFT) & SC_MASK)
106#define SC_LOOKUP(wc)   &sleepq_chains[SC_HASH(wc)]
107#define NR_SLEEPQS      2
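/*
 * Editor's note: a worked example of the hashing above (not in the original).
 * A hypothetical wait channel at address 0x80412345 hashes as
 * SC_HASH(0x80412345) == (0x80412345 >> 8) & 127 == 0x804123 & 0x7f == 0x23,
 * so SC_LOOKUP() selects sleepq_chains[0x23].
 */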
108/*
109 * There are two different lists of sleep queues.  Both lists are connected
110 * via the sq_hash entries.  The first list is the sleep queue chain list
111 * that a sleep queue is on when it is attached to a wait channel.  The
112 * second list is the free list hung off of a sleep queue that is attached
113 * to a wait channel.
114 *
115 * Each sleep queue also contains the wait channel it is attached to, the
116 * list of threads blocked on that wait channel, flags specific to the
117 * wait channel, and the lock used to synchronize with a wait channel.
118 * The flags are used to catch mismatches between the various consumers
119 * of the sleep queue API (e.g. sleep/wakeup and condition variables).
120 * The lock pointer is only used when invariants are enabled for various
121 * debugging checks.
122 *
123 * Locking key:
124 *  c - sleep queue chain lock
125 */
126struct sleepqueue {
127        TAILQ_HEAD(, thread) sq_blocked[NR_SLEEPQS];    /* (c) Blocked threads. */
128        u_int sq_blockedcnt[NR_SLEEPQS];        /* (c) N. of blocked threads. */
129        LIST_ENTRY(sleepqueue) sq_hash;         /* (c) Chain and free list. */
130        LIST_HEAD(, sleepqueue) sq_free;        /* (c) Free queues. */
131        void    *sq_wchan;                      /* (c) Wait channel. */
132        int     sq_type;                        /* (c) Queue type. */
133#ifdef INVARIANTS
134        struct lock_object *sq_lock;            /* (c) Associated lock. */
135#endif
136};
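/*
 * Editor's note: an illustration of the scheme above (not in the original).
 * If, say, three threads block on the same wait channel, the first thread
 * lends its sleepqueue to the channel and that queue is linked into the
 * chain via sq_hash; the second and third threads' sleepqueues are parked on
 * the first queue's sq_free list.  As threads are resumed they each take a
 * sleepqueue back, the last waiter reclaiming the queue on the chain itself.
 */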
137
138struct sleepqueue_chain {
139        LIST_HEAD(, sleepqueue) sc_queues;      /* List of sleep queues. */
140        struct mtx sc_lock;                     /* Spin lock for this chain. */
141#ifdef SLEEPQUEUE_PROFILING
142        u_int   sc_depth;                       /* Length of sc_queues. */
143        u_int   sc_max_depth;                   /* Max length of sc_queues. */
144#endif
145};
146
147#ifdef SLEEPQUEUE_PROFILING
148u_int sleepq_max_depth;
149static SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD, 0, "sleepq profiling");
150static SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains, CTLFLAG_RD, 0,
151    "sleepq chain stats");
152SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
153    0, "maximum depth achieved of a single chain");
154
155static void     sleepq_profile(const char *wmesg);
156static int      prof_enabled;
157#endif
158static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];
159static uma_zone_t sleepq_zone;
160
161/*
162 * Prototypes for non-exported routines.
163 */
164#ifndef __rtems__
165static int      sleepq_catch_signals(void *wchan, int pri);
166static int      sleepq_check_signals(void);
167static int      sleepq_check_timeout(void);
168#endif /* __rtems__ */
169#ifdef INVARIANTS
170static void     sleepq_dtor(void *mem, int size, void *arg);
171#endif
172static int      sleepq_init(void *mem, int size, int flags);
173static int      sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
174                    int pri);
175static void     sleepq_switch(void *wchan, int pri);
176#ifndef __rtems__
177static void     sleepq_timeout(void *arg);
178#else /* __rtems__ */
179static void     sleepq_timeout(Objects_Id id, void *arg);
180#endif /* __rtems__ */
181
182SDT_PROBE_DECLARE(sched, , , sleep);
183SDT_PROBE_DECLARE(sched, , , wakeup);
184
185/*
186 * Early initialization of sleep queues that is called from the sleepinit()
187 * SYSINIT.
188 */
189void
190init_sleepqueues(void)
191{
192#ifdef SLEEPQUEUE_PROFILING
193        struct sysctl_oid *chain_oid;
194        char chain_name[10];
195#endif
196        int i;
197
198        for (i = 0; i < SC_TABLESIZE; i++) {
199                LIST_INIT(&sleepq_chains[i].sc_queues);
200                mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
201                    MTX_SPIN | MTX_RECURSE);
202#ifdef SLEEPQUEUE_PROFILING
203                snprintf(chain_name, sizeof(chain_name), "%d", i);
204                chain_oid = SYSCTL_ADD_NODE(NULL, 
205                    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
206                    chain_name, CTLFLAG_RD, NULL, "sleepq chain stats");
207                SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
208                    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
209                SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
210                    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
211                    NULL);
212#endif
213        }
214        sleepq_zone = uma_zcreate("SLEEPQUEUE", sizeof(struct sleepqueue),
215#ifdef INVARIANTS
216            NULL, sleepq_dtor, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
217#else
218            NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
219#endif
220       
221#ifndef __rtems__
222        thread0.td_sleepqueue = sleepq_alloc();
223#endif /* __rtems__ */
224}
225
226/*
227 * Get a sleep queue for a new thread.
228 */
229struct sleepqueue *
230sleepq_alloc(void)
231{
232
233        return (uma_zalloc(sleepq_zone, M_WAITOK));
234}
235
236/*
237 * Free a sleep queue when a thread is destroyed.
238 */
239void
240sleepq_free(struct sleepqueue *sq)
241{
242
243        uma_zfree(sleepq_zone, sq);
244}
245
246/*
247 * Lock the sleep queue chain associated with the specified wait channel.
248 */
249void
250sleepq_lock(void *wchan)
251{
252        struct sleepqueue_chain *sc;
253
254        sc = SC_LOOKUP(wchan);
255        mtx_lock_spin(&sc->sc_lock);
256}
257
258/*
259 * Look up the sleep queue associated with a given wait channel in the hash
260 * table locking the associated sleep queue chain.  If no queue is found in
261 * the table, NULL is returned.
262 */
263struct sleepqueue *
264sleepq_lookup(void *wchan)
265{
266        struct sleepqueue_chain *sc;
267        struct sleepqueue *sq;
268
269        KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
270        sc = SC_LOOKUP(wchan);
271        mtx_assert(&sc->sc_lock, MA_OWNED);
272        LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
273                if (sq->sq_wchan == wchan)
274                        return (sq);
275        return (NULL);
276}
277
278/*
279 * Unlock the sleep queue chain associated with a given wait channel.
280 */
281void
282sleepq_release(void *wchan)
283{
284        struct sleepqueue_chain *sc;
285
286        sc = SC_LOOKUP(wchan);
287        mtx_unlock_spin(&sc->sc_lock);
288}
289
290/*
291 * Places the current thread on the sleep queue for the specified wait
292 * channel.  If INVARIANTS is enabled, then it associates the passed in
293 * lock with the sleepq to make sure it is held when that sleep queue is
294 * woken up.
295 */
296void
297sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
298    int queue)
299{
300        struct sleepqueue_chain *sc;
301        struct sleepqueue *sq;
302        struct thread *td;
303#ifdef __rtems__
304        ISR_lock_Context lock_context;
305        Thread_Control *executing;
306        struct thread *succ;
307#endif /* __rtems__ */
308
309        td = curthread;
310        sc = SC_LOOKUP(wchan);
311        mtx_assert(&sc->sc_lock, MA_OWNED);
312        MPASS(td->td_sleepqueue != NULL);
313        MPASS(wchan != NULL);
314        MPASS((queue >= 0) && (queue < NR_SLEEPQS));
315
316        /* If this thread is not allowed to sleep, die a horrible death. */
317        KASSERT(!(td->td_pflags & TDP_NOSLEEPING),
318            ("Trying sleep, but thread marked as sleeping prohibited"));
319
320        /* Look up the sleep queue associated with the wait channel 'wchan'. */
321        sq = sleepq_lookup(wchan);
322
323        /*
324         * If the wait channel does not already have a sleep queue, use
325         * this thread's sleep queue.  Otherwise, insert the current thread
326         * into the sleep queue already in use by this wait channel.
327         */
328        if (sq == NULL) {
329#ifdef INVARIANTS
330                int i;
331
332                sq = td->td_sleepqueue;
333                for (i = 0; i < NR_SLEEPQS; i++) {
334                        KASSERT(TAILQ_EMPTY(&sq->sq_blocked[i]),
335                            ("thread's sleep queue %d is not empty", i));
336                        KASSERT(sq->sq_blockedcnt[i] == 0,
337                            ("thread's sleep queue %d count mismatches", i));
338                }
339                KASSERT(LIST_EMPTY(&sq->sq_free),
340                    ("thread's sleep queue has a non-empty free list"));
341                KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
342                sq->sq_lock = lock;
343#endif
344#ifdef SLEEPQUEUE_PROFILING
345                sc->sc_depth++;
346                if (sc->sc_depth > sc->sc_max_depth) {
347                        sc->sc_max_depth = sc->sc_depth;
348                        if (sc->sc_max_depth > sleepq_max_depth)
349                                sleepq_max_depth = sc->sc_max_depth;
350                }
351#endif
352                sq = td->td_sleepqueue;
353                LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
354                sq->sq_wchan = wchan;
355                sq->sq_type = flags & SLEEPQ_TYPE;
356        } else {
357                MPASS(wchan == sq->sq_wchan);
358                MPASS(lock == sq->sq_lock);
359                MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
360                LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
361        }
362        thread_lock(td);
363#ifndef __rtems__
364        TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
365#else /* __rtems__ */
366        /* FIXME: This is broken with clustered scheduling */
367        succ = NULL;
368        TAILQ_FOREACH(succ, &sq->sq_blocked[queue], td_slpq) {
369                if (td->td_thread->current_priority <
370                    succ->td_thread->current_priority)
371                        break;
372        }
373        if (succ == NULL)
374                TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
375        else
376                TAILQ_INSERT_BEFORE(succ, td, td_slpq);
377#endif /* __rtems__ */
378        sq->sq_blockedcnt[queue]++;
379#ifdef __rtems__
380        executing = td->td_thread;
381        _Thread_Lock_acquire_default(executing, &lock_context);
382        td->td_sq_state = TD_SQ_TIRED;
383#endif /* __rtems__ */
384        td->td_sleepqueue = NULL;
385        td->td_sqqueue = queue;
386        td->td_wchan = wchan;
387        td->td_wmesg = wmesg;
388#ifndef __rtems__
389        if (flags & SLEEPQ_INTERRUPTIBLE) {
390                td->td_flags |= TDF_SINTR;
391                td->td_flags &= ~TDF_SLEEPABORT;
392        }
393        thread_unlock(td);
394#else /* __rtems__ */
395        _Thread_Lock_release_default(executing, &lock_context);
396#endif /* __rtems__ */
397}
398
399/*
400 * Sets a timeout that will remove the current thread from the specified
401 * sleep queue after timo ticks if the thread has not already been awakened.
402 */
403void
404sleepq_set_timeout(void *wchan, int timo)
405{
406#ifndef __rtems__
407        struct sleepqueue_chain *sc;
408        struct thread *td;
409
410        td = curthread;
411        sc = SC_LOOKUP(wchan);
412        mtx_assert(&sc->sc_lock, MA_OWNED);
413        MPASS(TD_ON_SLEEPQ(td));
414        MPASS(td->td_sleepqueue == NULL);
415        MPASS(wchan != NULL);
416        callout_reset_curcpu(&td->td_slpcallout, timo, sleepq_timeout, td);
417#else /* __rtems__ */
418        Thread_Control *executing;
419
420        _Thread_Disable_dispatch();
421        executing = _Thread_Executing;
422        BSD_ASSERT(executing->Timer.state == WATCHDOG_INACTIVE);
423        _Watchdog_Initialize(&executing->Timer, sleepq_timeout,
424            0, executing);
425        _Watchdog_Insert_ticks(&executing->Timer, (Watchdog_Interval)timo);
426        _Thread_Enable_dispatch();
427#endif /* __rtems__ */
428}
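/*
 * Editor's note: a hedged sketch (not part of the original) of the timed
 * sleep pattern built from sleepq_add(), sleepq_set_timeout() and
 * sleepq_timedwait().  The interlock and wait channel are hypothetical; on
 * FreeBSD the return value is EWOULDBLOCK when the timeout fires first.
 */
static int
example_timedwait(struct mtx *ilock, void *wchan, int ticks)
{
	int error;

	mtx_assert(ilock, MA_OWNED);
	sleepq_lock(wchan);
	mtx_unlock(ilock);
	sleepq_add(wchan, &ilock->lock_object, "examt", SLEEPQ_SLEEP, 0);
	sleepq_set_timeout(wchan, ticks);
	error = sleepq_timedwait(wchan, 0);
	mtx_lock(ilock);
	return (error);
}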
429
430/*
431 * Return the number of actual sleepers for the specified queue.
432 */
433u_int
434sleepq_sleepcnt(void *wchan, int queue)
435{
436        struct sleepqueue *sq;
437
438        KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
439        MPASS((queue >= 0) && (queue < NR_SLEEPQS));
440        sq = sleepq_lookup(wchan);
441        if (sq == NULL)
442                return (0);
443        return (sq->sq_blockedcnt[queue]);
444}
445
446#ifndef __rtems__
447/*
448 * Marks the pending sleep of the current thread as interruptible and
449 * makes an initial check for pending signals before putting a thread
450 * to sleep. Enters and exits with the thread lock held.  Thread lock
451 * may have transitioned from the sleepq lock to a run lock.
452 */
453static int
454sleepq_catch_signals(void *wchan, int pri)
455{
456        struct sleepqueue_chain *sc;
457        struct sleepqueue *sq;
458        struct thread *td;
459        struct proc *p;
460        struct sigacts *ps;
461        int sig, ret, stop_allowed;
462
463        td = curthread;
464        p = curproc;
465        sc = SC_LOOKUP(wchan);
466        mtx_assert(&sc->sc_lock, MA_OWNED);
467        MPASS(wchan != NULL);
468        if ((td->td_pflags & TDP_WAKEUP) != 0) {
469                td->td_pflags &= ~TDP_WAKEUP;
470                ret = EINTR;
471                thread_lock(td);
472                goto out;
473        }
474
475        /*
476         * See if there are any pending signals for this thread.  If not
477         * we can switch immediately.  Otherwise do the signal processing
478         * directly.
479         */
480        thread_lock(td);
481        if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) == 0) {
482                sleepq_switch(wchan, pri);
483                return (0);
484        }
485        stop_allowed = (td->td_flags & TDF_SBDRY) ? SIG_STOP_NOT_ALLOWED :
486            SIG_STOP_ALLOWED;
487        thread_unlock(td);
488        mtx_unlock_spin(&sc->sc_lock);
489        CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
490                (void *)td, (long)p->p_pid, td->td_name);
491        PROC_LOCK(p);
492        ps = p->p_sigacts;
493        mtx_lock(&ps->ps_mtx);
494        sig = cursig(td, stop_allowed);
495        if (sig == 0) {
496                mtx_unlock(&ps->ps_mtx);
497                ret = thread_suspend_check(1);
498                MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
499        } else {
500                if (SIGISMEMBER(ps->ps_sigintr, sig))
501                        ret = EINTR;
502                else
503                        ret = ERESTART;
504                mtx_unlock(&ps->ps_mtx);
505        }
506        /*
507         * Lock the per-process spinlock prior to dropping the PROC_LOCK
508         * to avoid a signal delivery race.  PROC_LOCK, PROC_SLOCK, and
509         * thread_lock() are currently held in tdsendsignal().
510         */
511        PROC_SLOCK(p);
512        mtx_lock_spin(&sc->sc_lock);
513        PROC_UNLOCK(p);
514        thread_lock(td);
515        PROC_SUNLOCK(p);
516        if (ret == 0) {
517                sleepq_switch(wchan, pri);
518                return (0);
519        }
520out:
521        /*
522         * There were pending signals and this thread is still
523         * on the sleep queue, remove it from the sleep queue.
524         */
525        if (TD_ON_SLEEPQ(td)) {
526                sq = sleepq_lookup(wchan);
527                if (sleepq_resume_thread(sq, td, 0)) {
528#ifdef INVARIANTS
529                        /*
530                         * This thread hasn't gone to sleep yet, so it
531                         * should not be swapped out.
532                         */
533                        panic("not waking up swapper");
534#endif
535                }
536        }
537        mtx_unlock_spin(&sc->sc_lock);
538        MPASS(td->td_lock != &sc->sc_lock);
539        return (ret);
540}
541#endif /* __rtems__ */
542
543/*
544 * Switches to another thread if we are still asleep on a sleep queue.
545 * Returns with thread lock.
546 */
547static void
548sleepq_switch(void *wchan, int pri)
549{
550#ifndef __rtems__
551        struct sleepqueue_chain *sc;
552        struct sleepqueue *sq;
553        struct thread *td;
554
555        td = curthread;
556        sc = SC_LOOKUP(wchan);
557        mtx_assert(&sc->sc_lock, MA_OWNED);
558        THREAD_LOCK_ASSERT(td, MA_OWNED);
559
560        /*
561         * If we have a sleep queue, then we've already been woken up, so
562         * just return.
563         */
564        if (td->td_sleepqueue != NULL) {
565                mtx_unlock_spin(&sc->sc_lock);
566                return;
567        }
568
569        /*
570         * If TDF_TIMEOUT is set, then our sleep has been timed out
571         * already but we are still on the sleep queue, so dequeue the
572         * thread and return.
573         */
574        if (td->td_flags & TDF_TIMEOUT) {
575                MPASS(TD_ON_SLEEPQ(td));
576                sq = sleepq_lookup(wchan);
577                if (sleepq_resume_thread(sq, td, 0)) {
578#ifdef INVARIANTS
579                        /*
580                         * This thread hasn't gone to sleep yet, so it
581                         * should not be swapped out.
582                         */
583                        panic("not waking up swapper");
584#endif
585                }
586                mtx_unlock_spin(&sc->sc_lock);
587                return;         
588        }
589#ifdef SLEEPQUEUE_PROFILING
590        if (prof_enabled)
591                sleepq_profile(td->td_wmesg);
592#endif
593        MPASS(td->td_sleepqueue == NULL);
594        sched_sleep(td, pri);
595        thread_lock_set(td, &sc->sc_lock);
596        SDT_PROBE0(sched, , , sleep);
597        TD_SET_SLEEPING(td);
598        mi_switch(SW_VOL | SWT_SLEEPQ, NULL);
599        KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
600        CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
601            (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
602#else /* __rtems__ */
603        Thread_Control *executing;
604        ISR_lock_Context lock_context;
605        struct thread *td;
606        bool block;
607        bool remove;
608
609        sleepq_release(wchan);
610
611        executing = _Thread_Lock_acquire_default_for_executing(&lock_context);
612        td = rtems_bsd_get_thread(executing);
613        BSD_ASSERT(td != NULL);
614
615        block = false;
616        remove = false;
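        /*
         * Editor's note (summary derived from this file; not in the
         * original): the RTEMS port tracks the sleep via td_sq_state.
         * sleepq_add() sets TD_SQ_TIRED; below it becomes TD_SQ_SLEEPY just
         * before blocking and TD_SQ_SLEEPING once blocked.
         * sleepq_resume_thread() moves any of these states to TD_SQ_WAKEUP,
         * while sleepq_timeout() moves them to TD_SQ_NIGHTMARE; a sleeper
         * that observes TD_SQ_NIGHTMARE records TD_SQ_PANIC and removes
         * itself from the queue via sleepq_remove().
         */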
617        switch (td->td_sq_state) {
618        case TD_SQ_TIRED:
619                BSD_ASSERT(td->td_wchan == wchan);
620                td->td_sq_state = TD_SQ_SLEEPY;
621                block = true;
622                break;
623        case TD_SQ_NIGHTMARE:
624                BSD_ASSERT(td->td_wchan == wchan);
625                td->td_sq_state = TD_SQ_PANIC;
626                remove = true;
627                break;
628        default:
629                BSD_ASSERT(td->td_wchan == NULL);
630                BSD_ASSERT(td->td_sq_state == TD_SQ_WAKEUP);
631                break;
632        }
633
634        if (block) {
635                Per_CPU_Control *cpu_self;
636                bool unblock;
637
638                cpu_self = _Thread_Dispatch_disable_critical();
639                _Thread_Lock_release_default(executing, &lock_context);
640
641                _Thread_Set_state(executing, STATES_WAITING_FOR_BSD_WAKEUP);
642
643                _Thread_Lock_acquire_default(executing, &lock_context);
644
645                unblock = false;
646                switch (td->td_sq_state) {
647                case TD_SQ_NIGHTMARE:
648                        BSD_ASSERT(td->td_wchan == wchan);
649                        td->td_sq_state = TD_SQ_PANIC;
650                        unblock = true;
651                        remove = true;
652                        break;
653                case TD_SQ_WAKEUP:
654                        BSD_ASSERT(td->td_wchan == NULL);
655                        unblock = true;
656                        break;
657                default:
658                        BSD_ASSERT(td->td_wchan == wchan);
659                        BSD_ASSERT(td->td_sq_state == TD_SQ_SLEEPY);
660                        td->td_sq_state = TD_SQ_SLEEPING;
661                        break;
662                }
663
664                _Thread_Lock_release_default(executing, &lock_context);
665
666                if (unblock) {
667                        _Watchdog_Remove_ticks(&executing->Timer);
668                        _Thread_Clear_state(executing, STATES_WAITING_FOR_BSD_WAKEUP);
669                }
670
671                _Thread_Dispatch_enable(cpu_self);
672
673                _Thread_Lock_acquire_default(executing, &lock_context);
674
675                switch (td->td_sq_state) {
676                case TD_SQ_NIGHTMARE:
677                        BSD_ASSERT(td->td_wchan == wchan);
678                        td->td_sq_state = TD_SQ_PANIC;
679                        remove = true;
680                        break;
681                default:
682                        BSD_ASSERT(td->td_sq_state == TD_SQ_WAKEUP ||
683                            td->td_sq_state == TD_SQ_PANIC);
684                        break;
685                }
686        }
687
688        _Thread_Lock_release_default(executing, &lock_context);
689
690        if (remove) {
691                sleepq_remove(td, wchan);
692        }
693#endif /* __rtems__ */
694}
695
696/*
697 * Check to see if we timed out.
698 */
699static int
700sleepq_check_timeout(void)
701{
702        struct thread *td;
703
704        td = curthread;
705#ifndef __rtems__
706        THREAD_LOCK_ASSERT(td, MA_OWNED);
707
708        /*
709         * If TDF_TIMEOUT is set, we timed out.
710         */
711        if (td->td_flags & TDF_TIMEOUT) {
712                td->td_flags &= ~TDF_TIMEOUT;
713                return (EWOULDBLOCK);
714        }
715
716        /*
717         * If TDF_TIMOFAIL is set, the timeout ran after we had
718         * already been woken up.
719         */
720        if (td->td_flags & TDF_TIMOFAIL)
721                td->td_flags &= ~TDF_TIMOFAIL;
722
723        /*
724         * If callout_stop() fails, then the timeout is running on
725         * another CPU, so synchronize with it to avoid having it
726         * accidentally wake up a subsequent sleep.
727         */
728        else if (callout_stop(&td->td_slpcallout) == 0) {
729                td->td_flags |= TDF_TIMEOUT;
730                TD_SET_SLEEPING(td);
731                mi_switch(SW_INVOL | SWT_SLEEPQTIMO, NULL);
732        }
733        return (0);
734#else /* __rtems__ */
735        return (td->td_sq_state);
736#endif /* __rtems__ */
737}
738
739#ifndef __rtems__
740/*
741 * Check to see if we were awoken by a signal.
742 */
743static int
744sleepq_check_signals(void)
745{
746        struct thread *td;
747
748        td = curthread;
749        THREAD_LOCK_ASSERT(td, MA_OWNED);
750
751        /* We are no longer in an interruptible sleep. */
752        if (td->td_flags & TDF_SINTR)
753                td->td_flags &= ~TDF_SINTR;
754
755        if (td->td_flags & TDF_SLEEPABORT) {
756                td->td_flags &= ~TDF_SLEEPABORT;
757                return (td->td_intrval);
758        }
759
760        return (0);
761}
762#endif /* __rtems__ */
763
764/*
765 * Block the current thread until it is awakened from its sleep queue.
766 */
767void
768sleepq_wait(void *wchan, int pri)
769{
770#ifndef __rtems__
771        struct thread *td;
772
773        td = curthread;
774        MPASS(!(td->td_flags & TDF_SINTR));
775        thread_lock(td);
776#endif /* __rtems__ */
777        sleepq_switch(wchan, pri);
778#ifndef __rtems__
779        thread_unlock(td);
780#endif /* __rtems__ */
781}
782
783#ifndef __rtems__
784/*
785 * Block the current thread until it is awakened from its sleep queue
786 * or it is interrupted by a signal.
787 */
788int
789sleepq_wait_sig(void *wchan, int pri)
790{
791        int rcatch;
792        int rval;
793
794        rcatch = sleepq_catch_signals(wchan, pri);
795        rval = sleepq_check_signals();
796        thread_unlock(curthread);
797        if (rcatch)
798                return (rcatch);
799        return (rval);
800}
801#endif /* __rtems__ */
802
803/*
804 * Block the current thread until it is awakened from its sleep queue
805 * or it times out while waiting.
806 */
807int
808sleepq_timedwait(void *wchan, int pri)
809{
810#ifndef __rtems__
811        struct thread *td;
812#endif /* __rtems__ */
813        int rval;
814
815#ifndef __rtems__
816        td = curthread;
817        MPASS(!(td->td_flags & TDF_SINTR));
818        thread_lock(td);
819#endif /* __rtems__ */
820        sleepq_switch(wchan, pri);
821        rval = sleepq_check_timeout();
822#ifndef __rtems__
823        thread_unlock(td);
824#endif /* __rtems__ */
825
826        return (rval);
827}
828
829#ifndef __rtems__
830/*
831 * Block the current thread until it is awakened from its sleep queue,
832 * it is interrupted by a signal, or it times out waiting to be awakened.
833 */
834int
835sleepq_timedwait_sig(void *wchan, int pri)
836{
837        int rcatch, rvalt, rvals;
838
839        rcatch = sleepq_catch_signals(wchan, pri);
840        rvalt = sleepq_check_timeout();
841        rvals = sleepq_check_signals();
842        thread_unlock(curthread);
843        if (rcatch)
844                return (rcatch);
845        if (rvals)
846                return (rvals);
847        return (rvalt);
848}
849#endif /* __rtems__ */
850
851/*
852 * Returns the type of sleepqueue given a waitchannel.
853 */
854int
855sleepq_type(void *wchan)
856{
857        struct sleepqueue *sq;
858        int type;
859
860        MPASS(wchan != NULL);
861
862        sleepq_lock(wchan);
863        sq = sleepq_lookup(wchan);
864        if (sq == NULL) {
865                sleepq_release(wchan);
866                return (-1);
867        }
868        type = sq->sq_type;
869        sleepq_release(wchan);
870        return (type);
871}
872
873/*
874 * Removes a thread from a sleep queue and makes it
875 * runnable.
876 */
877static int
878sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
879{
880        struct sleepqueue_chain *sc;
881#ifdef __rtems__
882        Thread_Control *thread;
883        ISR_lock_Context lock_context;
884        bool unblock;
885
886        BSD_ASSERT(sq != NULL);
887#endif /* __rtems__ */
888
889        MPASS(td != NULL);
890        MPASS(sq->sq_wchan != NULL);
891        MPASS(td->td_wchan == sq->sq_wchan);
892        MPASS(td->td_sqqueue < NR_SLEEPQS && td->td_sqqueue >= 0);
893        THREAD_LOCK_ASSERT(td, MA_OWNED);
894        sc = SC_LOOKUP(sq->sq_wchan);
895        mtx_assert(&sc->sc_lock, MA_OWNED);
896
897        SDT_PROBE2(sched, , , wakeup, td, td->td_proc);
898
899        /* Remove the thread from the queue. */
900        sq->sq_blockedcnt[td->td_sqqueue]--;
901        TAILQ_REMOVE(&sq->sq_blocked[td->td_sqqueue], td, td_slpq);
902
903        /*
904         * Get a sleep queue for this thread.  If this is the last waiter,
905         * use the queue itself and take it out of the chain, otherwise,
906         * remove a queue from the free list.
907         */
908        if (LIST_EMPTY(&sq->sq_free)) {
909                td->td_sleepqueue = sq;
910#ifdef INVARIANTS
911                sq->sq_wchan = NULL;
912#endif
913#ifdef SLEEPQUEUE_PROFILING
914                sc->sc_depth--;
915#endif
916        } else
917                td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
918        LIST_REMOVE(td->td_sleepqueue, sq_hash);
919#ifdef __rtems__
920        (void)sc;
921        thread = td->td_thread;
922        _Thread_Lock_acquire_default(thread, &lock_context);
923#endif /* __rtems__ */
924
925        td->td_wmesg = NULL;
926        td->td_wchan = NULL;
927#ifndef __rtems__
928        td->td_flags &= ~TDF_SINTR;
929
930        CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
931            (void *)td, (long)td->td_proc->p_pid, td->td_name);
932
933        /* Adjust priority if requested. */
934        MPASS(pri == 0 || (pri >= PRI_MIN && pri <= PRI_MAX));
935        if (pri != 0 && td->td_priority > pri &&
936            PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
937                sched_prio(td, pri);
938
939        /*
940         * Note that thread td might not be sleeping if it is running
941         * sleepq_catch_signals() on another CPU or is blocked on its
942         * proc lock to check signals.  There's no need to mark the
943         * thread runnable in that case.
944         */
945        if (TD_IS_SLEEPING(td)) {
946                TD_CLR_SLEEPING(td);
947                return (setrunnable(td));
948        }
949#else /* __rtems__ */
950        unblock = _Watchdog_Is_active(&thread->Timer);
951        switch (td->td_sq_state) {
952        case TD_SQ_SLEEPING:
953                unblock = true;
954                /* FALLTHROUGH */
955        case TD_SQ_TIRED:
956        case TD_SQ_SLEEPY:
957        case TD_SQ_NIGHTMARE:
958                td->td_sq_state = TD_SQ_WAKEUP;
959                break;
960        default:
961                BSD_ASSERT(td->td_sq_state == TD_SQ_PANIC);
962                break;
963        }
964
965        if (unblock) {
966                Per_CPU_Control *cpu_self;
967
968                cpu_self = _Thread_Dispatch_disable_critical();
969                _Thread_Lock_release_default(thread, &lock_context);
970
971                _Watchdog_Remove_ticks(&thread->Timer);
972                _Thread_Clear_state(thread, STATES_WAITING_FOR_BSD_WAKEUP);
973
974                _Thread_Dispatch_enable(cpu_self);
975        } else {
976                _Thread_Lock_release_default(thread, &lock_context);
977        }
978#endif /* __rtems__ */
979        return (0);
980}
981
982#ifdef INVARIANTS
983/*
984 * UMA zone item deallocator.
985 */
986static void
987sleepq_dtor(void *mem, int size, void *arg)
988{
989        struct sleepqueue *sq;
990        int i;
991
992        sq = mem;
993        for (i = 0; i < NR_SLEEPQS; i++) {
994                MPASS(TAILQ_EMPTY(&sq->sq_blocked[i]));
995                MPASS(sq->sq_blockedcnt[i] == 0);
996        }
997}
998#endif
999
1000/*
1001 * UMA zone item initializer.
1002 */
1003static int
1004sleepq_init(void *mem, int size, int flags)
1005{
1006        struct sleepqueue *sq;
1007        int i;
1008
1009        bzero(mem, size);
1010        sq = mem;
1011        for (i = 0; i < NR_SLEEPQS; i++) {
1012                TAILQ_INIT(&sq->sq_blocked[i]);
1013                sq->sq_blockedcnt[i] = 0;
1014        }
1015        LIST_INIT(&sq->sq_free);
1016        return (0);
1017}
1018
1019/*
1020 * Find the highest priority thread sleeping on a wait channel and resume it.
1021 */
1022int
1023sleepq_signal(void *wchan, int flags, int pri, int queue)
1024{
1025        struct sleepqueue *sq;
1026#ifndef __rtems__
1027        struct thread *td, *besttd;
1028#else /* __rtems__ */
1029        struct thread *besttd;
1030#endif /* __rtems__ */
1031        int wakeup_swapper;
1032
1033        CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
1034        KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
1035        MPASS((queue >= 0) && (queue < NR_SLEEPQS));
1036        sq = sleepq_lookup(wchan);
1037        if (sq == NULL)
1038                return (0);
1039        KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
1040            ("%s: mismatch between sleep/wakeup and cv_*", __func__));
1041
1042#ifndef __rtems__
1043        /*
1044         * Find the highest priority thread on the queue.  If there is a
1045         * tie, use the thread that first appears in the queue as it has
1046         * been sleeping the longest since threads are always added to
1047         * the tail of sleep queues.
1048         */
1049        besttd = NULL;
1050        TAILQ_FOREACH(td, &sq->sq_blocked[queue], td_slpq) {
1051                if (besttd == NULL || td->td_priority < besttd->td_priority)
1052                        besttd = td;
1053        }
1054#else /* __rtems__ */
1055        besttd = TAILQ_FIRST(&sq->sq_blocked[queue]);
1056#endif /* __rtems__ */
1057        MPASS(besttd != NULL);
1058        thread_lock(besttd);
1059        wakeup_swapper = sleepq_resume_thread(sq, besttd, pri);
1060        thread_unlock(besttd);
1061        return (wakeup_swapper);
1062}
1063
1064/*
1065 * Resume all threads sleeping on a specified wait channel.
1066 */
1067int
1068sleepq_broadcast(void *wchan, int flags, int pri, int queue)
1069{
1070        struct sleepqueue *sq;
1071        struct thread *td, *tdn;
1072        int wakeup_swapper;
1073
1074        CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
1075        KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
1076        MPASS((queue >= 0) && (queue < NR_SLEEPQS));
1077        sq = sleepq_lookup(wchan);
1078        if (sq == NULL)
1079                return (0);
1080        KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
1081            ("%s: mismatch between sleep/wakeup and cv_*", __func__));
1082
1083        /* Resume all blocked threads on the sleep queue. */
1084        wakeup_swapper = 0;
1085        TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) {
1086                thread_lock(td);
1087                if (sleepq_resume_thread(sq, td, pri))
1088                        wakeup_swapper = 1;
1089                thread_unlock(td);
1090        }
1091        return (wakeup_swapper);
1092}
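/*
 * Editor's note: a hedged sketch (not in the original) of the waker side as
 * used by wakeup()-style code; "wchan" is whatever address the sleepers
 * passed to sleepq_add().
 */
static void
example_wakeup_all(void *wchan)
{
	int wakeup_swapper;

	sleepq_lock(wchan);
	wakeup_swapper = sleepq_broadcast(wchan, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(wchan);
	if (wakeup_swapper)
		kick_proc0();	/* some resumed thread was swapped out */
}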
1093
1094#ifndef __rtems__
1095/*
1096 * Time sleeping threads out.  When the timeout expires, the thread is
1097 * removed from the sleep queue and made runnable if it is still asleep.
1098 */
1099static void
1100sleepq_timeout(void *arg)
1101{
1102        struct sleepqueue_chain *sc;
1103        struct sleepqueue *sq;
1104        struct thread *td;
1105        void *wchan;
1106        int wakeup_swapper;
1107
1108        td = arg;
1109        wakeup_swapper = 0;
1110        CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
1111            (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
1112
1113        /*
1114         * First, see if the thread is asleep and get the wait channel if
1115         * it is.
1116         */
1117        thread_lock(td);
1118        if (TD_IS_SLEEPING(td) && TD_ON_SLEEPQ(td)) {
1119                wchan = td->td_wchan;
1120                sc = SC_LOOKUP(wchan);
1121                THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock);
1122                sq = sleepq_lookup(wchan);
1123                MPASS(sq != NULL);
1124                td->td_flags |= TDF_TIMEOUT;
1125                wakeup_swapper = sleepq_resume_thread(sq, td, 0);
1126                thread_unlock(td);
1127                if (wakeup_swapper)
1128                        kick_proc0();
1129                return;
1130        }
1131
1132        /*
1133         * If the thread is on the SLEEPQ but isn't sleeping yet, it
1134         * can either be on another CPU in between sleepq_add() and
1135         * one of the sleepq_*wait*() routines or it can be in
1136         * sleepq_catch_signals().
1137         */
1138        if (TD_ON_SLEEPQ(td)) {
1139                td->td_flags |= TDF_TIMEOUT;
1140                thread_unlock(td);
1141                return;
1142        }
1143
1144        /*
1145         * Now check for the edge cases.  First, if TDF_TIMEOUT is set,
1146         * then the other thread has already yielded to us, so clear
1147 * the flag and resume it.  If TDF_TIMEOUT is not set, then
1148         * we know that the other thread is not on a sleep queue, but it
1149         * hasn't resumed execution yet.  In that case, set TDF_TIMOFAIL
1150         * to let it know that the timeout has already run and doesn't
1151         * need to be canceled.
1152         */
1153        if (td->td_flags & TDF_TIMEOUT) {
1154                MPASS(TD_IS_SLEEPING(td));
1155                td->td_flags &= ~TDF_TIMEOUT;
1156                TD_CLR_SLEEPING(td);
1157                wakeup_swapper = setrunnable(td);
1158        } else
1159                td->td_flags |= TDF_TIMOFAIL;
1160        thread_unlock(td);
1161        if (wakeup_swapper)
1162                kick_proc0();
1163}
1164#else /* __rtems__ */
1165static void
1166sleepq_timeout(Objects_Id id, void *arg)
1167{
1168        Thread_Control *thread;
1169        struct thread *td;
1170        ISR_lock_Context lock_context;
1171        bool unblock;
1172
1173        thread = arg;
1174        td = rtems_bsd_get_thread(thread);
1175        BSD_ASSERT(td != NULL);
1176
1177        _Thread_Lock_acquire_default(thread, &lock_context);
1178
1179        unblock = false;
1180        switch (td->td_sq_state) {
1181        case TD_SQ_SLEEPING:
1182                unblock = true;
1183                /* Fall through */
1184        case TD_SQ_TIRED:
1185        case TD_SQ_SLEEPY:
1186                td->td_sq_state = TD_SQ_NIGHTMARE;
1187                break;
1188        default:
1189                BSD_ASSERT(td->td_sq_state == TD_SQ_WAKEUP);
1190                break;
1191        }
1192
1193        if (unblock) {
1194                Per_CPU_Control *cpu_self;
1195
1196                cpu_self = _Thread_Dispatch_disable_critical();
1197                _Thread_Lock_release_default(thread, &lock_context);
1198
1199                _Thread_Clear_state(thread, STATES_WAITING_FOR_BSD_WAKEUP);
1200
1201                _Thread_Dispatch_enable(cpu_self);
1202        } else {
1203                _Thread_Lock_release_default(thread, &lock_context);
1204        }
1205}
1206#endif /* __rtems__ */
1207
1208/*
1209 * Resumes a specific thread from the sleep queue associated with a specific
1210 * wait channel if it is on that queue.
1211 */
1212void
1213sleepq_remove(struct thread *td, void *wchan)
1214{
1215        struct sleepqueue *sq;
1216        int wakeup_swapper;
1217
1218        /*
1219         * Look up the sleep queue for this wait channel, then re-check
1220         * that the thread is asleep on that channel, if it is not, then
1221         * bail.
1222         */
1223        MPASS(wchan != NULL);
1224        sleepq_lock(wchan);
1225        sq = sleepq_lookup(wchan);
1226        /*
1227         * We can not lock the thread here as it may be sleeping on a
1228         * different sleepq.  However, holding the sleepq lock for this
1229         * wchan can guarantee that we do not miss a wakeup for this
1230         * channel.  The asserts below will catch any false positives.
1231         */
1232        if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
1233                sleepq_release(wchan);
1234                return;
1235        }
1236        /* Thread is asleep on sleep queue sq, so wake it up. */
1237        thread_lock(td);
1238        MPASS(sq != NULL);
1239        MPASS(td->td_wchan == wchan);
1240        wakeup_swapper = sleepq_resume_thread(sq, td, 0);
1241        thread_unlock(td);
1242        sleepq_release(wchan);
1243        if (wakeup_swapper)
1244                kick_proc0();
1245}
1246
1247#ifndef __rtems__
1248/*
1249 * Abort a thread as if an interrupt had occurred.  Only abort
1250 * interruptible waits (unfortunately it isn't safe to abort others).
1251 */
1252int
1253sleepq_abort(struct thread *td, int intrval)
1254{
1255        struct sleepqueue *sq;
1256        void *wchan;
1257
1258        THREAD_LOCK_ASSERT(td, MA_OWNED);
1259        MPASS(TD_ON_SLEEPQ(td));
1260        MPASS(td->td_flags & TDF_SINTR);
1261        MPASS(intrval == EINTR || intrval == ERESTART);
1262
1263        /*
1264         * If the TDF_TIMEOUT flag is set, just leave. A
1265         * timeout is scheduled anyhow.
1266         */
1267        if (td->td_flags & TDF_TIMEOUT)
1268                return (0);
1269
1270        CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
1271            (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
1272        td->td_intrval = intrval;
1273        td->td_flags |= TDF_SLEEPABORT;
1274        /*
1275         * If the thread has not slept yet it will find the signal in
1276         * sleepq_catch_signals() and call sleepq_resume_thread.  Otherwise
1277         * we have to do it here.
1278         */
1279        if (!TD_IS_SLEEPING(td))
1280                return (0);
1281        wchan = td->td_wchan;
1282        MPASS(wchan != NULL);
1283        sq = sleepq_lookup(wchan);
1284        MPASS(sq != NULL);
1285
1286        /* Thread is asleep on sleep queue sq, so wake it up. */
1287        return (sleepq_resume_thread(sq, td, 0));
1288}
1289#endif /* __rtems__ */
1290
1291#ifdef SLEEPQUEUE_PROFILING
1292#define SLEEPQ_PROF_LOCATIONS   1024
1293#define SLEEPQ_SBUFSIZE         512
1294struct sleepq_prof {
1295        LIST_ENTRY(sleepq_prof) sp_link;
1296        const char      *sp_wmesg;
1297        long            sp_count;
1298};
1299
1300LIST_HEAD(sqphead, sleepq_prof);
1301
1302struct sqphead sleepq_prof_free;
1303struct sqphead sleepq_hash[SC_TABLESIZE];
1304static struct sleepq_prof sleepq_profent[SLEEPQ_PROF_LOCATIONS];
1305static struct mtx sleepq_prof_lock;
1306MTX_SYSINIT(sleepq_prof_lock, &sleepq_prof_lock, "sleepq_prof", MTX_SPIN);
1307
1308static void
1309sleepq_profile(const char *wmesg)
1310{
1311        struct sleepq_prof *sp;
1312
1313        mtx_lock_spin(&sleepq_prof_lock);
1314        if (prof_enabled == 0)
1315                goto unlock;
1316        LIST_FOREACH(sp, &sleepq_hash[SC_HASH(wmesg)], sp_link)
1317                if (sp->sp_wmesg == wmesg)
1318                        goto done;
1319        sp = LIST_FIRST(&sleepq_prof_free);
1320        if (sp == NULL)
1321                goto unlock;
1322        sp->sp_wmesg = wmesg;
1323        LIST_REMOVE(sp, sp_link);
1324        LIST_INSERT_HEAD(&sleepq_hash[SC_HASH(wmesg)], sp, sp_link);
1325done:
1326        sp->sp_count++;
1327unlock:
1328        mtx_unlock_spin(&sleepq_prof_lock);
1329        return;
1330}
1331
1332static void
1333sleepq_prof_reset(void)
1334{
1335        struct sleepq_prof *sp;
1336        int enabled;
1337        int i;
1338
1339        mtx_lock_spin(&sleepq_prof_lock);
1340        enabled = prof_enabled;
1341        prof_enabled = 0;
1342        for (i = 0; i < SC_TABLESIZE; i++)
1343                LIST_INIT(&sleepq_hash[i]);
1344        LIST_INIT(&sleepq_prof_free);
1345        for (i = 0; i < SLEEPQ_PROF_LOCATIONS; i++) {
1346                sp = &sleepq_profent[i];
1347                sp->sp_wmesg = NULL;
1348                sp->sp_count = 0;
1349                LIST_INSERT_HEAD(&sleepq_prof_free, sp, sp_link);
1350        }
1351        prof_enabled = enabled;
1352        mtx_unlock_spin(&sleepq_prof_lock);
1353}
1354
1355static int
1356enable_sleepq_prof(SYSCTL_HANDLER_ARGS)
1357{
1358        int error, v;
1359
1360        v = prof_enabled;
1361        error = sysctl_handle_int(oidp, &v, v, req);
1362        if (error)
1363                return (error);
1364        if (req->newptr == NULL)
1365                return (error);
1366        if (v == prof_enabled)
1367                return (0);
1368        if (v == 1)
1369                sleepq_prof_reset();
1370        mtx_lock_spin(&sleepq_prof_lock);
1371        prof_enabled = !!v;
1372        mtx_unlock_spin(&sleepq_prof_lock);
1373
1374        return (0);
1375}
1376
1377static int
1378reset_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
1379{
1380        int error, v;
1381
1382        v = 0;
1383        error = sysctl_handle_int(oidp, &v, 0, req);
1384        if (error)
1385                return (error);
1386        if (req->newptr == NULL)
1387                return (error);
1388        if (v == 0)
1389                return (0);
1390        sleepq_prof_reset();
1391
1392        return (0);
1393}
1394
1395static int
1396dump_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
1397{
1398        struct sleepq_prof *sp;
1399        struct sbuf *sb;
1400        int enabled;
1401        int error;
1402        int i;
1403
1404        error = sysctl_wire_old_buffer(req, 0);
1405        if (error != 0)
1406                return (error);
1407        sb = sbuf_new_for_sysctl(NULL, NULL, SLEEPQ_SBUFSIZE, req);
1408        sbuf_printf(sb, "\nwmesg\tcount\n");
1409        enabled = prof_enabled;
1410        mtx_lock_spin(&sleepq_prof_lock);
1411        prof_enabled = 0;
1412        mtx_unlock_spin(&sleepq_prof_lock);
1413        for (i = 0; i < SC_TABLESIZE; i++) {
1414                LIST_FOREACH(sp, &sleepq_hash[i], sp_link) {
1415                        sbuf_printf(sb, "%s\t%ld\n",
1416                            sp->sp_wmesg, sp->sp_count);
1417                }
1418        }
1419        mtx_lock_spin(&sleepq_prof_lock);
1420        prof_enabled = enabled;
1421        mtx_unlock_spin(&sleepq_prof_lock);
1422
1423        error = sbuf_finish(sb);
1424        sbuf_delete(sb);
1425        return (error);
1426}
1427
1428SYSCTL_PROC(_debug_sleepq, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
1429    NULL, 0, dump_sleepq_prof_stats, "A", "Sleepqueue profiling statistics");
1430SYSCTL_PROC(_debug_sleepq, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
1431    NULL, 0, reset_sleepq_prof_stats, "I",
1432    "Reset sleepqueue profiling statistics");
1433SYSCTL_PROC(_debug_sleepq, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
1434    NULL, 0, enable_sleepq_prof, "I", "Enable sleepqueue profiling");
1435#endif
1436
1437#ifdef DDB
1438DB_SHOW_COMMAND(sleepq, db_show_sleepqueue)
1439{
1440        struct sleepqueue_chain *sc;
1441        struct sleepqueue *sq;
1442#ifdef INVARIANTS
1443        struct lock_object *lock;
1444#endif
1445        struct thread *td;
1446        void *wchan;
1447        int i;
1448
1449        if (!have_addr)
1450                return;
1451
1452        /*
1453         * First, see if there is an active sleep queue for the wait channel
1454         * indicated by the address.
1455         */
1456        wchan = (void *)addr;
1457        sc = SC_LOOKUP(wchan);
1458        LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
1459                if (sq->sq_wchan == wchan)
1460                        goto found;
1461
1462        /*
1463         * Second, see if there is an active sleep queue at the address
1464         * indicated.
1465         */
1466        for (i = 0; i < SC_TABLESIZE; i++)
1467                LIST_FOREACH(sq, &sleepq_chains[i].sc_queues, sq_hash) {
1468                        if (sq == (struct sleepqueue *)addr)
1469                                goto found;
1470                }
1471
1472        db_printf("Unable to locate a sleep queue via %p\n", (void *)addr);
1473        return;
1474found:
1475        db_printf("Wait channel: %p\n", sq->sq_wchan);
1476        db_printf("Queue type: %d\n", sq->sq_type);
1477#ifdef INVARIANTS
1478        if (sq->sq_lock) {
1479                lock = sq->sq_lock;
1480                db_printf("Associated Interlock: %p - (%s) %s\n", lock,
1481                    LOCK_CLASS(lock)->lc_name, lock->lo_name);
1482        }
1483#endif
1484        db_printf("Blocked threads:\n");
1485        for (i = 0; i < NR_SLEEPQS; i++) {
1486                db_printf("\nQueue[%d]:\n", i);
1487                if (TAILQ_EMPTY(&sq->sq_blocked[i]))
1488                        db_printf("\tempty\n");
1489                else
1490                        TAILQ_FOREACH(td, &sq->sq_blocked[i],
1491                                      td_slpq) {
1492                                db_printf("\t%p (tid %d, pid %d, \"%s\")\n", td,
1493                                          td->td_tid, td->td_proc->p_pid,
1494                                          td->td_name);
1495                        }
1496                db_printf("(expected: %u)\n", sq->sq_blockedcnt[i]);
1497        }
1498}
1499
1500/* Alias 'show sleepqueue' to 'show sleepq'. */
1501DB_SHOW_ALIAS(sleepqueue, db_show_sleepqueue);
1502#endif