source: rtems-libbsd/freebsd/sys/kern/kern_intr.c

Branch: 6-freebsd-12
Last change on this file was e56b5cb, checked in by Chris Johns <chrisj@…>, on 07/21/21 at 07:59:40

kern: Add kernel trace support (KTR)

Update #4475

#include <machine/rtems-bsd-kernel-space.h>

/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <rtems/bsd/local/opt_ddb.h>
#include <rtems/bsd/local/opt_kstack_usage_prof.h>

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <rtems/bsd/sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#ifndef __rtems__
#include <machine/md_var.h>
#else /* __rtems__ */
  #include <machine/rtems-bsd-thread.h>
  #define RTEMSBSD_SWI_WAKEUP_EVENT RTEMS_EVENT_31
  #include <rtems/score/threadimpl.h>
#endif /* __rtems__ */
#include <machine/stdarg.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif
/*
 * Describe an interrupt thread.  There is one of these per interrupt event.
 */
struct intr_thread {
        struct intr_event *it_event;
        struct thread *it_thread;       /* Kernel thread. */
        int     it_flags;               /* (j) IT_* flags. */
        int     it_need;                /* Needs service. */
};

/* Interrupt thread flags kept in it_flags */
#define IT_DEAD         0x000001        /* Thread is waiting to exit. */
#define IT_WAIT         0x000002        /* Thread is waiting for completion. */

struct  intr_entropy {
        struct  thread *td;
        uintptr_t event;
};

struct  intr_event *clk_intr_event;
#ifndef __rtems__
struct  intr_event *tty_intr_event;
void    *vm_ih;
struct proc *intrproc;
#endif /* __rtems__ */

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

static int intr_storm_threshold = 1000;
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RWTUN,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);
static struct mtx event_lock;
MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);

static void     intr_event_update(struct intr_event *ie);
static int      intr_event_schedule_thread(struct intr_event *ie);
static struct intr_thread *ithread_create(const char *name);
#ifndef __rtems__
static void     ithread_destroy(struct intr_thread *ithread);
#endif /* __rtems__ */
static void     ithread_execute_handlers(struct proc *p,
                    struct intr_event *ie);
static void     ithread_loop(void *);
static void     ithread_update(struct intr_thread *ithd);
#ifndef __rtems__
static void     start_softintr(void *);

/* Map an interrupt type to an ithread priority. */
u_char
intr_priority(enum intr_type flags)
{
        u_char pri;

        flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
            INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
        switch (flags) {
        case INTR_TYPE_TTY:
                pri = PI_TTY;
                break;
        case INTR_TYPE_BIO:
                pri = PI_DISK;
                break;
        case INTR_TYPE_NET:
                pri = PI_NET;
                break;
        case INTR_TYPE_CAM:
                pri = PI_DISK;
                break;
        case INTR_TYPE_AV:
                pri = PI_AV;
                break;
        case INTR_TYPE_CLK:
                pri = PI_REALTIME;
                break;
        case INTR_TYPE_MISC:
                pri = PI_DULL;          /* don't care */
                break;
        default:
                /* We didn't specify an interrupt level. */
                panic("intr_priority: no interrupt type in flags");
        }

        return pri;
}

#endif /* __rtems__ */
/*
 * Update an ithread based on the associated intr_event.
 */
static void
ithread_update(struct intr_thread *ithd)
{
        struct intr_event *ie;
        struct thread *td;
        u_char pri;

        ie = ithd->it_event;
        td = ithd->it_thread;
        mtx_assert(&ie->ie_lock, MA_OWNED);

        /* Determine the overall priority of this event. */
        if (CK_SLIST_EMPTY(&ie->ie_handlers))
                pri = PRI_MAX_ITHD;
        else
                pri = CK_SLIST_FIRST(&ie->ie_handlers)->ih_pri;

        /* Update name and priority. */
#ifndef __rtems__
        strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
#else /* __rtems__ */
        _Thread_Set_name(td->td_thread, ie->ie_fullname);
#endif /* __rtems__ */
#ifdef KTR
#ifndef __rtems__
        sched_clear_tdname(td);
#endif /* __rtems__ */
#endif
        thread_lock(td);
#ifndef __rtems__
        sched_prio(td, pri);
#else /* __rtems__ */
#warning TODO: set thread priority
#endif /* __rtems__ */
        thread_unlock(td);
}

/*
 * Regenerate the full name of an interrupt event and update its priority.
 */
static void
intr_event_update(struct intr_event *ie)
{
        struct intr_handler *ih;
        char *last;
        int missed, space;

        /* Start off with no entropy and just the name of the event. */
        mtx_assert(&ie->ie_lock, MA_OWNED);
        strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
        ie->ie_flags &= ~IE_ENTROPY;
        missed = 0;
        space = 1;

        /* Run through all the handlers updating values. */
        CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
                if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
                    sizeof(ie->ie_fullname)) {
                        strcat(ie->ie_fullname, " ");
                        strcat(ie->ie_fullname, ih->ih_name);
                        space = 0;
                } else
                        missed++;
                if (ih->ih_flags & IH_ENTROPY)
                        ie->ie_flags |= IE_ENTROPY;
        }

        /*
         * If there is only one handler and its name is too long, just copy in
         * as much of the end of the name (includes the unit number) as will
         * fit.  Otherwise, we have multiple handlers and not all of the names
         * will fit.  Add +'s to indicate missing names.  If we run out of room
         * and still have +'s to add, change the last character from a + to a *.
         */
        if (missed == 1 && space == 1) {
                ih = CK_SLIST_FIRST(&ie->ie_handlers);
                missed = strlen(ie->ie_fullname) + strlen(ih->ih_name) + 2 -
                    sizeof(ie->ie_fullname);
                strcat(ie->ie_fullname, (missed == 0) ? " " : "-");
                strcat(ie->ie_fullname, &ih->ih_name[missed]);
                missed = 0;
        }
        last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
        while (missed-- > 0) {
                if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
                        if (*last == '+') {
                                *last = '*';
                                break;
                        } else
                                *last = '+';
                } else if (space) {
                        strcat(ie->ie_fullname, " +");
                        space = 0;
                } else
                        strcat(ie->ie_fullname, "+");
        }

        /*
         * If this event has an ithread, update its priority and name.
         */
        if (ie->ie_thread != NULL)
                ithread_update(ie->ie_thread);
        CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}
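
/*
 * Illustrative example (not part of the original source): an event named
 * "irq10:" with handlers "em0" and "ahci0" yields the fullname
 * "irq10: em0 ahci0".  Once handler names no longer fit, the name degrades
 * to forms such as "irq10: em0 +", and the trailing '+' becomes '*' when
 * even the markers run out of room.
 */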

int
intr_event_create(struct intr_event **event, void *source, int flags, int irq,
    void (*pre_ithread)(void *), void (*post_ithread)(void *),
    void (*post_filter)(void *), int (*assign_cpu)(void *, int),
    const char *fmt, ...)
{
        struct intr_event *ie;
        va_list ap;

        /* The only valid flag during creation is IE_SOFT. */
        if ((flags & ~IE_SOFT) != 0)
                return (EINVAL);
        ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
        ie->ie_source = source;
        ie->ie_pre_ithread = pre_ithread;
        ie->ie_post_ithread = post_ithread;
        ie->ie_post_filter = post_filter;
        ie->ie_assign_cpu = assign_cpu;
        ie->ie_flags = flags;
        ie->ie_irq = irq;
        ie->ie_cpu = NOCPU;
        CK_SLIST_INIT(&ie->ie_handlers);
        mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

        va_start(ap, fmt);
        vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
        va_end(ap);
        strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
        mtx_lock(&event_lock);
        TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
        mtx_unlock(&event_lock);
        if (event != NULL)
                *event = ie;
        CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
        return (0);
}
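
/*
 * Illustrative example (not part of the original source): machine-dependent
 * interrupt code typically creates one event per IRQ at setup time, passing
 * its own mask/unmask/EOI callbacks.  All names below are hypothetical.
 *
 *      error = intr_event_create(&isrc->is_event, isrc, 0, irq,
 *          my_pic_pre_ithread, my_pic_post_ithread, my_pic_post_filter,
 *          my_pic_assign_cpu, "irq%d:", irq);
 *      if (error != 0)
 *              return (error);
 */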

#ifndef __rtems__
/*
 * Bind an interrupt event to the specified CPU.  Note that not all
 * platforms support binding an interrupt to a CPU.  For those
 * platforms this request will fail.  Using a cpu id of NOCPU unbinds
 * the interrupt event.
 */
static int
_intr_event_bind(struct intr_event *ie, int cpu, bool bindirq, bool bindithread)
{
        lwpid_t id;
        int error;

        /* Need a CPU to bind to. */
        if (cpu != NOCPU && CPU_ABSENT(cpu))
                return (EINVAL);

        if (ie->ie_assign_cpu == NULL)
                return (EOPNOTSUPP);

        error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
        if (error)
                return (error);

        /*
         * If we have any ithreads try to set their mask first to verify
         * permissions, etc.
         */
        if (bindithread) {
                mtx_lock(&ie->ie_lock);
                if (ie->ie_thread != NULL) {
                        id = ie->ie_thread->it_thread->td_tid;
                        mtx_unlock(&ie->ie_lock);
                        error = cpuset_setithread(id, cpu);
                        if (error)
                                return (error);
                } else
                        mtx_unlock(&ie->ie_lock);
        }
        if (bindirq)
                error = ie->ie_assign_cpu(ie->ie_source, cpu);
        if (error) {
                if (bindithread) {
                        mtx_lock(&ie->ie_lock);
                        if (ie->ie_thread != NULL) {
                                cpu = ie->ie_cpu;
                                id = ie->ie_thread->it_thread->td_tid;
                                mtx_unlock(&ie->ie_lock);
                                (void)cpuset_setithread(id, cpu);
                        } else
                                mtx_unlock(&ie->ie_lock);
                }
                return (error);
        }

        if (bindirq) {
                mtx_lock(&ie->ie_lock);
                ie->ie_cpu = cpu;
                mtx_unlock(&ie->ie_lock);
        }

        return (error);
}

/*
 * Bind an interrupt event to the specified CPU.  For supported platforms, any
 * associated ithreads as well as the primary interrupt context will be bound
 * to the specified CPU.
 */
int
intr_event_bind(struct intr_event *ie, int cpu)
{

        return (_intr_event_bind(ie, cpu, true, true));
}

/*
 * Bind an interrupt event to the specified CPU, but do not bind associated
 * ithreads.
 */
int
intr_event_bind_irqonly(struct intr_event *ie, int cpu)
{

        return (_intr_event_bind(ie, cpu, true, false));
}

/*
 * Bind an interrupt event's ithread to the specified CPU.
 */
int
intr_event_bind_ithread(struct intr_event *ie, int cpu)
{

        return (_intr_event_bind(ie, cpu, false, true));
}

static struct intr_event *
intr_lookup(int irq)
{
        struct intr_event *ie;

        mtx_lock(&event_lock);
        TAILQ_FOREACH(ie, &event_list, ie_list)
                if (ie->ie_irq == irq &&
                    (ie->ie_flags & IE_SOFT) == 0 &&
                    CK_SLIST_FIRST(&ie->ie_handlers) != NULL)
                        break;
        mtx_unlock(&event_lock);
        return (ie);
}

int
intr_setaffinity(int irq, int mode, void *m)
{
        struct intr_event *ie;
        cpuset_t *mask;
        int cpu, n;

        mask = m;
        cpu = NOCPU;
        /*
         * If we're setting all cpus we can unbind.  Otherwise make sure
         * only one cpu is in the set.
         */
        if (CPU_CMP(cpuset_root, mask)) {
                for (n = 0; n < CPU_SETSIZE; n++) {
                        if (!CPU_ISSET(n, mask))
                                continue;
                        if (cpu != NOCPU)
                                return (EINVAL);
                        cpu = n;
                }
        }
        ie = intr_lookup(irq);
        if (ie == NULL)
                return (ESRCH);
        switch (mode) {
        case CPU_WHICH_IRQ:
                return (intr_event_bind(ie, cpu));
        case CPU_WHICH_INTRHANDLER:
                return (intr_event_bind_irqonly(ie, cpu));
        case CPU_WHICH_ITHREAD:
                return (intr_event_bind_ithread(ie, cpu));
        default:
                return (EINVAL);
        }
}

int
intr_getaffinity(int irq, int mode, void *m)
{
        struct intr_event *ie;
        struct thread *td;
        struct proc *p;
        cpuset_t *mask;
        lwpid_t id;
        int error;

        mask = m;
        ie = intr_lookup(irq);
        if (ie == NULL)
                return (ESRCH);

        error = 0;
        CPU_ZERO(mask);
        switch (mode) {
        case CPU_WHICH_IRQ:
        case CPU_WHICH_INTRHANDLER:
                mtx_lock(&ie->ie_lock);
                if (ie->ie_cpu == NOCPU)
                        CPU_COPY(cpuset_root, mask);
                else
                        CPU_SET(ie->ie_cpu, mask);
                mtx_unlock(&ie->ie_lock);
                break;
        case CPU_WHICH_ITHREAD:
                mtx_lock(&ie->ie_lock);
                if (ie->ie_thread == NULL) {
                        mtx_unlock(&ie->ie_lock);
                        CPU_COPY(cpuset_root, mask);
                } else {
                        id = ie->ie_thread->it_thread->td_tid;
                        mtx_unlock(&ie->ie_lock);
                        error = cpuset_which(CPU_WHICH_TID, id, &p, &td, NULL);
                        if (error != 0)
                                return (error);
                        CPU_COPY(&td->td_cpuset->cs_mask, mask);
                        PROC_UNLOCK(p);
                }
                break;
        default:
                return (EINVAL);
        }
        return (0);
}

int
intr_event_destroy(struct intr_event *ie)
{

        mtx_lock(&event_lock);
        mtx_lock(&ie->ie_lock);
        if (!CK_SLIST_EMPTY(&ie->ie_handlers)) {
                mtx_unlock(&ie->ie_lock);
                mtx_unlock(&event_lock);
                return (EBUSY);
        }
        TAILQ_REMOVE(&event_list, ie, ie_list);
#ifndef notyet
        if (ie->ie_thread != NULL) {
                ithread_destroy(ie->ie_thread);
                ie->ie_thread = NULL;
        }
#endif
        mtx_unlock(&ie->ie_lock);
        mtx_unlock(&event_lock);
        mtx_destroy(&ie->ie_lock);
        free(ie, M_ITHREAD);
        return (0);
}

#endif /* __rtems__ */
static struct intr_thread *
ithread_create(const char *name)
{
#ifdef __rtems__
        struct proc *intrproc;
#endif /* __rtems__ */
        struct intr_thread *ithd;
        struct thread *td;
        int error;

        ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

        error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
                    &td, RFSTOPPED | RFHIGHPID,
                    0, "intr", "%s", name);
        if (error)
                panic("kproc_kthread_add() failed with %d", error);
        thread_lock(td);
#ifndef __rtems__
        sched_class(td, PRI_ITHD);
        TD_SET_IWAIT(td);
#endif /* __rtems__ */
        thread_unlock(td);
#ifndef __rtems__
        td->td_pflags |= TDP_ITHREAD;
#endif /* __rtems__ */
        ithd->it_thread = td;
        CTR2(KTR_INTR, "%s: created %s", __func__, name);
        return (ithd);
}
#ifndef __rtems__

static void
ithread_destroy(struct intr_thread *ithread)
{
        struct thread *td;

        CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
        td = ithread->it_thread;
        thread_lock(td);
        ithread->it_flags |= IT_DEAD;
        if (TD_AWAITING_INTR(td)) {
                TD_CLR_IWAIT(td);
                sched_add(td, SRQ_INTR);
        }
        thread_unlock(td);
}

#endif /* __rtems__ */
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
        struct intr_handler *ih, *temp_ih;
        struct intr_handler **prevptr;
        struct intr_thread *it;

        if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
                return (EINVAL);

        /* Allocate and populate an interrupt handler structure. */
        ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
        ih->ih_filter = filter;
        ih->ih_handler = handler;
        ih->ih_argument = arg;
        strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
        ih->ih_event = ie;
        ih->ih_pri = pri;
        if (flags & INTR_EXCL)
                ih->ih_flags = IH_EXCLUSIVE;
        if (flags & INTR_MPSAFE)
                ih->ih_flags |= IH_MPSAFE;
        if (flags & INTR_ENTROPY)
                ih->ih_flags |= IH_ENTROPY;

        /* We can only have one exclusive handler in an event. */
        mtx_lock(&ie->ie_lock);
        if (!CK_SLIST_EMPTY(&ie->ie_handlers)) {
                if ((flags & INTR_EXCL) ||
                    (CK_SLIST_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
                        mtx_unlock(&ie->ie_lock);
                        free(ih, M_ITHREAD);
                        return (EINVAL);
                }
        }

        /* Create a thread if we need one. */
        while (ie->ie_thread == NULL && handler != NULL) {
                if (ie->ie_flags & IE_ADDING_THREAD)
                        msleep(ie, &ie->ie_lock, 0, "ithread", 0);
                else {
                        ie->ie_flags |= IE_ADDING_THREAD;
                        mtx_unlock(&ie->ie_lock);
                        it = ithread_create("intr: newborn");
                        mtx_lock(&ie->ie_lock);
                        ie->ie_flags &= ~IE_ADDING_THREAD;
                        ie->ie_thread = it;
                        it->it_event = ie;
                        ithread_update(it);
                        wakeup(ie);
                }
        }

        /* Add the new handler to the event in priority order. */
        CK_SLIST_FOREACH_PREVPTR(temp_ih, prevptr, &ie->ie_handlers, ih_next) {
                if (temp_ih->ih_pri > ih->ih_pri)
                        break;
        }
        CK_SLIST_INSERT_PREVPTR(prevptr, temp_ih, ih, ih_next);

        intr_event_update(ie);

        CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
            ie->ie_name);
        mtx_unlock(&ie->ie_lock);

        if (cookiep != NULL)
                *cookiep = ih;
        return (0);
}
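
/*
 * Illustrative example (not part of the original source): a typical
 * bus_setup_intr() path registers a filter and/or a threaded handler on
 * the event for its IRQ.  All names below are hypothetical.
 *
 *      void *cookie;
 *
 *      error = intr_event_add_handler(ie, device_get_nameunit(dev),
 *          my_filter, my_handler, sc, intr_priority(INTR_TYPE_NET),
 *          INTR_TYPE_NET | INTR_MPSAFE, &cookie);
 */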

#ifndef __rtems__
/*
 * Append a description preceded by a ':' to the name of the specified
 * interrupt handler.
 */
int
intr_event_describe_handler(struct intr_event *ie, void *cookie,
    const char *descr)
{
        struct intr_handler *ih;
        size_t space;
        char *start;

        mtx_lock(&ie->ie_lock);
#ifdef INVARIANTS
        CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
                if (ih == cookie)
                        break;
        }
        if (ih == NULL) {
                mtx_unlock(&ie->ie_lock);
                panic("handler %p not found in interrupt event %p", cookie, ie);
        }
#endif
        ih = cookie;

        /*
         * Look for an existing description by checking for an
         * existing ":".  This assumes device names do not include
         * colons.  If one is found, prepare to insert the new
         * description at that point.  If one is not found, find the
         * end of the name to use as the insertion point.
         */
        start = strchr(ih->ih_name, ':');
        if (start == NULL)
                start = strchr(ih->ih_name, 0);

        /*
         * See if there is enough remaining room in the string for the
         * description + ":".  The "- 1" leaves room for the trailing
         * '\0'.  The "+ 1" accounts for the colon.
         */
        space = sizeof(ih->ih_name) - (start - ih->ih_name) - 1;
        if (strlen(descr) + 1 > space) {
                mtx_unlock(&ie->ie_lock);
                return (ENOSPC);
        }

        /* Append a colon followed by the description. */
        *start = ':';
        strcpy(start + 1, descr);
        intr_event_update(ie);
        mtx_unlock(&ie->ie_lock);
        return (0);
}

/*
 * Return the ie_source field from the intr_event an intr_handler is
 * associated with.
 */
void *
intr_handler_source(void *cookie)
{
        struct intr_handler *ih;
        struct intr_event *ie;

        ih = (struct intr_handler *)cookie;
        if (ih == NULL)
                return (NULL);
        ie = ih->ih_event;
        KASSERT(ie != NULL,
            ("interrupt handler \"%s\" has a NULL interrupt event",
            ih->ih_name));
        return (ie->ie_source);
}

/*
 * If intr_event_handle() is running in the ISR context at the time of the call,
 * then wait for it to complete.
 */
static void
intr_event_barrier(struct intr_event *ie)
{
        int phase;

        mtx_assert(&ie->ie_lock, MA_OWNED);
        phase = ie->ie_phase;

        /*
         * Switch phase to direct future interrupts to the other active counter.
         * Make sure that any preceding stores are visible before the switch.
         */
        KASSERT(ie->ie_active[!phase] == 0, ("idle phase has activity"));
        atomic_store_rel_int(&ie->ie_phase, !phase);

        /*
         * This code cooperates with wait-free iteration of ie_handlers
         * in intr_event_handle.
         * Make sure that the removal and the phase update are not reordered
         * with the active count check.
         * Note that no combination of acquire and release fences can provide
         * that guarantee as Store->Load sequences can always be reordered.
         */
        atomic_thread_fence_seq_cst();

        /*
         * Now wait on the inactive phase.
         * The acquire fence is needed so that all post-barrier accesses
         * are after the check.
         */
        while (ie->ie_active[phase] > 0)
                cpu_spinwait();
        atomic_thread_fence_acq();
}
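
/*
 * In short: ie_active[] keeps one in-flight counter per phase.
 * intr_event_handle() increments the current phase's counter around its
 * handler walk, so flipping ie_phase and then draining the old phase's
 * counter guarantees that no ISR still observes a handler the caller is
 * about to remove or modify.
 */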

static void
intr_handler_barrier(struct intr_handler *handler)
{
        struct intr_event *ie;

        ie = handler->ih_event;
        mtx_assert(&ie->ie_lock, MA_OWNED);
        KASSERT((handler->ih_flags & IH_DEAD) == 0,
            ("update for a removed handler"));

        if (ie->ie_thread == NULL) {
                intr_event_barrier(ie);
                return;
        }
        if ((handler->ih_flags & IH_CHANGED) == 0) {
                handler->ih_flags |= IH_CHANGED;
                intr_event_schedule_thread(ie);
        }
        while ((handler->ih_flags & IH_CHANGED) != 0)
                msleep(handler, &ie->ie_lock, 0, "ih_barr", 0);
}

/*
 * Sleep until an ithread finishes executing an interrupt handler.
 *
 * XXX Doesn't currently handle interrupt filters or fast interrupt
 * handlers.  This is intended for compatibility with linux drivers
 * only.  Do not use in BSD code.
 */
void
_intr_drain(int irq)
{
        struct intr_event *ie;
        struct intr_thread *ithd;
        struct thread *td;

        ie = intr_lookup(irq);
        if (ie == NULL)
                return;
        if (ie->ie_thread == NULL)
                return;
        ithd = ie->ie_thread;
        td = ithd->it_thread;
        /*
         * We set the flag and wait for it to be cleared to avoid
         * long delays with potentially busy interrupt handlers
         * were we to only sample TD_AWAITING_INTR() every tick.
         */
        thread_lock(td);
        if (!TD_AWAITING_INTR(td)) {
                ithd->it_flags |= IT_WAIT;
                while (ithd->it_flags & IT_WAIT) {
                        thread_unlock(td);
                        pause("idrain", 1);
                        thread_lock(td);
                }
        }
        thread_unlock(td);
        return;
}

int
intr_event_remove_handler(void *cookie)
{
        struct intr_handler *handler = (struct intr_handler *)cookie;
        struct intr_event *ie;
        struct intr_handler *ih;
        struct intr_handler **prevptr;
#ifdef notyet
        int dead;
#endif

        if (handler == NULL)
                return (EINVAL);
        ie = handler->ih_event;
        KASSERT(ie != NULL,
            ("interrupt handler \"%s\" has a NULL interrupt event",
            handler->ih_name));

        mtx_lock(&ie->ie_lock);
        CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
            ie->ie_name);
        CK_SLIST_FOREACH_PREVPTR(ih, prevptr, &ie->ie_handlers, ih_next) {
                if (ih == handler)
                        break;
        }
        if (ih == NULL) {
                panic("interrupt handler \"%s\" not found in "
                    "interrupt event \"%s\"", handler->ih_name, ie->ie_name);
        }

        /*
         * If there is no ithread, then directly remove the handler.  Note that
         * intr_event_handle() iterates ie_handlers in a lock-less fashion, so
         * care needs to be taken to keep ie_handlers consistent and to free
         * the removed handler only when ie_handlers is quiescent.
         */
        if (ie->ie_thread == NULL) {
                CK_SLIST_REMOVE_PREVPTR(prevptr, ih, ih_next);
                intr_event_barrier(ie);
                intr_event_update(ie);
                mtx_unlock(&ie->ie_lock);
                free(handler, M_ITHREAD);
                return (0);
        }

        /*
         * Let the interrupt thread do the job.
         * The interrupt source is disabled when the interrupt thread is
         * running, so it does not have to worry about interaction with
         * intr_event_handle().
         */
        KASSERT((handler->ih_flags & IH_DEAD) == 0,
            ("duplicate handle remove"));
        handler->ih_flags |= IH_DEAD;
        intr_event_schedule_thread(ie);
        while (handler->ih_flags & IH_DEAD)
                msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
        intr_event_update(ie);

#ifdef notyet
        /*
         * XXX: This could be bad in the case of ppbus(8).  Also, I think
         * this could lead to races of stale data when servicing an
         * interrupt.
         */
        dead = 1;
        CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
                if (ih->ih_handler != NULL) {
                        dead = 0;
                        break;
                }
        }
        if (dead) {
                ithread_destroy(ie->ie_thread);
                ie->ie_thread = NULL;
        }
#endif
        mtx_unlock(&ie->ie_lock);
        free(handler, M_ITHREAD);
        return (0);
}

int
intr_event_suspend_handler(void *cookie)
{
        struct intr_handler *handler = (struct intr_handler *)cookie;
        struct intr_event *ie;

        if (handler == NULL)
                return (EINVAL);
        ie = handler->ih_event;
        KASSERT(ie != NULL,
            ("interrupt handler \"%s\" has a NULL interrupt event",
            handler->ih_name));
        mtx_lock(&ie->ie_lock);
        handler->ih_flags |= IH_SUSP;
        intr_handler_barrier(handler);
        mtx_unlock(&ie->ie_lock);
        return (0);
}

int
intr_event_resume_handler(void *cookie)
{
        struct intr_handler *handler = (struct intr_handler *)cookie;
        struct intr_event *ie;

        if (handler == NULL)
                return (EINVAL);
        ie = handler->ih_event;
        KASSERT(ie != NULL,
            ("interrupt handler \"%s\" has a NULL interrupt event",
            handler->ih_name));

        /*
         * intr_handler_barrier() acts not only as a barrier; it also
         * allows checking for any pending interrupts.
         */
        mtx_lock(&ie->ie_lock);
        handler->ih_flags &= ~IH_SUSP;
        intr_handler_barrier(handler);
        mtx_unlock(&ie->ie_lock);
        return (0);
}
#endif /* __rtems__ */

static int
intr_event_schedule_thread(struct intr_event *ie)
{
        struct intr_entropy entropy;
        struct intr_thread *it;
        struct thread *td;
        struct thread *ctd;

        /*
         * If no ithread or no handlers, then we have a stray interrupt.
         */
        if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers) ||
            ie->ie_thread == NULL)
                return (EINVAL);

        ctd = curthread;
        it = ie->ie_thread;
        td = it->it_thread;

        /*
         * If any of the handlers for this ithread claim to be good
         * sources of entropy, then gather some.
         */
        if (ie->ie_flags & IE_ENTROPY) {
                entropy.event = (uintptr_t)ie;
                entropy.td = ctd;
                random_harvest_queue(&entropy, sizeof(entropy), RANDOM_INTERRUPT);
        }

#ifndef __rtems__
        KASSERT(td->td_proc != NULL, ("ithread %s has no process", ie->ie_name));
#endif /* __rtems__ */

        /*
         * Set it_need to tell the thread to keep running if it is already
         * running.  Then, lock the thread and see if we actually need to
         * put it on the runqueue.
         *
         * Use store_rel to arrange that the store to ih_need in
         * swi_sched() is before the store to it_need and prepare for
         * transfer of this order to loads in the ithread.
         */
        atomic_store_rel_int(&it->it_need, 1);
        thread_lock(td);
#ifndef __rtems__
        if (TD_AWAITING_INTR(td)) {
                CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, td->td_proc->p_pid,
                    td->td_name);
                TD_CLR_IWAIT(td);
                sched_add(td, SRQ_INTR);
        } else {
                CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
                    __func__, td->td_proc->p_pid, td->td_name, it->it_need, td->td_state);
        }
#else /* __rtems__ */
        /*
         * Send event to wake the thread up.
         * TODO: eventually replace event by a better mechanism
         */
        rtems_status_code sc = rtems_event_send(rtems_bsd_get_task_id(td),
            RTEMSBSD_SWI_WAKEUP_EVENT);
        BSD_ASSERT(sc == RTEMS_SUCCESSFUL);
#endif /* __rtems__ */
        thread_unlock(td);

        return (0);
}
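
/*
 * On RTEMS, the rtems_event_send() above pairs with the
 * rtems_event_receive() call in ithread_loop() below: the ithread blocks
 * on RTEMSBSD_SWI_WAKEUP_EVENT instead of using the FreeBSD
 * IWAIT/sched_add() handshake.
 */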

/*
 * Allow interrupt event binding for software interrupt handlers -- a no-op,
 * since interrupts are generated in software rather than being directed by
 * a PIC.
 */
static int
swi_assign_cpu(void *arg, int cpu)
{

        return (0);
}

/*
 * Add a software interrupt handler to a specified event.  If a given event
 * is not specified, then a new event is created.
 */
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
            void *arg, int pri, enum intr_type flags, void **cookiep)
{
        struct intr_event *ie;
        int error;

        if (flags & INTR_ENTROPY)
                return (EINVAL);

        ie = (eventp != NULL) ? *eventp : NULL;

        if (ie != NULL) {
                if (!(ie->ie_flags & IE_SOFT))
                        return (EINVAL);
        } else {
                error = intr_event_create(&ie, NULL, IE_SOFT, 0,
                    NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
                if (error)
                        return (error);
                if (eventp != NULL)
                        *eventp = ie;
        }
        error = intr_event_add_handler(ie, name, NULL, handler, arg,
            PI_SWI(pri), flags, cookiep);
        return (error);
}

/*
 * Schedule a software interrupt thread.
 */
void
swi_sched(void *cookie, int flags)
{
        struct intr_handler *ih = (struct intr_handler *)cookie;
        struct intr_event *ie = ih->ih_event;
        struct intr_entropy entropy;
        int error __unused;

        CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
            ih->ih_need);

        entropy.event = (uintptr_t)ih;
        entropy.td = curthread;
        random_harvest_queue(&entropy, sizeof(entropy), RANDOM_SWI);

        /*
         * Set ih_need for this handler so that if the ithread is already
         * running it will execute this handler on the next pass.  Otherwise,
         * it will execute it the next time it runs.
         */
        ih->ih_need = 1;

        if (!(flags & SWI_DELAY)) {
#ifndef __rtems__
                VM_CNT_INC(v_soft);
#endif /* __rtems__ */
                error = intr_event_schedule_thread(ie);
                KASSERT(error == 0, ("stray software interrupt"));
        }
}
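
/*
 * Illustrative example (not part of the original source): registering and
 * scheduling a software interrupt handler.  All names are hypothetical.
 *
 *      static void *example_ih;
 *
 *      static void
 *      example_swi(void *arg)
 *      {
 *              ... deferred work runs here, in the swi ithread ...
 *      }
 *
 *      swi_add(NULL, "example", example_swi, NULL, SWI_TQ, INTR_MPSAFE,
 *          &example_ih);
 *      ...
 *      swi_sched(example_ih, 0);
 */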

#ifndef __rtems__
/*
 * Remove a software interrupt handler.  Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 */
int
swi_remove(void *cookie)
{

        return (intr_event_remove_handler(cookie));
}
#endif /* __rtems__ */

static void
intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
{
        struct intr_handler *ih, *ihn, *ihp;

        ihp = NULL;
        CK_SLIST_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
                /*
                 * If this handler is marked for death, remove it from
                 * the list of handlers and wake up the sleeper.
                 */
                if (ih->ih_flags & IH_DEAD) {
                        mtx_lock(&ie->ie_lock);
                        if (ihp == NULL)
                                CK_SLIST_REMOVE_HEAD(&ie->ie_handlers, ih_next);
                        else
                                CK_SLIST_REMOVE_AFTER(ihp, ih_next);
                        ih->ih_flags &= ~IH_DEAD;
                        wakeup(ih);
                        mtx_unlock(&ie->ie_lock);
                        continue;
                }

                /*
                 * Now that we know that the current element won't be removed
                 * update the previous element.
                 */
                ihp = ih;

                if ((ih->ih_flags & IH_CHANGED) != 0) {
                        mtx_lock(&ie->ie_lock);
                        ih->ih_flags &= ~IH_CHANGED;
                        wakeup(ih);
                        mtx_unlock(&ie->ie_lock);
                }

                /* Skip filter-only handlers. */
                if (ih->ih_handler == NULL)
                        continue;

                /* Skip suspended handlers */
                if ((ih->ih_flags & IH_SUSP) != 0)
                        continue;

                /*
                 * For software interrupt threads, we only execute
                 * handlers that have their need flag set.  Hardware
                 * interrupt threads always invoke all of their handlers.
                 *
                 * ih_need can only be 0 or 1.  Failed cmpset below
                 * means that there is no request to execute handlers,
                 * so a retry of the cmpset is not needed.
                 */
                if ((ie->ie_flags & IE_SOFT) != 0 &&
                    atomic_cmpset_int(&ih->ih_need, 1, 0) == 0)
                        continue;

                /* Execute this handler. */
                CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
                    __func__, p->p_pid, (void *)ih->ih_handler,
                    ih->ih_argument, ih->ih_name, ih->ih_flags);

                if (!(ih->ih_flags & IH_MPSAFE))
                        mtx_lock(&Giant);
                ih->ih_handler(ih->ih_argument);
                if (!(ih->ih_flags & IH_MPSAFE))
                        mtx_unlock(&Giant);
        }
}

static void
ithread_execute_handlers(struct proc *p, struct intr_event *ie)
{

#ifndef __rtems__
        /* Interrupt handlers should not sleep. */
        if (!(ie->ie_flags & IE_SOFT))
                THREAD_NO_SLEEPING();
        intr_event_execute_handlers(p, ie);
        if (!(ie->ie_flags & IE_SOFT))
                THREAD_SLEEPING_OK();
#else /* __rtems__ */
        /* We only have soft-threads, so the two queries are not necessary. */
        intr_event_execute_handlers(p, ie);
#endif /* __rtems__ */

        /*
         * Interrupt storm handling:
         *
         * If this interrupt source is currently storming, then throttle
         * it to only fire the handler once per clock tick.
         *
         * If this interrupt source is not currently storming, but the
         * number of back to back interrupts exceeds the storm threshold,
         * then enter storming mode.
         */
        if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
            !(ie->ie_flags & IE_SOFT)) {
#ifndef __rtems__
                /* Report the message only once every second. */
                if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
                        printf(
        "interrupt storm detected on \"%s\"; throttling interrupt source\n",
                            ie->ie_name);
                }
#endif /* __rtems__ */
                pause("istorm", 1);
        } else
                ie->ie_count++;

        /*
         * Now that all the handlers have had a chance to run, reenable
         * the interrupt source.
         */
        if (ie->ie_post_ithread != NULL)
                ie->ie_post_ithread(ie->ie_source);
}
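
/*
 * Note (added for clarity, not part of the original source): ie_count is
 * reset to zero in ithread_loop() whenever the ithread goes idle, so the
 * storm check above only trips after intr_storm_threshold (default 1000)
 * back-to-back passes without an idle period.  The threshold is tunable
 * via the hw.intr_storm_threshold sysctl; setting it to 0 disables storm
 * protection.
 */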

/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
        struct intr_thread *ithd;
        struct intr_event *ie;
        struct thread *td;
        struct proc *p;
        int wake;

        td = curthread;
        p = td->td_proc;
        ithd = (struct intr_thread *)arg;
        KASSERT(ithd->it_thread == td,
            ("%s: ithread and proc linkage out of sync", __func__));
        ie = ithd->it_event;
        ie->ie_count = 0;
        wake = 0;

        /*
         * As long as we have interrupts outstanding, go through the
         * list of handlers, giving each one a go at it.
         */
        for (;;) {
                /*
                 * If we are an orphaned thread, then just die.
                 */
                if (ithd->it_flags & IT_DEAD) {
                        CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
                            p->p_pid, td->td_name);
                        free(ithd, M_ITHREAD);
                        kthread_exit();
                }

                /*
                 * Service interrupts.  If another interrupt arrives while
                 * we are running, it will set it_need to note that we
                 * should make another pass.
                 *
                 * The load_acq part of the following cmpset ensures
                 * that the load of ih_need in ithread_execute_handlers()
                 * is ordered after the load of it_need here.
                 */
                while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0)
                        ithread_execute_handlers(p, ie);
                WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
                mtx_assert(&Giant, MA_NOTOWNED);

                /*
                 * Processed all our interrupts.  Now get the sched
                 * lock.  This may take a while and it_need may get
                 * set again, so we have to check it again.
                 */
                thread_lock(td);
                if (atomic_load_acq_int(&ithd->it_need) == 0 &&
                    (ithd->it_flags & (IT_DEAD | IT_WAIT)) == 0) {
#ifndef __rtems__
                        TD_SET_IWAIT(td);
                        ie->ie_count = 0;
                        mi_switch(SW_VOL | SWT_IWAIT, NULL);
#else /* __rtems__ */
                        /*
                         * Wait for the wakeup event.
                         * TODO: eventually replace event by a better mechanism
                         */
                        rtems_event_set event_out;
                        rtems_status_code sc = rtems_event_receive(
                                RTEMSBSD_SWI_WAKEUP_EVENT,
                                RTEMS_WAIT | RTEMS_EVENT_ALL,
                                RTEMS_NO_TIMEOUT,
                                &event_out);
                        BSD_ASSERT(sc == RTEMS_SUCCESSFUL);
#endif /* __rtems__ */
                }
                if (ithd->it_flags & IT_WAIT) {
                        wake = 1;
                        ithd->it_flags &= ~IT_WAIT;
                }
                thread_unlock(td);
                if (wake) {
                        wakeup(ithd);
                        wake = 0;
                }
        }
}
#ifndef __rtems__

/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:                        the event connected to this interrupt.
 * o frame:                     some archs (i.e. i386) pass a frame to some
 *                              handlers as their main argument.
 * Return value:
 * o 0:                         everything ok.
 * o EINVAL:                    stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
        struct intr_handler *ih;
        struct trapframe *oldframe;
        struct thread *td;
        int phase;
        int ret;
        bool filter, thread;

        td = curthread;

#ifdef KSTACK_USAGE_PROF
        intr_prof_stack_use(td, frame);
#endif

        /* An interrupt with no event or handlers is a stray interrupt. */
        if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers))
                return (EINVAL);

        /*
         * Execute fast interrupt handlers directly.
         * To support clock handlers, if a handler registers
         * with a NULL argument, then we pass it a pointer to
         * a trapframe as its argument.
         */
        td->td_intr_nesting_level++;
        filter = false;
        thread = false;
        ret = 0;
        critical_enter();
        oldframe = td->td_intr_frame;
        td->td_intr_frame = frame;

        phase = ie->ie_phase;
        atomic_add_int(&ie->ie_active[phase], 1);

        /*
         * This fence is required to ensure that no later loads are
         * re-ordered before the ie_active store.
         */
        atomic_thread_fence_seq_cst();

        CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
                if ((ih->ih_flags & IH_SUSP) != 0)
                        continue;
                if (ih->ih_filter == NULL) {
                        thread = true;
                        continue;
                }
                CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
                    ih->ih_filter, ih->ih_argument == NULL ? frame :
                    ih->ih_argument, ih->ih_name);
                if (ih->ih_argument == NULL)
                        ret = ih->ih_filter(frame);
                else
                        ret = ih->ih_filter(ih->ih_argument);
                KASSERT(ret == FILTER_STRAY ||
                    ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
                    (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
                    ("%s: incorrect return value %#x from %s", __func__, ret,
                    ih->ih_name));
                filter = filter || ret == FILTER_HANDLED;

                /*
                 * Wrapper handler special handling:
                 *
                 * in some particular cases (like pccard and pccbb),
                 * the _real_ device handler is wrapped in a couple of
                 * functions - a filter wrapper and an ithread wrapper.
                 * In this case (and just in this case), the filter wrapper
                 * could ask the system to schedule the ithread and mask
                 * the interrupt source if the wrapped handler is composed
                 * of just an ithread handler.
                 *
                 * TODO: write a generic wrapper to avoid people rolling
                 * their own.
                 */
                if (!thread) {
                        if (ret == FILTER_SCHEDULE_THREAD)
                                thread = true;
                }
        }
        atomic_add_rel_int(&ie->ie_active[phase], -1);

        td->td_intr_frame = oldframe;

        if (thread) {
                if (ie->ie_pre_ithread != NULL)
                        ie->ie_pre_ithread(ie->ie_source);
        } else {
                if (ie->ie_post_filter != NULL)
                        ie->ie_post_filter(ie->ie_source);
        }

        /* Schedule the ithread if needed. */
        if (thread) {
                int error __unused;

                error = intr_event_schedule_thread(ie);
                KASSERT(error == 0, ("bad stray interrupt"));
        }
        critical_exit();
        td->td_intr_nesting_level--;
#ifdef notyet
        /* The interrupt is not acknowledged by any filter and has no ithread. */
        if (!thread && !filter)
                return (EINVAL);
#endif
        return (0);
}
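
/*
 * Illustrative example (not part of the original source): a filter that
 * obeys the return-value contract asserted above.  It claims its own
 * interrupts, defers slow work to the ithread, and reports stray
 * interrupts.  All names are hypothetical.
 *
 *      static int
 *      my_filter(void *arg)
 *      {
 *              struct my_softc *sc = arg;
 *
 *              if (!MY_INTR_PENDING(sc))
 *                      return (FILTER_STRAY);
 *              MY_INTR_ACK(sc);
 *              return (FILTER_SCHEDULE_THREAD);
 *      }
 */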

#ifdef DDB
/*
 * Dump details about an interrupt handler
 */
static void
db_dump_intrhand(struct intr_handler *ih)
{
        int comma;

        db_printf("\t%-10s ", ih->ih_name);
        switch (ih->ih_pri) {
        case PI_REALTIME:
                db_printf("CLK ");
                break;
        case PI_AV:
                db_printf("AV  ");
                break;
        case PI_TTY:
                db_printf("TTY ");
                break;
        case PI_NET:
                db_printf("NET ");
                break;
        case PI_DISK:
                db_printf("DISK");
                break;
        case PI_DULL:
                db_printf("DULL");
                break;
        default:
                if (ih->ih_pri >= PI_SOFT)
                        db_printf("SWI ");
                else
                        db_printf("%4u", ih->ih_pri);
                break;
        }
        db_printf(" ");
        if (ih->ih_filter != NULL) {
                db_printf("[F]");
                db_printsym((uintptr_t)ih->ih_filter, DB_STGY_PROC);
        }
        if (ih->ih_handler != NULL) {
                if (ih->ih_filter != NULL)
                        db_printf(",");
                db_printf("[H]");
                db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
        }
        db_printf("(%p)", ih->ih_argument);
        if (ih->ih_need ||
            (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
            IH_MPSAFE)) != 0) {
                db_printf(" {");
                comma = 0;
                if (ih->ih_flags & IH_EXCLUSIVE) {
                        if (comma)
                                db_printf(", ");
                        db_printf("EXCL");
                        comma = 1;
                }
                if (ih->ih_flags & IH_ENTROPY) {
                        if (comma)
                                db_printf(", ");
                        db_printf("ENTROPY");
                        comma = 1;
                }
                if (ih->ih_flags & IH_DEAD) {
                        if (comma)
                                db_printf(", ");
                        db_printf("DEAD");
                        comma = 1;
                }
                if (ih->ih_flags & IH_MPSAFE) {
                        if (comma)
                                db_printf(", ");
                        db_printf("MPSAFE");
                        comma = 1;
                }
                if (ih->ih_need) {
                        if (comma)
                                db_printf(", ");
                        db_printf("NEED");
                }
                db_printf("}");
        }
        db_printf("\n");
}

/*
 * Dump details about an event.
 */
void
db_dump_intr_event(struct intr_event *ie, int handlers)
{
        struct intr_handler *ih;
        struct intr_thread *it;
        int comma;

        db_printf("%s ", ie->ie_fullname);
        it = ie->ie_thread;
        if (it != NULL)
                db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
        else
                db_printf("(no thread)");
        if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
            (it != NULL && it->it_need)) {
                db_printf(" {");
                comma = 0;
                if (ie->ie_flags & IE_SOFT) {
                        db_printf("SOFT");
                        comma = 1;
                }
                if (ie->ie_flags & IE_ENTROPY) {
                        if (comma)
                                db_printf(", ");
                        db_printf("ENTROPY");
                        comma = 1;
                }
                if (ie->ie_flags & IE_ADDING_THREAD) {
                        if (comma)
                                db_printf(", ");
                        db_printf("ADDING_THREAD");
                        comma = 1;
                }
                if (it != NULL && it->it_need) {
                        if (comma)
                                db_printf(", ");
                        db_printf("NEED");
                }
                db_printf("}");
        }
        db_printf("\n");

        if (handlers)
                CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next)
                    db_dump_intrhand(ih);
}

/*
 * Dump data about interrupt handlers
 */
DB_SHOW_COMMAND(intr, db_show_intr)
{
        struct intr_event *ie;
        int all, verbose;

        verbose = strchr(modif, 'v') != NULL;
        all = strchr(modif, 'a') != NULL;
        TAILQ_FOREACH(ie, &event_list, ie_list) {
                if (!all && CK_SLIST_EMPTY(&ie->ie_handlers))
                        continue;
                db_dump_intr_event(ie, verbose);
                if (db_pager_quit)
                        break;
        }
}
#endif /* DDB */

/*
 * Start standard software interrupt threads
 */
static void
start_softintr(void *dummy)
{

        if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
                panic("died while creating vm swi ithread");
}
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
    NULL);

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in machine
 * dependent code.  The layout of intrnames and intrcnt however is machine
 * independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */
static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
        return (sysctl_handle_opaque(oidp, intrnames, sintrnames, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
#ifdef SCTL_MASK32
        uint32_t *intrcnt32;
        unsigned i;
        int error;

        if (req->flags & SCTL_MASK32) {
                if (!req->oldptr)
                        return (sysctl_handle_opaque(oidp, NULL, sintrcnt / 2, req));
                intrcnt32 = malloc(sintrcnt / 2, M_TEMP, M_NOWAIT);
                if (intrcnt32 == NULL)
                        return (ENOMEM);
                for (i = 0; i < sintrcnt / sizeof (u_long); i++)
                        intrcnt32[i] = intrcnt[i];
                error = sysctl_handle_opaque(oidp, intrcnt32, sintrcnt / 2, req);
                free(intrcnt32, M_TEMP);
                return (error);
        }
#endif
        return (sysctl_handle_opaque(oidp, intrcnt, sintrcnt, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");

#ifdef DDB
/*
 * DDB command to dump the interrupt statistics.
 */
DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
{
        u_long *i;
        char *cp;
        u_int j;

        cp = intrnames;
        j = 0;
        for (i = intrcnt; j < (sintrcnt / sizeof(u_long)) && !db_pager_quit;
            i++, j++) {
                if (*cp == '\0')
                        break;
                if (*i != 0)
                        db_printf("%s\t%lu\n", cp, *i);
                cp += strlen(cp) + 1;
        }
}
#endif
#endif /* __rtems__ */