source: rtems/c/src/lib/libcpu/powerpc/mpc5xx/irq/irq.c @ f62c7daa

Last change on this file was f62c7daa, checked in by Joel Sherrill <joel.sherrill@…> on 10/14/14 at 19:10:22

mpc5xx libcpu and ss555 BSP: Fix warnings

/*
 *  This file contains the implementation of the functions described in irq.h
 */

/*
 *  MPC5xx port sponsored by Defence Research and Development Canada - Suffield
 *  Copyright (C) 2004, Real-Time Systems Inc. (querbach@realtime.bc.ca)
 *
 *  Derived from libbsp/powerpc/mbx8xx/irq/irq.c:
 *
 *  Copyright (C) 1998, 1999 valette@crf.canon.fr
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#include <rtems.h>
#include <rtems/score/apiext.h>
#include <mpc5xx.h>
#include <libcpu/vectors.h>
#include <libcpu/raw_exception.h>
#include <libcpu/irq.h>
#include <bsp/irq.h>

/*
 * Convert an rtems_irq_number constant to an interrupt level
 * suitable for programming into an I/O device's interrupt level field.
 */
int CPU_irq_level_from_symbolic_name(const rtems_irq_number name)
{
  if (CPU_USIU_EXT_IRQ_0 <= name && name <= CPU_USIU_INT_IRQ_7)
    return (name - CPU_USIU_EXT_IRQ_0) / 2;

  if (CPU_UIMB_IRQ_8 <= name && name <= CPU_UIMB_IRQ_31)
    return 8 + (name - CPU_UIMB_IRQ_8);

  return 31;                    /* reasonable default */
}
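/*
 * Worked example: the USIU names interleave external IRQs and internal
 * levels (external IRQ 0, internal level 0, external IRQ 1, ...), so the
 * division by two above maps, e.g., both CPU_USIU_EXT_IRQ_2 and
 * CPU_USIU_INT_IRQ_2 to level 2, while CPU_UIMB_IRQ_8 through
 * CPU_UIMB_IRQ_31 map directly to levels 8 through 31.
 */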

/*
 * Default handler connected to each irq after BSP initialization
 */
static rtems_irq_connect_data           default_rtems_entry;

/*
 * Locations used to store the initial tables used for interrupt
 * management.
 */
static rtems_irq_global_settings*       internal_config;
static rtems_irq_connect_data*          rtems_hdl_tbl;

/*
 * Check if symbolic IRQ name is a USIU IRQ
 */
static inline int is_usiu_irq(const rtems_irq_number irqLine)
{
  return (((int) irqLine <= CPU_USIU_IRQ_MAX_OFFSET) &&
          ((int) irqLine >= CPU_USIU_IRQ_MIN_OFFSET)
         );
}

/*
 * Check if symbolic IRQ name is a UIMB IRQ
 */
static inline int is_uimb_irq(const rtems_irq_number irqLine)
{
  return (((int) irqLine <= CPU_UIMB_IRQ_MAX_OFFSET) &&
          ((int) irqLine >= CPU_UIMB_IRQ_MIN_OFFSET)
         );
}

/*
 * Check if symbolic IRQ name is a Processor IRQ
 */
static inline int is_proc_irq(const rtems_irq_number irqLine)
{
  return (((int) irqLine <= CPU_PROC_IRQ_MAX_OFFSET) &&
          ((int) irqLine >= CPU_PROC_IRQ_MIN_OFFSET)
         );
}

/*
 * Masks used to disable interrupts of equal and lower priority while an
 * interrupt is serviced.  For example, for ILVL2 the mask masks off
 * ILVL2, IRQ3, ILVL3, ... IRQ7 and ILVL7.
 */
static const unsigned int USIU_IvectMask[CPU_USIU_IRQ_COUNT] =
{
  0,                            /* external IRQ 0 */
  0xFFFFFFFF << 31,             /* internal level 0 */
  0xFFFFFFFF << 30,             /* external IRQ 1 */
  0xFFFFFFFF << 29,             /* internal level 1 */
  0xFFFFFFFF << 28,             /* external IRQ 2 */
  0xFFFFFFFF << 27,             /* internal level 2 */
  0xFFFFFFFF << 26,             /* external IRQ 3 */
  0xFFFFFFFF << 25,             /* internal level 3 */
  0xFFFFFFFF << 24,             /* external IRQ 4 */
  0xFFFFFFFF << 23,             /* internal level 4 */
  0xFFFFFFFF << 22,             /* external IRQ 5 */
  0xFFFFFFFF << 21,             /* internal level 5 */
  0xFFFFFFFF << 20,             /* external IRQ 6 */
  0xFFFFFFFF << 19,             /* internal level 6 */
  0xFFFFFFFF << 18,             /* external IRQ 7 */
  0xFFFFFFFF << 17              /* internal level 7 */
};
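/*
 * Worked example: when the dispatcher below services internal level 2
 * (table index 5), it applies USIU_IvectMask[5] = 0xFFFFFFFF << 27 =
 * 0xF8000000 to the cached mask, leaving at most the five higher-priority
 * sources (external IRQ 0 through external IRQ 2) unmasked until the
 * previous mask is restored.
 */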


/*
 * ------------------------ RTEMS Irq helper functions ----------------
 */

/*
 * Caution: this function assumes the variable "internal_config"
 * is already set and that the tables it contains are still valid
 * and accessible.
 */
static void compute_USIU_IvectMask_from_prio (void)
{
  /*
   * In theory this is feasible. No time to code it yet. See i386/shared/irq.c
   * for an example based on 8259 controller mask. The actual masks defined
   * correspond to the priorities defined for the USIU in irq_init.c.
   */
}
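/*
 * Sketch of what such a computation could look like (assuming, as in the
 * i386 shared implementation referenced above, that internal_config
 * carries a per-source priority table): for each USIU source i, set bit
 * (31 - j) in USIU_IvectMask[i] only for those sources j whose priority
 * is strictly higher than that of i.  The constant table above is the
 * hand-computed result of that rule for the priorities set in irq_init.c.
 */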

/*
 * This function checks that the value given for the irq line
 * is valid.
 */
static int isValidInterrupt(int irq)
{
  if ( (irq < CPU_MIN_OFFSET) || (irq > CPU_MAX_OFFSET)
        || (irq == CPU_UIMB_INTERRUPT) )
    return 0;
  return 1;
}

/*
 * This port does not mask individual UIMB sources (see the dispatcher
 * below), so these two routines only validate their argument.
 */
static int CPU_irq_enable_at_uimb(const rtems_irq_number irqLine)
{
  if (!is_uimb_irq(irqLine))
    return 1;
  return 0;
}

static int CPU_irq_disable_at_uimb(const rtems_irq_number irqLine)
{
  if (!is_uimb_irq(irqLine))
    return 1;
  return 0;
}

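/*
 * The cached mask mirrors the USIU SIMASK register, which is numbered with
 * bit 0 as the most-significant bit (the usual PowerPC convention), so USIU
 * source index n corresponds to host bit (31 - n) below.
 */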
static int CPU_irq_enable_at_usiu(const rtems_irq_number irqLine)
{
  int usiu_irq_index;

  if (!is_usiu_irq(irqLine))
    return 1;

  usiu_irq_index = ((int) (irqLine) - CPU_USIU_IRQ_MIN_OFFSET);
  ppc_cached_irq_mask |= (1 << (31-usiu_irq_index));
  usiu.simask = ppc_cached_irq_mask;

  return 0;
}

static int CPU_irq_disable_at_usiu(const rtems_irq_number irqLine)
{
  int usiu_irq_index;

  if (!is_usiu_irq(irqLine))
    return 1;

  usiu_irq_index = ((int) (irqLine) - CPU_USIU_IRQ_MIN_OFFSET);
  ppc_cached_irq_mask &= ~(1 << (31-usiu_irq_index));
  usiu.simask = ppc_cached_irq_mask;

  return 0;
}

/*
 * --------------- RTEMS Single Irq Handler Mngt Routines ----------------
 */

int CPU_install_rtems_irq_handler (const rtems_irq_connect_data* irq)
{
    rtems_interrupt_level       level;

    if (!isValidInterrupt(irq->name)) {
      return 0;
    }
    /*
     * Check that the default handler is actually connected.  If not, report
     * an error: you must first get the current handler via
     * CPU_get_current_rtems_irq_handler and then disconnect it using
     * CPU_remove_rtems_irq_handler.
     * RATIONALE: always use the same transition by forcing the user
     * to get the previous handler before accepting to disconnect.
     */
    if (rtems_hdl_tbl[irq->name].hdl != default_rtems_entry.hdl) {
      return 0;
    }

    rtems_interrupt_disable(level);

    /*
     * store the data provided by user
     */
    rtems_hdl_tbl[irq->name] = *irq;

    if (is_uimb_irq(irq->name)) {
      /*
       * Enable interrupt at UIMB level
       */
      CPU_irq_enable_at_uimb (irq->name);
    }

    if (is_usiu_irq(irq->name)) {
      /*
       * Enable interrupt at USIU level
       */
      CPU_irq_enable_at_usiu (irq->name);
    }

    if (is_proc_irq(irq->name)) {
      /*
       * We should enable the exception at the processor level here, but it
       * is not needed: the interrupt state is restored at the end of this
       * routine anyway.
       */
    }
    /*
     * Enable interrupt on device
     */
    if (irq->on)
      irq->on(irq);

    rtems_interrupt_enable(level);

    return 1;
}
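/*
 * Illustrative sketch of the replacement protocol enforced above (the
 * rtems_irq_connect_data "new_entry" is assumed to be filled in by the
 * caller):
 *
 *   rtems_irq_connect_data old;
 *
 *   old.name = new_entry.name;
 *   if (CPU_get_current_rtems_irq_handler(&old) &&
 *       CPU_remove_rtems_irq_handler(&old) &&
 *       CPU_install_rtems_irq_handler(&new_entry)) {
 *     ... new_entry.hdl is now connected to new_entry.name ...
 *   }
 */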


int CPU_get_current_rtems_irq_handler (rtems_irq_connect_data* irq)
{
     if (!isValidInterrupt(irq->name)) {
       return 0;
     }
     *irq = rtems_hdl_tbl[irq->name];
     return 1;
}

int CPU_remove_rtems_irq_handler (const rtems_irq_connect_data* irq)
{
    rtems_interrupt_level       level;

    if (!isValidInterrupt(irq->name)) {
      return 0;
    }
    /*
     * Check that the handler to be removed is actually connected.  If not,
     * report an error: you must first get the current handler via
     * CPU_get_current_rtems_irq_handler before asking to disconnect it.
     * RATIONALE: always use the same transition by forcing the user
     * to get the previous handler before accepting to disconnect.
     */
    if (rtems_hdl_tbl[irq->name].hdl != irq->hdl) {
      return 0;
    }
    rtems_interrupt_disable(level);

    /*
     * Disable interrupt on device
     */
    if (irq->off)
      irq->off(irq);

    if (is_uimb_irq(irq->name)) {
      /*
       * disable interrupt at UIMB level
       */
      CPU_irq_disable_at_uimb (irq->name);
    }
    if (is_usiu_irq(irq->name)) {
      /*
       * disable interrupt at USIU level
       */
      CPU_irq_disable_at_usiu (irq->name);
    }
    if (is_proc_irq(irq->name)) {
      /*
       * disable exception at processor level
       */
    }

    /*
     * restore the default irq value
     */
    rtems_hdl_tbl[irq->name] = default_rtems_entry;

    rtems_interrupt_enable(level);

    return 1;
}

/*
 * ---------------- RTEMS Global Irq Handler Mngt Routines ----------------
 */

int CPU_rtems_irq_mngt_set (rtems_irq_global_settings* config)
{
    int                    i;
    rtems_interrupt_level  level;

    /*
     * Store various code accelerators
     */
    internal_config             = config;
    default_rtems_entry         = config->defaultEntry;
    rtems_hdl_tbl               = config->irqHdlTbl;

    rtems_interrupt_disable(level);

    /*
     * Start with UIMB IRQs
     */
    for (i = CPU_UIMB_IRQ_MIN_OFFSET; i <= CPU_UIMB_IRQ_MAX_OFFSET ; i++) {
      if (rtems_hdl_tbl[i].hdl != default_rtems_entry.hdl) {
        CPU_irq_enable_at_uimb (i);
        if (rtems_hdl_tbl[i].on)
          rtems_hdl_tbl[i].on(&rtems_hdl_tbl[i]);
      }
      else {
        if (rtems_hdl_tbl[i].off)
          rtems_hdl_tbl[i].off(&rtems_hdl_tbl[i]);
        CPU_irq_disable_at_uimb (i);
      }
    }

    /*
     * Continue with USIU IRQs.
     * Set up internal tables used by the rtems interrupt prologue.
     */
    compute_USIU_IvectMask_from_prio ();

    for (i = CPU_USIU_IRQ_MIN_OFFSET; i <= CPU_USIU_IRQ_MAX_OFFSET ; i++) {
      if (rtems_hdl_tbl[i].hdl != default_rtems_entry.hdl) {
        CPU_irq_enable_at_usiu (i);
        if (rtems_hdl_tbl[i].on)
          rtems_hdl_tbl[i].on(&rtems_hdl_tbl[i]);
      }
      else {
        if (rtems_hdl_tbl[i].off)
          rtems_hdl_tbl[i].off(&rtems_hdl_tbl[i]);
        CPU_irq_disable_at_usiu (i);
      }
    }

    /*
     * Enable all UIMB interrupt lines, then enable at USIU.
     */
    imb.uimb.umcr |= UIMB_UMCR_IRQMUX(3);
    CPU_irq_enable_at_usiu (CPU_UIMB_INTERRUPT);

    /*
     * Finish with processor exceptions handled like IRQs
     */
    for (i = CPU_PROC_IRQ_MIN_OFFSET; i <= CPU_PROC_IRQ_MAX_OFFSET; i++) {
      if (rtems_hdl_tbl[i].hdl != default_rtems_entry.hdl) {
        if (rtems_hdl_tbl[i].on)
          rtems_hdl_tbl[i].on(&rtems_hdl_tbl[i]);
      }
      else {
        if (rtems_hdl_tbl[i].off)
          rtems_hdl_tbl[i].off(&rtems_hdl_tbl[i]);
      }
    }
    rtems_interrupt_enable(level);
    return 1;
}

int CPU_rtems_irq_mngt_get(rtems_irq_global_settings** config)
{
    *config = internal_config;
    return 0;
}

/*
 * High level IRQ handler called from shared_raw_irq_code_entry
 */
void C_dispatch_irq_handler (CPU_Interrupt_frame *frame, unsigned int excNum)
{
  register unsigned int irq;
  register unsigned uimbIntr;                 /* boolean */
  register unsigned oldMask;                  /* saved USIU interrupt mask */
  register unsigned msr;
  register unsigned new_msr;

  /*
   * Handle decrementer interrupt
   */
  if (excNum == ASM_DEC_VECTOR) {
    _CPU_MSR_GET(msr);
    new_msr = msr | MSR_EE;
    _CPU_MSR_SET(new_msr);

    rtems_hdl_tbl[CPU_DECREMENTER].hdl(rtems_hdl_tbl[CPU_DECREMENTER].handle);

    _CPU_MSR_SET(msr);
    return;
  }

  /*
   * Handle external interrupts generated by the USIU on the PPC core
   */
  while ((ppc_cached_irq_mask & usiu.sipend) != 0) {
    /*
     * The SIVEC interrupt code identifies the highest-priority pending
     * enabled USIU source; its upper bits give the index into our tables.
     */
    irq = (usiu.sivec >> 26);
    uimbIntr = (irq == CPU_UIMB_INTERRUPT);
    /*
     * Disable interrupts of the same and lower priority.
     */
    oldMask = ppc_cached_irq_mask;
    ppc_cached_irq_mask = oldMask & USIU_IvectMask[irq];
    usiu.simask = ppc_cached_irq_mask;
    /*
     * Acknowledge the current interrupt. This has no effect on internal
     * level interrupts.
     */
    usiu.sipend = (1 << (31 - irq));

    if (uimbIntr)  {
      /*
       * Look at the bits set in the UIMB interrupt-pending register.  The
       * highest-order set bit indicates the handler we will run.
       *
       * Unfortunately, we can't easily mask individual UIMB interrupts
       * unless they use USIU levels 0 to 6, so we must mask all low-level
       * (level > 7) UIMB interrupts while we service any interrupt.
       */
      int uipend = imb.uimb.uipend << 8;

      if (uipend == 0) {        /* spurious interrupt?  use last vector */
        irq = CPU_UIMB_IRQ_MAX_OFFSET;
      }
      else {
        irq = CPU_UIMB_IRQ_MIN_OFFSET;
        for ( ; (uipend & 0x8000000) == 0; uipend <<= 1) {
          irq++;
        }
      }
    }
    _CPU_MSR_GET(msr);
    new_msr = msr | MSR_EE;
    _CPU_MSR_SET(new_msr);

    rtems_hdl_tbl[irq].hdl(rtems_hdl_tbl[irq].handle);

    _CPU_MSR_SET(msr);

    ppc_cached_irq_mask = oldMask;
    usiu.simask = ppc_cached_irq_mask;
  }
}