source: rtems/c/src/lib/libcpu/powerpc/mpc5xx/irq/irq.c @ 297d99b1

Branches: 4.10, 4.11, 4.8, 4.9, 5
Last change on this file since 297d99b1 was 8430205, checked in by Joel Sherrill <joel.sherrill@…>, on 04/12/04 at 22:04:28

2004-04-12 David Querbach <querbach@…>

  • README, configure.ac, mpc5xx/Makefile.am, mpc5xx/exceptions/raw_exception.c, mpc5xx/exceptions/raw_exception.h, mpc5xx/timer/timer.c, shared/include/cpuIdent.h: addition of a significant amount of MPC5xx support as part of the addition of the SS555 BSP.
  • mpc5xx/README, mpc5xx/clock/clock.c, mpc5xx/console-generic/console-generic.c, mpc5xx/include/console.h, mpc5xx/include/mpc5xx.h, mpc5xx/irq/irq.c, mpc5xx/irq/irq.h, mpc5xx/irq/irq_asm.S, mpc5xx/irq/irq_init.c, mpc5xx/vectors/vectors.S, mpc5xx/vectors/vectors.h, mpc5xx/vectors/vectors_init.c: New files.
  • mpc5xx/exceptions/asm_utils.S: Removed.
  • Property mode set to 100644
File size: 12.4 KB
Line 
1/*
2 * irq.c
3 *
4 *  This file contains the implementation of the function described in irq.h
5 *
6 *  MPC5xx port sponsored by Defence Research and Development Canada - Suffield
7 *  Copyright (C) 2004, Real-Time Systems Inc. (querbach@realtime.bc.ca)
8 *
9 *  Derived from libbsp/powerpc/mbx8xx/irq/irq.c:
10 *
11 *  Copyright (C) 1998, 1999 valette@crf.canon.fr
12 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
16 *
17 *  $Id$
18 */
19 
20#include <rtems.h>
21#include <rtems/score/apiext.h>
22#include <mpc5xx.h>
23#include <libcpu/vectors.h>
24#include <libcpu/raw_exception.h>
25#include <libcpu/irq.h>
26
27/*
28 * Convert an rtems_irq_symbolic_name constant to an interrupt level
29 * suitable for programming into an I/O device's interrupt level field.
30 */
31 
32int CPU_irq_level_from_symbolic_name(const rtems_irq_symbolic_name name)
33{
34  if (CPU_USIU_EXT_IRQ_0 <= name && name <= CPU_USIU_INT_IRQ_7)
35    return (name - CPU_USIU_EXT_IRQ_0) / 2;
36   
37  if (CPU_UIMB_IRQ_8 <= name && name <= CPU_UIMB_IRQ_31)
38    return 8 + (name - CPU_UIMB_IRQ_8);
39
40  return 31;                    /* reasonable default */
41}
42
/*
 * Default handler connected on each irq after bsp initialization.
 * Used to recognize vectors that have no real handler installed.
 */
static rtems_irq_connect_data           default_rtems_entry;

/*
 * Location used to store initial tables used for interrupt
 * management.  Set by CPU_rtems_irq_mngt_set; rtems_hdl_tbl caches
 * config->irqHdlTbl for fast access from the dispatch path.
 */
static rtems_irq_global_settings*       internal_config;
static rtems_irq_connect_data*          rtems_hdl_tbl;
54
55/*
56 * Check if symbolic IRQ name is an USIU IRQ
57 */
58static inline int is_usiu_irq(const rtems_irq_symbolic_name irqLine)
59{
60  return (((int) irqLine <= CPU_USIU_IRQ_MAX_OFFSET) &&
61          ((int) irqLine >= CPU_USIU_IRQ_MIN_OFFSET)
62         );
63}
64
65/*
66 * Check if symbolic IRQ name is an UIMB IRQ
67 */
68static inline int is_uimb_irq(const rtems_irq_symbolic_name irqLine)
69{
70  return (((int) irqLine <= CPU_UIMB_IRQ_MAX_OFFSET) &&
71          ((int) irqLine >= CPU_UIMB_IRQ_MIN_OFFSET)
72         );
73}
74
75/*
76 * Check if symbolic IRQ name is a Processor IRQ
77 */
78static inline int is_proc_irq(const rtems_irq_symbolic_name irqLine)
79{
80  return (((int) irqLine <= CPU_PROC_IRQ_MAX_OFFSET) &&
81          ((int) irqLine >= CPU_PROC_IRQ_MIN_OFFSET)
82         );
83}
84
85
86/*
87 * Masks used to mask off the interrupts. For exmaple, for ILVL2, the 
88 * mask is used to mask off interrupts ILVL2, IRQ3, ILVL3, ... IRQ7   
89 * and ILVL7.                                                         
90 *
91 */
92const static unsigned int USIU_IvectMask[CPU_USIU_IRQ_COUNT] =
93{
94  0,                            /* external IRQ 0 */
95  0xFFFFFFFF << 31,             /* internal level 0 */
96  0xFFFFFFFF << 30,             /* external IRQ 1 */
97  0xFFFFFFFF << 29,             /* internal level 1 */
98  0xFFFFFFFF << 28,             /* external IRQ 2 */
99  0xFFFFFFFF << 27,             /* internal level 2 */
100  0xFFFFFFFF << 26,             /* external IRQ 3 */
101  0xFFFFFFFF << 25,             /* internal level 3 */
102  0xFFFFFFFF << 24,             /* external IRQ 4 */
103  0xFFFFFFFF << 23,             /* internal level 4 */
104  0xFFFFFFFF << 22,             /* external IRQ 5 */
105  0xFFFFFFFF << 21,             /* internal level 5 */
106  0xFFFFFFFF << 20,             /* external IRQ 6 */
107  0xFFFFFFFF << 19,             /* internal level 6 */
108  0xFFFFFFFF << 18,             /* external IRQ 7 */
109  0xFFFFFFFF << 17              /* internal level 7 */
110};
111
112
113/*
114 * ------------------------ RTEMS Irq helper functions ----------------
115 */
116
/*
 * Caution : this function assumes the variable "internal_config"
 * is already set and that the tables it contains are still valid
 * and accessible.
 *
 * Currently a placeholder: the static USIU_IvectMask table encodes the
 * fixed priorities chosen in irq_init.c instead of being computed here.
 */
static void compute_USIU_IvectMask_from_prio (void)
{
  /*
   * In theory this is feasible. No time to code it yet. See i386/shared/irq.c
   * for an example based on 8259 controller mask. The actual masks defined
   * correspond to the priorities defined for the USIU in irq_init.c.
   */
}
130
131/*
132 * This function check that the value given for the irq line
133 * is valid.
134 */
135static int isValidInterrupt(int irq)
136{
137  if ( (irq < CPU_MIN_OFFSET) || (irq > CPU_MAX_OFFSET)
138        || (irq == CPU_UIMB_INTERRUPT) )
139    return 0;
140  return 1;
141}
142
143int CPU_irq_enable_at_uimb(const rtems_irq_symbolic_name irqLine)
144{
145  if (!is_uimb_irq(irqLine))
146    return 1;
147  return 0;
148}
149
150int CPU_irq_disable_at_uimb(const rtems_irq_symbolic_name irqLine)
151{
152  if (!is_uimb_irq(irqLine))
153    return 1;
154  return 0;
155}
156
157int CPU_irq_enabled_at_uimb(const rtems_irq_symbolic_name irqLine)
158{
159  if (!is_uimb_irq(irqLine))
160    return 0;
161  return 1;
162}
163
164int CPU_irq_enable_at_usiu(const rtems_irq_symbolic_name irqLine)
165{
166  int usiu_irq_index;
167 
168  if (!is_usiu_irq(irqLine))
169    return 1;
170
171  usiu_irq_index = ((int) (irqLine) - CPU_USIU_IRQ_MIN_OFFSET);
172  ppc_cached_irq_mask |= (1 << (31-usiu_irq_index));
173  usiu.simask = ppc_cached_irq_mask;
174
175  return 0;
176}
177
178int CPU_irq_disable_at_usiu(const rtems_irq_symbolic_name irqLine)
179{
180  int usiu_irq_index;
181
182  if (!is_usiu_irq(irqLine))
183    return 1;
184 
185  usiu_irq_index = ((int) (irqLine) - CPU_USIU_IRQ_MIN_OFFSET);
186  ppc_cached_irq_mask &= ~(1 << (31-usiu_irq_index));
187  usiu.simask = ppc_cached_irq_mask;
188
189  return 0;
190}
191
192int CPU_irq_enabled_at_usiu(const rtems_irq_symbolic_name irqLine)
193{
194  int usiu_irq_index;
195
196  if (!is_usiu_irq(irqLine))
197    return 0;
198
199  usiu_irq_index = ((int) (irqLine) - CPU_USIU_IRQ_MIN_OFFSET);
200  return ppc_cached_irq_mask & (1 << (31-usiu_irq_index));
201}
202
203/*
204 * --------------- RTEMS Single Irq Handler Mngt Routines ----------------
205 */
206
207int CPU_install_rtems_irq_handler       (const rtems_irq_connect_data* irq)
208{
209    unsigned int level;
210 
211    if (!isValidInterrupt(irq->name)) {
212      return 0;
213    }
214    /*
215     * Check if default handler is actually connected. If not issue an error.
216     * You must first get the current handler via CPU_get_current_idt_entry
217     * and then disconnect it using CPU_delete_idt_entry.
218     * RATIONALE : to always have the same transition by forcing the user
219     * to get the previous handler before accepting to disconnect.
220     */
221    if (rtems_hdl_tbl[irq->name].hdl != default_rtems_entry.hdl) {
222      return 0;
223    }
224
225    _CPU_ISR_Disable(level);
226
227    /*
228     * store the data provided by user
229     */
230    rtems_hdl_tbl[irq->name] = *irq;
231   
232    if (is_uimb_irq(irq->name)) {
233      /*
234       * Enable interrupt at UIMB level
235       */
236      CPU_irq_enable_at_uimb (irq->name);
237    }
238   
239    if (is_usiu_irq(irq->name)) {
240      /*
241       * Enable interrupt at USIU level
242       */
243      CPU_irq_enable_at_usiu (irq->name);
244    }
245
246    if (is_proc_irq(irq->name)) {
247      /*
248       * Should Enable exception at processor level but not needed.  Will restore
249       * EE flags at the end of the routine anyway.
250       */
251    }
252    /*
253     * Enable interrupt on device
254     */
255    irq->on(irq);
256   
257    _CPU_ISR_Enable(level);
258
259    return 1;
260}
261
262
263int CPU_get_current_rtems_irq_handler   (rtems_irq_connect_data* irq)
264{
265     if (!isValidInterrupt(irq->name)) {
266       return 0;
267     }
268     *irq = rtems_hdl_tbl[irq->name];
269     return 1;
270}
271
272int CPU_remove_rtems_irq_handler  (const rtems_irq_connect_data* irq)
273{
274    unsigned int level;
275 
276    if (!isValidInterrupt(irq->name)) {
277      return 0;
278    }
279    /*
280     * Check if default handler is actually connected. If not issue an error.
281     * You must first get the current handler via CPU_get_current_idt_entry
282     * and then disconnect it using CPU_delete_idt_entry.
283     * RATIONALE : to always have the same transition by forcing the user
284     * to get the previous handler before accepting to disconnect.
285     */
286    if (rtems_hdl_tbl[irq->name].hdl != irq->hdl) {
287      return 0;
288    }
289    _CPU_ISR_Disable(level);
290
291    /*
292     * Disable interrupt on device
293     */
294    irq->off(irq);
295
296    if (is_uimb_irq(irq->name)) {
297      /*
298       * disable interrupt at UIMB level
299       */
300      CPU_irq_disable_at_uimb (irq->name);
301    }
302    if (is_usiu_irq(irq->name)) {
303      /*
304       * disable interrupt at USIU level
305       */
306      CPU_irq_disable_at_usiu (irq->name);
307    }
308    if (is_proc_irq(irq->name)) {
309      /*
310       * disable exception at processor level
311       */
312    }   
313
314    /*
315     * restore the default irq value
316     */
317    rtems_hdl_tbl[irq->name] = default_rtems_entry;
318
319    _CPU_ISR_Enable(level);
320
321    return 1;
322}
323
/*
 * ---------------- RTEMS Global Irq Handler Mngt Routines ----------------
 */

/*
 * Install a complete interrupt configuration: remember the caller's
 * tables, then walk every vector, enabling lines with a real handler
 * connected and disabling lines still on the default handler.
 * Returns 1 (always succeeds).
 */
int CPU_rtems_irq_mngt_set      (rtems_irq_global_settings* config)
{
    int i;
    unsigned int level;

   /*
    * Store various code accelerators
    */
    internal_config             = config;
    default_rtems_entry         = config->defaultEntry;
    rtems_hdl_tbl               = config->irqHdlTbl;

    _CPU_ISR_Disable(level);

    /*
     * Start with UIMB IRQ
     */
    for (i = CPU_UIMB_IRQ_MIN_OFFSET; i <= CPU_UIMB_IRQ_MAX_OFFSET ; i++) {
      if (rtems_hdl_tbl[i].hdl != default_rtems_entry.hdl) {
        CPU_irq_enable_at_uimb (i);
        rtems_hdl_tbl[i].on(&rtems_hdl_tbl[i]);
      }
      else {
        /* Default handler: quiesce the device, then mask the line. */
        rtems_hdl_tbl[i].off(&rtems_hdl_tbl[i]);
        CPU_irq_disable_at_uimb (i);
      }
    }

    /*
     * Continue with USIU IRQ
     * Set up internal tables used by rtems interrupt prologue
     */
    compute_USIU_IvectMask_from_prio ();

    for (i = CPU_USIU_IRQ_MIN_OFFSET; i <= CPU_USIU_IRQ_MAX_OFFSET ; i++) {
      if (rtems_hdl_tbl[i].hdl != default_rtems_entry.hdl) {
        CPU_irq_enable_at_usiu (i);
        rtems_hdl_tbl[i].on(&rtems_hdl_tbl[i]);
      }
      else {
        rtems_hdl_tbl[i].off(&rtems_hdl_tbl[i]);
        CPU_irq_disable_at_usiu (i);
       }
    }

    /*
     * Enable all UIMB interrupt lines, then enable at USIU.
     */
    imb.uimb.umcr |= UIMB_UMCR_IRQMUX(3);
    CPU_irq_enable_at_usiu (CPU_UIMB_INTERRUPT);

    /*
     * finish with Processor exceptions handled like IRQ
     * (nothing to mask at a controller; only run the on/off hooks)
     */
    for (i = CPU_PROC_IRQ_MIN_OFFSET; i <= CPU_PROC_IRQ_MAX_OFFSET; i++) {
      if (rtems_hdl_tbl[i].hdl != default_rtems_entry.hdl) {
        rtems_hdl_tbl[i].on(&rtems_hdl_tbl[i]);
      }
      else {
        rtems_hdl_tbl[i].off(&rtems_hdl_tbl[i]);
      }
    }
    _CPU_ISR_Enable(level);
    return 1;
}
393
394int CPU_rtems_irq_mngt_get(rtems_irq_global_settings** config)
395{
396    *config = internal_config;
397    return 0;
398}
399
400
401/*
402 * High level IRQ handler called from shared_raw_irq_code_entry
403 */
404void C_dispatch_irq_handler (CPU_Interrupt_frame *frame, unsigned int excNum)
405{
406  register unsigned int irq;
407  register unsigned uimbIntr;                 /* boolean */
408  register unsigned oldMask;                  /* old siu pic masks */
409  register unsigned msr;
410  register unsigned new_msr;
411
412  /*
413   * Handle decrementer interrupt
414   */
415  if (excNum == ASM_DEC_VECTOR) {
416    _CPU_MSR_GET(msr);
417    new_msr = msr | MSR_EE;
418    _CPU_MSR_SET(new_msr);
419   
420    rtems_hdl_tbl[CPU_DECREMENTER].hdl();
421
422    _CPU_MSR_SET(msr);
423    return;
424  }
425
426  /*
427   * Handle external interrupt generated by USIU on PPC core
428   */
429  while ((ppc_cached_irq_mask & usiu.sipend) != 0) {
430    irq = (usiu.sivec >> 26);
431    uimbIntr = (irq == CPU_UIMB_INTERRUPT);
432    /*
433     * Disable the interrupt of the same and lower priority.
434     */
435    oldMask = ppc_cached_irq_mask;
436    ppc_cached_irq_mask = oldMask & USIU_IvectMask[irq];
437    usiu.simask = ppc_cached_irq_mask;
438    /*
439     * Acknowledge current interrupt. This has no effect on internal level
440     * interrupts.
441     */
442    usiu.sipend = (1 << (31 - irq));
443   
444    if (uimbIntr)  {
445      /*
446       * Look at the bits set in the UIMB interrupt-pending register.  The
447       * highest-order set bit indicates the handler we will run.
448       *
449       * Unfortunately, we can't easily mask individual UIMB interrupts
450       * unless they use USIU levels 0 to 6, so we must mask all low-level
451       * (level > 7) UIMB interrupts while we service any interrupt.
452       */
453      int uipend = imb.uimb.uipend << 8;
454     
455      if (uipend == 0) {        /* spurious interrupt?  use last vector */
456        irq = CPU_UIMB_IRQ_MAX_OFFSET; 
457      }
458      else {
459        irq = CPU_UIMB_IRQ_MIN_OFFSET;
460        for ( ; (uipend & 0x8000000) == 0; uipend <<= 1) {
461          irq++;
462        }
463      }
464    }
465    _CPU_MSR_GET(msr);
466    new_msr = msr | MSR_EE;
467    _CPU_MSR_SET(new_msr);
468   
469    rtems_hdl_tbl[irq].hdl();
470
471    _CPU_MSR_SET(msr);
472
473    ppc_cached_irq_mask = oldMask;
474    usiu.simask = ppc_cached_irq_mask;
475  }
476}
477 
void _ThreadProcessSignalsFromIrq (CPU_Exception_frame* ctx)
{
  /*
   * Process pending signals that have not already been
   * processed by _Thread_Dispatch.  This happens quite
   * infrequently: the ISR must have posted an action
   * to the currently running thread.
   */
  if ( _Thread_Do_post_task_switch_extension ||
       _Thread_Executing->do_post_task_switch_extension ) {
    _Thread_Executing->do_post_task_switch_extension = FALSE;
    _API_extensions_Run_postswitch();
  }
  /*
   * Other thread-related events may be processed here in the future,
   * e.g. DEBUG sessions requested from the keyboard.
   */
}
Note: See TracBrowser for help on using the repository browser.