source: rtems/bsps/powerpc/ss555/start/irq.c @ 511dc4b

Last change on this file since 511dc4b was 09dd82a5, checked in by Sebastian Huber <sebastian.huber@…>, on 03/13/18 at 15:43:25

bsp/ss555: Move libcpu content to bsps

This patch is a part of the BSP source reorganization.

Update #3285.

/*
 *  This file contains the implementation of the functions described in irq.h.
 */

/*
 *  MPC5xx port sponsored by Defence Research and Development Canada - Suffield
 *  Copyright (C) 2004, Real-Time Systems Inc. (querbach@realtime.bc.ca)
 *
 *  Derived from libbsp/powerpc/mbx8xx/irq/irq.c:
 *
 *  Copyright (C) 1998, 1999 valette@crf.canon.fr
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#include <rtems.h>
#include <mpc5xx.h>
#include <libcpu/vectors.h>
#include <libcpu/raw_exception.h>
#include <libcpu/irq.h>
#include <bsp/irq.h>

/*
 * Convert an rtems_irq_number constant to an interrupt level
 * suitable for programming into an I/O device's interrupt level field.
 */
int CPU_irq_level_from_symbolic_name(const rtems_irq_number name)
{
  if (CPU_USIU_EXT_IRQ_0 <= name && name <= CPU_USIU_INT_IRQ_7)
    return (name - CPU_USIU_EXT_IRQ_0) / 2;

  if (CPU_UIMB_IRQ_8 <= name && name <= CPU_UIMB_IRQ_31)
    return 8 + (name - CPU_UIMB_IRQ_8);

  return 31;                    /* reasonable default */
}
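
/*
 * Illustrative sketch (not part of the original driver, excluded from the
 * build): sample return values of CPU_irq_level_from_symbolic_name() for
 * symbolic names that already appear elsewhere in this file.
 */
#if 0
static void level_conversion_examples(void)
{
  int lvl_ext0  = CPU_irq_level_from_symbolic_name(CPU_USIU_EXT_IRQ_0);  /* -> 0 */
  int lvl_int7  = CPU_irq_level_from_symbolic_name(CPU_USIU_INT_IRQ_7);  /* -> 7 */
  int lvl_uimb8 = CPU_irq_level_from_symbolic_name(CPU_UIMB_IRQ_8);      /* -> 8 */

  (void) lvl_ext0; (void) lvl_int7; (void) lvl_uimb8;
}
#endif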

/*
 * Default handler connected to each IRQ after BSP initialization.
 */
static rtems_irq_connect_data           default_rtems_entry;

/*
 * Locations used to store the initial tables used for interrupt
 * management.
 */
static rtems_irq_global_settings*       internal_config;
static rtems_irq_connect_data*          rtems_hdl_tbl;

/*
 * Check if a symbolic IRQ name is a USIU IRQ
 */
static inline int is_usiu_irq(const rtems_irq_number irqLine)
{
  return (((int) irqLine <= CPU_USIU_IRQ_MAX_OFFSET) &&
          ((int) irqLine >= CPU_USIU_IRQ_MIN_OFFSET)
         );
}

/*
 * Check if a symbolic IRQ name is a UIMB IRQ
 */
static inline int is_uimb_irq(const rtems_irq_number irqLine)
{
  return (((int) irqLine <= CPU_UIMB_IRQ_MAX_OFFSET) &&
          ((int) irqLine >= CPU_UIMB_IRQ_MIN_OFFSET)
         );
}

/*
 * Check if a symbolic IRQ name is a processor IRQ
 */
static inline int is_proc_irq(const rtems_irq_number irqLine)
{
  return (((int) irqLine <= CPU_PROC_IRQ_MAX_OFFSET) &&
          ((int) irqLine >= CPU_PROC_IRQ_MIN_OFFSET)
         );
}


/*
 * Masks used to mask off the interrupts. For example, for ILVL2, the
 * mask is used to mask off interrupts ILVL2, IRQ3, ILVL3, ... IRQ7
 * and ILVL7, i.e. the interrupt itself and everything of lower priority.
 */
const static unsigned int USIU_IvectMask[CPU_USIU_IRQ_COUNT] =
{
  0,                            /* external IRQ 0 */
  0xFFFFFFFF << 31,             /* internal level 0 */
  0xFFFFFFFF << 30,             /* external IRQ 1 */
  0xFFFFFFFF << 29,             /* internal level 1 */
  0xFFFFFFFF << 28,             /* external IRQ 2 */
  0xFFFFFFFF << 27,             /* internal level 2 */
  0xFFFFFFFF << 26,             /* external IRQ 3 */
  0xFFFFFFFF << 25,             /* internal level 3 */
  0xFFFFFFFF << 24,             /* external IRQ 4 */
  0xFFFFFFFF << 23,             /* internal level 4 */
  0xFFFFFFFF << 22,             /* external IRQ 5 */
  0xFFFFFFFF << 21,             /* internal level 5 */
  0xFFFFFFFF << 20,             /* external IRQ 6 */
  0xFFFFFFFF << 19,             /* internal level 6 */
  0xFFFFFFFF << 18,             /* external IRQ 7 */
  0xFFFFFFFF << 17              /* internal level 7 */
};


/*
 * ------------------------ RTEMS Irq helper functions ----------------
 */

/*
 * Caution: this function assumes the variable "internal_config"
 * is already set and that the tables it contains are still valid
 * and accessible.
 */
static void compute_USIU_IvectMask_from_prio (void)
{
  /*
   * In theory this is feasible. No time to code it yet. See i386/shared/irq.c
   * for an example based on the 8259 controller mask. The actual masks defined
   * above correspond to the priorities defined for the USIU in irq_init.c.
   */
}
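
/*
 * A minimal sketch (not part of the original driver, excluded from the
 * build) of what such a computation could look like, assuming the fixed
 * USIU priority order implied by the constant table above: entry 0 masks
 * everything, and entry i keeps only the i higher-priority entries
 * (SIMASK bits 31 down to 32 - i) enabled.
 */
#if 0
static unsigned int computed_IvectMask[CPU_USIU_IRQ_COUNT];

static void compute_USIU_IvectMask_sketch(void)
{
  int i;

  computed_IvectMask[0] = 0;
  for (i = 1; i < CPU_USIU_IRQ_COUNT; i++)
    computed_IvectMask[i] = 0xFFFFFFFF << (32 - i);  /* matches USIU_IvectMask[i] */
}
#endif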

/*
 * This function checks that the value given for the irq line
 * is valid.
 */
static int isValidInterrupt(int irq)
{
  if ( (irq < CPU_MIN_OFFSET) || (irq > CPU_MAX_OFFSET)
        || (irq == CPU_UIMB_INTERRUPT) )
    return 0;
  return 1;
}

/*
 * No per-line UIMB mask is programmed here; all UIMB interrupt lines are
 * enabled together in CPU_rtems_irq_mngt_set(), so these two routines only
 * validate their argument.
 */
static int CPU_irq_enable_at_uimb(const rtems_irq_number irqLine)
{
  if (!is_uimb_irq(irqLine))
    return 1;
  return 0;
}

static int CPU_irq_disable_at_uimb(const rtems_irq_number irqLine)
{
  if (!is_uimb_irq(irqLine))
    return 1;
  return 0;
}

static int CPU_irq_enable_at_usiu(const rtems_irq_number irqLine)
{
  int usiu_irq_index;

  if (!is_usiu_irq(irqLine))
    return 1;

  usiu_irq_index = ((int) (irqLine) - CPU_USIU_IRQ_MIN_OFFSET);
  ppc_cached_irq_mask |= (1 << (31-usiu_irq_index));
  usiu.simask = ppc_cached_irq_mask;

  return 0;
}

static int CPU_irq_disable_at_usiu(const rtems_irq_number irqLine)
{
  int usiu_irq_index;

  if (!is_usiu_irq(irqLine))
    return 1;

  usiu_irq_index = ((int) (irqLine) - CPU_USIU_IRQ_MIN_OFFSET);
  ppc_cached_irq_mask &= ~(1 << (31-usiu_irq_index));
  usiu.simask = ppc_cached_irq_mask;

  return 0;
}
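
/*
 * Worked example (illustrative only, excluded from the build), assuming the
 * 16 USIU entries of the USIU_IvectMask table above: entry 0 maps to SIMASK
 * bit 31 and entry 15 maps to SIMASK bit 16, per the 1 << (31 - index)
 * computation used in the two routines above.
 */
#if 0
static const unsigned usiu_simask_bit_entry0  = 1u << (31 - 0);   /* 0x80000000 */
static const unsigned usiu_simask_bit_entry15 = 1u << (31 - 15);  /* 0x00010000 */
#endif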

/*
 * --------------- RTEMS Single Irq Handler Mngt Routines ----------------
 */

int CPU_install_rtems_irq_handler       (const rtems_irq_connect_data* irq)
{
    rtems_interrupt_level       level;

    if (!isValidInterrupt(irq->name)) {
      return 0;
    }
    /*
     * Check that the default handler is actually connected.  If not, return
     * an error.  You must first get the current handler via
     * CPU_get_current_rtems_irq_handler and then disconnect it using
     * CPU_remove_rtems_irq_handler.
     * RATIONALE: to always have the same transition by forcing the user
     * to get the previous handler before accepting to disconnect.
     */
    if (rtems_hdl_tbl[irq->name].hdl != default_rtems_entry.hdl) {
      return 0;
    }

    rtems_interrupt_disable(level);

    /*
     * Store the data provided by the user.
     */
    rtems_hdl_tbl[irq->name] = *irq;

    if (is_uimb_irq(irq->name)) {
      /*
       * Enable interrupt at UIMB level
       */
      CPU_irq_enable_at_uimb (irq->name);
    }

    if (is_usiu_irq(irq->name)) {
      /*
       * Enable interrupt at USIU level
       */
      CPU_irq_enable_at_usiu (irq->name);
    }

    if (is_proc_irq(irq->name)) {
      /*
       * Should enable the exception at processor level, but this is not
       * needed: the EE flag is restored at the end of the routine anyway.
       */
    }
    /*
     * Enable interrupt on device
     */
    if (irq->on)
      irq->on(irq);

    rtems_interrupt_enable(level);

    return 1;
}
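
/*
 * Illustrative sketch (not part of the original driver, excluded from the
 * build) of how a hypothetical driver might connect a handler with
 * CPU_install_rtems_irq_handler().  Only the rtems_irq_connect_data fields
 * referenced in this file (name, hdl, handle, on, off) are shown; the exact
 * member types, including the handler prototype, and any additional fields
 * come from <libcpu/irq.h>.
 */
#if 0
static void my_device_isr(void *handle)                      /* hypothetical ISR */
{
  (void) handle;   /* service the device, using `handle' as driver context */
}

static void my_device_on(const rtems_irq_connect_data *irq)  /* unmask at device */
{
  (void) irq;
}

static void my_device_off(const rtems_irq_connect_data *irq) /* mask at device */
{
  (void) irq;
}

static void my_device_connect(void)
{
  rtems_irq_connect_data cd = { 0 };

  cd.name   = CPU_USIU_EXT_IRQ_0;   /* any name accepted by isValidInterrupt() */
  cd.hdl    = my_device_isr;
  cd.handle = NULL;                 /* passed back to my_device_isr()          */
  cd.on     = my_device_on;
  cd.off    = my_device_off;

  if (!CPU_install_rtems_irq_handler(&cd)) {
    /* the entry was not free, or the name was invalid */
  }
}
#endif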


int CPU_get_current_rtems_irq_handler   (rtems_irq_connect_data* irq)
{
     if (!isValidInterrupt(irq->name)) {
       return 0;
     }
     *irq = rtems_hdl_tbl[irq->name];
     return 1;
}

int CPU_remove_rtems_irq_handler  (const rtems_irq_connect_data* irq)
{
    rtems_interrupt_level       level;

    if (!isValidInterrupt(irq->name)) {
      return 0;
    }
    /*
     * Check that the handler being removed is the one actually connected.
     * If not, return an error.  You must first get the current handler via
     * CPU_get_current_rtems_irq_handler and then disconnect it using
     * CPU_remove_rtems_irq_handler.
     * RATIONALE: to always have the same transition by forcing the user
     * to get the previous handler before accepting to disconnect.
     */
    if (rtems_hdl_tbl[irq->name].hdl != irq->hdl) {
      return 0;
    }
    rtems_interrupt_disable(level);

    /*
     * Disable interrupt on device
     */
    if (irq->off)
      irq->off(irq);

    if (is_uimb_irq(irq->name)) {
      /*
       * Disable interrupt at UIMB level
       */
      CPU_irq_disable_at_uimb (irq->name);
    }
    if (is_usiu_irq(irq->name)) {
      /*
       * Disable interrupt at USIU level
       */
      CPU_irq_disable_at_usiu (irq->name);
    }
    if (is_proc_irq(irq->name)) {
      /*
       * Disable exception at processor level
       */
    }

    /*
     * Restore the default irq value.
     */
    rtems_hdl_tbl[irq->name] = default_rtems_entry;

    rtems_interrupt_enable(level);

    return 1;
}
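
/*
 * Illustrative sketch (not part of the original driver, excluded from the
 * build) of the get-then-remove protocol described in the comments above:
 * fetch the currently connected entry first, then pass that same entry to
 * CPU_remove_rtems_irq_handler() so the hdl fields match.
 */
#if 0
static void my_device_disconnect(void)
{
  rtems_irq_connect_data current;

  current.name = CPU_USIU_EXT_IRQ_0;              /* the line to disconnect */
  if (CPU_get_current_rtems_irq_handler(&current))
    (void) CPU_remove_rtems_irq_handler(&current);
}
#endif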

/*
 * ---------------- RTEMS Global Irq Handler Mngt Routines ----------------
 */

int CPU_rtems_irq_mngt_set      (rtems_irq_global_settings* config)
{
    int                    i;
    rtems_interrupt_level  level;

    /*
     * Store various code accelerators.
     */
    internal_config             = config;
    default_rtems_entry         = config->defaultEntry;
    rtems_hdl_tbl               = config->irqHdlTbl;

    rtems_interrupt_disable(level);

    /*
     * Start with the UIMB IRQs.
     */
    for (i = CPU_UIMB_IRQ_MIN_OFFSET; i <= CPU_UIMB_IRQ_MAX_OFFSET ; i++) {
      if (rtems_hdl_tbl[i].hdl != default_rtems_entry.hdl) {
        CPU_irq_enable_at_uimb (i);
        if (rtems_hdl_tbl[i].on)
          rtems_hdl_tbl[i].on(&rtems_hdl_tbl[i]);
      }
      else {
        if (rtems_hdl_tbl[i].off)
          rtems_hdl_tbl[i].off(&rtems_hdl_tbl[i]);
        CPU_irq_disable_at_uimb (i);
      }
    }

    /*
     * Continue with the USIU IRQs.
     * Set up internal tables used by the RTEMS interrupt prologue.
     */
    compute_USIU_IvectMask_from_prio ();

    for (i = CPU_USIU_IRQ_MIN_OFFSET; i <= CPU_USIU_IRQ_MAX_OFFSET ; i++) {
      if (rtems_hdl_tbl[i].hdl != default_rtems_entry.hdl) {
        CPU_irq_enable_at_usiu (i);
        if (rtems_hdl_tbl[i].on)
          rtems_hdl_tbl[i].on(&rtems_hdl_tbl[i]);
      }
      else {
        if (rtems_hdl_tbl[i].off)
          rtems_hdl_tbl[i].off(&rtems_hdl_tbl[i]);
        CPU_irq_disable_at_usiu (i);
      }
    }

    /*
     * Enable all UIMB interrupt lines, then enable at USIU.
     */
    imb.uimb.umcr |= UIMB_UMCR_IRQMUX(3);
    CPU_irq_enable_at_usiu (CPU_UIMB_INTERRUPT);

    /*
     * Finish with the processor exceptions, which are handled like IRQs.
     */
    for (i = CPU_PROC_IRQ_MIN_OFFSET; i <= CPU_PROC_IRQ_MAX_OFFSET; i++) {
      if (rtems_hdl_tbl[i].hdl != default_rtems_entry.hdl) {
        if (rtems_hdl_tbl[i].on)
          rtems_hdl_tbl[i].on(&rtems_hdl_tbl[i]);
      }
      else {
        if (rtems_hdl_tbl[i].off)
          rtems_hdl_tbl[i].off(&rtems_hdl_tbl[i]);
      }
    }
    rtems_interrupt_enable(level);
    return 1;
}
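
/*
 * Illustrative sketch (not part of the original driver, excluded from the
 * build) of how a BSP's irq_init.c might call CPU_rtems_irq_mngt_set().
 * Only the two rtems_irq_global_settings fields read by this file
 * (defaultEntry and irqHdlTbl) are filled in; the remaining fields of the
 * structure, and the default handler itself, are defined elsewhere
 * (<libcpu/irq.h> and irq_init.c) and are only hinted at here.
 */
#if 0
static rtems_irq_connect_data     irq_tbl[CPU_MAX_OFFSET + 1];
static rtems_irq_global_settings  initial_config;

static void my_irq_init_sketch(rtems_irq_hdl default_handler)
{
  int i;

  for (i = CPU_MIN_OFFSET; i <= CPU_MAX_OFFSET; i++) {
    irq_tbl[i].name = i;
    irq_tbl[i].hdl  = default_handler;   /* every entry starts out "unconnected" */
  }

  initial_config.defaultEntry = irq_tbl[CPU_MIN_OFFSET];
  initial_config.irqHdlTbl    = irq_tbl;
  /* ... fill in the remaining rtems_irq_global_settings fields here ... */

  (void) CPU_rtems_irq_mngt_set(&initial_config);
}
#endif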

int CPU_rtems_irq_mngt_get(rtems_irq_global_settings** config)
{
    *config = internal_config;
    return 0;
}


/*
 * High level IRQ handler called from shared_raw_irq_code_entry
 */
void C_dispatch_irq_handler (MPC5XX_Interrupt_frame *frame, unsigned int excNum)
{
  register unsigned int irq;
  register unsigned uimbIntr;                 /* boolean */
  register unsigned oldMask;                  /* saved USIU interrupt mask */
  register unsigned msr;
  register unsigned new_msr;

  /*
   * Handle the decrementer interrupt.
   */
  if (excNum == ASM_DEC_VECTOR) {
    _CPU_MSR_GET(msr);
    new_msr = msr | MSR_EE;
    _CPU_MSR_SET(new_msr);

    rtems_hdl_tbl[CPU_DECREMENTER].hdl(rtems_hdl_tbl[CPU_DECREMENTER].handle);

    _CPU_MSR_SET(msr);
    return;
  }

  /*
   * Handle external interrupts generated by the USIU on the PPC core.
   */
  while ((ppc_cached_irq_mask & usiu.sipend) != 0) {
    irq = (usiu.sivec >> 26);
    uimbIntr = (irq == CPU_UIMB_INTERRUPT);
    /*
     * Disable the interrupts of the same and lower priority.
     */
    oldMask = ppc_cached_irq_mask;
    ppc_cached_irq_mask = oldMask & USIU_IvectMask[irq];
    usiu.simask = ppc_cached_irq_mask;
    /*
     * Acknowledge the current interrupt. This has no effect on internal
     * level interrupts.
     */
    usiu.sipend = (1 << (31 - irq));

    if (uimbIntr)  {
      /*
       * Look at the bits set in the UIMB interrupt-pending register.  The
       * highest-order set bit indicates the handler we will run.
       *
       * Unfortunately, we can't easily mask individual UIMB interrupts
       * unless they use USIU levels 0 to 6, so we must mask all low-level
       * (level > 7) UIMB interrupts while we service any interrupt.
       */
      int uipend = imb.uimb.uipend << 8;

      if (uipend == 0) {        /* spurious interrupt?  use last vector */
        irq = CPU_UIMB_IRQ_MAX_OFFSET;
      }
      else {
        irq = CPU_UIMB_IRQ_MIN_OFFSET;
        for ( ; (uipend & 0x8000000) == 0; uipend <<= 1) {
          irq++;
        }
      }
    }
    _CPU_MSR_GET(msr);
    new_msr = msr | MSR_EE;
    _CPU_MSR_SET(new_msr);

    rtems_hdl_tbl[irq].hdl(rtems_hdl_tbl[irq].handle);

    _CPU_MSR_SET(msr);

    ppc_cached_irq_mask = oldMask;
    usiu.simask = ppc_cached_irq_mask;
  }
}
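
/*
 * Worked example (illustrative only, excluded from the build) of the USIU
 * masking step in the dispatcher above.  The SIVEC encoding itself is
 * hardware-defined (see the MPC5xx manual); the value used here is just an
 * assumed sample reading.
 */
#if 0
static void usiu_dispatch_example(void)
{
  unsigned sivec_sample = 0x14000000;            /* assumed SIVEC reading         */
  unsigned irq          = sivec_sample >> 26;    /* == 5, "internal level 2"      */
  unsigned keep         = USIU_IvectMask[irq];   /* == 0xFFFFFFFF << 27           */
  unsigned ack          = 1u << (31 - irq);      /* == 0x04000000, write to sipend */

  /*
   * `keep' leaves only entries 0..4 (higher priority) enabled while the
   * level-2 handler runs; the previous mask is restored afterwards.
   */
  (void) keep; (void) ack;
}
#endif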