source: rtems/c/src/lib/libbsp/powerpc/mbx8xx/irq/irq.c @ bc98089

Last change on this file since bc98089 was 39d08d55, checked in by Ralf Corsepius <ralf.corsepius@…>, on 09/06/08 at 17:36:55

Convert to "bool".

/*
 *
 *  This file contains the implementation of the functions described in irq.h
 *
 *  Copyright (C) 1998, 1999 valette@crf.canon.fr
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 *
 *  $Id$
 */

#include <rtems/system.h>
#include <bsp.h>
#include <bsp/irq.h>
#include <rtems/score/thread.h>
#include <rtems/score/apiext.h>
#include <libcpu/raw_exception.h>
#include <bsp/vectors.h>
#include <bsp/8xx_immap.h>
#include <bsp/mbx.h>
#include <bsp/commproc.h>

/*
 * default handler connected on each irq after bsp initialization
 */
static rtems_irq_connect_data   default_rtems_entry;

/*
 * location used to store initial tables used for interrupt
 * management.
 */
static rtems_irq_global_settings*       internal_config;
static rtems_irq_connect_data*          rtems_hdl_tbl;

/*
 * Check if symbolic IRQ name is an SIU IRQ
 */
static inline int is_siu_irq(const rtems_irq_number irqLine)
{
  return (((int) irqLine <= BSP_SIU_IRQ_MAX_OFFSET) &
          ((int) irqLine >= BSP_SIU_IRQ_LOWEST_OFFSET)
         );
}

/*
 * Check if symbolic IRQ name is a CPM IRQ
 */
static inline int is_cpm_irq(const rtems_irq_number irqLine)
{
  return (((int) irqLine <= BSP_CPM_IRQ_MAX_OFFSET) &
          ((int) irqLine >= BSP_CPM_IRQ_LOWEST_OFFSET)
         );
}

/*
 * Check if symbolic IRQ name is a Processor IRQ
 */
static inline int is_processor_irq(const rtems_irq_number irqLine)
{
  return (((int) irqLine <= BSP_PROCESSOR_IRQ_MAX_OFFSET) &
          ((int) irqLine >= BSP_PROCESSOR_IRQ_LOWEST_OFFSET)
         );
}

/*
 * Masks used to mask off interrupts. For example, the ILVL2 mask is used
 * to mask off interrupts ILVL2, IRQ3, ILVL3, ... IRQ7 and ILVL7, i.e. the
 * sources of the same and lower priority.
 */
const static unsigned int SIU_IvectMask[BSP_SIU_IRQ_NUMBER] =
{
     /* IRQ0      ILVL0       IRQ1        ILVL1  */
     0x00000000, 0x80000000, 0xC0000000, 0xE0000000,

     /* IRQ2      ILVL2       IRQ3        ILVL3  */
     0xF0000000, 0xF8000000, 0xFC000000, 0xFE000000,

     /* IRQ4      ILVL4       IRQ5        ILVL5  */
     0xFF000000, 0xFF800000, 0xFFC00000, 0xFFE00000,

     /* IRQ6      ILVL6       IRQ7        ILVL7  */
     0xFFF00000, 0xFFF80000, 0xFFFC0000, 0xFFFE0000
};
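
/*
 * Worked illustration (informational only): when ILVL2 (table index 5) is
 * dispatched, the dispatcher below narrows the SIU mask with

 *     ppc_cached_irq_mask = oldMask & SIU_IvectMask[5];    -- 0xF8000000

 * which leaves enabled, at most, IRQ0, ILVL0, IRQ1, ILVL1 and IRQ2, the
 * strictly higher priority sources, while the ILVL2 handler runs.
 */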

/*
 * ------------------------ RTEMS Irq helper functions ----------------
 */

/*
 * Caution: this function assumes the variable "internal_config"
 * is already set and that the tables it contains are still valid
 * and accessible.
 */
static void compute_SIU_IvectMask_from_prio (void)
{
  /*
   * In theory this is feasible. No time to code it yet. See i386/shared/irq.c
   * for an example based on 8259 controller mask. The actual masks defined
   * correspond to the priorities defined for the SIU in irq_init.c.
   */
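  /*
   * Hedged sketch (disabled, illustrative only): one way such a computation
   * could look.  It assumes the "irqPrioTbl" member of the legacy
   * rtems_irq_global_settings API and simply keeps enabled, for each SIU
   * source, only the sources of strictly higher priority.
   */
#if 0
  unsigned int i, j;
  unsigned int mask[BSP_SIU_IRQ_NUMBER];

  for (i = 0; i < BSP_SIU_IRQ_NUMBER; i++) {
    mask[i] = 0;
    for (j = 0; j < BSP_SIU_IRQ_NUMBER; j++) {
      /* keep bit (31 - j) set only for sources with higher priority than i */
      if (internal_config->irqPrioTbl[BSP_SIU_IRQ_LOWEST_OFFSET + j] >
          internal_config->irqPrioTbl[BSP_SIU_IRQ_LOWEST_OFFSET + i])
        mask[i] |= (1U << (31 - j));
    }
  }
  /* mask[] would then replace the constant SIU_IvectMask[] table above */
#endif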
}

/*
 * This function checks that the value given for the irq line
 * is valid.
 */

static int isValidInterrupt(int irq)
{
  if ( (irq < BSP_LOWEST_OFFSET) || (irq > BSP_MAX_OFFSET) || (irq == BSP_CPM_INTERRUPT) )
    return 0;
  return 1;
}

int BSP_irq_enable_at_cpm(const rtems_irq_number irqLine)
{
  int cpm_irq_index;

  if (!is_cpm_irq(irqLine))
    return 1;

  cpm_irq_index = ((int) (irqLine) - BSP_CPM_IRQ_LOWEST_OFFSET);
  ((volatile immap_t *)IMAP_ADDR)->im_cpic.cpic_cimr |= (1 << cpm_irq_index);

  return 0;
}

int BSP_irq_disable_at_cpm(const rtems_irq_number irqLine)
{
  int cpm_irq_index;

  if (!is_cpm_irq(irqLine))
    return 1;

  cpm_irq_index = ((int) (irqLine) - BSP_CPM_IRQ_LOWEST_OFFSET);
  ((volatile immap_t *)IMAP_ADDR)->im_cpic.cpic_cimr &= ~(1 << cpm_irq_index);

  return 0;
}

int BSP_irq_enabled_at_cpm(const rtems_irq_number irqLine)
{
  int cpm_irq_index;

  if (!is_cpm_irq(irqLine))
    return 0;

  cpm_irq_index = ((int) (irqLine) - BSP_CPM_IRQ_LOWEST_OFFSET);
  return (((volatile immap_t *)IMAP_ADDR)->im_cpic.cpic_cimr & (1 << cpm_irq_index));
}

int BSP_irq_enable_at_siu(const rtems_irq_number irqLine)
{
  int siu_irq_index;

  if (!is_siu_irq(irqLine))
    return 1;

  siu_irq_index = ((int) (irqLine) - BSP_SIU_IRQ_LOWEST_OFFSET);
  ppc_cached_irq_mask |= (1 << (31-siu_irq_index));
  ((volatile immap_t *)IMAP_ADDR)->im_siu_conf.sc_simask = ppc_cached_irq_mask;

  return 0;
}

int BSP_irq_disable_at_siu(const rtems_irq_number irqLine)
{
  int siu_irq_index;

  if (!is_siu_irq(irqLine))
    return 1;

  siu_irq_index = ((int) (irqLine) - BSP_SIU_IRQ_LOWEST_OFFSET);
  ppc_cached_irq_mask &= ~(1 << (31-siu_irq_index));
  ((volatile immap_t *)IMAP_ADDR)->im_siu_conf.sc_simask = ppc_cached_irq_mask;

  return 0;
}

int BSP_irq_enabled_at_siu (const rtems_irq_number irqLine)
{
  int siu_irq_index;

  if (!is_siu_irq(irqLine))
    return 0;

  siu_irq_index = ((int) (irqLine) - BSP_SIU_IRQ_LOWEST_OFFSET);
  return ppc_cached_irq_mask & (1 << (31-siu_irq_index));
}
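
/*
 * Note (added for clarity): the CPM and SIU routines above use different bit
 * orderings.  CIMR is written with (1 << cpm_irq_index), while SIMASK uses
 * (1 << (31 - siu_irq_index)), so SIU index 0 occupies the most significant
 * SIMASK bit.  The SIU_IvectMask[] table above follows the SIMASK convention.
 */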

/*
 * ------------------------ RTEMS Single Irq Handler Mngt Routines ----------------
 */

int BSP_install_rtems_irq_handler  (const rtems_irq_connect_data* irq)
{
    rtems_interrupt_level       level;

    if (!isValidInterrupt(irq->name)) {
      return 0;
    }
    /*
     * Check that the default handler is still connected. If not, issue an
     * error: the caller must first get the current handler via
     * BSP_get_current_rtems_irq_handler and then disconnect it using
     * BSP_remove_rtems_irq_handler.
     * RATIONALE: always go through the same transition by forcing the user
     * to fetch the previous handler before accepting to disconnect.
     */
    if (rtems_hdl_tbl[irq->name].hdl != default_rtems_entry.hdl) {
      return 0;
    }

    rtems_interrupt_disable(level);

    /*
     * store the data provided by user
     */
    rtems_hdl_tbl[irq->name] = *irq;

    if (is_cpm_irq(irq->name)) {
      /*
       * Enable interrupt at PIC level
       */
      BSP_irq_enable_at_cpm (irq->name);
    }

    if (is_siu_irq(irq->name)) {
      /*
       * Enable interrupt at SIU level
       */
      BSP_irq_enable_at_siu (irq->name);
    }

    if (is_processor_irq(irq->name)) {
      /*
       * We should enable the exception at the processor level, but it is not
       * needed: the EE flag is restored at the end of this routine anyway.
       */
    }
    /*
     * Enable interrupt on device
     */
    if (irq->on)
      irq->on(irq);

    rtems_interrupt_enable(level);

    return 1;
}
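
/*
 * Hedged usage sketch (disabled, illustrative only): how a driver might
 * connect a handler through the routine above.  "example_isr",
 * "example_irq_on"/"example_irq_off"/"example_irq_is_on" and the choice of
 * BSP_CPM_IRQ_LOWEST_OFFSET as the vector are assumptions for illustration,
 * not part of this BSP; the field names follow the legacy
 * rtems_irq_connect_data API used throughout this file.
 */
#if 0
static void example_isr (void *handle)
{
  /* service the device referenced by "handle" here */
}

static void example_irq_on  (const rtems_irq_connect_data* irq) { /* unmask at the device */ }
static void example_irq_off (const rtems_irq_connect_data* irq) { /* mask at the device */ }
static int  example_irq_is_on (const rtems_irq_connect_data* irq) { return 1; }

static void example_connect (void)
{
  rtems_irq_connect_data cd;

  cd.name   = BSP_CPM_IRQ_LOWEST_OFFSET;   /* symbolic IRQ name */
  cd.hdl    = example_isr;
  cd.handle = NULL;
  cd.on     = example_irq_on;
  cd.off    = example_irq_off;
  cd.isOn   = example_irq_is_on;

  if (!BSP_install_rtems_irq_handler (&cd)) {
    /* invalid vector, or a handler other than the default one is already
       connected on this line */
  }
}
#endif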

int BSP_get_current_rtems_irq_handler (rtems_irq_connect_data* irq)
{
    if (!isValidInterrupt(irq->name)) {
      return 0;
    }
    *irq = rtems_hdl_tbl[irq->name];
    return 1;
}

int BSP_remove_rtems_irq_handler  (const rtems_irq_connect_data* irq)
{
    rtems_interrupt_level       level;

    if (!isValidInterrupt(irq->name)) {
      return 0;
    }
    /*
     * Check that the handler to remove is actually the one connected. If not,
     * issue an error: the caller must first get the current handler via
     * BSP_get_current_rtems_irq_handler and then disconnect it.
     * RATIONALE: always go through the same transition by forcing the user
     * to fetch the previous handler before accepting to disconnect.
     */
    if (rtems_hdl_tbl[irq->name].hdl != irq->hdl) {
      return 0;
    }
    rtems_interrupt_disable(level);

    if (is_cpm_irq(irq->name)) {
      /*
       * disable interrupt at PIC level
       */
      BSP_irq_disable_at_cpm (irq->name);
    }
    if (is_siu_irq(irq->name)) {
      /*
       * disable interrupt at SIU level
       */
      BSP_irq_disable_at_siu (irq->name);
    }
    if (is_processor_irq(irq->name)) {
      /*
       * disable exception at processor level
       */
    }

    /*
     * Disable interrupt on device
     */
    if (irq->off)
      irq->off(irq);

    /*
     * restore the default irq value
     */
    rtems_hdl_tbl[irq->name] = default_rtems_entry;

    rtems_interrupt_enable(level);

    return 1;
}
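
/*
 * Hedged usage sketch (disabled, illustrative only): disconnecting a handler
 * with the get-then-remove protocol enforced above.  "example_disconnect"
 * and the vector used are illustrative assumptions.
 */
#if 0
static void example_disconnect (void)
{
  rtems_irq_connect_data current;

  current.name = BSP_CPM_IRQ_LOWEST_OFFSET;
  if (BSP_get_current_rtems_irq_handler (&current)) {
    /* "current" now holds the connected entry, so the hdl comparison in
       BSP_remove_rtems_irq_handler is guaranteed to match */
    BSP_remove_rtems_irq_handler (&current);
  }
}
#endif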

/*
 * ------------------------ RTEMS Global Irq Handler Mngt Routines ----------------
 */

int BSP_rtems_irq_mngt_set(rtems_irq_global_settings* config)
{
    int                    i;
    rtems_interrupt_level  level;

    /*
     * Store various code accelerators
     */
    internal_config             = config;
    default_rtems_entry         = config->defaultEntry;
    rtems_hdl_tbl               = config->irqHdlTbl;

    rtems_interrupt_disable(level);
    /*
     * start with CPM IRQ
     */
    for (i=BSP_CPM_IRQ_LOWEST_OFFSET; i < BSP_CPM_IRQ_LOWEST_OFFSET + BSP_CPM_IRQ_NUMBER ; i++) {
      if (rtems_hdl_tbl[i].hdl != default_rtems_entry.hdl) {
        BSP_irq_enable_at_cpm (i);
        if (rtems_hdl_tbl[i].on)
          rtems_hdl_tbl[i].on(&rtems_hdl_tbl[i]);
      }
      else {
        if (rtems_hdl_tbl[i].off)
          rtems_hdl_tbl[i].off(&rtems_hdl_tbl[i]);
        BSP_irq_disable_at_cpm (i);
      }
    }

    /*
     * continue with SIU IRQ
     */
    /*
     * set up internal tables used by rtems interrupt prologue
     */
    compute_SIU_IvectMask_from_prio ();

    for (i=BSP_SIU_IRQ_LOWEST_OFFSET; i < BSP_SIU_IRQ_LOWEST_OFFSET + BSP_SIU_IRQ_NUMBER ; i++) {
      if (rtems_hdl_tbl[i].hdl != default_rtems_entry.hdl) {
        BSP_irq_enable_at_siu (i);
        if (rtems_hdl_tbl[i].on)
          rtems_hdl_tbl[i].on(&rtems_hdl_tbl[i]);
      }
      else {
        if (rtems_hdl_tbl[i].off)
          rtems_hdl_tbl[i].off(&rtems_hdl_tbl[i]);
        BSP_irq_disable_at_siu (i);
      }
    }
    /*
     * Must enable the CPM interrupt at the SIU. The CPM interrupt level on the
     * SIU has already been set up in BSP_CPM_irq_init.
     */
    ((volatile immap_t *)IMAP_ADDR)->im_cpic.cpic_cicr |= CICR_IEN;
    BSP_irq_enable_at_siu (BSP_CPM_INTERRUPT);
    /*
     * finish with processor exceptions handled like IRQs
     */
    for (i=BSP_PROCESSOR_IRQ_LOWEST_OFFSET; i < BSP_PROCESSOR_IRQ_LOWEST_OFFSET + BSP_PROCESSOR_IRQ_NUMBER; i++) {
      if (rtems_hdl_tbl[i].hdl != default_rtems_entry.hdl) {
        if (rtems_hdl_tbl[i].on)
          rtems_hdl_tbl[i].on(&rtems_hdl_tbl[i]);
      }
      else {
        if (rtems_hdl_tbl[i].off)
          rtems_hdl_tbl[i].off(&rtems_hdl_tbl[i]);
      }
    }
    rtems_interrupt_enable(level);
    return 1;
}

int BSP_rtems_irq_mngt_get(rtems_irq_global_settings** config)
{
    *config = internal_config;
    return 0;
}
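
/*
 * Hedged initialization sketch (disabled, illustrative only): the kind of
 * rtems_irq_global_settings a BSP initialization routine (normally
 * irq_init.c) could hand to BSP_rtems_irq_mngt_set above.  Only defaultEntry
 * and irqHdlTbl are referenced by this file; the other member names (irqNb,
 * irqBase, irqPrioTbl), the rtems_irq_prio type and the BSP_IRQ_NUMBER size
 * are assumptions taken from the legacy IRQ API.
 */
#if 0
static rtems_irq_connect_data    example_default_entry;
static rtems_irq_connect_data    example_hdl_tbl[BSP_IRQ_NUMBER];
static rtems_irq_prio            example_prio_tbl[BSP_IRQ_NUMBER];
static rtems_irq_global_settings example_settings;

static void example_irq_init (void)
{
  example_settings.irqNb        = BSP_IRQ_NUMBER;
  example_settings.defaultEntry = example_default_entry;  /* "do nothing" entry */
  example_settings.irqHdlTbl    = example_hdl_tbl;
  example_settings.irqBase      = BSP_LOWEST_OFFSET;
  example_settings.irqPrioTbl   = example_prio_tbl;

  if (!BSP_rtems_irq_mngt_set (&example_settings)) {
    /* interrupt management initialization failed */
  }
}
#endif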

#ifdef DISPATCH_HANDLER_STAT
volatile unsigned int maxLoop = 0;
#endif

/*
 * High level IRQ handler called from shared_raw_irq_code_entry
 */
int C_dispatch_irq_handler (CPU_Interrupt_frame *frame, unsigned int excNum)
{
  register unsigned int irq;
  register unsigned cpmIntr;                  /* boolean */
  register unsigned oldMask;                  /* old siu pic masks */
  register unsigned msr;
  register unsigned new_msr;
#ifdef DISPATCH_HANDLER_STAT
  unsigned loopCounter;
#endif
  /*
   * Handle decrementer interrupt
   */
  if (excNum == ASM_DEC_VECTOR) {
    _CPU_MSR_GET(msr);
    new_msr = msr | MSR_EE;
    _CPU_MSR_SET(new_msr);

    rtems_hdl_tbl[BSP_DECREMENTER].hdl(rtems_hdl_tbl[BSP_DECREMENTER].handle);

    _CPU_MSR_SET(msr);
    return 0;
  }
  /*
   * Handle external interrupt generated by SIU on PPC core
   */
#ifdef DISPATCH_HANDLER_STAT
  loopCounter = 0;
#endif
  while (1) {
    if ((ppc_cached_irq_mask & ((volatile immap_t *)IMAP_ADDR)->im_siu_conf.sc_sipend) == 0) {
#ifdef DISPATCH_HANDLER_STAT
      if (loopCounter > maxLoop) maxLoop = loopCounter;
#endif
      break;
    }
    irq = (((volatile immap_t *)IMAP_ADDR)->im_siu_conf.sc_sivec >> 26);
    cpmIntr = (irq == BSP_CPM_INTERRUPT);
    /*
     * Disable the interrupts of the same and lower priority.
     */
    oldMask = ppc_cached_irq_mask;
    ppc_cached_irq_mask = oldMask & SIU_IvectMask[irq];
    ((volatile immap_t *)IMAP_ADDR)->im_siu_conf.sc_simask = ppc_cached_irq_mask;
    /*
     * Acknowledge the current interrupt. This has no effect on internal level interrupts.
     */
    ((volatile immap_t *)IMAP_ADDR)->im_siu_conf.sc_sipend = (1 << (31 - irq));

    if (cpmIntr) {
      /*
       * We will re-enable the SIU CPM interrupt to allow nesting of CPM
       * interrupts. Before doing so, we must acknowledge the current irq at
       * the CPM level to avoid triggering the interrupt again.
       */
      /*
       * Acknowledge and get the vector.
       */
      ((volatile immap_t *)IMAP_ADDR)->im_cpic.cpic_civr = 1;
      irq = (((volatile immap_t *)IMAP_ADDR)->im_cpic.cpic_civr >> 11);
      /*
       * transform IRQ to normalized irq table index.
       */
      irq += BSP_CPM_IRQ_LOWEST_OFFSET;
      /*
       * Unmask CPM interrupt at SIU level
       */
      ppc_cached_irq_mask |= (1 << (31 - BSP_CPM_INTERRUPT));
      ((volatile immap_t *)IMAP_ADDR)->im_siu_conf.sc_simask = ppc_cached_irq_mask;
    }
    /*
     * make sure that the masking operations in
     * the ICTL and the MSR are executed in order
     */
    asm volatile("sync":::"memory");

    _CPU_MSR_GET(msr);
    new_msr = msr | MSR_EE;
    _CPU_MSR_SET(new_msr);

    rtems_hdl_tbl[irq].hdl(rtems_hdl_tbl[irq].handle);

    _CPU_MSR_SET(msr);

    /*
     * make sure that the masking operations in
     * the ICTL and the MSR are executed in order
     */
    asm volatile("sync":::"memory");

    if (cpmIntr) {
      irq -= BSP_CPM_IRQ_LOWEST_OFFSET;
      ((volatile immap_t *)IMAP_ADDR)->im_cpic.cpic_cisr = (1 << irq);
    }
    ppc_cached_irq_mask = oldMask;
    ((volatile immap_t *)IMAP_ADDR)->im_siu_conf.sc_simask = ppc_cached_irq_mask;
#ifdef DISPATCH_HANDLER_STAT
    ++ loopCounter;
#endif
  }
  return 0;
}

void _ThreadProcessSignalsFromIrq (BSP_Exception_frame* ctx)
{
  /*
   * Process pending signals that have not already been
   * processed by _Thread_Dispatch. This happens quite
   * infrequently: the ISR must have posted an action
   * to the currently running thread.
   */
  if ( _Thread_Do_post_task_switch_extension ||
       _Thread_Executing->do_post_task_switch_extension ) {
    _Thread_Executing->do_post_task_switch_extension = false;
    _API_extensions_Run_postswitch();
  }
  /*
   * I plan to process other thread-related events here.
   * This will include DEBUG sessions requested from the keyboard...
   */
}