source: rtems/c/src/lib/libbsp/powerpc/mvme5500/irq/irq.c @ dc7271f

Last change on this file since dc7271f was dc7271f, checked in by Joel Sherrill <joel.sherrill@…>, on 09/12/07 at 15:16:32

2007-09-12 Joel Sherrill <joel.sherrill@…>

PR 1257/bsps

  • irq/GT64260Int.c, irq/irq.c: Code outside of cpukit should use the public API for rtems_interrupt_disable/rtems_interrupt_enable. By bypassing the public API and directly accessing _CPU_ISR_Disable and _CPU_ISR_Enable, they were bypassing the compiler memory barrier directive which could lead to problems. This patch also changes the type of the variable passed into these routines and addresses minor style issues.
  • Property mode set to 100644
File size: 14.8 KB
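
The pattern the change log refers to is the classic RTEMS critical-section idiom sketched below (a minimal sketch; update_shared_counter and shared_counter are illustrative names, not part of this file). rtems_interrupt_disable/rtems_interrupt_enable are the public API and include the compiler memory barrier that the raw _CPU_ISR_Disable/_CPU_ISR_Enable path bypassed:

#include <rtems.h>

static volatile unsigned int shared_counter; /* hypothetical data shared with an ISR */

void update_shared_counter(void)
{
  rtems_interrupt_level level;

  rtems_interrupt_disable(level);  /* save the current level and mask interrupts */
  shared_counter++;                /* critical section protected from ISRs */
  rtems_interrupt_enable(level);   /* restore the saved interrupt level */
}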
/*  irq.c
 *
 *  This file contains the implementation of the functions described in irq.h
 *
 *  Copyright (C) 1998, 1999 valette@crf.canon.fr
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 *
 *  Special acknowledgement to Till Straumann <strauman@slac.stanford.edu>
 *  for providing inputs to the IRQ handling and optimization.
 *
 *  Modified and added support for the MVME5500 board
 *  Copyright 2003, 2004, Shuchen Kate Feng <feng1@bnl.gov>,
 *                  NSLS, Brookhaven National Laboratory
 *
 */

#include <rtems/system.h>
#include <bsp.h>
#include <bsp/irq.h>
#include <rtems/score/thread.h>
#include <rtems/score/apiext.h>
#include <libcpu/raw_exception.h>
#include <libcpu/io.h>
#include <bsp/vectors.h>

#include <rtems/bspIo.h> /* for printk */
#include "bsp/gtreg.h"

#define HI_INT_CAUSE 0x40000000

/*#define DEBUG*/

int gpp_int_error = 0;

/*
 * Masks representing the additional irq vectors that must be
 * disabled when a particular entry is activated.
 * They will be dynamically computed from the priority table given
 * in BSP_rtems_irq_mngt_set();
 * CAUTION : these tables are accessed directly by the interrupt
 *           routine prologue.
 */
static unsigned int irq_prio_maskLO_tbl[BSP_MAIN_IRQ_NUMBER];
static unsigned int irq_prio_maskHI_tbl[BSP_MAIN_IRQ_NUMBER];

/*
 * default handler connected on each irq after bsp initialization
 */
static rtems_irq_connect_data   default_rtems_entry;

/*
 * location used to store initial tables used for interrupt
 * management.
 */
static rtems_irq_global_settings*       internal_config;
static rtems_irq_connect_data*          rtems_hdl_tbl;

static unsigned int irqCAUSE[20], irqLOW[20], irqHIGH[20];
static int irqIndex = 0;

/*
 * Check if IRQ is a MAIN CPU internal IRQ
 */
static inline int is_main_irq(const rtems_irq_number irqLine)
{
  return (((int) irqLine <= BSP_MICH_IRQ_MAX_OFFSET) &
          ((int) irqLine >= BSP_MICL_IRQ_LOWEST_OFFSET)
         );
}

/*
 * Check if IRQ is a GPP IRQ
 */
static inline int is_gpp_irq(const rtems_irq_number irqLine)
{
  return (((int) irqLine <= BSP_GPP_IRQ_MAX_OFFSET) &
          ((int) irqLine >= BSP_GPP_IRQ_LOWEST_OFFSET)
         );
}

/*
 * Check if IRQ is a Processor IRQ
 */
static inline int is_processor_irq(const rtems_irq_number irqLine)
{
  return (((int) irqLine <= BSP_PROCESSOR_IRQ_MAX_OFFSET) &
          ((int) irqLine >= BSP_PROCESSOR_IRQ_LOWEST_OFFSET)
         );
}

#define GT_GPP_Int1_Cause (GT_GPP_Interrupt_Cause+1)
#define GT_GPP_Int2_Cause (GT_GPP_Interrupt_Cause+2)
#define GT_GPP_Int3_Cause (GT_GPP_Interrupt_Cause+3)

void GT_GPP_IntHandler0()
{
  unsigned  gppCause, irqNum, bitNum;
  int i, found = 0;

  gppCause = inb(GT_GPP_Interrupt_Cause) & GT_GPPirq_cache;

  for (i=0; GPP7_0IrqTbl[i]!=-1; i++){
    bitNum = GPP7_0IrqTbl[i];
    if (gppCause & (1<<bitNum)) {
        /* Clear the GPP interrupt cause bit */
        outb( ~(1<<bitNum), GT_GPP_Interrupt_Cause); /* Till Straumann */
        found = 1;
        irqNum = bitNum+BSP_GPP_IRQ_LOWEST_OFFSET;
        /* call the necessary interrupt handlers */
        if (rtems_hdl_tbl[irqNum].hdl != default_rtems_entry.hdl)
           rtems_hdl_tbl[irqNum].hdl(rtems_hdl_tbl[irqNum].handle);
        else
           gpp_int_error = bitNum; /* GPP interrupt bitNum not connected */
    }
  }
  if ( !found) gpp_int_error = 33; /* spurious GPP interrupt */
}

void GT_GPP_IntHandler1()
{
  unsigned  gppCause, irqNum, bitNum;
  int i, found = 0;

  gppCause = inb(GT_GPP_Int1_Cause) & (GT_GPPirq_cache>>8);

  for (i=0; GPP15_8IrqTbl[i]!=-1; i++){
    bitNum = GPP15_8IrqTbl[i];
    if (gppCause & (1<<bitNum)) {
        /* Clear the GPP interrupt cause bit */
        outb( ~(1<<bitNum), GT_GPP_Int1_Cause); /* Till Straumann */
        found = 1;
        irqNum = bitNum+BSP_GPP8_IRQ_OFFSET;
        /* call the necessary interrupt handlers */
        if (rtems_hdl_tbl[irqNum].hdl != default_rtems_entry.hdl)
           rtems_hdl_tbl[irqNum].hdl(rtems_hdl_tbl[irqNum].handle);
        else
           gpp_int_error = bitNum+8; /* GPP interrupt bitNum not connected */
    }
  }
  if ( !found) gpp_int_error = 33; /* spurious GPP interrupt */
}

void GT_GPP_IntHandler2()
{
  unsigned  gppCause, irqNum, bitNum;
  int i, found = 0;

  gppCause = inb(GT_GPP_Int2_Cause) & (GT_GPPirq_cache>>16);

  for (i=0; GPP23_16IrqTbl[i]!=-1; i++){
    bitNum = GPP23_16IrqTbl[i];
    if (gppCause & (1<<bitNum)) {
        /* Clear the GPP interrupt cause bit */
        outb( ~(1<<bitNum), GT_GPP_Int2_Cause);
        found = 1;
        irqNum = bitNum+BSP_GPP16_IRQ_OFFSET;
        /* call the necessary interrupt handlers */
        if (rtems_hdl_tbl[irqNum].hdl != default_rtems_entry.hdl)
           rtems_hdl_tbl[irqNum].hdl(rtems_hdl_tbl[irqNum].handle);
        else
           gpp_int_error = bitNum+16; /* GPP interrupt bitNum not connected */
    }
  }
  if ( !found) gpp_int_error = 33; /* spurious GPP interrupt */
}

void GT_GPP_IntHandler3()
{
  unsigned  gppCause, irqNum, bitNum;
  int i, found = 0;

  gppCause = inb(GT_GPP_Int3_Cause) & (GT_GPPirq_cache>>24);

  for (i=0; GPP31_24IrqTbl[i]!=-1; i++){
    bitNum = GPP31_24IrqTbl[i];
    if (gppCause & (1<<bitNum)) {
        /* Clear the GPP interrupt cause bit */
        outb( ~(1<<bitNum), GT_GPP_Int3_Cause);
        found = 1;
        irqNum = bitNum+BSP_GPP24_IRQ_OFFSET;
        /* call the necessary interrupt handlers */
        if (rtems_hdl_tbl[irqNum].hdl != default_rtems_entry.hdl)
           rtems_hdl_tbl[irqNum].hdl(rtems_hdl_tbl[irqNum].handle);
        else
           gpp_int_error = bitNum+24; /* GPP interrupt bitNum not connected */
    }
  }
  if ( !found) gpp_int_error = 33; /* spurious GPP interrupt */
}

/*
 * ------------------------ RTEMS Irq helper functions ----------------
 */

/*
 * Caution : this function assumes the variable "internal_config"
 * is already set and that the tables it contains are still valid
 * and accessible.
 */
static void compute_GT64260int_masks_from_prio ()
{
  int i,j;
  unsigned long long irq_prio_mask=0;

  /*
   * Always mask at least the current interrupt to prevent re-entrance
   */
  for (i=0; i <BSP_MAIN_IRQ_NUMBER; i++) {
    irq_prio_mask = (unsigned long long) (1LLU << i);
    for (j = 0; j <BSP_MAIN_IRQ_NUMBER; j++) {
      /*
       * Mask interrupts at GT64260int level that have a lower priority
       * or <Till Straumann> an equal priority.
       */
      if (internal_config->irqPrioTbl [i] >= internal_config->irqPrioTbl [j]) {
         irq_prio_mask |= (unsigned long long)(1LLU << j);
      }
    }

    irq_prio_maskLO_tbl[i] = irq_prio_mask & 0xffffffff;
    irq_prio_maskHI_tbl[i] = (irq_prio_mask>>32) & 0xffffffff;
#ifdef DEBUG
    printk("irq_mask_prio_tbl[%d]:0x%8x%8x\n",i,irq_prio_maskHI_tbl[i],
           irq_prio_maskLO_tbl[i]);
#endif
  }
}

/*
 * This function checks that the value given for the irq line
 * is valid.
 */

static int isValidInterrupt(int irq)
{
  if ( (irq < BSP_LOWEST_OFFSET) || (irq > BSP_MAX_OFFSET))
    return 0;
  return 1;
}

/*
 * ------------------------ RTEMS Single Irq Handler Mngt Routines ----------------
 */

int BSP_install_rtems_irq_handler (const rtems_irq_connect_data* irq)
{
    rtems_interrupt_level       level;

    if (!isValidInterrupt(irq->name)) {
      printk("Invalid interrupt vector %d\n",irq->name);
      return 0;
    }
    /*
     * Check that the default handler is actually connected. If not,
     * issue an error. You must first get the current handler via
     * BSP_get_current_rtems_irq_handler and then disconnect it using
     * BSP_remove_rtems_irq_handler.
     * RATIONALE : to always have the same transition by forcing the user
     * to get the previous handler before accepting to disconnect.
     */
    rtems_interrupt_disable(level);
    if (rtems_hdl_tbl[irq->name].hdl != default_rtems_entry.hdl) {
      rtems_interrupt_enable(level);
      printk("IRQ vector %d already connected\n",irq->name);
      return 0;
    }

    /*
     * store the data provided by user
     */
    rtems_hdl_tbl[irq->name] = *irq;

    if (is_main_irq(irq->name)) {
      /*
       * Enable (internal) Main Interrupt Cause Low and High
       */
#ifdef DEBUG_IRQ
      printk("main irq %d\n",irq->name);
#endif
      BSP_enable_main_irq(irq->name);
    }

    if (is_gpp_irq(irq->name)) {
      /*
       * Enable (external) GPP[x] interrupt
       */
      BSP_enable_gpp_irq((int) irq->name);
    }

    if (is_processor_irq(irq->name)) {
      /*
       * Enable exception at processor level
       */
    }
    /*
     * Enable interrupt on device
     *
     * irq->on(irq);
     */

    rtems_interrupt_enable(level);

    return 1;
}


int BSP_get_current_rtems_irq_handler (rtems_irq_connect_data* irq)
{
    if (!isValidInterrupt(irq->name)) {
      return 0;
    }
    *irq = rtems_hdl_tbl[irq->name];
    return 1;
}

int BSP_remove_rtems_irq_handler (const rtems_irq_connect_data* irq)
{
    rtems_interrupt_level       level;

    if (!isValidInterrupt(irq->name)) {
      return 0;
    }
    /*
     * Check that the handler to remove is actually connected. If not,
     * issue an error. You must first get the current handler via
     * BSP_get_current_rtems_irq_handler and then ask to disconnect it.
     * RATIONALE : to always have the same transition by forcing the user
     * to get the previous handler before accepting to disconnect.
     */
    if (rtems_hdl_tbl[irq->name].hdl != irq->hdl) {
      return 0;
    }
    rtems_interrupt_disable(level);

    if (is_main_irq(irq->name)) {
      /*
       * disable CPU main interrupt
       */
      BSP_disable_main_irq(irq->name);
    }
    if (is_gpp_irq(irq->name)) {
      /*
       * disable external interrupt
       */
      BSP_disable_gpp_irq(irq->name);
    }
    if (is_processor_irq(irq->name)) {
      /*
       * disable exception at processor level
       */
    }

    /*
     * Disable interrupt on device
     */
    irq->off(irq);

    /*
     * restore the default irq value
     */
    rtems_hdl_tbl[irq->name] = default_rtems_entry;

    rtems_interrupt_enable(level);

    return 1;
}

/*
 * ------------------------ RTEMS Global Irq Handler Mngt Routines ----------------
 */

int BSP_rtems_irq_mngt_set(rtems_irq_global_settings* config)
{
    int                    i;
    rtems_interrupt_level  level;

    /*
     * Store various code accelerators
     */
    internal_config             = config;
    default_rtems_entry         = config->defaultEntry;
    rtems_hdl_tbl               = config->irqHdlTbl;

    rtems_interrupt_disable(level);
    compute_GT64260int_masks_from_prio();

    /*
     * set up internal tables used by rtems interrupt prologue
     */
    /*
     * start with MAIN CPU IRQ
     */
    for (i=BSP_MICL_IRQ_LOWEST_OFFSET; i < BSP_GPP_IRQ_LOWEST_OFFSET ; i++) {
      if (rtems_hdl_tbl[i].hdl != default_rtems_entry.hdl) {
        BSP_enable_main_irq(i);
        rtems_hdl_tbl[i].on(&rtems_hdl_tbl[i]);
      }
      else {
        rtems_hdl_tbl[i].off(&rtems_hdl_tbl[i]);
        BSP_disable_main_irq(i);
      }
    }
    /*
     * continue with external IRQ
     */
    for (i=BSP_GPP_IRQ_LOWEST_OFFSET; i<BSP_PROCESSOR_IRQ_LOWEST_OFFSET; i++) {
      if (rtems_hdl_tbl[i].hdl != default_rtems_entry.hdl) {
        BSP_enable_gpp_irq(i);
        rtems_hdl_tbl[i].on(&rtems_hdl_tbl[i]);
      }
      else {
        rtems_hdl_tbl[i].off(&rtems_hdl_tbl[i]);
        BSP_disable_gpp_irq(i);
      }
    }

    /*
     * finish with Processor exceptions handled like IRQ
     */
    for (i=BSP_PROCESSOR_IRQ_LOWEST_OFFSET; i < BSP_PROCESSOR_IRQ_MAX_OFFSET+1; i++) {
      if (rtems_hdl_tbl[i].hdl != default_rtems_entry.hdl) {
        rtems_hdl_tbl[i].on(&rtems_hdl_tbl[i]);
      }
      else {
        rtems_hdl_tbl[i].off(&rtems_hdl_tbl[i]);
      }
    }
    rtems_interrupt_enable(level);
    return 1;
}

int BSP_rtems_irq_mngt_get(rtems_irq_global_settings** config)
{
    *config = internal_config;
    return 0;
}

int _BSP_vme_bridge_irq = -1;

/*
 * High level IRQ handler called from shared_raw_irq_code_entry
 */
void C_dispatch_irq_handler (CPU_Interrupt_frame *frame, unsigned int excNum)
{
  register unsigned msr;
  register unsigned new_msr;
  unsigned mainCause[2];
  register unsigned selectCause;
  register unsigned oldMask[2]={0,0};
  unsigned i, regNum, irq, bitNum, startIrqNum=0;

  if (excNum == ASM_DEC_VECTOR) {
    _CPU_MSR_GET(msr);
    new_msr = msr | MSR_EE;
    _CPU_MSR_SET(new_msr);

    rtems_hdl_tbl[BSP_DECREMENTER].hdl(rtems_hdl_tbl[BSP_DECREMENTER].handle);

    _CPU_MSR_SET(msr);
    return;
  }
  selectCause = inl( GT_CPU_SEL_CAUSE);
  if (selectCause & HI_INT_CAUSE ) {
    mainCause[1]= selectCause & inl(GT_CPU_INT_MASK_HI);
    startIrqNum=32;
  }
  else {
    mainCause[0] =inl(GT_MAIN_INT_CAUSE_LO)&inl(GT_CPU_INT_MASK_LO);
    mainCause[1] =inl(GT_MAIN_INT_CAUSE_HI)&inl(GT_CPU_INT_MASK_HI);
  }

#if 0
  /* very bad practice to put printk here; use only for debugging */
  printk("main 0 %x, main 1 %x \n", mainCause[0],mainCause[1]);
#endif
  oldMask[0]= GT_MAINirqLO_cache;
  oldMask[1]= GT_MAINirqHI_cache;

  for (i=0;mainIrqTbl[i]!=-1;i++) {
    irq=mainIrqTbl[i];
    if ( irq < startIrqNum ) continue;
    regNum = irq/32;
    bitNum = irq % 32;
    if ( mainCause[regNum] & (1<<bitNum)) {
      GT_MAINirqLO_cache=oldMask[0]&(~irq_prio_maskLO_tbl[irq]);
      outl(GT_MAINirqLO_cache, GT_CPU_INT_MASK_LO);
      __asm __volatile("sync");
      GT_MAINirqHI_cache=oldMask[1]&(~irq_prio_maskHI_tbl[irq]);
      outl(GT_MAINirqHI_cache, GT_CPU_INT_MASK_HI);
      __asm __volatile("sync");

      /* <skf> It seems that reading back is necessary to ensure the
       * interrupt mask is updated. Otherwise, spurious interrupts will
       * happen.  However, I do not want to use a "while" loop and risk
       * getting the CPU stuck.  I would rather keep track of the
       * interrupt mask if it is not updated.
       */
      if (((irqLOW[irqIndex]= inl(GT_CPU_INT_MASK_LO))!=GT_MAINirqLO_cache)||
          ((irqHIGH[irqIndex]= inl(GT_CPU_INT_MASK_HI))!=GT_MAINirqHI_cache)){
         irqIndex++;
         irqIndex %=20;
         irqCAUSE[irqIndex] = irq;
      }
      _CPU_MSR_GET(msr);
      new_msr = msr | MSR_EE;
      _CPU_MSR_SET(new_msr);
      rtems_hdl_tbl[irq].hdl(rtems_hdl_tbl[irq].handle);
      _CPU_MSR_SET(msr);
      break;
    }
  }
  GT_MAINirqLO_cache=oldMask[0];
  outl(GT_MAINirqLO_cache, GT_CPU_INT_MASK_LO);
  GT_MAINirqHI_cache=oldMask[1];
  outl(GT_MAINirqHI_cache, GT_CPU_INT_MASK_HI);
}

void _ThreadProcessSignalsFromIrq (BSP_Exception_frame* ctx)
{
  /*
   * Process pending signals that have not already been
   * processed by _Thread_Dispatch. This happens quite
   * infrequently : the ISR must have posted an action
   * to the currently running thread.
   */
  if ( _Thread_Do_post_task_switch_extension ||
       _Thread_Executing->do_post_task_switch_extension ) {
    _Thread_Executing->do_post_task_switch_extension = FALSE;
    _API_extensions_Run_postswitch();
  }
  /*
   * I plan to process other thread-related events here.
   * This will include DEBUG sessions requested from the keyboard...
   */
}

void BSP_printIRQMask()
{
  int i;

  for (i=0; i< 20; i++)
    printk("IRQ%d : 0x%x %x \n", irqCAUSE[i], irqHIGH[i], irqLOW[i]);
}
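
For reference, a driver connects to one of the vectors dispatched above roughly as follows (a minimal sketch, assuming the rtems_irq_connect_data fields used in this file, name, hdl, handle, on and off, and the rtems_irq_hdl_param argument type from this BSP's irq.h; the my_device_* names and the choice of BSP_GPP8_IRQ_OFFSET are illustrative only):

#include <bsp.h>
#include <bsp/irq.h>
#include <rtems/bspIo.h>

/* hypothetical device hooks; on/off receive the connect data, as irq->off(irq) above shows */
static void my_device_isr(rtems_irq_hdl_param handle)        { /* acknowledge and service the device */ }
static void my_device_on(const rtems_irq_connect_data *irq)  { /* unmask the interrupt at the device */ }
static void my_device_off(const rtems_irq_connect_data *irq) { /* mask the interrupt at the device */ }

static rtems_irq_connect_data my_irq_data; /* static, so unused fields stay zeroed */

void my_device_connect_irq(void)
{
  my_irq_data.name   = BSP_GPP8_IRQ_OFFSET;  /* example GPP vector; use the line your device is wired to */
  my_irq_data.hdl    = my_device_isr;
  my_irq_data.handle = NULL;                 /* argument handed back to my_device_isr() by the dispatcher */
  my_irq_data.on     = my_device_on;
  my_irq_data.off    = my_device_off;

  if (!BSP_install_rtems_irq_handler(&my_irq_data))
    printk("my_device: could not install interrupt handler\n");
}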