Changeset e6dec71c in rtems


Ignore:
Timestamp:
02/01/02 15:00:30 (22 years ago)
Author:
Joel Sherrill <joel.sherrill@…>
Branches:
4.10, 4.11, 4.8, 4.9, 5, master
Children:
2835b3a5
Parents:
6d41a874
Message:

2002-02-01 Greg Menke <gregory.menke@…>

  • cpu.c: Enhancements and fixes for modifying the SR when changing the interrupt level.
  • cpu_asm.S: Fixed handling of FP enable bit so it is properly managed on a per-task basis, improved handling of interrupt levels, and made deferred FP contexts work on the MIPS.
  • rtems/score/cpu.h: Modified to support above changes.
Files:
8 edited

Legend:

Unmodified
Added
Removed
  • c/src/exec/score/cpu/mips/ChangeLog

    r6d41a874 re6dec71c  
     12002-02-01      Greg Menke <gregory.menke@gsfc.nasa.gov>
     2
     3        * cpu.c: Enhancements and fixes for modifying the SR when changing
     4        the interrupt level.
     5        * cpu_asm.S: Fixed handling of FP enable bit so it is properly
     6        managed on a per-task basis, improved handling of interrupt levels,
     7        and made deferred FP contexts work on the MIPS.
     8        * rtems/score/cpu.h: Modified to support above changes.
     9
    1102002-01-28      Ralf Corsepius <corsepiu@faw.uni-ulm.de>
    211
  • c/src/exec/score/cpu/mips/cpu.c

    r6d41a874 re6dec71c  
    8383
    8484#if __mips == 3
    85   return ((sr & SR_EXL) >> 1);
     85/* EXL bit and shift down hardware ints into bits 1 thru 6 */
     86  return ((sr & SR_EXL) >> 1) + ((sr & 0xfc00) >> 9);
    8687
    8788#elif __mips == 1
    88   return ((sr & SR_IEC) ? 0 : 1);
     89/* IEC bit and shift down hardware ints into bits 1 thru 6 */
     90  return (sr & SR_IEC) + ((sr & 0xfc00) >> 9);
    8991
    9092#else
     
    9395}
    9496
     97
    9598void _CPU_ISR_Set_level( unsigned32 new_level )
    9699{
    97   unsigned int sr;
     100  unsigned int sr, srbits;
     101
     102  /*
     103  ** mask off the int level bits only so we can
     104  ** preserve software int settings and FP enable
     105  ** for this thread.  Note we don't force software ints
     106  ** enabled when changing level, they were turned on
     107  ** when this task was created, but may have been turned
     108  ** off since, so we'll just leave them alone.
     109  */
     110
    98111
    99112  mips_get_sr(sr);
    100113
    101114#if __mips == 3
     115  mips_set_sr(sr & ~SR_IE);                 /* first disable ie bit (recommended) */
     116
     117  srbits = sr & ~(0xfc00 | SR_EXL | SR_IE);
     118
     119  sr = srbits | ((new_level==0)? (0xfc00 | SR_EXL | SR_IE): \
     120                 (((new_level<<9) & 0xfc000) | \
     121                  (new_level & 1)?(SR_EXL | SR_IE):0));
     122/*
    102123  if ( (new_level & SR_EXL) == (sr & SR_EXL) )
    103124    return;
    104125
    105126  if ( (new_level & SR_EXL) == 0 ) {
    106     sr &= ~SR_EXL;                    /* clear the EXL bit */
     127    sr &= ~SR_EXL;                    * clear the EXL bit *
    107128    mips_set_sr(sr);
    108129  } else {
    109     sr &= ~SR_IE;
    110     mips_set_sr(sr);                 /* first disable ie bit (recommended) */
    111 
    112     sr |= SR_EXL|SR_IE;              /* enable exception level */
    113     mips_set_sr(sr);                 /* first disable ie bit (recommended) */
     130
     131    sr |= SR_EXL|SR_IE;              * enable exception level *
     132    mips_set_sr(sr);                 * first disable ie bit (recommended) *
    114133  }
     134*/
    115135 
    116136#elif __mips == 1
    117   if ( (new_level & SR_IEC) == (sr & SR_IEC) )
    118     return;
    119 
    120   sr &= ~SR_IEC;                    /* clear the IEC bit */
    121   if ( !new_level )
    122     sr |= SR_IEC;                   /* enable interrupts */
    123 
    124   mips_set_sr(sr);
    125 
     137  mips_set_sr( (sr & ~SR_IEC) );       
     138
     139  srbits = sr & ~(0xfc00 | SR_IEC);
     140  sr = srbits | ((new_level==0)?0xfc01:( ((new_level<<9) & 0xfc000) | (new_level & 1)));
    126141#else
    127142#error "CPU ISR level: unknown MIPS level for SR handling"
    128143#endif
    129 
     144  mips_set_sr( sr );
    130145}
    131146
     
    154169   *
    155170   *  Because all interrupts are vectored through the same exception handler
    156    *  this is not necessary on this port.
      171   *  this is not necessary on this port.
    157172   */
    158173}
  • c/src/exec/score/cpu/mips/cpu_asm.S

    r6d41a874 re6dec71c  
    2424 *          rewriting as much as possible in C and added the JMR3904 BSP
    2525 *          so testing could be performed on a simulator.
     26 *    2001: Greg Menke <gregory.menke@gsfc.nasa.gov>, bench tested ISR
     27 *          performance, tweaking this code and the isr vectoring routines
     28 *          to reduce overhead & latencies.  Added optional
     29 *          instrumentation as well.
     30 *    2002: Greg Menke <gregory.menke@gsfc.nasa.gov>, overhauled cpu_asm.S,
     31 *          cpu.c and cpu.h to manage FP vs int only tasks, interrupt levels
     32 *          and deferred FP contexts.
    2633 * 
    2734 *  COPYRIGHT (c) 1989-2000.
     
    4350* left it in...
    4451*/
    45 /* #define INSTRUMENT */
    46        
     52
     53#define INSTRUMENT_ISR_VECTORING
     54//#define INSTRUMENT_EXECUTING_THREAD
    4755
    4856
     
    166174 */
    167175
    168 #if ( CPU_HARDWARE_FP == TRUE )
     176#if ( CPU_HARDWARE_FP == FALSE )
    169177FRAME(_CPU_Context_save_fp,sp,0,ra)
    170178        .set noat
    171         ld   a1,(a0)
     179
     180#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
     181        /*
     182        ** Make sure the FPU is on before we save state.  This code is here
     183        ** because the FPU context switch might occur when an integer
     184        ** task is switching out w/ an FP task switching in, but the current
     185        ** FPU state was left by a sometime previously scheduled FP task.
     186        **
     187        ** In non-deferred FP context switch, if the exiting task is FP, then
     188        ** the FPU is already on so we don't need to do this.
     189        */
     190       
     191        MFC0    t0,C0_SR       
     192        li      k0,SR_CU1       
     193        or      t0,k0
     194        MTC0    t0,C0_SR       
     195#endif
     196               
     197        ld   a1,(a0)
    172198        NOP
    173199        swc1 $f0,FP0_OFFSET*F_SZ(a1)
     
    227253 */
    228254
    229 #if ( CPU_HARDWARE_FP == TRUE )
     255#if ( CPU_HARDWARE_FP == FALSE )
    230256FRAME(_CPU_Context_restore_fp,sp,0,ra)
    231257        .set noat
    232         ld a1,(a0)
     258       
     259        /*
     260        ** Make sure the FPU is on before we retrieve state.  This code
     261        ** is here because the FPU context switch might occur when an
     262        ** integer task is switching out with a FP task switching in.
     263        */
     264       
     265        MFC0    t0,C0_SR       
     266        li      k0,SR_CU1       
     267        or      t0,k0
     268        MTC0    t0,C0_SR       
     269
     270        ld a1,(a0)
    233271        NOP
    234272        lwc1 $f0,FP0_OFFSET*4(a1)
     
    285323        MFC0  t0,C0_SR
    286324        li    t1,~(SR_INTERRUPT_ENABLE_BITS)
    287         STREG t0,C0_SR_OFFSET*4(a0)     /* save status register */
     325        STREG t0,C0_SR_OFFSET*R_SZ(a0)
    288326        and   t0,t1
    289         MTC0  t0,C0_SR                  /* first disable ie bit (recommended) */
    290327#if __mips == 3
    291         ori   t0,SR_EXL|SR_IE           /* enable exception level to disable interrupts */
     328        ori   t0,(SR_EXL|SR_IE)         /* enable exception level to disable interrupts */
     329#endif
    292330        MTC0  t0,C0_SR
    293 #endif
    294331
    295332        STREG ra,RA_OFFSET*R_SZ(a0)     /* save current context */
     
    305342        STREG s7,S7_OFFSET*R_SZ(a0)
    306343
    307         /*
     344        /*  EPC is readonly...
    308345        MFC0  t0,C0_EPC
    309346        NOP
     
    324361        LDREG s7,S7_OFFSET*R_SZ(a1)
    325362
    326         /*
     363        /*  EPC is readonly...
    327364        LDREG t0,C0_EPC_OFFSET*R_SZ(a1)
    328365        NOP
    329366        MTC0  t0,C0_EPC
    330367        */
    331        
     368
    332369        LDREG t0, C0_SR_OFFSET*R_SZ(a1)
    333         NOP
    334        
     370       
     371//      NOP
     372//#if __mips == 3
     373//        andi  t0,SR_EXL
     374//        bnez  t0,_CPU_Context_1   /* set exception level from restore context */
     375//        li    t0,~SR_EXL
     376//        MFC0  t1,C0_SR
     377//        NOP
     378//        and   t1,t0
     379//        MTC0  t1,C0_SR
     380//
     381//#elif __mips == 1
     382//
     383//        andi  t0,(SR_INTERRUPT_ENABLE_BITS) /* we know 0 disabled */
     384//        beq   t0,$0,_CPU_Context_1          /* set level from restore context */
     385//        MFC0  t0,C0_SR
     386//        NOP
     387//        or    t0,(SR_INTERRUPT_ENABLE_BITS) /* new_sr = old sr with enabled  */
     388//        MTC0  t0,C0_SR                      /* set with enabled */
     389//        NOP
     390
     391       
     392/*
     393** Incorporate the new thread's FP coprocessor state and interrupt mask/enable
     394** into the status register.  We jump thru the requisite hoops to ensure we
     395** maintain all other SR bits as global values.
     396**
     397** Get the thread's FPU enable, int mask & int enable bits.  Although we keep the
     398** software int enables on a per-task basis, the rtems_task_create
     399** Interrupt Level & int level manipulation functions cannot enable/disable them,
     400** so they are automatically enabled for all tasks.  To turn them off, a thread 
     401** must itself manipulate the SR register.
     402*/
     403
    335404#if __mips == 3
    336         andi  t0,SR_EXL
    337         bnez  t0,_CPU_Context_1   /* set exception level from restore context */
    338         li    t0,~SR_EXL
    339         MFC0  t1,C0_SR
    340         NOP
    341         and   t1,t0
    342         MTC0  t1,C0_SR
    343 
     405        li      k0,(SR_CU1 | SR_IMASK | SR_EXL | SR_IE)
    344406#elif __mips == 1
    345         andi  t0,(SR_INTERRUPT_ENABLE_BITS) /* we know 0 disabled */
    346         beq   t0,$0,_CPU_Context_1          /* set level from restore context */
    347         MFC0  t0,C0_SR
    348         NOP
    349         or    t0,(SR_INTERRUPT_ENABLE_BITS) /* new_sr = old sr with enabled  */
    350         MTC0  t0,C0_SR                      /* set with enabled */
    351 #endif
    352 
    353        
    354 _CPU_Context_1:
     407        li      k0,(SR_CU1 | SR_IMASK | SR_IEC)
     408#endif
     409        and     t0,k0           
     410               
     411        MFC0    t1,C0_SR        /* grab the current SR */
     412        not     k0              /* invert k0 so we can clear out the SR bits */
     413        and     t1,k0
     414
     415        or      t0,t1           /* setup the new task's SR value */
     416
     417        MTC0    t0,C0_SR        /* and load the new SR */
     418        NOP
     419       
     420/* _CPU_Context_1: */
    355421        j ra
    356422        NOP
     
    451517        STREG    t0,R_SR*R_SZ(sp)
    452518        STREG    t1,R_EPC*R_SZ(sp)
    453 
    454 
    455 #ifdef INSTRUMENT
     519       
     520
     521#ifdef INSTRUMENT_EXECUTING_THREAD
    456522        lw t2, _Thread_Executing
    457523        nop
    458524        sw t2, 0x8001FFF0
    459 
    460         sw t0, 0x8001F050
    461         sw t1, 0x8001F054
    462 
    463         li t0, 0xdeadbeef
    464         li t1, 0xdeadbeef
    465         li t2, 0xdeadbeef
    466                        
    467         sw ra, 0x8001F000
    468         sw v0, 0x8001F004
    469         sw v1, 0x8001F008
    470         sw a0, 0x8001F00c
    471         sw a1, 0x8001F010
    472         sw a2, 0x8001F014
    473         sw a3, 0x8001F018
    474         sw t0, 0x8001F01c
    475         sw t1, 0x8001F020
    476         sw t2, 0x8001F024
    477         sw t3, 0x8001F028
    478         sw t4, 0x8001F02c
    479         sw t5, 0x8001F030
    480         sw t6, 0x8001F034
    481         sw t7, 0x8001F038
    482         sw t8, 0x8001F03c
    483         sw t9, 0x8001F040
    484         sw gp, 0x8001F044
    485         sw fp, 0x8001F048
    486 #endif
    487        
    488 /* determine if an interrupt generated this exception */
     525#endif
     526       
     527        /* determine if an interrupt generated this exception */
    489528
    490529        MFC0     k0,C0_CAUSE
     
    516555       
    517556        beq      k0,zero,_ISR_Handler_exit
    518        
    519         li       t2,1           /* set a flag so we process interrupts */
    520        
     557
     558       
     559       
     560       
     561               
    521562  /*
    522563   *  save some or all context on stack
     
    548589   *  interrupt source and actually vector to device ISR handlers.
    549590   */
     591       
     592#ifdef INSTRUMENT_ISR_VECTORING
     593        nop
     594        li      t1, 1
     595        sw      t1, 0x8001e000
     596#endif
     597
    550598        move     a0,sp
    551599        jal      mips_vector_isr_handlers
    552600        nop
    553 
     601       
     602#ifdef INSTRUMENT_ISR_VECTORING
     603        li      t1, 0
     604        sw      t1, 0x8001e000
     605        nop
     606#endif
     607               
    554608  /*
    555609   *  --_ISR_Nest_level;
     
    573627        bne t0,zero,_ISR_Handler_exit
    574628        nop
     629
     630
     631
     632
     633
     634       
     635
     636       
    575637  /*
    576638   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
     
    590652       
    591653       
    592 #ifdef INSTRUMENT
    593         li      t0,0x11111111
    594         sw      t0,0x8001F104
    595 #endif
    596        
    597   /* restore interrupt state from the saved status register,
    598    * if the isr vectoring didn't so we allow nested interrupts to
    599    * occur */
     654#ifdef INSTRUMENT_EXECUTING_THREAD
     655        lw      t0,_Thread_Executing
     656        nop
     657        sw      t0,0x8001FFF4
     658#endif
     659
     660/*
     661** Turn on interrupts before entering Thread_Dispatch which
     662** will run for a while, thus allowing new interrupts to
     663** be serviced.  Observe the Thread_Dispatch_disable_level interlock
     664** that prevents recursive entry into Thread_Dispatch.
     665*/
     666
     667        MFC0    t0, C0_SR
     668        NOP
     669        or      t0, SR_INTERRUPT_ENABLE_BITS
     670        MTC0    t0, C0_SR
     671        NOP
    600672               
    601         LDREG    t0,R_SR*R_SZ(sp)
    602         NOP
    603         MTC0     t0,C0_SR
    604         rfe
    605        
    606 
    607         jal _Thread_Dispatch
    608         nop
    609 
    610 #ifdef INSTRUMENT
    611         li      t0,0x22222222
    612         sw      t0,0x8001F100
    613 #endif
    614 
    615        
    616                        
    617 
     673        jal     _Thread_Dispatch
     674        NOP
     675
     676#ifdef INSTRUMENT_EXECUTING_THREAD
     677        lw      t0,_Thread_Executing
     678        nop
     679        sw      t0,0x8001FFF8
     680#endif
     681
     682       
    618683  /*
    619684   *  prepare to get out of interrupt
     
    626691
    627692_ISR_Handler_exit:
    628         LDREG    t0, R_SR*R_SZ(sp)
    629         NOP
    630         MTC0     t0, C0_SR
    631 
    632 /* restore context from stack */
    633 
    634 #ifdef INSTRUMENT
     693/*
     694** Skip the SR restore because its a global register. _CPU_Context_switch_restore
     695** adjusts it according to each task's configuration.  If we didn't dispatch, the
     696** SR value isn't changing, so all we need to do is return.
     697**
     698*/
     699
     700        /* restore context from stack */
     701       
     702#ifdef INSTRUMENT_EXECUTING_THREAD
    635703        lw      t0,_Thread_Executing
    636704        nop
    637         sw      t0, 0x8001FFF4
     705        sw      t0, 0x8001FFFC
    638706#endif
    639707
     
    661729        LDREG v1, R_V1*R_SZ(sp)
    662730        LDREG v0, R_V0*R_SZ(sp)
    663 
    664 #ifdef INSTRUMENT
    665         sw ra, 0x8001F000
    666         sw v0, 0x8001F004
    667         sw v1, 0x8001F008
    668         sw a0, 0x8001F00c
    669         sw a1, 0x8001F010
    670         sw a2, 0x8001F014
    671         sw a3, 0x8001F018
    672         sw t0, 0x8001F01c
    673         sw t1, 0x8001F020
    674         sw t2, 0x8001F024
    675         sw t3, 0x8001F028
    676         sw t4, 0x8001F02c
    677         sw t5, 0x8001F030
    678         sw t6, 0x8001F034
    679         sw t7, 0x8001F038
    680         sw t8, 0x8001F03c
    681         sw t9, 0x8001F040
    682         sw gp, 0x8001F044
    683         sw fp, 0x8001F048
    684 #endif
    685731       
    686732        LDREG     k0, R_EPC*R_SZ(sp)
    687733       
    688734        .set noat
    689         LDREG AT, R_AT*R_SZ(sp)
     735        LDREG     AT, R_AT*R_SZ(sp)
    690736        .set at
    691737
     
    698744ENDFRAME(_ISR_Handler)
    699745
     746       
    700747FRAME(mips_break,sp,0,ra)
    701748#if 1
  • c/src/exec/score/cpu/mips/rtems/score/cpu.h

    r6d41a874 re6dec71c  
    6868 */
    6969
    70 #define CPU_INLINE_ENABLE_DISPATCH       TRUE
     70#define CPU_INLINE_ENABLE_DISPATCH       FALSE
    7171
    7272/*
     
    208208 */
    209209
    210 #define CPU_IDLE_TASK_IS_FP      FALSE
     210#define CPU_IDLE_TASK_IS_FP      TRUE
    211211
    212212/*
     
    556556
    557557/*
    558  *  This is defined if the port has a special way to report the ISR nesting
    559  *  level.  Most ports maintain the variable _ISR_Nest_level.
    560  */
    561 
    562 #define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
    563 
    564 /*
    565558 *  Should be large enough to run all RTEMS tests.  This insures
    566559 *  that a "reasonable" small application should not have any problems.
     
    633626  do { \
    634627    mips_get_sr( _level ); \
    635     mips_set_sr( (_level) & ~SR_INTERRUPT_ENABLE_BITS ); \
     628    mips_set_sr( _level & ~SR_INTERRUPT_ENABLE_BITS ); \
     629    _level &= SR_INTERRUPT_ENABLE_BITS; \
    636630  } while(0)
    637631
     
    644638#define _CPU_ISR_Enable( _level )  \
    645639  do { \
    646     mips_set_sr(_level); \
     640    unsigned int _scratch; \
     641    mips_get_sr( _scratch ); \
     642    mips_set_sr( (_scratch & ~SR_INTERRUPT_ENABLE_BITS) | (_level & SR_INTERRUPT_ENABLE_BITS) ); \
    647643  } while(0)
    648644
     
    651647 *  disabling them again.  This is used to divide long RTEMS critical
    652648 *  sections into two or more parts.  The parameter _level is not
    653  * modified.
     649 *  modified.
    654650 */
    655651
    656652#define _CPU_ISR_Flash( _xlevel ) \
    657653  do { \
    658     unsigned int _scratch; \
    659654    _CPU_ISR_Enable( _xlevel ); \
    660     _CPU_ISR_Disable( _scratch ); \
     655    _CPU_ISR_Disable( _xlevel ); \
    661656  } while(0)
    662657
     
    702697 *        FPU may be easily disabled by software such as on the SPARC
    703698 *        where the PSR contains an enable FPU bit.
    704  */
    705 
    706 #define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
    707                                  _isr, _entry_point, _is_fp ) \
     699 *
     700 *  The per-thread status register holds the interrupt enable, FP enable
     701 *  and global interrupt enable for that thread.  It means each thread can
     702 *  enable its own set of interrupts.  If interrupts are disabled, RTEMS
     703 *  can still dispatch via blocking calls.  This is the function of the
     704 *  "Interrupt Level", and on the MIPS, it controls the IEC bit and all
     705 *  the hardware interrupts as defined in the SR.  Software ints
     706 *  are automatically enabled for all threads, as they will only occur under
     707 *  program control anyhow.  Besides, the interrupt level parm is only 8 bits,
     708 *  and controlling the software ints plus the others would require 9.
     709 *
     710 *  If the Interrupt Level is 0, all ints are on.  Otherwise, the
     711 *  Interrupt Level should supply a bit pattern to impose on the SR
     712 *  interrupt bits; bit 0 applies to the mips1 IEC bit/mips3 EXL&IE, bits 1 thru 6
     713 *  apply to the SR register Intr bits from bit 10 thru bit 15.  Bit 7 of
     714 *  the Interrupt Level parameter is unused at this time.
     715 *
     716 *  These are the only per-thread SR bits, the others are maintained
     717 *  globally & explicitly preserved by the Context Switch code in cpu_asm.s
     718 */
     719
     720
     721#if __mips == 3
     722#define _INTON  (SR_EXL | SR_IE)
     723#endif
     724#if __mips == 1
     725#define _INTON  SR_IEC
     726#endif
     727
     728#define _CPU_Context_Initialize( _the_context, _stack_base, _size, _isr, _entry_point, _is_fp ) \
    708729  { \
    709730        unsigned32 _stack_tmp = \
    710731           (unsigned32)(_stack_base) + (_size) - CPU_STACK_ALIGNMENT; \
     732        unsigned32 _intlvl = _isr & 0xff; \
    711733        _stack_tmp &= ~(CPU_STACK_ALIGNMENT - 1); \
    712734        (_the_context)->sp = _stack_tmp; \
    713735        (_the_context)->fp = _stack_tmp; \
    714736        (_the_context)->ra = (unsigned64)_entry_point; \
    715         (_the_context)->c0_sr = ((_the_context)->c0_sr & 0x0fff0000) | \
    716                                 ((_isr)?0xff00:0xff01) | \
    717                                 ((_is_fp)?0x20000000:0x10000000); \
     737        (_the_context)->c0_sr = ((_intlvl==0)?(0xFF00 | _INTON):( ((_intlvl<<9) & 0xfc00) | \
     738                                                       0x300 | \
     739                                                       ((_intlvl & 1)?_INTON:0)) ) | \
     740                                SR_CU0 | ((_is_fp)?SR_CU1:0); \
    718741  }
     742
     743
    719744
    720745/*
  • cpukit/score/cpu/mips/ChangeLog

    r6d41a874 re6dec71c  
     12002-02-01      Greg Menke <gregory.menke@gsfc.nasa.gov>
     2
     3        * cpu.c: Enhancements and fixes for modifying the SR when changing
     4        the interrupt level.
     5        * cpu_asm.S: Fixed handling of FP enable bit so it is properly
     6        managed on a per-task basis, improved handling of interrupt levels,
     7        and made deferred FP contexts work on the MIPS.
     8        * rtems/score/cpu.h: Modified to support above changes.
     9
    1102002-01-28      Ralf Corsepius <corsepiu@faw.uni-ulm.de>
    211
  • cpukit/score/cpu/mips/cpu.c

    r6d41a874 re6dec71c  
    8383
    8484#if __mips == 3
    85   return ((sr & SR_EXL) >> 1);
     85/* EXL bit and shift down hardware ints into bits 1 thru 6 */
     86  return ((sr & SR_EXL) >> 1) + ((sr & 0xfc00) >> 9);
    8687
    8788#elif __mips == 1
    88   return ((sr & SR_IEC) ? 0 : 1);
     89/* IEC bit and shift down hardware ints into bits 1 thru 6 */
     90  return (sr & SR_IEC) + ((sr & 0xfc00) >> 9);
    8991
    9092#else
     
    9395}
    9496
     97
    9598void _CPU_ISR_Set_level( unsigned32 new_level )
    9699{
    97   unsigned int sr;
     100  unsigned int sr, srbits;
     101
     102  /*
     103  ** mask off the int level bits only so we can
     104  ** preserve software int settings and FP enable
     105  ** for this thread.  Note we don't force software ints
     106  ** enabled when changing level, they were turned on
     107  ** when this task was created, but may have been turned
     108  ** off since, so we'll just leave them alone.
     109  */
     110
    98111
    99112  mips_get_sr(sr);
    100113
    101114#if __mips == 3
     115  mips_set_sr(sr & ~SR_IE);                 /* first disable ie bit (recommended) */
     116
     117  srbits = sr & ~(0xfc00 | SR_EXL | SR_IE);
     118
     119  sr = srbits | ((new_level==0)? (0xfc00 | SR_EXL | SR_IE): \
     120                 (((new_level<<9) & 0xfc000) | \
     121                  (new_level & 1)?(SR_EXL | SR_IE):0));
     122/*
    102123  if ( (new_level & SR_EXL) == (sr & SR_EXL) )
    103124    return;
    104125
    105126  if ( (new_level & SR_EXL) == 0 ) {
    106     sr &= ~SR_EXL;                    /* clear the EXL bit */
     127    sr &= ~SR_EXL;                    * clear the EXL bit *
    107128    mips_set_sr(sr);
    108129  } else {
    109     sr &= ~SR_IE;
    110     mips_set_sr(sr);                 /* first disable ie bit (recommended) */
    111 
    112     sr |= SR_EXL|SR_IE;              /* enable exception level */
    113     mips_set_sr(sr);                 /* first disable ie bit (recommended) */
     130
     131    sr |= SR_EXL|SR_IE;              * enable exception level *
     132    mips_set_sr(sr);                 * first disable ie bit (recommended) *
    114133  }
     134*/
    115135 
    116136#elif __mips == 1
    117   if ( (new_level & SR_IEC) == (sr & SR_IEC) )
    118     return;
    119 
    120   sr &= ~SR_IEC;                    /* clear the IEC bit */
    121   if ( !new_level )
    122     sr |= SR_IEC;                   /* enable interrupts */
    123 
    124   mips_set_sr(sr);
    125 
     137  mips_set_sr( (sr & ~SR_IEC) );       
     138
     139  srbits = sr & ~(0xfc00 | SR_IEC);
     140  sr = srbits | ((new_level==0)?0xfc01:( ((new_level<<9) & 0xfc000) | (new_level & 1)));
    126141#else
    127142#error "CPU ISR level: unknown MIPS level for SR handling"
    128143#endif
    129 
     144  mips_set_sr( sr );
    130145}
    131146
     
    154169   *
    155170   *  Because all interrupts are vectored through the same exception handler
    156    *  this is not necessary on this port.
      171   *  this is not necessary on this port.
    157172   */
    158173}
  • cpukit/score/cpu/mips/cpu_asm.S

    r6d41a874 re6dec71c  
    2424 *          rewriting as much as possible in C and added the JMR3904 BSP
    2525 *          so testing could be performed on a simulator.
     26 *    2001: Greg Menke <gregory.menke@gsfc.nasa.gov>, bench tested ISR
     27 *          performance, tweaking this code and the isr vectoring routines
     28 *          to reduce overhead & latencies.  Added optional
     29 *          instrumentation as well.
     30 *    2002: Greg Menke <gregory.menke@gsfc.nasa.gov>, overhauled cpu_asm.S,
     31 *          cpu.c and cpu.h to manage FP vs int only tasks, interrupt levels
     32 *          and deferred FP contexts.
    2633 * 
    2734 *  COPYRIGHT (c) 1989-2000.
     
    4350* left it in...
    4451*/
    45 /* #define INSTRUMENT */
    46        
     52
     53#define INSTRUMENT_ISR_VECTORING
     54//#define INSTRUMENT_EXECUTING_THREAD
    4755
    4856
     
    166174 */
    167175
    168 #if ( CPU_HARDWARE_FP == TRUE )
     176#if ( CPU_HARDWARE_FP == FALSE )
    169177FRAME(_CPU_Context_save_fp,sp,0,ra)
    170178        .set noat
    171         ld   a1,(a0)
     179
     180#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
     181        /*
     182        ** Make sure the FPU is on before we save state.  This code is here
     183        ** because the FPU context switch might occur when an integer
     184        ** task is switching out w/ an FP task switching in, but the current
     185        ** FPU state was left by a sometime previously scheduled FP task.
     186        **
     187        ** In non-deferred FP context switch, if the exiting task is FP, then
     188        ** the FPU is already on so we don't need to do this.
     189        */
     190       
     191        MFC0    t0,C0_SR       
     192        li      k0,SR_CU1       
     193        or      t0,k0
     194        MTC0    t0,C0_SR       
     195#endif
     196               
     197        ld   a1,(a0)
    172198        NOP
    173199        swc1 $f0,FP0_OFFSET*F_SZ(a1)
     
    227253 */
    228254
    229 #if ( CPU_HARDWARE_FP == TRUE )
     255#if ( CPU_HARDWARE_FP == FALSE )
    230256FRAME(_CPU_Context_restore_fp,sp,0,ra)
    231257        .set noat
    232         ld a1,(a0)
     258       
     259        /*
     260        ** Make sure the FPU is on before we retrieve state.  This code
     261        ** is here because the FPU context switch might occur when an
     262        ** integer task is switching out with a FP task switching in.
     263        */
     264       
     265        MFC0    t0,C0_SR       
     266        li      k0,SR_CU1       
     267        or      t0,k0
     268        MTC0    t0,C0_SR       
     269
     270        ld a1,(a0)
    233271        NOP
    234272        lwc1 $f0,FP0_OFFSET*4(a1)
     
    285323        MFC0  t0,C0_SR
    286324        li    t1,~(SR_INTERRUPT_ENABLE_BITS)
    287         STREG t0,C0_SR_OFFSET*4(a0)     /* save status register */
     325        STREG t0,C0_SR_OFFSET*R_SZ(a0)
    288326        and   t0,t1
    289         MTC0  t0,C0_SR                  /* first disable ie bit (recommended) */
    290327#if __mips == 3
    291         ori   t0,SR_EXL|SR_IE           /* enable exception level to disable interrupts */
     328        ori   t0,(SR_EXL|SR_IE)         /* enable exception level to disable interrupts */
     329#endif
    292330        MTC0  t0,C0_SR
    293 #endif
    294331
    295332        STREG ra,RA_OFFSET*R_SZ(a0)     /* save current context */
     
    305342        STREG s7,S7_OFFSET*R_SZ(a0)
    306343
    307         /*
     344        /*  EPC is readonly...
    308345        MFC0  t0,C0_EPC
    309346        NOP
     
    324361        LDREG s7,S7_OFFSET*R_SZ(a1)
    325362
    326         /*
     363        /*  EPC is readonly...
    327364        LDREG t0,C0_EPC_OFFSET*R_SZ(a1)
    328365        NOP
    329366        MTC0  t0,C0_EPC
    330367        */
    331        
     368
    332369        LDREG t0, C0_SR_OFFSET*R_SZ(a1)
    333         NOP
    334        
     370       
     371//      NOP
     372//#if __mips == 3
     373//        andi  t0,SR_EXL
     374//        bnez  t0,_CPU_Context_1   /* set exception level from restore context */
     375//        li    t0,~SR_EXL
     376//        MFC0  t1,C0_SR
     377//        NOP
     378//        and   t1,t0
     379//        MTC0  t1,C0_SR
     380//
     381//#elif __mips == 1
     382//
     383//        andi  t0,(SR_INTERRUPT_ENABLE_BITS) /* we know 0 disabled */
     384//        beq   t0,$0,_CPU_Context_1          /* set level from restore context */
     385//        MFC0  t0,C0_SR
     386//        NOP
     387//        or    t0,(SR_INTERRUPT_ENABLE_BITS) /* new_sr = old sr with enabled  */
     388//        MTC0  t0,C0_SR                      /* set with enabled */
     389//        NOP
     390
     391       
     392/*
     393** Incorporate the new thread's FP coprocessor state and interrupt mask/enable
     394** into the status register.  We jump thru the requisite hoops to ensure we
     395** maintain all other SR bits as global values.
     396**
     397** Get the thread's FPU enable, int mask & int enable bits.  Although we keep the
     398** software int enables on a per-task basis, the rtems_task_create
     399** Interrupt Level & int level manipulation functions cannot enable/disable them,
     400** so they are automatically enabled for all tasks.  To turn them off, a thread 
     401** must itself manipulate the SR register.
     402*/
     403
    335404#if __mips == 3
    336         andi  t0,SR_EXL
    337         bnez  t0,_CPU_Context_1   /* set exception level from restore context */
    338         li    t0,~SR_EXL
    339         MFC0  t1,C0_SR
    340         NOP
    341         and   t1,t0
    342         MTC0  t1,C0_SR
    343 
     405        li      k0,(SR_CU1 | SR_IMASK | SR_EXL | SR_IE)
    344406#elif __mips == 1
    345         andi  t0,(SR_INTERRUPT_ENABLE_BITS) /* we know 0 disabled */
    346         beq   t0,$0,_CPU_Context_1          /* set level from restore context */
    347         MFC0  t0,C0_SR
    348         NOP
    349         or    t0,(SR_INTERRUPT_ENABLE_BITS) /* new_sr = old sr with enabled  */
    350         MTC0  t0,C0_SR                      /* set with enabled */
    351 #endif
    352 
    353        
    354 _CPU_Context_1:
     407        li      k0,(SR_CU1 | SR_IMASK | SR_IEC)
     408#endif
     409        and     t0,k0           
     410               
     411        MFC0    t1,C0_SR        /* grab the current SR */
     412        not     k0              /* invert k0 so we can clear out the SR bits */
     413        and     t1,k0
     414
     415        or      t0,t1           /* setup the new task's SR value */
     416
     417        MTC0    t0,C0_SR        /* and load the new SR */
     418        NOP
     419       
     420/* _CPU_Context_1: */
    355421        j ra
    356422        NOP
     
    451517        STREG    t0,R_SR*R_SZ(sp)
    452518        STREG    t1,R_EPC*R_SZ(sp)
    453 
    454 
    455 #ifdef INSTRUMENT
     519       
     520
     521#ifdef INSTRUMENT_EXECUTING_THREAD
    456522        lw t2, _Thread_Executing
    457523        nop
    458524        sw t2, 0x8001FFF0
    459 
    460         sw t0, 0x8001F050
    461         sw t1, 0x8001F054
    462 
    463         li t0, 0xdeadbeef
    464         li t1, 0xdeadbeef
    465         li t2, 0xdeadbeef
    466                        
    467         sw ra, 0x8001F000
    468         sw v0, 0x8001F004
    469         sw v1, 0x8001F008
    470         sw a0, 0x8001F00c
    471         sw a1, 0x8001F010
    472         sw a2, 0x8001F014
    473         sw a3, 0x8001F018
    474         sw t0, 0x8001F01c
    475         sw t1, 0x8001F020
    476         sw t2, 0x8001F024
    477         sw t3, 0x8001F028
    478         sw t4, 0x8001F02c
    479         sw t5, 0x8001F030
    480         sw t6, 0x8001F034
    481         sw t7, 0x8001F038
    482         sw t8, 0x8001F03c
    483         sw t9, 0x8001F040
    484         sw gp, 0x8001F044
    485         sw fp, 0x8001F048
    486 #endif
    487        
    488 /* determine if an interrupt generated this exception */
     525#endif
     526       
     527        /* determine if an interrupt generated this exception */
    489528
    490529        MFC0     k0,C0_CAUSE
     
    516555       
    517556        beq      k0,zero,_ISR_Handler_exit
    518        
    519         li       t2,1           /* set a flag so we process interrupts */
    520        
     557
     558       
     559       
     560       
     561               
    521562  /*
    522563   *  save some or all context on stack
     
    548589   *  interrupt source and actually vector to device ISR handlers.
    549590   */
     591       
     592#ifdef INSTRUMENT_ISR_VECTORING
     593        nop
     594        li      t1, 1
     595        sw      t1, 0x8001e000
     596#endif
     597
    550598        move     a0,sp
    551599        jal      mips_vector_isr_handlers
    552600        nop
    553 
     601       
     602#ifdef INSTRUMENT_ISR_VECTORING
     603        li      t1, 0
     604        sw      t1, 0x8001e000
     605        nop
     606#endif
     607               
    554608  /*
    555609   *  --_ISR_Nest_level;
     
    573627        bne t0,zero,_ISR_Handler_exit
    574628        nop
     629
     630
     631
     632
     633
     634       
     635
     636       
    575637  /*
    576638   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
     
    590652       
    591653       
    592 #ifdef INSTRUMENT
    593         li      t0,0x11111111
    594         sw      t0,0x8001F104
    595 #endif
    596        
    597   /* restore interrupt state from the saved status register,
    598    * if the isr vectoring didn't so we allow nested interrupts to
    599    * occur */
     654#ifdef INSTRUMENT_EXECUTING_THREAD
     655        lw      t0,_Thread_Executing
     656        nop
     657        sw      t0,0x8001FFF4
     658#endif
     659
     660/*
     661** Turn on interrupts before entering Thread_Dispatch which
     662** will run for a while, thus allowing new interrupts to
     663** be serviced.  Observe the Thread_Dispatch_disable_level interlock
     664** that prevents recursive entry into Thread_Dispatch.
     665*/
     666
     667        MFC0    t0, C0_SR
     668        NOP
     669        or      t0, SR_INTERRUPT_ENABLE_BITS
     670        MTC0    t0, C0_SR
     671        NOP
    600672               
    601         LDREG    t0,R_SR*R_SZ(sp)
    602         NOP
    603         MTC0     t0,C0_SR
    604         rfe
    605        
    606 
    607         jal _Thread_Dispatch
    608         nop
    609 
    610 #ifdef INSTRUMENT
    611         li      t0,0x22222222
    612         sw      t0,0x8001F100
    613 #endif
    614 
    615        
    616                        
    617 
     673        jal     _Thread_Dispatch
     674        NOP
     675
     676#ifdef INSTRUMENT_EXECUTING_THREAD
     677        lw      t0,_Thread_Executing
     678        nop
     679        sw      t0,0x8001FFF8
     680#endif
     681
     682       
    618683  /*
    619684   *  prepare to get out of interrupt
     
    626691
    627692_ISR_Handler_exit:
    628         LDREG    t0, R_SR*R_SZ(sp)
    629         NOP
    630         MTC0     t0, C0_SR
    631 
    632 /* restore context from stack */
    633 
    634 #ifdef INSTRUMENT
     693/*
     694** Skip the SR restore because its a global register. _CPU_Context_switch_restore
     695** adjusts it according to each task's configuration.  If we didn't dispatch, the
     696** SR value isn't changing, so all we need to do is return.
     697**
     698*/
     699
     700        /* restore context from stack */
     701       
     702#ifdef INSTRUMENT_EXECUTING_THREAD
    635703        lw      t0,_Thread_Executing
    636704        nop
    637         sw      t0, 0x8001FFF4
     705        sw      t0, 0x8001FFFC
    638706#endif
    639707
     
    661729        LDREG v1, R_V1*R_SZ(sp)
    662730        LDREG v0, R_V0*R_SZ(sp)
    663 
    664 #ifdef INSTRUMENT
    665         sw ra, 0x8001F000
    666         sw v0, 0x8001F004
    667         sw v1, 0x8001F008
    668         sw a0, 0x8001F00c
    669         sw a1, 0x8001F010
    670         sw a2, 0x8001F014
    671         sw a3, 0x8001F018
    672         sw t0, 0x8001F01c
    673         sw t1, 0x8001F020
    674         sw t2, 0x8001F024
    675         sw t3, 0x8001F028
    676         sw t4, 0x8001F02c
    677         sw t5, 0x8001F030
    678         sw t6, 0x8001F034
    679         sw t7, 0x8001F038
    680         sw t8, 0x8001F03c
    681         sw t9, 0x8001F040
    682         sw gp, 0x8001F044
    683         sw fp, 0x8001F048
    684 #endif
    685731       
    686732        LDREG     k0, R_EPC*R_SZ(sp)
    687733       
    688734        .set noat
    689         LDREG AT, R_AT*R_SZ(sp)
     735        LDREG     AT, R_AT*R_SZ(sp)
    690736        .set at
    691737
     
    698744ENDFRAME(_ISR_Handler)
    699745
     746       
    700747FRAME(mips_break,sp,0,ra)
    701748#if 1
  • cpukit/score/cpu/mips/rtems/score/cpu.h

    r6d41a874 re6dec71c  
    6868 */
    6969
    70 #define CPU_INLINE_ENABLE_DISPATCH       TRUE
     70#define CPU_INLINE_ENABLE_DISPATCH       FALSE
    7171
    7272/*
     
    208208 */
    209209
    210 #define CPU_IDLE_TASK_IS_FP      FALSE
     210#define CPU_IDLE_TASK_IS_FP      TRUE
    211211
    212212/*
     
    556556
    557557/*
    558  *  This is defined if the port has a special way to report the ISR nesting
    559  *  level.  Most ports maintain the variable _ISR_Nest_level.
    560  */
    561 
    562 #define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
    563 
    564 /*
    565558 *  Should be large enough to run all RTEMS tests.  This insures
    566559 *  that a "reasonable" small application should not have any problems.
     
    633626  do { \
    634627    mips_get_sr( _level ); \
    635     mips_set_sr( (_level) & ~SR_INTERRUPT_ENABLE_BITS ); \
     628    mips_set_sr( _level & ~SR_INTERRUPT_ENABLE_BITS ); \
     629    _level &= SR_INTERRUPT_ENABLE_BITS; \
    636630  } while(0)
    637631
     
    644638#define _CPU_ISR_Enable( _level )  \
    645639  do { \
    646     mips_set_sr(_level); \
     640    unsigned int _scratch; \
     641    mips_get_sr( _scratch ); \
     642    mips_set_sr( (_scratch & ~SR_INTERRUPT_ENABLE_BITS) | (_level & SR_INTERRUPT_ENABLE_BITS) ); \
    647643  } while(0)
    648644
     
    651647 *  disabling them again.  This is used to divide long RTEMS critical
    652648 *  sections into two or more parts.  The parameter _level is not
    653  * modified.
     649 *  modified.
    654650 */
    655651
    656652#define _CPU_ISR_Flash( _xlevel ) \
    657653  do { \
    658     unsigned int _scratch; \
    659654    _CPU_ISR_Enable( _xlevel ); \
    660     _CPU_ISR_Disable( _scratch ); \
     655    _CPU_ISR_Disable( _xlevel ); \
    661656  } while(0)
    662657
     
    702697 *        FPU may be easily disabled by software such as on the SPARC
    703698 *        where the PSR contains an enable FPU bit.
    704  */
    705 
    706 #define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
    707                                  _isr, _entry_point, _is_fp ) \
     699 *
     700 *  The per-thread status register holds the interrupt enable, FP enable
     701 *  and global interrupt enable for that thread.  It means each thread can
     702 *  enable its own set of interrupts.  If interrupts are disabled, RTEMS
     703 *  can still dispatch via blocking calls.  This is the function of the
     704 *  "Interrupt Level", and on the MIPS, it controls the IEC bit and all
     705 *  the hardware interrupts as defined in the SR.  Software ints
     706 *  are automatically enabled for all threads, as they will only occur under
     707 *  program control anyhow.  Besides, the interrupt level parm is only 8 bits,
     708 *  and controlling the software ints plus the others would require 9.
     709 *
     710 *  If the Interrupt Level is 0, all ints are on.  Otherwise, the
     711 *  Interrupt Level should supply a bit pattern to impose on the SR
     712 *  interrupt bits; bit 0 applies to the mips1 IEC bit/mips3 EXL&IE, bits 1 thru 6
     713 *  apply to the SR register Intr bits from bit 10 thru bit 15.  Bit 7 of
     714 *  the Interrupt Level parameter is unused at this time.
     715 *
     716 *  These are the only per-thread SR bits, the others are maintained
     717 *  globally & explicitly preserved by the Context Switch code in cpu_asm.s
     718 */
     719
     720
     721#if __mips == 3
     722#define _INTON  (SR_EXL | SR_IE)
     723#endif
     724#if __mips == 1
     725#define _INTON  SR_IEC
     726#endif
     727
     728#define _CPU_Context_Initialize( _the_context, _stack_base, _size, _isr, _entry_point, _is_fp ) \
    708729  { \
    709730        unsigned32 _stack_tmp = \
    710731           (unsigned32)(_stack_base) + (_size) - CPU_STACK_ALIGNMENT; \
     732        unsigned32 _intlvl = _isr & 0xff; \
    711733        _stack_tmp &= ~(CPU_STACK_ALIGNMENT - 1); \
    712734        (_the_context)->sp = _stack_tmp; \
    713735        (_the_context)->fp = _stack_tmp; \
    714736        (_the_context)->ra = (unsigned64)_entry_point; \
    715         (_the_context)->c0_sr = ((_the_context)->c0_sr & 0x0fff0000) | \
    716                                 ((_isr)?0xff00:0xff01) | \
    717                                 ((_is_fp)?0x20000000:0x10000000); \
     737        (_the_context)->c0_sr = ((_intlvl==0)?(0xFF00 | _INTON):( ((_intlvl<<9) & 0xfc00) | \
     738                                                       0x300 | \
     739                                                       ((_intlvl & 1)?_INTON:0)) ) | \
     740                                SR_CU0 | ((_is_fp)?SR_CU1:0); \
    718741  }
     742
     743
    719744
    720745/*
Note: See TracChangeset for help on using the changeset viewer.