Changeset 9700578 in rtems for c/src/exec/score/cpu/sparc


Timestamp:
10/30/95 21:54:45
Author:
Joel Sherrill <joel.sherrill@…>
Branches:
4.10, 4.11, 4.8, 4.9, 5, master
Children:
c4808ca
Parents:
ea74482
Message:

SPARC port passes all tests

Location:
c/src/exec/score/cpu/sparc
Files:
2 added
7 edited

Legend:

  + added line
  - removed line
    unchanged (context) line
  … omitted unchanged lines
  • c/src/exec/score/cpu/sparc/asm.h (rea74482 → r9700578)


 #define ASM
+
 #include <rtems/score/sparc.h>
+#include <rtems/score/cpu.h>

 /*
…
  */

-/* XXX This does not appear to work on gcc 2.7.0 on the sparc */
+/* XXX __USER_LABEL_PREFIX__ and __REGISTER_PREFIX__ do not work on gcc 2.7.0 */
+/* XXX The following ifdef magic fixes the problem but results in a warning   */
+/* XXX when compiling assembly code.                                          */
 #undef  __USER_LABEL_PREFIX__
 #ifndef __USER_LABEL_PREFIX__
…
 #define EXTERN(sym) .globl SYM (sym)

+/*
+ *  Entry for traps which jump to a programmer-specified trap handler.
+ */
+
+#define TRAP(_vector, _handler)  \
+  mov   %psr, %l0 ; \
+  sethi %hi(_handler), %l4 ; \
+  jmp   %l4+%lo(_handler); \
+  mov   _vector, %l3
+
 #endif
 /* end of include file */
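
    As a usage sketch, a BSP trap table entry built with the new TRAP()
    macro might look like the following; the trap type 0x11 and the handler
    name my_handler are hypothetical:

        /* one four-instruction trap table entry for trap type 0x11 */
        TRAP( 0x11, SYM(my_handler) )

    These are the same four instructions that the _CPU_Trap_slot_template
    opcode table added to cpu.c below encodes numerically.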
  • c/src/exec/score/cpu/sparc/cpu.c (rea74482 → r9700578)

 #include <rtems/score/isr.h>

-/*  _CPU_Initialize
+#if defined(erc32)
+#include <erc32.h>
+#endif
+
+/*
+ *  This initializes the set of opcodes placed in each trap
+ *  table entry.  The routine which installs a handler is responsible
+ *  for filling in the fields for the _handler address and the _vector
+ *  trap type.
+ *
+ *  The constants following this structure are masks for the fields which
+ *  must be filled in when the handler is installed.
+ */
+
+const CPU_Trap_table_entry _CPU_Trap_slot_template = {
+  0xa1480000,      /* mov   %psr, %l0           */
+  0x29000000,      /* sethi %hi(_handler), %l4  */
+  0x81c52000,      /* jmp   %l4 + %lo(_handler) */
+  0xa6102000       /* mov   _vector, %l3        */
+};
+
+/*PAGE
+ *
+ *  _CPU_Initialize
  *
  *  This routine performs processor dependent initialization.
  *
- *  INPUT PARAMETERS:
+ *  Input Parameters:
  *    cpu_table       - CPU table to initialize
  *    thread_dispatch - address of dispatching routine
- */
-
+ *
+ *  Output Parameters: NONE
+ *
+ *  NOTE: There is no need to save the pointer to the thread dispatch routine.
+ *        The SPARC's assembly code can reference it directly with no problems.
+ */

 void _CPU_Initialize(
   rtems_cpu_table  *cpu_table,
-  void      (*thread_dispatch)      /* ignored on this CPU */
+  void            (*thread_dispatch)      /* ignored on this CPU */
 )
 {
-  void *pointer;
-
-  /*
-   *  The thread_dispatch argument is the address of the entry point
-   *  for the routine called at the end of an ISR once it has been
-   *  decided a context switch is necessary.  On some compilation
-   *  systems it is difficult to call a high-level language routine
-   *  from assembly.  This allows us to trick these systems.
-   *
-   *  If you encounter this problem save the entry point in a CPU
-   *  dependent variable.
-   */
-
-  _CPU_Thread_dispatch_pointer = thread_dispatch;
-
-  /*
-   *  If there is not an easy way to initialize the FP context
-   *  during Context_Initialize, then it is usually easier to
-   *  save an "uninitialized" FP context here and copy it to
-   *  the task's during Context_Initialize.
+  void                  *pointer;
+  unsigned32             trap_table_start;
+  unsigned32             tbr_value;
+  CPU_Trap_table_entry  *old_tbr;
+  CPU_Trap_table_entry  *trap_table;
+
+  /*
+   *  Install the executive's trap table.  All entries from the original
+   *  trap table are copied into the executive's trap table.  This is essential
+   *  since this preserves critical trap handlers such as the window underflow
+   *  and overflow handlers.  It is the responsibility of the BSP to
+   *  install these in the initial trap table.
+   */
+
+  trap_table_start = (unsigned32) &_CPU_Trap_Table_area;
+  if (trap_table_start & (SPARC_TRAP_TABLE_ALIGNMENT-1))
+    trap_table_start = (trap_table_start + SPARC_TRAP_TABLE_ALIGNMENT) &
+                       ~(SPARC_TRAP_TABLE_ALIGNMENT-1);
+
+  trap_table = (CPU_Trap_table_entry *) trap_table_start;
+
+  sparc_get_tbr( tbr_value );
+
+  old_tbr = (CPU_Trap_table_entry *) (tbr_value & 0xfffff000);
+
+  memcpy( trap_table, (void *) old_tbr, 256 * sizeof( CPU_Trap_table_entry ) );
+
+  sparc_set_tbr( trap_table_start );
+
+  /*
+   *  This seems to be the most appropriate way to obtain an initial
+   *  FP context on the SPARC.  The NULL fp context is copied to
+   *  the task's FP context during Context_Initialize.
    */

…
   _CPU_Context_save_fp( &pointer );

+  /*
+   *  Grab our own copy of the user's CPU table.
+   */
+
   _CPU_Table = *cpu_table;
+
+#if defined(erc32)
+
+  /*
+   *  ERC32 specific initialization
+   */
+
+  _ERC32_MEC_Timer_Control_Mirror = 0;
+  ERC32_MEC.Timer_Control = 0;
+
+  ERC32_MEC.Control |= ERC32_CONFIGURATION_POWER_DOWN_ALLOWED;
+
+#endif
+
 }
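
    A worked sketch of the alignment fix-up above, using a hypothetical
    start address and SPARC_TRAP_TABLE_ALIGNMENT == 4096:

        /* trap_table_start = 0x40002468;  low bits 0x468 -> misaligned */
        /* (0x40002468 + 4096) & ~4095  ==  0x40003000                  */

    Because _CPU_Trap_Table_area is 8192 bytes long, a full 4096-byte
    table still fits after rounding up to the next boundary.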

…
  *
  *  _CPU_ISR_Get_level
+ *
+ *  Input Parameters: NONE
+ *
+ *  Output Parameters:
+ *    returns the current interrupt level (PIL field of the PSR)
  */

…
 }

-/*  _CPU_ISR_install_vector
+/*PAGE
+ *
+ *  _CPU_ISR_install_raw_handler
+ *
+ *  This routine installs the specified handler as a "raw" non-executive
+ *  supported trap handler (a.k.a. interrupt service routine).
+ *
+ *  Input Parameters:
+ *    vector      - trap table entry number plus synchronous
+ *                    vs. asynchronous information
+ *    new_handler - address of the handler to be installed
+ *    old_handler - pointer to an address of the handler previously installed
+ *
+ *  Output Parameters:
+ *    *old_handler - address of the handler previously installed
+ *
+ *  NOTE:
+ *
+ *  On the SPARC, there are really only 256 vectors.  However, the executive
+ *  has no easy, fast, reliable way to determine which traps are synchronous
+ *  and which are asynchronous.  By default, synchronous traps return to the
+ *  instruction which caused the interrupt.  So if you install a software
+ *  trap handler as an executive interrupt handler (which is desirable since
+ *  RTEMS takes care of window and register issues), then the executive needs
+ *  to know that the return address is to the trap rather than the instruction
+ *  following the trap.
+ *
+ *  So vectors 0 through 255 are treated as regular asynchronous traps which
+ *  provide the "correct" return address.  Vectors 256 through 511 are assumed
+ *  by the executive to be synchronous and to require that the return address
+ *  be fudged.
+ *
+ *  If you use this mechanism to install a trap handler which must reexecute
+ *  the instruction which caused the trap, then it should be installed as
+ *  an asynchronous trap.  This will avoid the executive changing the return
+ *  address.
+ */
+
+void _CPU_ISR_install_raw_handler(
+  unsigned32  vector,
+  proc_ptr    new_handler,
+  proc_ptr   *old_handler
+)
+{
+  unsigned32             real_vector;
+  CPU_Trap_table_entry  *tbr;
+  CPU_Trap_table_entry  *slot;
+  unsigned32             u32_tbr;
+  unsigned32             u32_handler;
+
+  /*
+   *  Get the "real" trap number for this vector ignoring the synchronous
+   *  versus asynchronous indicator included with our vector numbers.
+   */
+
+  real_vector = SPARC_REAL_TRAP_NUMBER( vector );
+
+  /*
+   *  Get the current base address of the trap table and calculate a pointer
+   *  to the slot we are interested in.
+   */
+
+  sparc_get_tbr( u32_tbr );
+
+  u32_tbr &= 0xfffff000;
+
+  tbr = (CPU_Trap_table_entry *) u32_tbr;
+
+  slot = &tbr[ real_vector ];
+
+  /*
+   *  Get the address of the old_handler from the trap table.
+   *
+   *  NOTE: The old_handler returned will be bogus if it does not follow
+   *        the RTEMS model.
+   */
+
+#define HIGH_BITS_MASK   0xFFFFFC00
+#define HIGH_BITS_SHIFT  10
+#define LOW_BITS_MASK    0x000003FF
+
+  if ( slot->mov_psr_l0 == _CPU_Trap_slot_template.mov_psr_l0 ) {
+    u32_handler =
+      (slot->sethi_of_handler_to_l4 << HIGH_BITS_SHIFT) |
+      (slot->jmp_to_low_of_handler_plus_l4 & LOW_BITS_MASK);
+    *old_handler = (proc_ptr) u32_handler;
+  } else
+    *old_handler = 0;
+
+  /*
+   *  Copy the template to the slot and then fix it.
+   */
+
+  *slot = _CPU_Trap_slot_template;
+
+  u32_handler = (unsigned32) new_handler;
+
+  slot->mov_vector_l3 |= vector;
+  slot->sethi_of_handler_to_l4 |=
+    (u32_handler & HIGH_BITS_MASK) >> HIGH_BITS_SHIFT;
+  slot->jmp_to_low_of_handler_plus_l4 |= (u32_handler & LOW_BITS_MASK);
+}
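
    In the recovery path the left shift by HIGH_BITS_SHIFT is what discards
    the sethi opcode bits, so the sethi word must not be pre-masked with
    HIGH_BITS_MASK (that would drop bits 19:10 of the recovered address).
    A worked round trip for a hypothetical handler at 0x40001000 installed
    on vector 0x11:

        /* install:                                                      */
        /*   sethi word = 0x29000000 | ((0x40001000 & 0xfffffc00) >> 10) */
        /*              = 0x29100004                                     */
        /*   jmp   word = 0x81c52000 | (0x40001000 & 0x3ff) = 0x81c52000 */
        /*   mov   word = 0xa6102000 | 0x11                 = 0xa6102011 */
        /* recover:                                                      */
        /*   (0x29100004 << 10) | (0x81c52000 & 0x3ff)      = 0x40001000 */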
+
+/*PAGE
+ *
+ *  _CPU_ISR_install_vector
  *
  *  This kernel routine installs the RTEMS handler for the
…
  *
  *  Input parameters:
- *    vector      - interrupt vector number
- *    old_handler - former ISR for this vector number
- *    new_handler - replacement ISR for this vector number
- *
- *  Output parameters:  NONE
- *
- */
-
+ *    vector       - interrupt vector number
+ *    new_handler  - replacement ISR for this vector number
+ *    old_handler  - pointer to former ISR for this vector number
+ *
+ *  Output parameters:
+ *    *old_handler - former ISR for this vector number
+ *
+ */

 void _CPU_ISR_install_vector(
…
 )
 {
-   *old_handler = _ISR_Vector_table[ vector ];
+   unsigned32 real_vector;
+   proc_ptr   ignored;
+
+  /*
+   *  Get the "real" trap number for this vector ignoring the synchronous
+   *  versus asynchronous indicator included with our vector numbers.
+   */
+
+   real_vector = SPARC_REAL_TRAP_NUMBER( vector );

    /*
-    *  If the interrupt vector table is a table of pointer to isr entry
-    *  points, then we need to install the appropriate RTEMS interrupt
-    *  handler for this vector number.
+    *  Return the previous ISR handler.
     */
+
+   *old_handler = _ISR_Vector_table[ real_vector ];
+
+   /*
+    *  Install the wrapper so this ISR can be invoked properly.
+    */
+
+   _CPU_ISR_install_raw_handler( vector, _ISR_Handler, &ignored );

    /*
…
     */

-    _ISR_Vector_table[ vector ] = new_handler;
-}
-
-/*PAGE
- *
- *  _CPU_Install_interrupt_stack
- */
-
-void _CPU_Install_interrupt_stack( void )
-{
+    _ISR_Vector_table[ real_vector ] = new_handler;
 }

…
  *
  *  _CPU_Context_Initialize
- */
-
-/*
- *  The following constants assist in building a thread's initial context.
- */
-
-#define CPU_FRAME_SIZE  (112)   /* based on disassembled test code */
-#define ADDR_ADJ_OFFSET  -8
+ *
+ *  This kernel routine initializes the basic non-FP context area associated
+ *  with each thread.
+ *
+ *  Input parameters:
+ *    the_context  - pointer to the context area
+ *    stack_base   - address of memory for the stack
+ *    size         - size in bytes of the stack area
+ *    new_level    - interrupt level for this context area
+ *    entry_point  - the starting execution point for this context
+ *    is_fp        - TRUE if this context is associated with an FP thread
+ *
+ *  Output parameters: NONE
+ */

 void _CPU_Context_Initialize(
-  Context_Control  *_the_context,
-  unsigned32       *_stack_base,
-  unsigned32        _size,
-  unsigned32        _new_level,
-  void             *_entry_point
+  Context_Control  *the_context,
+  unsigned32       *stack_base,
+  unsigned32        size,
+  unsigned32        new_level,
+  void             *entry_point,
+  boolean           is_fp
 )
 {
-    unsigned32   jmp_addr;
-    unsigned32   _stack_high;  /* highest "stack aligned" address */
-    unsigned32   _the_size;
+    unsigned32   stack_high;  /* highest "stack aligned" address */
+    unsigned32   the_size;
     unsigned32   tmp_psr;
-
-    jmp_addr = (unsigned32) _entry_point;

     /*
      *  On CPUs with stacks which grow down (i.e. SPARC), we build the stack
-     *  based on the _stack_high address.
+     *  based on the stack_high address.
      */

-    _stack_high = ((unsigned32)(_stack_base) + _size);
-    _stack_high &= ~(CPU_STACK_ALIGNMENT - 1);
-
-    _the_size = _size & ~(CPU_STACK_ALIGNMENT - 1);
-
-/* XXX following code is based on unix port */
+    stack_high = ((unsigned32)(stack_base) + size);
+    stack_high &= ~(CPU_STACK_ALIGNMENT - 1);
+
+    the_size = size & ~(CPU_STACK_ALIGNMENT - 1);
+
     /*
-     *  XXX SPARC port needs a diagram like this one...
-     *  See /usr/include/sys/stack.h in Solaris 2.3 for a nice
-     *  diagram of the stack.
+     *  See the README in this directory for a diagram of the stack.
      */

-    _the_context->o7 = jmp_addr + ADDR_ADJ_OFFSET;
-    _the_context->o6 = (unsigned32)(_stack_high - CPU_FRAME_SIZE);
-    _the_context->i6 = (unsigned32)(_stack_high);
-#if 0
-    _the_context->rp = jmp_addr + ADDR_ADJ_OFFSET;
-    _the_context->sp = (unsigned32)(_stack_high - CPU_FRAME_SIZE);
-    _the_context->fp = (unsigned32)(_stack_high);
+    the_context->o7    = ((unsigned32) entry_point) - 8;
+    the_context->o6_sp = stack_high - CPU_MINIMUM_STACK_FRAME_SIZE;
+    the_context->i6_fp = stack_high;
+
+    /*
+     *  Build the PSR for the task.  Most everything can be 0 and the
+     *  CWP is corrected during the context switch.
+     *
+     *  The EF bit determines if the floating point unit is available.
+     *  The FPU is ONLY enabled if the context is associated with an FP task
+     *  and this SPARC model has an FPU.
+     */
+
+    sparc_get_psr( tmp_psr );
+    tmp_psr &= ~SPARC_PSR_PIL_MASK;
+    tmp_psr |= (new_level << 8) & SPARC_PSR_PIL_MASK;
+    tmp_psr &= ~SPARC_PSR_EF_MASK;      /* disabled by default */
+
+#if (SPARC_HAS_FPU == 1)
+    /*
+     *  If this bit is not set, then a task gets a fault when it accesses
+     *  a floating point register.  This is a nice way to detect floating
+     *  point tasks which are not currently declared as such.
+     */
+
+    if ( is_fp )
+      tmp_psr |= SPARC_PSR_EF_MASK;
 #endif
-
-    _the_context->wim = 0x01;
-
-    sparc_get_psr( tmp_psr );
-    tmp_psr &= ~SPARC_PIL_MASK;
-    tmp_psr |= (((_new_level) << 8) & SPARC_PIL_MASK);
-    tmp_psr  = (tmp_psr & ~0x07) | 0x07;  /* XXX should use num windows */
-    _the_context->psr = tmp_psr;
+    the_context->psr = tmp_psr;
 }
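
    A worked sketch of the PSR assembly above for a hypothetical FP task at
    interrupt level 5 (on the SPARC V7 PSR, PIL occupies bits 11:8 and EF
    is bit 12):

        /* tmp_psr &= ~SPARC_PSR_PIL_MASK;            clear old level    */
        /* tmp_psr |= (5 << 8) & SPARC_PSR_PIL_MASK;  PIL = 5 (0x0500)   */
        /* tmp_psr &= ~SPARC_PSR_EF_MASK;             FPU off by default */
        /* tmp_psr |= SPARC_PSR_EF_MASK;              is_fp, so FPU on   */

    A task created without the FP attribute that later touches a floating
    point register traps on its first access, which is how undeclared
    floating point tasks are detected.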

…
  *  _CPU_Internal_threads_Idle_thread_body
  *
- *  NOTES:
- *
- *  1. This is the same as the regular CPU independent algorithm.
- *
- *  2. If you implement this using a "halt", "idle", or "shutdown"
- *     instruction, then don't forget to put it in an infinite loop.
- *
- *  3. Be warned. Some processors with onboard DMA have been known
- *     to stop the DMA if the CPU were put in IDLE mode.  This might
- *     also be a problem with other on-chip peripherals.  So use this
- *     hook with caution.
- */
+ *  Some SPARC implementations have low power, sleep, or idle modes.  This
+ *  tries to take advantage of those modes.
+ */
+
+#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)
+
+/*
+ *  This is the implementation for the erc32.
+ *
+ *  NOTE: Low power mode was enabled at initialization time.
+ */
+
+#if defined(erc32)

 void _CPU_Internal_threads_Idle_thread_body( void )
 {
-
-  for( ; ; )
-    /* insert your "halt" instruction here */ ;
-}
+  while (1) {
+    ERC32_MEC.Power_Down = 0;   /* value is irrelevant */
+  }
+}
+
+#endif
+
+#endif /* CPU_PROVIDES_IDLE_THREAD_BODY */
  • c/src/exec/score/cpu/sparc/cpu.h (rea74482 → r9700578)

 /*  cpu.h
  *
- *  This include file contains information pertaining to the XXX
- *  processor.
+ *  This include file contains information pertaining to the port of
+ *  the executive to the SPARC processor.
  *
  *  $Id$
…
  *  If TRUE, then they are inlined.
  *  If FALSE, then a subroutine call is made.
- *
- *  Basically this is an example of the classic trade-off of size
- *  versus speed.  Inlining the call (TRUE) typically increases the
- *  size of the executive while speeding up the enabling of dispatching.
- *  [NOTE: In general, the _Thread_Dispatch_disable_level will
- *  only be 0 or 1 unless you are in an interrupt handler and that
- *  interrupt handler invokes the executive.]  When not inlined
- *  something calls _Thread_Enable_dispatch which in turns calls
- *  _Thread_Dispatch.  If the enable dispatch is inlined, then
- *  one subroutine call is avoided entirely.]
  */

…
  *  If FALSE, then the loops are not unrolled.
  *
- *  The primary factor in making this decision is the cost of disabling
- *  and enabling interrupts (_ISR_Flash) versus the cost of rest of the
- *  body of the loop.  On some CPUs, the flash is more expensive than
- *  one iteration of the loop body.  In this case, it might be desirable
- *  to unroll the loop.  It is important to note that on some CPUs, this
- *  code is the longest interrupt disable period in the executive.  So it is
- *  necessary to strike a balance when setting this parameter.
+ *  This parameter could go either way on the SPARC.  The interrupt flash
+ *  code is relatively lengthy given the requirements for nops following
+ *  writes to the psr.  But if the clock speed were high enough, this would
+ *  not represent a great deal of time.
  */

…
  *  If FALSE, nothing is done.
  *
- *  If the CPU supports a dedicated interrupt stack in hardware,
- *  then it is generally the responsibility of the BSP to allocate it
- *  and set it up.
- *
- *  If the CPU does not support a dedicated interrupt stack, then
- *  the porter has two options: (1) execute interrupts on the stack of
- *  the interrupted task, and (2) have the executive manage a dedicated
- *  interrupt stack.
- *
- *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
- *
- *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
- *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
- *  possible that both are FALSE for a particular CPU.  Although it
- *  is unclear what that would imply about the interrupt processing
- *  procedure on that CPU.
- */
-
-#define CPU_HAS_SOFTWARE_INTERRUPT_STACK   FALSE /* XXX */
+ *  The SPARC does not have a dedicated HW interrupt stack and one has
+ *  been implemented in SW.
+ */
+
+#define CPU_HAS_SOFTWARE_INTERRUPT_STACK   TRUE

 /*
…
  *  If FALSE, then no installation is performed.

- *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
- *
- *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
- *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
- *  possible that both are FALSE for a particular CPU.  Although it
- *  is unclear what that would imply about the interrupt processing
- *  procedure on that CPU.
- */
-
-#define CPU_HAS_HARDWARE_INTERRUPT_STACK TRUE /* XXX */
+ *  The SPARC does not have a dedicated HW interrupt stack.
+ */
+
+#define CPU_HAS_HARDWARE_INTERRUPT_STACK  FALSE

 /*
…
  *  If TRUE, then the memory is allocated during initialization.
  *  If FALSE, then the memory must be allocated by the BSP or the application.
- *
- *  This should be TRUE is CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE
- *  or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE.
  */

…
  *  If TRUE, then the FLOATING_POINT task attribute is supported.
  *  If FALSE, then the FLOATING_POINT task attribute is ignored.
- *
- *  If there is a FP coprocessor such as the i387 or mc68881, then
- *  the answer is TRUE.
- *
- *  The macro name "SPARC_HAS_FPU" should be made CPU specific.
- *  It indicates whether or not this CPU model has FP support.  For
- *  example, it would be possible to have an i386_nofp CPU model
- *  which set this to false to indicate that you have an i386 without
- *  an i387 and wish to leave floating point support out.
  */

…
  *  If TRUE, then the FLOATING_POINT task attribute is assumed.
  *  If FALSE, then the FLOATING_POINT task attribute is followed.
- *
- *  So far, the only CPU in which this option has been used is the
- *  HP PA-RISC.  The HP C compiler and gcc both implicitly use the
- *  floating point registers to perform integer multiplies.  If
- *  a function which you would not think utilize the FP unit DOES,
- *  then one can not easily predict which tasks will use the FP hardware.
- *  In this case, this option should be TRUE.
- *
- *  If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
  */

…
  *  and it has a floating point context which is switched in and out.
  *  If FALSE, then the IDLE task does not have a floating point context.
- *
- *  Setting this to TRUE negatively impacts the time required to preempt
- *  the IDLE task from an interrupt because the floating point context
- *  must be saved as part of the preemption.
  */

…
  *  task is restored.  The state of the floating point registers between
  *  those two operations is not specified.
- *
- *  If the floating point context does NOT have to be saved as part of
- *  interrupt dispatching, then it should be safe to set this to TRUE.
- *
- *  Setting this flag to TRUE results in using a different algorithm
- *  for deciding when to save and restore the floating point context.
- *  The deferred FP switch algorithm minimizes the number of times
- *  the FP context is saved and restored.  The FP context is not saved
- *  until a context switch is made to another, different FP task.
- *  Thus in a system with only one FP task, the FP context will never
- *  be saved or restored.
  */

…
  *  If FALSE, then use the generic IDLE thread body if the BSP does
  *  not provide one.
- *
- *  This is intended to allow for supporting processors which have
- *  a low power or idle mode.  When the IDLE thread is executed, then
- *  the CPU can be powered down.
- *
- *  The order of precedence for selecting the IDLE thread body is:
- *
- *    1.  BSP provided
- *    2.  CPU dependent (if provided)
- *    3.  generic (if no BSP and no CPU dependent)
- */
-
+ */
+
+#if (SPARC_HAS_LOW_POWER_MODE == 1)
+#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE
+#else
 #define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE
+#endif

 /*
…
  *  If TRUE, then the stack grows upward.
  *  If FALSE, then the stack grows toward smaller addresses.
+ *
+ *  The stack grows to lower addresses on the SPARC.
  */

…
  *  much of the critical data area as possible in a cache line.
  *
- *  The placement of this macro in the declaration of the variables
- *  is based on the syntactically requirements of the GNU C
- *  "__attribute__" extension.  For example with GNU C, use
- *  the following to force a structures to a 32 byte boundary.
- *
- *      __attribute__ ((aligned (32)))
- *
- *  NOTE:  Currently only the Priority Bit Map table uses this feature.
- *         To benefit from using this, the data must be heavily
- *         used so it will stay in the cache and used frequently enough
- *         in the executive to justify turning this on.
+ *  The SPARC does not appear to have particularly strict alignment
+ *  requirements.  This value was chosen to take advantage of caches.
  */

…
  *  interrupt field of the task mode.  How those bits map to the
  *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
+ *
+ *  The SPARC has 16 interrupt levels in the PIL field of the PSR.
  */

…

 /*
- *  Processor defined structures
- *
- *  Examples structures include the descriptor tables from the i386
- *  and the processor control structure on the i960ca.
- */
-
-/* XXX may need to put some structures here.  */
+ *  This structure represents the organization of the minimum stack frame
+ *  for the SPARC.  More framing information is required in certain situations
+ *  such as when there are a large number of out parameters or when the callee
+ *  must save floating point registers.
+ */
+
+#ifndef ASM
+
+typedef struct {
+  unsigned32  l0;
+  unsigned32  l1;
+  unsigned32  l2;
+  unsigned32  l3;
+  unsigned32  l4;
+  unsigned32  l5;
+  unsigned32  l6;
+  unsigned32  l7;
+  unsigned32  i0;
+  unsigned32  i1;
+  unsigned32  i2;
+  unsigned32  i3;
+  unsigned32  i4;
+  unsigned32  i5;
+  unsigned32  i6_fp;
+  unsigned32  i7;
+  void       *structure_return_address;
+  /*
+   *  The following are for the callee to save the register arguments in
+   *  should this be necessary.
+   */
+  unsigned32  saved_arg0;
+  unsigned32  saved_arg1;
+  unsigned32  saved_arg2;
+  unsigned32  saved_arg3;
+  unsigned32  saved_arg4;
+  unsigned32  saved_arg5;
+  unsigned32  pad0;
+}  CPU_Minimum_stack_frame;
+
+#endif /* ASM */
+
+#define CPU_STACK_FRAME_L0_OFFSET             0x00
+#define CPU_STACK_FRAME_L1_OFFSET             0x04
+#define CPU_STACK_FRAME_L2_OFFSET             0x08
+#define CPU_STACK_FRAME_L3_OFFSET             0x0c
+#define CPU_STACK_FRAME_L4_OFFSET             0x10
+#define CPU_STACK_FRAME_L5_OFFSET             0x14
+#define CPU_STACK_FRAME_L6_OFFSET             0x18
+#define CPU_STACK_FRAME_L7_OFFSET             0x1c
+#define CPU_STACK_FRAME_I0_OFFSET             0x20
+#define CPU_STACK_FRAME_I1_OFFSET             0x24
+#define CPU_STACK_FRAME_I2_OFFSET             0x28
+#define CPU_STACK_FRAME_I3_OFFSET             0x2c
+#define CPU_STACK_FRAME_I4_OFFSET             0x30
+#define CPU_STACK_FRAME_I5_OFFSET             0x34
+#define CPU_STACK_FRAME_I6_FP_OFFSET          0x38
+#define CPU_STACK_FRAME_I7_OFFSET             0x3c
+#define CPU_STRUCTURE_RETURN_ADDRESS_OFFSET   0x40
+#define CPU_STACK_FRAME_SAVED_ARG0_OFFSET     0x44
+#define CPU_STACK_FRAME_SAVED_ARG1_OFFSET     0x48
+#define CPU_STACK_FRAME_SAVED_ARG2_OFFSET     0x4c
+#define CPU_STACK_FRAME_SAVED_ARG3_OFFSET     0x50
+#define CPU_STACK_FRAME_SAVED_ARG4_OFFSET     0x54
+#define CPU_STACK_FRAME_SAVED_ARG5_OFFSET     0x58
+#define CPU_STACK_FRAME_PAD0_OFFSET           0x5c
+
+#define CPU_MINIMUM_STACK_FRAME_SIZE          0x60
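
    As a consistency sketch for this 32-bit port, each offset constant
    should equal the offsetof() of the corresponding structure field:

        /* offsetof( CPU_Minimum_stack_frame, i6_fp ) == 0x38           */
        /* offsetof( CPU_Minimum_stack_frame, pad0 )  == 0x5c           */
        /* sizeof( CPU_Minimum_stack_frame )          == 0x60           */

    The pad0 word raises the frame from 0x5c to 0x60 bytes, keeping its
    size a multiple of the 8-byte CPU_ALIGNMENT (and of the 16-byte stack
    alignment) so double word loads and stores stay aligned.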

 /*
…
  *     3. special interrupt level context :: Context_Control_interrupt
  *
- *  On some processors, it is cost-effective to save only the callee
- *  preserved registers during a task context switch.  This means
- *  that the ISR code needs to save those registers which do not
- *  persist across function calls.  It is not mandatory to make this
- *  distinctions between the caller/callee saves registers for the
- *  purpose of minimizing context saved during task switch and on interrupts.
- *  If the cost of saving extra registers is minimal, simplicity is the
- *  choice.  Save the same context on interrupt entry as for tasks in
- *  this case.
- *
- *  Additionally, if gdb is to be made aware of tasks for this CPU, then
- *  care should be used in designing the context area.
- *
- *  On some CPUs with hardware floating point support, the Context_Control_fp
- *  structure will not be used or it simply consist of an array of a
- *  fixed number of bytes.   This is done when the floating point context
- *  is dumped by a "FP save context" type instruction and the format
- *  is not really defined by the CPU.  In this case, there is no need
- *  to figure out the exact format -- only the size.  Of course, although
- *  this is enough information for context switches, it is probably not
- *  enough for a debugger such as gdb.  But that is another problem.
+ *  On the SPARC, we are relatively conservative in that we save most
+ *  of the CPU state in the context area.  The ET (enable trap) bit and
+ *  the CWP (current window pointer) fields of the PSR are considered
+ *  system wide resources and are not maintained on a per-thread basis.
  */

 #ifndef ASM

-/* XXX */
 typedef struct {
-    unsigned32 g0;
-    unsigned32 g1;
+    /*
+     *  Using a double g0_g1 will put everything in this structure on a
+     *  double word boundary which allows us to use double word loads
+     *  and stores safely in the context switch.
+     */
+    double     g0_g1;
     unsigned32 g2;
     unsigned32 g3;
…
     unsigned32 i4;
     unsigned32 i5;
-    unsigned32 i6;
+    unsigned32 i6_fp;
     unsigned32 i7;

…
     unsigned32 o4;
     unsigned32 o5;
-    unsigned32 o6;
+    unsigned32 o6_sp;
     unsigned32 o7;

-    unsigned32 wim;
     unsigned32 psr;
 } Context_Control;
…
 #define I4_OFFSET    0x50
 #define I5_OFFSET    0x54
-#define I6_OFFSET    0x58
+#define I6_FP_OFFSET 0x58
 #define I7_OFFSET    0x5C

…
 #define O4_OFFSET    0x70
 #define O5_OFFSET    0x74
-#define O6_OFFSET    0x78
+#define O6_SP_OFFSET 0x78
 #define O7_OFFSET    0x7C

-#define WIM_OFFSET   0x80
-#define PSR_OFFSET   0x84
+#define PSR_OFFSET   0x80
+
+#define CONTEXT_CONTROL_SIZE 0x84
+
+/*
+ *  The floating point context area.
+ */

 #ifndef ASM

-/* XXX */
 typedef struct {
     double      f0_f1;
…
 #define FSR_OFFSET       0x80

+#define CONTEXT_CONTROL_FP_SIZE 0x84
+
 #ifndef ASM

+/*
+ *  Context saved on stack for an interrupt.
+ *
+ *  NOTE:  The PSR, PC, and NPC are only saved in this structure for the
+ *         benefit of the user's handler.
+ */
+
 typedef struct {
-    unsigned32 special_interrupt_register_XXX;
+  CPU_Minimum_stack_frame  Stack_frame;
+  unsigned32               psr;
+  unsigned32               pc;
+  unsigned32               npc;
+  unsigned32               g1;
+  unsigned32               g2;
+  unsigned32               g3;
+  unsigned32               g4;
+  unsigned32               g5;
+  unsigned32               g6;
+  unsigned32               g7;
+  unsigned32               i0;
+  unsigned32               i1;
+  unsigned32               i2;
+  unsigned32               i3;
+  unsigned32               i4;
+  unsigned32               i5;
+  unsigned32               i6_fp;
+  unsigned32               i7;
+  unsigned32               y;
+  unsigned32               pad0_offset;
 } CPU_Interrupt_frame;

…
  */

+#define ISF_STACK_FRAME_OFFSET 0x00
+#define ISF_PSR_OFFSET         CPU_MINIMUM_STACK_FRAME_SIZE + 0x00
+#define ISF_PC_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x04
+#define ISF_NPC_OFFSET         CPU_MINIMUM_STACK_FRAME_SIZE + 0x08
+#define ISF_G1_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x0c
+#define ISF_G2_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x10
+#define ISF_G3_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x14
+#define ISF_G4_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x18
+#define ISF_G5_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x1c
+#define ISF_G6_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x20
+#define ISF_G7_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x24
+#define ISF_I0_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x28
+#define ISF_I1_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x2c
+#define ISF_I2_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x30
+#define ISF_I3_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x34
+#define ISF_I4_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x38
+#define ISF_I5_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x3c
+#define ISF_I6_FP_OFFSET       CPU_MINIMUM_STACK_FRAME_SIZE + 0x40
+#define ISF_I7_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x44
+#define ISF_Y_OFFSET           CPU_MINIMUM_STACK_FRAME_SIZE + 0x48
+#define ISF_PAD0_OFFSET        CPU_MINIMUM_STACK_FRAME_SIZE + 0x4c
+
+#define CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE CPU_MINIMUM_STACK_FRAME_SIZE + 0x50
 #ifndef ASM

 /*
  *  The following table contains the information required to configure
- *  the XXX processor specific parameters.
+ *  the processor specific parameters.
  *
  *  NOTE: The interrupt_stack_size field is required if
…
   unsigned32   interrupt_stack_size;
   unsigned32   extra_system_initialization_stack;
-  unsigned32   some_other_cpu_dependent_info_XXX;
 }   rtems_cpu_table;

 /*
- *  This variable is optional.  It is used on CPUs on which it is difficult
- *  to generate an "uninitialized" FP context.  It is filled in by
- *  _CPU_Initialize and copied into the task's FP context area during
- *  _CPU_Context_Initialize.
+ *  This variable contains the initial context for the FP unit.
+ *  It is filled in by _CPU_Initialize and copied into the task's FP
+ *  context area during _CPU_Context_Initialize.
  */

…

 /*
- *  On some CPUs, software managed interrupt stack is supported.
  *  This stack is allocated by the Interrupt Manager and the switch
  *  is performed in _ISR_Handler.  These variables contain pointers
…
  *  for the interrupt stack.  Since it is unknown whether the stack
  *  grows up or down (in general), this gives the CPU dependent
- *  code the option of picking the version it wants to use.
- *
- *  NOTE: These two variables are required if the macro
- *        CPU_HAS_SOFTWARE_INTERRUPT_STACK is defined as TRUE.
- */
-
-EXTERN void               *_CPU_Interrupt_stack_low;
-EXTERN void               *_CPU_Interrupt_stack_high;
-
-/*
- *  With some compilation systems, it is difficult if not impossible to
- *  call a high-level language routine from assembly language.  This
- *  is especially true of commercial Ada compilers and name mangling
- *  C++ ones.  This variable can be optionally defined by the CPU porter
- *  and contains the address of the routine _Thread_Dispatch.  This
- *  can make it easier to invoke that routine at the end of the interrupt
- *  sequence (if a dispatch is necessary).
- */
-
-EXTERN void           (*_CPU_Thread_dispatch_pointer)();
-
-/*
- *  Nothing prevents the porter from declaring more CPU specific variables.
- */
-
-/* XXX: if needed, put more variables here */
-
-/*
- *  The size of the floating point context area.  On some CPUs this
- *  will not be a "sizeof" because the format of the floating point
- *  area is not defined -- only the size is.  This is usually on
- *  CPUs with a "floating point save context" instruction.
+ *  code the option of picking the version it wants to use.  Thus
+ *  both must be present if either is.
+ *
+ *  The SPARC supports a software based interrupt stack and these
+ *  are required.
+ */
+
+EXTERN void *_CPU_Interrupt_stack_low;
+EXTERN void *_CPU_Interrupt_stack_high;
+
+#if defined(erc32)
+
+/*
+ *  ERC32 Specific Variables
+ */
+
+EXTERN unsigned32 _ERC32_MEC_Timer_Control_Mirror;
+
+#endif
+
+/*
+ *  The following type defines an entry in the SPARC's trap table.
+ *
+ *  NOTE: The instructions chosen are RTEMS dependent although one is
+ *        obligated to use two of the four instructions to perform a
+ *        long jump.  The other instructions load one register with the
+ *        trap type (a.k.a. vector) and another with the psr.
+ */
+
+typedef struct {
+  unsigned32   mov_psr_l0;                     /* mov   %psr, %l0           */
+  unsigned32   sethi_of_handler_to_l4;         /* sethi %hi(_handler), %l4  */
+  unsigned32   jmp_to_low_of_handler_plus_l4;  /* jmp   %l4 + %lo(_handler) */
+  unsigned32   mov_vector_l3;                  /* mov   _vector, %l3        */
+} CPU_Trap_table_entry;
+
+/*
+ *  This is the set of opcodes for the instructions loaded into a trap
+ *  table entry.  The routine which installs a handler is responsible
+ *  for filling in the fields for the _handler address and the _vector
+ *  trap type.
+ *
+ *  The constants following this structure are masks for the fields which
+ *  must be filled in when the handler is installed.
+ */
+
+extern const CPU_Trap_table_entry _CPU_Trap_slot_template;
+
+/*
+ *  This is the executive's trap table which is installed into the TBR
+ *  register.
+ *
+ *  NOTE:  Unfortunately, this must be aligned on a 4096 byte boundary.
+ *         The GNU tools as of binutils 2.5.2 and gcc 2.7.0 would not
+ *         align an entity to anything greater than a 512 byte boundary.
+ *
+ *         Because of this, we pull a little bit of a trick.  We allocate
+ *         enough memory so we can grab an address on a 4096 byte boundary
+ *         from this area.
+ */
+
+#define SPARC_TRAP_TABLE_ALIGNMENT 4096
+
+EXTERN unsigned8 _CPU_Trap_Table_area[ 8192 ]
+           __attribute__ ((aligned (SPARC_TRAP_TABLE_ALIGNMENT)));
+
+/*
+ *  The size of the floating point context area.
  */

 #define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
+
+#endif

 /*
…
  *  This defines the number of entries in the ISR_Vector_table managed
  *  by the executive.
- */
-
-#define CPU_INTERRUPT_NUMBER_OF_VECTORS  255
+ *
+ *  On the SPARC, there are really only 256 vectors.  However, the executive
+ *  has no easy, fast, reliable way to determine which traps are synchronous
+ *  and which are asynchronous.  By default, synchronous traps return to the
+ *  instruction which caused the interrupt.  So if you install a software
+ *  trap handler as an executive interrupt handler (which is desirable since
+ *  RTEMS takes care of window and register issues), then the executive needs
+ *  to know that the return address is to the trap rather than the instruction
+ *  following the trap.
+ *
+ *  So vectors 0 through 255 are treated as regular asynchronous traps which
+ *  provide the "correct" return address.  Vectors 256 through 511 are assumed
+ *  by the executive to be synchronous and to require that the return address
+ *  be fudged.
+ *
+ *  If you use this mechanism to install a trap handler which must reexecute
+ *  the instruction which caused the trap, then it should be installed as
+ *  an asynchronous trap.  This will avoid the executive changing the return
+ *  address.
+ */
+
+#define CPU_INTERRUPT_NUMBER_OF_VECTORS     256
+#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER 511
+
+#define SPARC_SYNCHRONOUS_TRAP_BIT_MASK     0x100
+#define SPARC_ASYNCHRONOUS_TRAP( _trap )    (_trap)
+#define SPARC_SYNCHRONOUS_TRAP( _trap )     ((_trap) + 256 )
+
+#define SPARC_REAL_TRAP_NUMBER( _trap )     ((_trap) % 256)
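
    A usage sketch of the vector number encoding (trap type 0x80 chosen
    arbitrarily for illustration):

        /* SPARC_ASYNCHRONOUS_TRAP( 0x80 )          == 0x080            */
        /* SPARC_SYNCHRONOUS_TRAP( 0x80 )           == 0x180            */
        /* SPARC_REAL_TRAP_NUMBER( 0x180 )          == 0x080            */
        /* 0x180 & SPARC_SYNCHRONOUS_TRAP_BIT_MASK  != 0   (sync trap)  */

    Installing with the synchronous form tells the executive to fudge the
    return address so execution resumes after the trap instruction.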

 /*
  *  Should be large enough to run all tests.  This ensures
  *  that a "reasonable" small application should not have any problems.
- */
-
-#define CPU_STACK_MINIMUM_SIZE          (1024*2)
+ *
+ *  This appears to be a fairly generous number for the SPARC since it
+ *  represents a call depth of about 20 routines based on the minimum
+ *  stack frame.
+ */
+
+#define CPU_STACK_MINIMUM_SIZE  (1024*2)

 /*
  *  CPU's worst alignment requirement for data types on a byte boundary.  This
  *  alignment does not take into account the requirements for the stack.
- */
-
-#define CPU_ALIGNMENT              8
+ *
+ *  On the SPARC, this is required for double word loads and stores.
+ */
+
+#define CPU_ALIGNMENT      8

 /*
…
  *
  *  NOTE:  This must be either 0 or a power of 2 greater than CPU_ALIGNMENT.
+ *
+ *  The alignment restrictions for the SPARC are not that strict but this
+ *  should ensure that the stack is always sufficiently aligned that the
+ *  window overflow, underflow, and flush routines can use double word loads
+ *  and stores.
  */

 #define CPU_STACK_ALIGNMENT        16
-
-#endif  /* ASM */

 #ifndef ASM
…
  *  Map interrupt level in task mode onto the hardware that the CPU
  *  actually provides.  Currently, interrupt levels which do not
- *  map onto the CPU in a generic fashion are undefined.  Someday,
- *  it would be nice if these were "mapped" by the application
- *  via a callout.  For example, m68k has 8 levels 0 - 7, levels
- *  8 - 255 would be available for bsp/application specific meaning.
- *  This could be used to manage a programmable interrupt controller
- *  via the rtems_task_mode directive.
+ *  map onto the CPU in a straightforward fashion are undefined.
  */

…
  *     - initializing the floating point context
  *
- *  This routine generally does not set any unnecessary register
- *  in the context.  The state of the "general data" registers is
- *  undefined at task start time.
- *
  *  NOTE:  Implemented as a subroutine for the SPARC port.
  */

 void _CPU_Context_Initialize(
-  Context_Control  *_the_context,
-  unsigned32       *_stack_base,
-  unsigned32        _size,
-  unsigned32        _new_level,
-  void             *_entry_point
+  Context_Control  *the_context,
+  unsigned32       *stack_base,
+  unsigned32        size,
+  unsigned32        new_level,
+  void             *entry_point,
+  boolean           is_fp
 );

 /*
  *  This routine is responsible for somehow restarting the currently
- *  executing task.  If you are lucky, then all that is necessary
- *  is restoring the context.  Otherwise, there will need to be
- *  a special assembly routine which does something special in this
- *  case.  Context_Restore should work most of the time.  It will
- *  not work if restarting self conflicts with the stack frame
- *  assumptions of restoring a context.
+ *  executing task.
+ *
+ *  On the SPARC, this is relatively painless but requires a small
+ *  amount of wrapper code before using the regular restore code of
+ *  the context switch.
  */

…

 /*
- *  The purpose of this macro is to allow the initial pointer into
- *  a floating point context area (used to save the floating point
- *  context) to be at an arbitrary place in the floating point
- *  context area.
- *
- *  This is necessary because some FP units are designed to have
- *  their context saved as a stack which grows into lower addresses.
- *  Other FP units can be saved by simply moving registers into offsets
- *  from the base of the context area.  Finally some FP units provide
- *  a "dump context" instruction which could fill in from high to low
- *  or low to high based on the whim of the CPU designers.
+ *  The FP context area for the SPARC is a simple structure and nothing
+ *  special is required to find the "starting load point".
  */

…
 /*
  *  This routine initializes the FP context area passed to it.
- *  There are a few standard ways in which to initialize the
- *  floating point context.  The code included for this macro assumes
- *  that this is a CPU in which a "initial" FP context was saved into
- *  _CPU_Null_fp_context and it simply copies it to the destination
- *  context passed to it.
- *
- *  Other models include (1) not doing anything, and (2) putting
- *  a "null FP status word" in the correct place in the FP context.
+ *
+ *  The SPARC allows us to use the simple initialization model
+ *  in which an "initial" FP context was saved into _CPU_Null_fp_context
+ *  at CPU initialization and it is simply copied into the destination
+ *  context.
  */

 #define _CPU_Context_Initialize_fp( _destination ) \
-  { \
+  do { \
    *((Context_Control_fp *) *((void **) _destination)) = _CPU_Null_fp_context; \
-  }
+  } while (0)

 /* end of Context handler macros */
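
    The double indirection in the macro exists because _destination is the
    address of the thread's FP context pointer, not of the area itself.  A
    sketch of the expansion (variable names hypothetical):

        /* Context_Control_fp  fp_area;                                    */
        /* void               *fp_context_ptr = &fp_area;                  */
        /* _CPU_Context_Initialize_fp( &fp_context_ptr );                  */
        /*   ...expands to roughly:                                        */
        /* *((Context_Control_fp *) fp_context_ptr) = _CPU_Null_fp_context; */

    The do/while(0) wrapper simply makes the macro behave as one statement
    after an unbraced if.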
     

 #define _CPU_Fatal_halt( _error ) \
-  { \
-  }
+  do { \
+    unsigned32 level; \
+    \
+    sparc_disable_interrupts( level ); \
+    asm volatile ( "mov  %0, %%g1 " : "=r" (level) : "0" (_error) ); \
+    while (1); /* loop forever */ \
+  } while (0)

 /* end of Fatal Error manager macros */
     
    741817
    742818/*
    743  *  This routine sets _output to the bit number of the first bit
    744  *  set in _value.  _value is of CPU dependent type Priority_Bit_map_control.
    745  *  This type may be either 16 or 32 bits wide although only the 16
    746  *  least significant bits will be used.
    747  *
    748  *  There are a number of variables in using a "find first bit" type
    749  *  instruction.
    750  *
    751  *    (1) What happens when run on a value of zero?
    752  *    (2) Bits may be numbered from MSB to LSB or vice-versa.
    753  *    (3) The numbering may be zero or one based.
    754  *    (4) The "find first bit" instruction may search from MSB or LSB.
    755  *
    756  *  The executive guarantees that (1) will never happen so it is not a concern.
    757  *  (2),(3), (4) are handled by the macros _CPU_Priority_mask() and
    758  *  _CPU_Priority_Bits_index().  These three form a set of routines
    759  *  which must logically operate together.  Bits in the _value are
    760  *  set and cleared based on masks built by _CPU_Priority_mask().
    761  *  The basic major and minor values calculated by _Priority_Major()
    762  *  and _Priority_Minor() are "massaged" by _CPU_Priority_Bits_index()
    763  *  to properly range between the values returned by the "find first bit"
    764  *  instruction.  This makes it possible for _Priority_Get_highest() to
    765  *  calculate the major and directly index into the minor table.
    766  *  This mapping is necessary to ensure that 0 (a high priority major/minor)
    767  *  is the first bit found.
    768  *
    769  *  This entire "find first bit" and mapping process depends heavily
    770  *  on the manner in which a priority is broken into a major and minor
    771  *  components with the major being the 4 MSB of a priority and minor
    772  *  the 4 LSB.  Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
    773  *  priority.  And (15 << 4) + 14 corresponds to priority 254 -- the next
    774  *  to the lowest priority.
    775  *
    776  *  If your CPU does not have a "find first bit" instruction, then
    777  *  there are ways to make do without it.  Here are a handful of ways
    778  *  to implement this in software:
    779  *
    780  *    - a series of 16 bit test instructions
    781  *    - a "binary search using if's"
    782  *    - _number = 0
    783  *      if _value > 0x00ff
    784  *        _value >>=8
    785  *        _number = 8;
    786  *
    787  *      if _value > 0x0000f
    788  *        _value >=8
    789  *        _number += 4
    790  *
    791  *      _number += bit_set_table[ _value ]
    792  *
    793  *    where bit_set_table[ 16 ] has values which indicate the first
    794  *      bit set
    795  */
    796 
    797 #ifndef INIT
    798   extern const unsigned char __log2table[256];
     819 *  The SPARC port uses the generic C algorithm for bitfield scan if the
     820 *  CPU model does not have a scan instruction.
     821 */
     822
     823#if ( SPARC_HAS_BITSCAN == 0 )
     824#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
     825#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
    799826#else
    800 const unsigned char __log2table[256] = {
    801     0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
    802     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    803     5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    804     5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    805     6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    806     6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    807     6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    808     6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    809     7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    810     7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    811     7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    812     7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    813     7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    814     7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    815     7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    816     7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
    817 };
     827#error "scan instruction not currently supported by RTEMS!!"
    818828#endif
    819829
    820 #define _CPU_Bitfield_Find_first_bit( _value, _output ) \
    821   { \
    822     register __value = (_value); \
    823     \
    824     if ( !(__value & 0xff00) ) \
    825       (_output) = __log2table[ __value ]; \
    826     else \
    827       (_output) = __log2table[ __value >> 8 ] + 8; \
    828   }
    829 
    830 
    831830/* end of Bitfield handler macros */
    832831
    833 /*
    834  *  This routine builds the mask which corresponds to the bit fields
    835  *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
    836  *  for that routine.
    837  */
    838 
    839 #define _CPU_Priority_Mask( _bit_number ) \
    840   ( 0x8000 >> (_bit_number) )
    841 
    842 /*
    843  *  This routine translates the bit numbers returned by
    844  *  _CPU_Bitfield_Find_first_bit() into something suitable for use as
    845  *  a major or minor component of a priority.  See the discussion
    846  *  for that routine.
    847  */
    848 
    849 #define _CPU_Priority_Bits_index( _priority ) \
    850   (15 - (_priority))
      832/* Priority handler macros */
     833
     834/*
     835 *  The SPARC port uses the generic C algorithm for bitfield scan if the
     836 *  CPU model does not have a scan instruction.
     837 */
     838
     839#if ( SPARC_HAS_BITSCAN == 1 )
     840#error "scan instruction not currently supported by RTEMS!!"
     841#endif
    851842
    852843/* end of Priority handler macros */
     
    863854  rtems_cpu_table  *cpu_table,
    864855  void            (*thread_dispatch)
     856);
     857
     858/*
     859 *  _CPU_ISR_install_raw_handler
     860 *
     861 *  This routine installs new_handler to be directly called from the trap
     862 *  table.
     863 */
     864 
     865void _CPU_ISR_install_raw_handler(
     866  unsigned32  vector,
     867  proc_ptr    new_handler,
     868  proc_ptr   *old_handler
    865869);
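
A usage sketch follows; the trap vector number and handler name are
hypothetical, chosen only for illustration:

    proc_ptr previous_handler;

    _CPU_ISR_install_raw_handler(
      0x10,                          /* hypothetical trap vector */
      (proc_ptr) my_trap_handler,    /* hypothetical handler routine */
      &previous_handler              /* receives the prior handler */
    );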
    866870
     
    877881);
    878882
    879 /*
    880  *  _CPU_Install_interrupt_stack
    881  *
    882  *  This routine installs the hardware interrupt stack pointer.
    883  *
    884  *  NOTE:  It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
    885  *         is TRUE.
    886  */
    887 
    888 void _CPU_Install_interrupt_stack( void );
    889 
     883#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)
     884 
    890885/*
    891886 *  _CPU_Internal_threads_Idle_thread_body
    892887 *
    893  *  This routine is the CPU dependent IDLE thread body.
    894  *
    895  *  NOTE:  It need only be provided if CPU_PROVIDES_IDLE_THREAD_BODY
    896  *         is TRUE.
    897  */
    898 
     888 *  Some SPARC implementations have low power, sleep, or idle modes.  This
      889 *  routine tries to take advantage of those modes.
     890 */
     891 
    899892void _CPU_Internal_threads_Idle_thread_body( void );
     893 
     894#endif /* CPU_PROVIDES_IDLE_THREAD_BODY */
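
As a sketch only -- the actual mechanism is CPU model dependent and the
"power-down" step below is a placeholder, not a real register:

    void _CPU_Internal_threads_Idle_thread_body( void )
    {
      for ( ;; ) {
        /* enter the CPU model's low power or sleep mode here, e.g. by
           writing a model-specific power-down control register */
      }
    }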
    900895
    901896/*
     
    914909 *
     915910 *  This routine is generally used only to restart self in an
    916  *  efficient manner.  It may simply be a label in _CPU_Context_switch.
    917  *
    918  *  NOTE: May be unnecessary to reload some registers.
     911 *  efficient manner.
    919912 */
    920913
     
    943936);
    944937
    945 /*  The following routine swaps the endian format of an unsigned int.
     938/*
     939 *  CPU_swap_u32
     940 *
     941 *  The following routine swaps the endian format of an unsigned int.
    946942 *  It must be static because it is referenced indirectly.
    947943 *
    948  *  This version will work on any processor, but if there is a better
    949  *  way for your CPU PLEASE use it.  The most common way to do this is to:
     944 *  This version will work on any processor, but if you come across a better
     945 *  way for the SPARC PLEASE use it.  The most common way to swap a 32-bit
      946 *  entity, shown below, is no more efficient on the SPARC.
    950947 *
    951948 *     swap least significant two bytes with 16-bit rotate
     
    953950 *     swap most significant two bytes with 16-bit rotate
    954951 *
    955  *  Some CPUs have special instructions which swap a 32-bit quantity in
    956  *  a single instruction (e.g. i486).  It is probably best to avoid
    957  *  an "endian swapping control bit" in the CPU.  One good reason is
    958  *  that interrupts would probably have to be disabled to insure that
    959  *  an interrupt does not try to access the same "chunk" with the wrong
    960  *  endian.  Another good reason is that on some CPUs, the endian bit
    961  *  endianness for ALL fetches -- both code and data -- so the code
    962  *  will be fetched incorrectly.
     952 *  It is not obvious how the SPARC can do significantly better than the
     953 *  generic code.  gcc 2.7.0 only generates about 12 instructions for the
     954 *  following code at optimization level four (i.e. -O4).
    963955 */
    964956 
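The generic shift-and-mask approach referred to above is along these
lines (a sketch; the name is illustrative and the routine actually used
by the port may differ in detail):

    static inline unsigned32 swap_u32_sketch( unsigned32 value )
    {
      unsigned32 byte1, byte2, byte3, byte4;

      byte4 = (value >> 24) & 0xff;          /* isolate the four bytes */
      byte3 = (value >> 16) & 0xff;
      byte2 = (value >>  8) & 0xff;
      byte1 =  value        & 0xff;

      /* reassemble them in the opposite order */
      return (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
    }
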
  • c/src/exec/score/cpu/sparc/cpu_asm.s

    rea74482 r9700578  
    33 *  This file contains the basic algorithms for all assembly code used
    44 *  in an specific CPU port of RTEMS.  These algorithms must be implemented
    5  *  in assembly language
     5 *  in assembly language.
    66 *
    77 *  $Id$
     
    1111#include <rtems/score/cpu.h>
    1212
     13#if (SPARC_HAS_FPU == 1)
     14
    1315/*
    14  *  _CPU_Context_save_fp
     16 *  void _CPU_Context_save_fp(
     17 *    void **fp_context_ptr
     18 *  )
    1519 *
    1620 *  This routine is responsible for saving the FP context
     
    1822 *  from is changed then the pointer is modified by this routine.
    1923 *
    20  *  Sometimes a macro implementation of this is in cpu.h which dereferences
    21  *  the ** and a similarly named routine in this file is passed something
    22  *  like a (Context_Control_fp *).  The general rule on making this decision
    23  *  is to avoid writing assembly language.
    24  * 
    25  *  void _CPU_Context_save_fp(
    26  *    void **fp_context_ptr
    27  *  )
    28  *  {
    29  *  }
     24 *  NOTE: See the README in this directory for information on the
     25 *        management of the "EF" bit in the PSR.
    3026 */
    3127
     
    3329        PUBLIC(_CPU_Context_save_fp)
    3430SYM(_CPU_Context_save_fp):
    35         save    %sp,-104,%sp
    36         ld      [%i0],%l0
    37         std     %f0,[%l0+FO_F1_OFFSET]
    38         std     %f2,[%l0+F2_F3_OFFSET]
    39         std     %f4,[%l0+F4_F5_OFFSET]
    40         std     %f6,[%l0+F6_F7_OFFSET]
    41         std     %f8,[%l0+F8_F9_OFFSET]
    42         std     %f10,[%l0+F1O_F11_OFFSET]
    43         std     %f12,[%l0+F12_F13_OFFSET]
    44         std     %f14,[%l0+F14_F15_OFFSET]
    45         std     %f16,[%l0+F16_F17_OFFSET]
    46         std     %f18,[%l0+F18_F19_OFFSET]
    47         std     %f20,[%l0+F2O_F21_OFFSET]
    48         std     %f22,[%l0+F22_F23_OFFSET]
    49         std     %f24,[%l0+F24_F25_OFFSET]
    50         std     %f26,[%l0+F26_F27_OFFSET]
    51         std     %f28,[%l0+F28_F29_OFFSET]
    52         std     %f30,[%l0+F3O_F31_OFFSET]
    53         st      %fsr,[%l0+FSR_OFFSET]
     31        save    %sp, -CPU_MINIMUM_STACK_FRAME_SIZE, %sp
     32
     33        /*
     34         *  The following enables the floating point unit.
     35         */
     36   
     37        mov     %psr, %l0
     38        sethi   %hi(SPARC_PSR_EF_MASK), %l1
     39        or      %l1, %lo(SPARC_PSR_EF_MASK), %l1
     40        or      %l0, %l1, %l0
     41        mov     %l0, %psr                  ! **** ENABLE FLOAT ACCESS ****
     42
     43        ld      [%i0], %l0
     44        std     %f0, [%l0 + FO_F1_OFFSET]
     45        std     %f2, [%l0 + F2_F3_OFFSET]
     46        std     %f4, [%l0 + F4_F5_OFFSET]
     47        std     %f6, [%l0 + F6_F7_OFFSET]
     48        std     %f8, [%l0 + F8_F9_OFFSET]
     49        std     %f10, [%l0 + F1O_F11_OFFSET]
     50        std     %f12, [%l0 + F12_F13_OFFSET]
     51        std     %f14, [%l0 + F14_F15_OFFSET]
     52        std     %f16, [%l0 + F16_F17_OFFSET]
     53        std     %f18, [%l0 + F18_F19_OFFSET]
     54        std     %f20, [%l0 + F2O_F21_OFFSET]
     55        std     %f22, [%l0 + F22_F23_OFFSET]
     56        std     %f24, [%l0 + F24_F25_OFFSET]
     57        std     %f26, [%l0 + F26_F27_OFFSET]
     58        std     %f28, [%l0 + F28_F29_OFFSET]
     59        std     %f30, [%l0 + F3O_F31_OFFSET]
     60        st      %fsr, [%l0 + FSR_OFFSET]
    5461        ret
    5562        restore
    5663
    5764/*
    58  *  _CPU_Context_restore_fp
     65 *  void _CPU_Context_restore_fp(
     66 *    void **fp_context_ptr
     67 *  )
    5968 *
    6069 *  This routine is responsible for restoring the FP context
     
    6271 *  from is changed then the pointer is modified by this routine.
    6372 *
    64  *  Sometimes a macro implementation of this is in cpu.h which dereferences
    65  *  the ** and a similarly named routine in this file is passed something
    66  *  like a (Context_Control_fp *).  The general rule on making this decision
    67  *  is to avoid writing assembly language.
    68  * 
    69  *  void _CPU_Context_restore_fp(
    70  *    void **fp_context_ptr
    71  *  )
    72  *  {
    73  *  }
     73 *  NOTE: See the README in this directory for information on the
     74 *        management of the "EF" bit in the PSR.
    7475 */
    7576
     
    7778        PUBLIC(_CPU_Context_restore_fp)
    7879SYM(_CPU_Context_restore_fp):
    79         save    %sp,-104,%sp
    80         ld      [%o0],%l0
    81         ldd     [%l0+FO_F1_OFFSET],%f0
    82         ldd     [%l0+F2_F3_OFFSET],%f2
    83         ldd     [%l0+F4_F5_OFFSET],%f4
    84         ldd     [%l0+F6_F7_OFFSET],%f6
    85         ldd     [%l0+F8_F9_OFFSET],%f8
    86         ldd     [%l0+F1O_F11_OFFSET],%f10
    87         ldd     [%l0+F12_F13_OFFSET],%f12
    88         ldd     [%l0+F14_F15_OFFSET],%f14
    89         ldd     [%l0+F16_F17_OFFSET],%f16
    90         ldd     [%l0+F18_F19_OFFSET],%f18
    91         ldd     [%l0+F2O_F21_OFFSET],%f20
    92         ldd     [%l0+F22_F23_OFFSET],%f22
    93         ldd     [%l0+F24_F25_OFFSET],%f24
    94         ldd     [%l0+F26_F27_OFFSET],%f26
    95         ldd     [%l0+F28_F29_OFFSET],%f28
    96         ldd     [%l0+F3O_F31_OFFSET],%f30
    97         ld      [%l0+FSR_OFFSET],%fsr
     80        save    %sp, -CPU_MINIMUM_STACK_FRAME_SIZE , %sp
     81
     82        /*
     83         *  The following enables the floating point unit.
     84         */
     85   
     86        mov     %psr, %l0
     87        sethi   %hi(SPARC_PSR_EF_MASK), %l1
     88        or      %l1, %lo(SPARC_PSR_EF_MASK), %l1
     89        or      %l0, %l1, %l0
     90        mov     %l0, %psr                  ! **** ENABLE FLOAT ACCESS ****
     91
     92        ld      [%i0], %l0
     93        ldd     [%l0 + FO_F1_OFFSET], %f0
     94        ldd     [%l0 + F2_F3_OFFSET], %f2
     95        ldd     [%l0 + F4_F5_OFFSET], %f4
     96        ldd     [%l0 + F6_F7_OFFSET], %f6
     97        ldd     [%l0 + F8_F9_OFFSET], %f8
     98        ldd     [%l0 + F1O_F11_OFFSET], %f10
     99        ldd     [%l0 + F12_F13_OFFSET], %f12
     100        ldd     [%l0 + F14_F15_OFFSET], %f14
     101        ldd     [%l0 + F16_F17_OFFSET], %f16
     102        ldd     [%l0 + F18_F19_OFFSET], %f18
     103        ldd     [%l0 + F2O_F21_OFFSET], %f20
     104        ldd     [%l0 + F22_F23_OFFSET], %f22
     105        ldd     [%l0 + F24_F25_OFFSET], %f24
     106        ldd     [%l0 + F26_F27_OFFSET], %f26
     107        ldd     [%l0 + F28_F29_OFFSET], %f28
     108        ldd     [%l0 + F3O_F31_OFFSET], %f30
     109        ld      [%l0 + FSR_OFFSET], %fsr
    98110        ret
    99111        restore
    100112
    101 /*  _CPU_Context_switch
    102  *
    103  *  This routine performs a normal non-FP context switch.
    104  * 
     113#endif /* SPARC_HAS_FPU */
     114
     115/*
    105116 *  void _CPU_Context_switch(
    106117 *    Context_Control  *run,
    107118 *    Context_Control  *heir
    108119 *  )
    109  *  {
    110  *  }
     120 *
     121 *  This routine performs a normal non-FP context switch.
    111122 */
    112 
    113 /* from gcc-2.7.0/config/sparc/sparc.h on register usage */
    114 
    115 /* 1 for registers that have pervasive standard uses
    116    and are not available for the register allocator.
    117    g0 is used for the condition code and not to represent %g0, which is
    118    hardwired to 0, so reg 0 is *not* fixed.
    119    On non-v9 systems:
    120    g1 is free to use as temporary.
    121    g2-g4 are reserved for applications.  Gcc normally uses them as
    122    temporaries, but this can be disabled via the -mno-app-regs option.
    123    g5 through g7 are reserved for the operating system.
    124    On v9 systems:
    125    g1 and g5 are free to use as temporaries.
    126    g2-g4 are reserved for applications (the compiler will not normally use
    127    them, but they can be used as temporaries with -mapp-regs).
    128    g6-g7 are reserved for the operating system.
    129    ??? Register 1 is used as a temporary by the 64 bit sethi pattern, so must
    130    currently be a fixed register until this pattern is rewritten.
    131    Register 1 is also used when restoring call-preserved registers in large
    132    stack frames.  */
    133 
    134123
    135124        .align 4
    136125        PUBLIC(_CPU_Context_switch)
    137126SYM(_CPU_Context_switch):
    138         ta      0x03                       /* flush registers */
    139 
    140         /* skip g0 */
    141         st      %g1,[%o0+G1_OFFSET]        /* globals */
    142         st      %g2,[%o0+G2_OFFSET]
    143         st      %g3,[%o0+G3_OFFSET]
    144         st      %g4,[%o0+G4_OFFSET]
    145         st      %g5,[%o0+G5_OFFSET]
    146         st      %g6,[%o0+G6_OFFSET]
    147         st      %g7,[%o0+G7_OFFSET]
    148 
    149         st      %l0,[%o0+L0_OFFSET]
    150         st      %l1,[%o0+L1_OFFSET]
    151         st      %l2,[%o0+L2_OFFSET]
    152         st      %l3,[%o0+L3_OFFSET]
    153         st      %l4,[%o0+L4_OFFSET]
    154         st      %l5,[%o0+L5_OFFSET]
    155         st      %l6,[%o0+L6_OFFSET]
    156         st      %l7,[%o0+L7_OFFSET]
    157 
    158         st      %i0,[%o0+I0_OFFSET]
    159         st      %i1,[%o0+I1_OFFSET]
    160         st      %i2,[%o0+I2_OFFSET]
    161         st      %i3,[%o0+I3_OFFSET]
    162         st      %i4,[%o0+I4_OFFSET]
    163         st      %i5,[%o0+I5_OFFSET]
    164         st      %i6,[%o0+I6_OFFSET]
    165         st      %i7,[%o0+I7_OFFSET]
    166 
    167         st      %o0,[%o0+O0_OFFSET]
    168         st      %o1,[%o0+O1_OFFSET]
    169         st      %o2,[%o0+O2_OFFSET]
    170         st      %o3,[%o0+O3_OFFSET]
    171         st      %o4,[%o0+O4_OFFSET]
    172         st      %o5,[%o0+O5_OFFSET]
    173         st      %o6,[%o0+O6_OFFSET]
    174         st      %o7,[%o0+O7_OFFSET]
    175 
    176         rd      %psr,%o2
    177         st      %o2,[%o0+PSR_OFFSET]        /* save status register */
    178 
    179         /* enter here with o1 = context to restore */
    180         /*                 o2 = psr */
    181 restore:
    182 
    183         ld      [%o1+PSR_OFFSET],%o0
    184         and     %o2,31,%o2               /* g1 = cwp */
    185         and     %o0,-32,%o0                /* o0 = psr w/o cwp */
    186         or      %o0,%o2,%o2                /* o2 = new psr */
    187         wr      %o2,0,%psr                 /* restore status register */
    188 
    189         /* skip g0 */
    190         ld      [%o1+G1_OFFSET],%g1
    191         ld      [%o1+G2_OFFSET],%g2
    192         ld      [%o1+G3_OFFSET],%g3
    193         ld      [%o1+G4_OFFSET],%g4
    194         ld      [%o1+G5_OFFSET],%g5
    195         ld      [%o1+G6_OFFSET],%g6
    196         ld      [%o1+G7_OFFSET],%g7
    197 
    198         ld      [%o1+L0_OFFSET],%l0
    199         ld      [%o1+L1_OFFSET],%l1
    200         ld      [%o1+L2_OFFSET],%l2
    201         ld      [%o1+L3_OFFSET],%l3
    202         ld      [%o1+L4_OFFSET],%l4
    203         ld      [%o1+L5_OFFSET],%l5
    204         ld      [%o1+L6_OFFSET],%l6
    205         ld      [%o1+L7_OFFSET],%l7
    206 
    207         ld      [%o1+I0_OFFSET],%i0
    208         ld      [%o1+I1_OFFSET],%i1
    209         ld      [%o1+I2_OFFSET],%i2
    210         ld      [%o1+I3_OFFSET],%i3
    211         ld      [%o1+I4_OFFSET],%i4
    212         ld      [%o1+I5_OFFSET],%i5
    213         ld      [%o1+I6_OFFSET],%i6
    214         ld      [%o1+I7_OFFSET],%i7
    215 
    216         ld      [%o1+O0_OFFSET],%o0
    217         /* do o1 last to avoid destroying heir context pointer */
    218         ld      [%o1+O2_OFFSET],%o2
    219         ld      [%o1+O3_OFFSET],%o3
    220         ld      [%o1+O4_OFFSET],%o4
    221         ld      [%o1+O5_OFFSET],%o5
    222         ld      [%o1+O6_OFFSET],%o6
    223         ld      [%o1+O7_OFFSET],%o7
    224 
    225         ld      [%o1+O1_OFFSET],%o1   /* overwrite heir pointer */
    226 
    227         jmp     %o7 + 8                     /* return */
    228         nop                                 /* delay slot */
    229        
     127        ! skip g0
     128        st      %g1, [%o0 + G1_OFFSET]       ! save the global registers
     129        std     %g2, [%o0 + G2_OFFSET]
     130        std     %g4, [%o0 + G4_OFFSET]
     131        std     %g6, [%o0 + G6_OFFSET]
     132
     133        std     %l0, [%o0 + L0_OFFSET]       ! save the local registers
     134        std     %l2, [%o0 + L2_OFFSET]
     135        std     %l4, [%o0 + L4_OFFSET]
     136        std     %l6, [%o0 + L6_OFFSET]
     137
     138        std     %i0, [%o0 + I0_OFFSET]       ! save the input registers
     139        std     %i2, [%o0 + I2_OFFSET]
     140        std     %i4, [%o0 + I4_OFFSET]
     141        std     %i6, [%o0 + I6_FP_OFFSET]
     142
     143        std     %o0, [%o0 + O0_OFFSET]       ! save the output registers
     144        std     %o2, [%o0 + O2_OFFSET]
     145        std     %o4, [%o0 + O4_OFFSET]
     146        std     %o6, [%o0 + O6_SP_OFFSET]
     147
     148        rd      %psr, %o2
     149        st      %o2, [%o0 + PSR_OFFSET]      ! save status register
     150
     151        /*
     152         *  This is entered from _CPU_Context_restore with:
     153         *    o1 = context to restore
     154         *    o2 = psr
     155         */
     156
     157        PUBLIC(_CPU_Context_restore_heir)
     158SYM(_CPU_Context_restore_heir):
     159        /*
     160         *  Flush all windows with valid contents except the current one.
      161         *  In examining the set of register windows, one may logically divide
     162         *  the windows into sets (some of which may be empty) based on their
     163         *  current status: 
     164         *
     165         *    + current (i.e. in use),
     166         *    + used (i.e. a restore would not trap)
     167         *    + invalid (i.e. 1 in corresponding bit in WIM)
     168         *    + unused
     169         *
     170         *  Either the used or unused set of windows may be empty.
     171         *
     172         *  NOTE: We assume only one bit is set in the WIM at a time.
     173         *
     174         *  Given a CWP of 5 and a WIM of 0x1, the registers are divided
     175         *  into sets as follows:
     176         *
     177         *    + 0   - invalid
     178         *    + 1-4 - unused
     179         *    + 5   - current
     180         *    + 6-7 - used
     181         *
      182         *  In this case, we would only save the used windows -- 6 and 7.
     183         *
     184         *   Traps are disabled for the same logical period as in a
     185         *     flush all windows trap handler.
     186         *   
     187         *    Register Usage while saving the windows:
     188         *      g1 = current PSR
     189         *      g2 = current wim
     190         *      g3 = CWP
     191         *      g4 = wim scratch
     192         *      g5 = scratch
     193         */
     194
     195        ld      [%o1 + PSR_OFFSET], %g1       ! g1 = saved psr
     196
     197        and     %o2, SPARC_PSR_CWP_MASK, %g3  ! g3 = CWP
     198                                              ! g1 = psr w/o cwp
     199        andn    %g1, SPARC_PSR_ET_MASK | SPARC_PSR_CWP_MASK, %g1
     200        or      %g1, %g3, %g1                 ! g1 = heirs psr
     201        mov     %g1, %psr                     ! restore status register and
     202                                              ! **** DISABLE TRAPS ****
     203        mov     %wim, %g2                     ! g2 = wim
     204        mov     1, %g4
     205        sll     %g4, %g3, %g4                 ! g4 = WIM mask for CW invalid
     206
     207save_frame_loop:
     208        sll     %g4, 1, %g5                   ! rotate the "wim" left 1
     209        srl     %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g4
     210        or      %g4, %g5, %g4                 ! g4 = wim if we do one restore
     211
     212        /*
     213         *  If a restore would not underflow, then continue.
     214         */
     215
     216        andcc   %g4, %g2, %g0                 ! Any windows to flush?
      217        bnz     done_flushing                 ! No, then done flushing
     218        nop
     219
     220        restore                               ! back one window
     221
     222        /*
     223         *  Now save the window just as if we overflowed to it.
     224         */
     225 
     226        std     %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET]
     227        std     %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET]
     228        std     %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET]
     229        std     %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET]
     230 
     231        std     %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET]
     232        std     %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET]
     233        std     %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET]
     234        std     %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET]
     235
     236        ba      save_frame_loop
     237        nop
     238
     239done_flushing:
     240
     241        add     %g3, 1, %g3                   ! calculate desired WIM
     242        and     %g3, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g3
     243        mov     1, %g4
     244        sll     %g4, %g3, %g4                 ! g4 = new WIM
     245        mov     %g4, %wim
     246
     247        or      %g1, SPARC_PSR_ET_MASK, %g1
     248        mov     %g1, %psr                     ! **** ENABLE TRAPS ****
     249                                              !   and restore CWP
     250        nop
     251        nop
     252        nop
     253
     254        ! skip g0
     255        ld      [%o1 + G1_OFFSET], %g1        ! restore the global registers
     256        ldd     [%o1 + G2_OFFSET], %g2
     257        ldd     [%o1 + G4_OFFSET], %g4
     258        ldd     [%o1 + G6_OFFSET], %g6
     259
     260        ldd     [%o1 + L0_OFFSET], %l0        ! restore the local registers
     261        ldd     [%o1 + L2_OFFSET], %l2
     262        ldd     [%o1 + L4_OFFSET], %l4
     263        ldd     [%o1 + L6_OFFSET], %l6
     264
      265        ldd     [%o1 + I0_OFFSET], %i0        ! restore the input registers
     266        ldd     [%o1 + I2_OFFSET], %i2
     267        ldd     [%o1 + I4_OFFSET], %i4
     268        ldd     [%o1 + I6_FP_OFFSET], %i6
     269
     270        ldd     [%o1 + O2_OFFSET], %o2        ! restore the output registers
     271        ldd     [%o1 + O4_OFFSET], %o4
     272        ldd     [%o1 + O6_SP_OFFSET], %o6
     273        ! do o0/o1 last to avoid destroying heir context pointer
     274        ldd     [%o1 + O0_OFFSET], %o0        ! overwrite heir pointer
     275
     276        jmp     %o7 + 8                       ! return
     277        nop                                   ! delay slot
    230278
    231279/*
    232  *  _CPU_Context_restore
    233  *
    234  *  This routine is generallu used only to restart self in an
    235  *  efficient manner.  It may simply be a label in _CPU_Context_switch.
    236  *
    237  *  NOTE: May be unnecessary to reload some registers.
    238  * 
    239280 *  void _CPU_Context_restore(
    240281 *    Context_Control *new_context
    241282 *  )
    242  *  {
    243  *  }
     283 *
      284 *  This routine is generally used only to restart self.
     285 *
     286 *  NOTE: It is unnecessary to reload some registers.
    244287 */
    245288
     
    247290        PUBLIC(_CPU_Context_restore)
    248291SYM(_CPU_Context_restore):
    249         save    %sp, -104, %sp              /* save a stack frame */
    250         ta      0x03                       /* flush registers */
    251         rd      %psr,%o2
    252         ba      restore
    253         mov     %i0,%o1                    /* in the delay slot */
    254 
    255 /*  void _ISR_Handler()
     292        save    %sp, -CPU_MINIMUM_STACK_FRAME_SIZE, %sp
     293        rd      %psr, %o2
     294        ba      SYM(_CPU_Context_restore_heir)
     295        mov     %i0, %o1                      ! in the delay slot
     296
     297/*
     298 *  void _ISR_Handler()
    256299 *
    257300 *  This routine provides the RTEMS interrupt management.
    258301 *
    259  *  void _ISR_Handler()
    260  *  {
    261  *  }
     302 *  We enter this handler from the 4 instructions in the trap table with
     303 *  the following registers assumed to be set as shown:
     304 *
     305 *    l0 = PSR
     306 *    l1 = PC
     307 *    l2 = nPC
     308 *    l3 = trap type
     309 *
     310 *  NOTE: By an executive defined convention, trap type is between 0 and 255 if
      311 *        it is an asynchronous trap and between 256 and 511 if it is synchronous.
    262312 */
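
That numbering convention can be expressed in C roughly as follows (the
mask value and macro names here are assumptions for illustration):

    #define SPARC_SYNCHRONOUS_TRAP_BIT_MASK   0x100
    #define SPARC_ASYNCHRONOUS_TRAP( _trap )  (_trap)           /* 0 - 255 */
    #define SPARC_SYNCHRONOUS_TRAP( _trap )   ((_trap) + 256)   /* 256 - 511 */
    #define SPARC_REAL_TRAP_NUMBER( _trap )   ((_trap) % 256)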
    263313
     
    265315        PUBLIC(_ISR_Handler)
    266316SYM(_ISR_Handler):
    267         ret
    268 
    269   /*
    270    *  This discussion ignores a lot of the ugly details in a real
    271    *  implementation such as saving enough registers/state to be
    272    *  able to do something real.  Keep in mind that the goal is
    273    *  to invoke a user's ISR handler which is written in C and
    274    *  uses a certain set of registers.
    275    *
    276    *  Also note that the exact order is to a large extent flexible.
    277    *  Hardware will dictate a sequence for a certain subset of
    278    *  _ISR_Handler while requirements for setting
    279    */
    280 
    281  /*
    282   *  At entry to "common" _ISR_Handler, the vector number must be
    283   *  available.  On some CPUs the hardware puts either the vector
    284   *  number or the offset into the vector table for this ISR in a
    285   *  known place.  If the hardware does not give us this information,
    286   *  then the assembly portion of RTEMS for this port will contain
    287   *  a set of distinct interrupt entry points which somehow place
    288   *  the vector number in a known place (which is safe if another
    289   *  interrupt nests this one) and branches to _ISR_Handler.
    290   *
    291   *  save some or all context on stack
    292   *  may need to save some special interrupt information for exit
    293   *
    294   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
    295   *    if ( _ISR_Nest_level == 0 )
    296   *      switch to software interrupt stack
    297   *  #endif
    298   *
    299   *  _ISR_Nest_level++;
    300   *
    301   *  _Thread_Dispatch_disable_level++;
    302   *
    303   *  (*_ISR_Vector_table[ vector ])( vector );
    304   *
    305   *  --_ISR_Nest_level;
    306   *
    307   *  if ( _ISR_Nest_level )
    308   *    goto the label "exit interrupt (simple case)"
    309   *
    310   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
    311   *    restore stack
    312   *  #endif
    313   * 
    314   *  if ( !_Context_Switch_necessary )
    315   *    goto the label "exit interrupt (simple case)"
    316   * 
    317   *  if ( !_ISR_Signals_to_thread_executing )
    318   *    goto the label "exit interrupt (simple case)"
    319   *
    320   *  call _Thread_Dispatch() or prepare to return to _ISR_Dispatch
    321   *
    322   *  prepare to get out of interrupt
    323   *  return from interrupt  (maybe to _ISR_Dispatch)
    324   *
    325   *  LABEL "exit interrupt (simple case):
    326   *  prepare to get out of interrupt
    327   *  return from interrupt
    328   */
     317        /*
     318         *  Fix the return address for synchronous traps.
     319         */
     320
     321        andcc   %l3, SPARC_SYNCHRONOUS_TRAP_BIT_MASK, %g0
     322                                      ! Is this a synchronous trap?
     323        be,a    win_ovflow            ! No, then skip the adjustment
     324        nop                           ! DELAY
     325        mov     %l2, %l1              ! do not return to the instruction
     326        add     %l2, 4, %l2           ! indicated
     327
     328win_ovflow:
     329        /*
     330         *  Save the globals this block uses.
     331         *
     332         *  These registers are not restored from the locals.  Their contents
     333         *  are saved directly from the locals into the ISF below.
     334         */
     335
     336        mov     %g4, %l4                 ! save the globals this block uses
     337        mov     %g5, %l5
     338
     339        /*
     340         *  When at a "window overflow" trap, (wim == (1 << cwp)).
     341         *  If we get here like that, then process a window overflow.
     342         */
     343
     344        rd      %wim, %g4
      345        srl     %g4, %l0, %g5            ! g5 = wim >> cwp ; shift count and CWP
     346                                         !   are LS 5 bits ; how convenient :)
     347        cmp     %g5, 1                   ! Is this an invalid window?
     348        bne     dont_do_the_window       ! No, then skip all this stuff
     349        ! we are using the delay slot
     350
     351        /*
     352         *  The following is same as a 1 position right rotate of WIM
     353         */
     354
     355        srl     %g4, 1, %g5              ! g5 = WIM >> 1
     356        sll     %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %g4
     357                                         ! g4 = WIM << (Number Windows - 1)
     358        or      %g4, %g5, %g4            ! g4 = (WIM >> 1) |
     359                                         !      (WIM << (Number Windows - 1))
     360
     361        /*
     362         *  At this point:
     363         *
     364         *    g4 = the new WIM
     365         *    g5 is free
     366         */
     367
     368        /*
     369         *  Since we are tinkering with the register windows, we need to
     370         *  make sure that all the required information is in global registers.
     371         */
     372
     373        save                          ! Save into the window
     374        wr      %g4, 0, %wim          ! WIM = new WIM
     375        nop                           ! delay slots
     376        nop
     377        nop
     378
     379        /*
     380         *  Now save the window just as if we overflowed to it.
     381         */
     382
     383        std     %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET]
     384        std     %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET]
     385        std     %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET]
     386        std     %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET]
     387
     388        std     %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET]
     389        std     %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET]
     390        std     %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET]
     391        std     %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET]
     392
     393        restore
     394        nop
     395
     396dont_do_the_window:
     397        /*
      398         *  Global registers %g4 and %g5 are saved from %l4 and
      399         *  %l5 directly into the ISF below.
     400         */
     401
     402save_isf:
     403
     404        /*
     405         *  Save the state of the interrupted task -- especially the global
     406         *  registers -- in the Interrupt Stack Frame.  Note that the ISF
     407         *  includes a regular minimum stack frame which will be used if
     408         *  needed by register window overflow and underflow handlers.
     409         *
     410         *  REGISTERS SAME AS AT _ISR_Handler
     411         */
     412
     413        sub     %fp, CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp
     414                                               ! make space for ISF
     415
     416        std     %l0, [%sp + ISF_PSR_OFFSET]    ! save psr, PC
     417        st      %l2, [%sp + ISF_NPC_OFFSET]    ! save nPC
     418        st      %g1, [%sp + ISF_G1_OFFSET]     ! save g1
     419        std     %g2, [%sp + ISF_G2_OFFSET]     ! save g2, g3
     420        std     %l4, [%sp + ISF_G4_OFFSET]     ! save g4, g5 -- see above
     421        std     %g6, [%sp + ISF_G6_OFFSET]     ! save g6, g7
     422
     423        std     %i0, [%sp + ISF_I0_OFFSET]     ! save i0, i1
     424        std     %i2, [%sp + ISF_I2_OFFSET]     ! save i2, i3
     425        std     %i4, [%sp + ISF_I4_OFFSET]     ! save i4, i5
     426        std     %i6, [%sp + ISF_I6_FP_OFFSET]  ! save i6/fp, i7
     427
     428        rd      %y, %g1
     429        st      %g1, [%sp + ISF_Y_OFFSET]      ! save y
     430
     431        mov     %sp, %o1                       ! 2nd arg to ISR Handler
     432
     433        /*
     434         *  Increment ISR nest level and Thread dispatch disable level.
     435         *
     436         *  Register usage for this section:
     437         *
     438         *    l4 = _Thread_Dispatch_disable_level pointer
     439         *    l5 = _ISR_Nest_level pointer
     440         *    l6 = _Thread_Dispatch_disable_level value
     441         *    l7 = _ISR_Nest_level value
     442         *
     443         *  NOTE: It is assumed that l4 - l7 will be preserved until the ISR
     444         *        nest and thread dispatch disable levels are unnested.
     445         */
     446
     447        sethi    %hi(SYM(_Thread_Dispatch_disable_level)), %l4
     448        ld       [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))], %l6
     449        sethi    %hi(SYM(_ISR_Nest_level)), %l5
     450        ld       [%l5 + %lo(SYM(_ISR_Nest_level))], %l7
     451
     452        add      %l6, 1, %l6
     453        st       %l6, [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))]
     454
     455        add      %l7, 1, %l7
     456        st       %l7, [%l5 + %lo(SYM(_ISR_Nest_level))]
     457
     458        /*
     459         *  If ISR nest level was zero (now 1), then switch stack.
     460         */
     461
     462        mov      %sp, %fp
     463        subcc    %l7, 1, %l7             ! outermost interrupt handler?
     464        bnz      dont_switch_stacks      ! No, then do not switch stacks
     465
     466        sethi    %hi(SYM(_CPU_Interrupt_stack_high)), %g4
     467        ld       [%g4 + %lo(SYM(_CPU_Interrupt_stack_high))], %sp
     468
     469dont_switch_stacks:
     470        /*
     471         *  Make sure we have a place on the stack for the window overflow
     472         *  trap handler to write into.  At this point it is safe to
     473         *  enable traps again.
     474         */
     475
     476        sub      %sp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp
     477
     478        wr       %l0, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
     479
     480        /*
     481         *  Vector to user's handler.
     482         *
     483         *  NOTE: TBR may no longer have vector number in it since
     484         *        we just enabled traps.  It is definitely in l3.
     485         */
     486
     487        sethi    %hi(SYM(_ISR_Vector_table)), %g4
     488        or       %g4, %lo(SYM(_ISR_Vector_table)), %g4
     489        and      %l3, 0xFF, %g5         ! remove synchronous trap indicator
     490        sll      %g5, 2, %g5            ! g5 = offset into table
     491        ld       [%g4 + %g5], %g4       ! g4 = _ISR_Vector_table[ vector ]
     492
     493
     494                                        ! o1 = 2nd arg = address of the ISF
     495                                        !   WAS LOADED WHEN ISF WAS SAVED!!!
     496        mov      %l3, %o0               ! o0 = 1st arg = vector number
     497        call     %g4, 0
     498        nop                             ! delay slot
     499
     500        /*
     501         *  Redisable traps so we can finish up the interrupt processing.
     502         *  This is a VERY conservative place to do this.
     503         *
     504         *  NOTE: %l0 has the PSR which was in place when we took the trap.
     505         */
     506
     507        mov      %l0, %psr             ! **** DISABLE TRAPS ****
     508
     509        /*
     510         *  Decrement ISR nest level and Thread dispatch disable level.
     511         *
     512         *  Register usage for this section:
     513         *
     514         *    l4 = _Thread_Dispatch_disable_level pointer
     515         *    l5 = _ISR_Nest_level pointer
     516         *    l6 = _Thread_Dispatch_disable_level value
     517         *    l7 = _ISR_Nest_level value
     518         */
     519
     520        sub      %l6, 1, %l6
     521        st       %l6, [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))]
     522
     523        st       %l7, [%l5 + %lo(SYM(_ISR_Nest_level))]
     524
     525        /*
     526         *  If dispatching is disabled (includes nested interrupt case),
     527         *  then do a "simple" exit.
     528         */
     529
     530        orcc     %l6, %g0, %g0   ! Is dispatching disabled?
     531        bnz      simple_return   ! Yes, then do a "simple" exit
     532        nop                      ! delay slot
     533
     534        /*
     535         *  If a context switch is necessary, then do fudge stack to
      536         *  If a context switch is necessary, then fudge the stack to
     537         */
     538
     539        sethi    %hi(SYM(_Context_Switch_necessary)), %l4
     540        ld       [%l4 + %lo(SYM(_Context_Switch_necessary))], %l5
     541
     542        orcc     %l5, %g0, %g0   ! Is thread switch necessary?
     543        bnz      SYM(_ISR_Dispatch) ! yes, then invoke the dispatcher
     544        nop                      ! delay slot
     545
     546        /*
     547         *  Finally, check to see if signals were sent to the currently
     548         *  executing task.  If so, we need to invoke the interrupt dispatcher.
     549         */
     550
     551        sethi    %hi(SYM(_ISR_Signals_to_thread_executing)), %l6
     552        ld       [%l6 + %lo(SYM(_ISR_Signals_to_thread_executing))], %l7
     553
     554        orcc     %l7, %g0, %g0   ! Were signals sent to the currently
     555                                 !   executing thread?
      556        bz       simple_return   ! No, then do a "simple" exit
     557        nop                      ! delay slot
     558
     559        /*
     560         *  Invoke interrupt dispatcher.
     561         */
     562
     563        PUBLIC(_ISR_Dispatch)
     564SYM(_ISR_Dispatch):
     565
     566        /*
     567         *  The following subtract should get us back on the interrupted
      568         *  task's stack and add enough room to invoke the dispatcher.
     569         *  When we enable traps, we are mostly back in the context
     570         *  of the task and subsequent interrupts can operate normally.
     571         */
     572
     573        sub      %fp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp
     574
     575        or      %l0, SPARC_PSR_ET_MASK, %l7    ! l7 = PSR with ET=1
     576        mov     %l7, %psr                      !  **** ENABLE TRAPS ****
     577        nop
     578        nop
     579        nop
     580
     581        call    SYM(_Thread_Dispatch), 0
     582        nop
     583
     584        /*
     585         *  The CWP in place at this point may be different from
     586         *  that which was in effect at the beginning of the ISR if we
     587         *  have been context switched between the beginning of this invocation
     588         *  of _ISR_Handler and this point.  Thus the CWP and WIM should
     589         *  not be changed back to their values at ISR entry time.  Any
     590         *  changes to the PSR must preserve the CWP.
     591         */
     592
     593simple_return:
     594        ld      [%fp + ISF_Y_OFFSET], %l5      ! restore y
     595        wr      %l5, 0, %y
     596
     597        ldd     [%fp + ISF_PSR_OFFSET], %l0    ! restore psr, PC
     598        ld      [%fp + ISF_NPC_OFFSET], %l2    ! restore nPC
     599        rd      %psr, %l3
     600        and     %l3, SPARC_PSR_CWP_MASK, %l3   ! want "current" CWP
     601        andn    %l0, SPARC_PSR_CWP_MASK, %l0   ! want rest from task
     602        or      %l3, %l0, %l0                  ! install it later...
     603        andn    %l0, SPARC_PSR_ET_MASK, %l0
     604
     605        /*
      606         *  Restore the task's global and out registers
     607         */
     608
     609        mov    %fp, %g1
     610
     611                                              ! g1 is restored later
     612        ldd     [%fp + ISF_G2_OFFSET], %g2    ! restore g2, g3
     613        ldd     [%fp + ISF_G4_OFFSET], %g4    ! restore g4, g5
     614        ldd     [%fp + ISF_G6_OFFSET], %g6    ! restore g6, g7
     615
     616        ldd     [%fp + ISF_I0_OFFSET], %i0    ! restore i0, i1
     617        ldd     [%fp + ISF_I2_OFFSET], %i2    ! restore i2, i3
     618        ldd     [%fp + ISF_I4_OFFSET], %i4    ! restore i4, i5
     619        ldd     [%fp + ISF_I6_FP_OFFSET], %i6 ! restore i6/fp, i7
     620
     621        /*
     622         *  Registers:
     623         *
     624         *   ALL global registers EXCEPT G1 and the input registers have
      625         *   already been restored and are thus off limits.
     626         *
     627         *   The following is the contents of the local registers:
     628         *
     629         *     l0 = original psr
     630         *     l1 = return address (i.e. PC)
     631         *     l2 = nPC
     632         *     l3 = CWP
     633         */
     634
     635        /*
     636         *  if (CWP + 1) is an invalid window then we need to reload it.
     637         *
     638         *  WARNING: Traps should now be disabled
     639         */
     640
     641        mov     %l0, %psr                  !  **** DISABLE TRAPS ****
     642        nop
     643        nop
     644        nop
     645        rd      %wim, %l4
     646        add     %l0, 1, %l6                ! l6 = cwp + 1
     647        and     %l6, SPARC_PSR_CWP_MASK, %l6 ! do the modulo on it
      648        srl     %l4, %l6, %l5              ! l5 = wim >> (cwp + 1) ; shift count
      649                                           !  and CWP are conveniently LS 5 bits
      650        cmp     %l5, 1                     ! Is the task's window invalid?
     651        bne     good_task_window
     652
     653        /*
     654         *  The following code is the same as a 1 position left rotate of WIM.
     655         */
     656
     657        sll     %l4, 1, %l5                ! l5 = WIM << 1
     658        srl     %l4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %l4
     659                                           ! l4 = WIM >> (Number Windows - 1)
     660        or      %l4, %l5, %l4              ! l4 = (WIM << 1) |
     661                                           !      (WIM >> (Number Windows - 1))
     662
     663        /*
     664         *  Now restore the window just as if we underflowed to it.
     665         */
     666
     667        wr      %l4, 0, %wim               ! WIM = new WIM
     668        restore                            ! now into the tasks window
     669
     670        ldd     [%g1 + CPU_STACK_FRAME_L0_OFFSET], %l0
     671        ldd     [%g1 + CPU_STACK_FRAME_L2_OFFSET], %l2
     672        ldd     [%g1 + CPU_STACK_FRAME_L4_OFFSET], %l4
     673        ldd     [%g1 + CPU_STACK_FRAME_L6_OFFSET], %l6
     674        ldd     [%g1 + CPU_STACK_FRAME_I0_OFFSET], %i0
     675        ldd     [%g1 + CPU_STACK_FRAME_I2_OFFSET], %i2
     676        ldd     [%g1 + CPU_STACK_FRAME_I4_OFFSET], %i4
     677        ldd     [%g1 + CPU_STACK_FRAME_I6_FP_OFFSET], %i6
     678                                           ! reload of sp clobbers ISF
     679        save                               ! Back to ISR dispatch window
     680
     681good_task_window:
     682
     683        mov     %l0, %psr                  !  **** DISABLE TRAPS ****
     684                                           !  and restore condition codes.
     685        ld      [%g1 + ISF_G1_OFFSET], %g1 ! restore g1
     686        jmp     %l1                        ! transfer control and
     687        rett    %l2                        ! go back to tasks window
     688
     689/* end of file */
  • c/src/exec/score/cpu/sparc/rtems.s

    rea74482 r9700578  
    2323 * 
    2424 *  void RTEMS()
    25  *  {
    26  *  }
    2725 */
    2826
     
    3028        PUBLIC(RTEMS)
    3129SYM(RTEMS):
    32         ret
     30        /*
     31         *  g2 was chosen because gcc uses it as a scratch register in
     32         *  similar code scenarios and the other locals, ins, and outs
     33         *  are off limits to this routine unless it does a "save" and
      34         *  copies its in registers to the outs, which only works for up to
     35         *  6 parameters.  Best to take the simple approach in this case.
     36         */
      37        sethi     %hi(SYM(_Entry_points)), %g2
     38        or        %g2, %lo(SYM(_Entry_points)), %g2
     39        sll       %g1, 2,  %g1
     40        add       %g1, %g2, %g2
     41        jmp       %g2
     42        nop
    3343
  • c/src/exec/score/cpu/sparc/sparc.h

    rea74482 r9700578  
    11/*  sparc.h
    22 *
    3  *  This include file contains information pertaining to the Motorola
    4  *  SPARC processor family.
     3 *  This include file contains information pertaining to the SPARC
     4 *  processor family.
    55 *
    66 *  $Id$
     
    3838/*
    3939 *  This file contains the information required to build
    40  *  RTEMS for a particular member of the "sparc"
    41  *  family when executing in protected mode.  It does
     40 *  RTEMS for a particular member of the "sparc" family.  It does
    4241 *  this by setting variables to indicate which implementation
    4342 *  dependent features are present in a particular member
     
    5251 *    + SPARC_HAS_BITSCAN
    5352 *        0 - does not have scan instructions
    54  *        1 - has scan instruction  (no support implemented)
     53 *        1 - has scan instruction  (not currently implemented)
    5554 *
     55 *    + SPARC_NUMBER_OF_REGISTER_WINDOWS
     56 *        8 is the most common number supported by SPARC implementations.
     57 *        SPARC_PSR_CWP_MASK is derived from this value.
     58 *
     59 *    + SPARC_HAS_LOW_POWER_MODE
      60 *        0 - does not have low power mode (or it is not supported)
     61 *        1 - has low power mode and thus a CPU model dependent idle task.
     62 *
    5663 */
    5764 
    5865#if defined(erc32)
    5966 
    60 #define CPU_MODEL_NAME  "erc32"
    61 #define SPARC_HAS_FPU     1
    62 #define SPARC_HAS_BITSCAN 0
     67#define CPU_MODEL_NAME                   "erc32"
     68#define SPARC_HAS_FPU                    1
     69#define SPARC_HAS_BITSCAN                0
     70#define SPARC_NUMBER_OF_REGISTER_WINDOWS 8
     71#define SPARC_HAS_LOW_POWER_MODE         1
    6372 
    6473#else
     
    7584
    7685/*
     86 *  Miscellaneous constants
     87 */
     88
     89/*
     90 *  PSR masks and starting bit positions
     91 *
     92 *  NOTE: Reserved bits are ignored.
     93 */
     94
     95#if (SPARC_NUMBER_OF_REGISTER_WINDOWS == 8)
     96#define SPARC_PSR_CWP_MASK               0x07   /* bits  0 -  4 */
     97#elif (SPARC_NUMBER_OF_REGISTER_WINDOWS == 16)
     98#define SPARC_PSR_CWP_MASK               0x0F   /* bits  0 -  4 */
     99#elif (SPARC_NUMBER_OF_REGISTER_WINDOWS == 32)
     100#define SPARC_PSR_CWP_MASK               0x1F   /* bits  0 -  4 */
     101#else
     102#error "Unsupported number of register windows for this cpu"
     103#endif
     104
     105#define SPARC_PSR_ET_MASK   0x00000020   /* bit   5 */
     106#define SPARC_PSR_PS_MASK   0x00000040   /* bit   6 */
     107#define SPARC_PSR_S_MASK    0x00000080   /* bit   7 */
     108#define SPARC_PSR_PIL_MASK  0x00000F00   /* bits  8 - 11 */
     109#define SPARC_PSR_EF_MASK   0x00001000   /* bit  12 */
     110#define SPARC_PSR_EC_MASK   0x00002000   /* bit  13 */
     111#define SPARC_PSR_ICC_MASK  0x00F00000   /* bits 20 - 23 */
     112#define SPARC_PSR_VER_MASK  0x0F000000   /* bits 24 - 27 */
     113#define SPARC_PSR_IMPL_MASK 0xF0000000   /* bits 28 - 31 */
     114
     115#define SPARC_PSR_CWP_BIT_POSITION   0   /* bits  0 -  4 */
     116#define SPARC_PSR_ET_BIT_POSITION    5   /* bit   5 */
     117#define SPARC_PSR_PS_BIT_POSITION    6   /* bit   6 */
     118#define SPARC_PSR_S_BIT_POSITION     7   /* bit   7 */
     119#define SPARC_PSR_PIL_BIT_POSITION   8   /* bits  8 - 11 */
     120#define SPARC_PSR_EF_BIT_POSITION   12   /* bit  12 */
     121#define SPARC_PSR_EC_BIT_POSITION   13   /* bit  13 */
     122#define SPARC_PSR_ICC_BIT_POSITION  20   /* bits 20 - 23 */
     123#define SPARC_PSR_VER_BIT_POSITION  24   /* bits 24 - 27 */
     124#define SPARC_PSR_IMPL_BIT_POSITION 28   /* bits 28 - 31 */
     125
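For example, the implementation and version fields can be read with the
masks and bit positions above (a sketch using the sparc_get_psr() macro
defined later in this file):

    unsigned32 psr;
    unsigned32 implementation;
    unsigned32 version;

    sparc_get_psr( psr );
    implementation = (psr & SPARC_PSR_IMPL_MASK) >> SPARC_PSR_IMPL_BIT_POSITION;
    version        = (psr & SPARC_PSR_VER_MASK)  >> SPARC_PSR_VER_BIT_POSITION;
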
     126#ifndef ASM
     127
     128/*
    77129 *  Standard nop
    78130 */
     
    84136
    85137/*
    86  *  Some macros to aid in accessing special registers.
     138 *  Get and set the PSR
    87139 */
    88140
     
    95147#define sparc_set_psr( _psr ) \
    96148  do { \
    97     asm volatile ( "wr   %%g0,%0,%%psr " : "=r" ((_psr)) : "0" ((_psr)) ); \
    98     nop(); nop(); nop(); \
    99   } while ( 0 )
     149    asm volatile ( "mov  %0, %%psr " : "=r" ((_psr)) : "0" ((_psr)) ); \
     150    nop(); \
     151    nop(); \
     152    nop(); \
     153  } while ( 0 )
     154
     155/*
     156 *  Get and set the TBR
     157 */
    100158
    101159#define sparc_get_tbr( _tbr ) \
    102160  do { \
      161     (_tbr) = 0; /* to avoid uninitialized warnings */ \
    103162     asm volatile( "rd %%tbr, %0" :  "=r" (_tbr) : "0" (_tbr) ); \
    104163  } while ( 0 )
     
    106165#define sparc_set_tbr( _tbr ) \
    107166  do { \
    108   } while ( 0 )
     167     asm volatile( "wr %0, 0, %%tbr" :  "=r" (_tbr) : "0" (_tbr) ); \
     168  } while ( 0 )
     169
     170/*
     171 *  Get and set the WIM
     172 */
    109173
    110174#define sparc_get_wim( _wim ) \
    111175  do { \
    112      asm volatile( "rd %%wim, %0" :  "=r" (_wim) : "0" (_wim) ); \
     176    asm volatile( "rd %%wim, %0" :  "=r" (_wim) : "0" (_wim) ); \
    113177  } while ( 0 )
    114178
    115179#define sparc_set_wim( _wim ) \
    116180  do { \
     181    asm volatile( "wr %0, %%wim" :  "=r" (_wim) : "0" (_wim) ); \
     182    nop(); \
     183    nop(); \
     184    nop(); \
     185  } while ( 0 )
     186
     187/*
     188 *  Get and set the Y
     189 */
     190 
     191#define sparc_get_y( _y ) \
     192  do { \
     193    asm volatile( "rd %%y, %0" :  "=r" (_y) : "0" (_y) ); \
     194  } while ( 0 )
     195 
     196#define sparc_set_y( _y ) \
     197  do { \
     198    asm volatile( "wr %0, %%y" :  "=r" (_y) : "0" (_y) ); \
    117199  } while ( 0 )
    118200
     
    122204 */
    123205
    124 #define SPARC_PIL_MASK  0x00000F00
    125 
    126206#define sparc_disable_interrupts( _level ) \
    127   do { register unsigned int _mask = SPARC_PIL_MASK; \
    128     (_level) = 0; \
    129     \
    130     asm volatile ( "rd   %%psr,%0 ; \
    131                     wr   %0,%1,%%psr " \
    132                     : "=r" ((_level)), "=r" (_mask) \
    133                     : "0" ((_level)), "1" (_mask) \
    134     ); \
    135     nop(); nop(); nop(); \
     207  do { \
     208    register unsigned int _newlevel; \
     209    \
     210    sparc_get_psr( _level ); \
     211    (_newlevel) = (_level) | SPARC_PSR_PIL_MASK; \
     212    sparc_set_psr( _newlevel ); \
    136213  } while ( 0 )
    137214 
    138215#define sparc_enable_interrupts( _level ) \
    139   do { unsigned int _tmp; \
     216  do { \
     217    unsigned int _tmp; \
     218    \
    140219    sparc_get_psr( _tmp ); \
    141     _tmp &= ~SPARC_PIL_MASK; \
    142     _tmp |= (_level) & SPARC_PIL_MASK; \
     220    _tmp &= ~SPARC_PSR_PIL_MASK; \
     221    _tmp |= (_level) & SPARC_PSR_PIL_MASK; \
    143222    sparc_set_psr( _tmp ); \
    144223  } while ( 0 )
    145224 
    146  
    147225#define sparc_flash_interrupts( _level ) \
    148226  do { \
    149       register unsigned32 _ignored = 0; \
    150       sparc_enable_interrupts( (_level) ); \
    151       sparc_disable_interrupts( _ignored ); \
     227    register unsigned32 _ignored = 0; \
     228    \
     229    sparc_enable_interrupts( (_level) ); \
     230    sparc_disable_interrupts( _ignored ); \
    152231  } while ( 0 )
    153232
    154233#define sparc_set_interrupt_level( _new_level ) \
    155   do { register unsigned32 _new_psr_level = 0; \
     234  do { \
     235    register unsigned32 _new_psr_level = 0; \
    156236    \
    157237    sparc_get_psr( _new_psr_level ); \
    158     _new_psr_level &= ~SPARC_PIL_MASK; \
    159     _new_psr_level |= (((_new_level) << 8) & SPARC_PIL_MASK); \
     238    _new_psr_level &= ~SPARC_PSR_PIL_MASK; \
     239    _new_psr_level |= \
     240      (((_new_level) << SPARC_PSR_PIL_BIT_POSITION) & SPARC_PSR_PIL_MASK); \
    160241    sparc_set_psr( _new_psr_level ); \
    161242  } while ( 0 )
     
    166247    \
    167248    sparc_get_psr( _psr_level ); \
    168     (_level) = (_psr_level & SPARC_PIL_MASK) >> 8; \
    169   } while ( 0 )
     249    (_level) = \
     250      (_psr_level & SPARC_PSR_PIL_MASK) >> SPARC_PSR_PIL_BIT_POSITION; \
     251  } while ( 0 )
     252
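Typical usage pairs the disable and enable macros around a critical
section, for example:

    unsigned32 level;

    sparc_disable_interrupts( level );
      /* critical section -- interrupts masked to the maximum PIL */
    sparc_enable_interrupts( level );
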
     253#endif
    170254
    171255#ifdef __cplusplus
  • c/src/exec/score/cpu/sparc/sparctypes.h

    rea74482 r9700578  
    11/*  sparctypes.h
    22 *
    3  *  This include file contains type definitions pertaining to the Intel
     3 *  This include file contains type definitions pertaining to the
    44 *  SPARC processor family.
    55 *
     
    2020 */
    2121
    22 typedef unsigned char  unsigned8;      /* unsigned 8-bit  integer */
    23 typedef unsigned short unsigned16;     /* unsigned 16-bit integer */
    24 typedef unsigned int   unsigned32;     /* unsigned 32-bit integer */
    25 typedef unsigned long long unsigned64; /* unsigned 64-bit integer */
     22typedef unsigned char      unsigned8;            /* unsigned 8-bit  integer */
     23typedef unsigned short     unsigned16;           /* unsigned 16-bit integer */
     24typedef unsigned int       unsigned32;           /* unsigned 32-bit integer */
     25typedef unsigned long long unsigned64;           /* unsigned 64-bit integer */
    2626
    27 typedef unsigned16     Priority_Bit_map_control;
     27typedef unsigned16         Priority_Bit_map_control;
    2828
    29 typedef signed char      signed8;      /* 8-bit  signed integer */
    30 typedef signed short     signed16;     /* 16-bit signed integer */
    31 typedef signed int       signed32;     /* 32-bit signed integer */
    32 typedef signed long long signed64;     /* 64 bit signed integer */
     29typedef signed char        signed8;              /* 8-bit  signed integer */
     30typedef signed short       signed16;             /* 16-bit signed integer */
     31typedef signed int         signed32;             /* 32-bit signed integer */
      32typedef signed long long   signed64;             /* 64-bit signed integer */
    3333
    34 typedef unsigned32 boolean;     /* Boolean value   */
     34typedef unsigned32         boolean;              /* Boolean value   */
    3535
    36 typedef float          single_precision;     /* single precision float */
    37 typedef double         double_precision;     /* double precision float */
     36typedef float              single_precision;     /* single precision float */
     37typedef double             double_precision;     /* double precision float */
    3838
    3939typedef void sparc_isr;