Changeset 9700578 in rtems for cpukit


Timestamp: 10/30/95 21:54:45
Author: Joel Sherrill <joel.sherrill@…>
Branches: 4.10, 4.11, 4.8, 4.9, 5, master
Children: c4808ca
Parents: ea74482
Message: SPARC port passes all tests

Location: cpukit
Files: 1 added, 16 edited

  • cpukit/libcsupport/include/ringbuf.h

    rea74482 r9700578  
    1111
    1212#ifndef RINGBUF_QUEUE_LENGTH
    13 #define RINGBUF_QUEUE_LENGTH 200
     13#define RINGBUF_QUEUE_LENGTH 128
    1414#endif
    1515
    1616typedef struct {
    1717  char buffer[RINGBUF_QUEUE_LENGTH];
    18   int  head;
    19   int  tail;
     18  volatile int  head;
     19  volatile int  tail;
    2020} Ring_buffer_t;
    2121
     
    2828   ( (_buffer)->head == (_buffer)->tail )
    2929
     30#define Ring_buffer_Is_full( _buffer ) \
     31   ( (_buffer)->head == ((_buffer)->tail + 1) % RINGBUF_QUEUE_LENGTH )
     32
    3033#define Ring_buffer_Add_character( _buffer, _ch ) \
    3134  do { \
    32     (_buffer)->buffer[ (_buffer)->tail ] = (_ch); \
    33     (_buffer)->tail = ((_buffer)->tail+1) % RINGBUF_QUEUE_LENGTH; \
     35    rtems_unsigned32 isrlevel; \
     36    \
     37    rtems_interrupt_disable( isrlevel ); \
     38      (_buffer)->tail = ((_buffer)->tail+1) % RINGBUF_QUEUE_LENGTH; \
     39      (_buffer)->buffer[ (_buffer)->tail ] = (_ch); \
     40    rtems_interrupt_enable( isrlevel ); \
    3441  } while ( 0 )
    3542
    3643#define Ring_buffer_Remove_character( _buffer, _ch ) \
    3744  do { \
    38     (_ch) = (_buffer)->buffer[ (_buffer)->head ]; \
    39     (_buffer)->head = ((_buffer)->head+1) % RINGBUF_QUEUE_LENGTH; \
     45    rtems_unsigned32 isrlevel; \
     46    \
     47    rtems_interrupt_disable( isrlevel ); \
     48      (_buffer)->head = ((_buffer)->head+1) % RINGBUF_QUEUE_LENGTH; \
     49      (_ch) = (_buffer)->buffer[ (_buffer)->head ]; \
     50    rtems_interrupt_enable( isrlevel ); \
    4051  } while ( 0 )
    4152
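
The ringbuf.h change above introduces Ring_buffer_Is_full and wraps the add/remove macros in rtems_interrupt_disable/rtems_interrupt_enable so a task and an ISR can share the buffer. The full/empty tests follow the usual one-slot-reserved modulo convention: empty when head equals tail, full when head equals (tail + 1) % RINGBUF_QUEUE_LENGTH, so at most RINGBUF_QUEUE_LENGTH - 1 characters are ever stored. A minimal host-side sketch of that arithmetic (plain C, hypothetical Demo_* names, no interrupt protection):

    #include <stdio.h>

    #define DEMO_QUEUE_LENGTH 8                 /* small so the wrap-around is easy to see */

    typedef struct {
      char buffer[ DEMO_QUEUE_LENGTH ];
      volatile int head;
      volatile int tail;
    } Demo_ring_buffer;

    #define Demo_is_empty( _b ) ( (_b)->head == (_b)->tail )
    #define Demo_is_full( _b )  ( (_b)->head == ((_b)->tail + 1) % DEMO_QUEUE_LENGTH )

    int main( void )
    {
      Demo_ring_buffer rb = { { 0 }, 0, 0 };
      char ch;

      /* Fill until the full predicate fires: only DEMO_QUEUE_LENGTH - 1 slots are usable. */
      for ( ch = 'a'; !Demo_is_full( &rb ); ch++ ) {
        rb.tail = (rb.tail + 1) % DEMO_QUEUE_LENGTH;   /* advance, then store, as in the patch */
        rb.buffer[ rb.tail ] = ch;
      }

      /* Drain in FIFO order until the empty predicate fires. */
      while ( !Demo_is_empty( &rb ) ) {
        rb.head = (rb.head + 1) % DEMO_QUEUE_LENGTH;   /* advance, then read */
        printf( "%c\n", rb.buffer[ rb.head ] );
      }
      return 0;
    }
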
  • cpukit/posix/include/rtems/posix/intr.h

    rea74482 r9700578  
    6161 */
    6262
    63 EXTERN POSIX_Interrupt_Control
    64   _POSIX_Interrupt_Information[ ISR_NUMBER_OF_VECTORS ];
     63EXTERN POSIX_Interrupt_Control _POSIX_Interrupt_Information[ ISR_NUMBER_OF_VECTORS ];
    6564 
    6665/*
  • cpukit/rtems/src/event.c

    rea74482 r9700578  
    109109  return( _Thread_Executing->Wait.return_code );
    110110}
     111
     112
     113/*PAGE
     114 *
     115 *  _Event_Seize
     116 *
     117 *  This routine attempts to satisfy the requested event condition
     118 *  for the running thread.
     119 *
     120 *  Input parameters:
     121 *    event_in   - the event condition to satisfy
     122 *    option_set - acquire event options
     123 *    ticks      - interval to wait
     124 *    event_out  - pointer to event set output area
     125 *
     126 *  Output parameters: NONE
     127 *    *event_out - event set output area filled in
     128 *
     129 *  INTERRUPT LATENCY:
     130 *    available
     131 *    wait
     132 *    check sync
     133 */
     134
     135void _Event_Seize(
     136  rtems_event_set  event_in,
     137  rtems_option     option_set,
     138  rtems_interval   ticks,
     139  rtems_event_set *event_out
     140)
     141{
     142  Thread_Control    *executing;
     143  rtems_event_set    seized_events;
     144  rtems_event_set    pending_events;
     145  ISR_Level          level;
     146  RTEMS_API_Control  *api;
     147
     148  executing = _Thread_Executing;
     149  executing->Wait.return_code = RTEMS_SUCCESSFUL;
     150
     151  api = executing->API_Extensions[ THREAD_API_RTEMS ];
     152
     153  _ISR_Disable( level );
     154  pending_events = api->pending_events;
     155  seized_events  = _Event_sets_Get( pending_events, event_in );
     156
     157  if ( !_Event_sets_Is_empty( seized_events ) &&
     158       (seized_events == event_in || _Options_Is_any( option_set )) ) {
     159    api->pending_events =
     160      _Event_sets_Clear( pending_events, seized_events );
     161    _ISR_Enable( level );
     162    *event_out = seized_events;
     163    return;
     164  }
     165
     166  if ( _Options_Is_no_wait( option_set ) ) {
     167    _ISR_Enable( level );
     168    executing->Wait.return_code = RTEMS_UNSATISFIED;
     169    *event_out = seized_events;
     170    return;
     171  }
     172
     173  _Event_Sync = TRUE;
     174  executing->Wait.option            = (unsigned32) option_set;
     175  executing->Wait.count             = (unsigned32) event_in;
     176  executing->Wait.return_argument   = event_out;
     177
     178  _ISR_Enable( level );
     179  _Thread_Set_state( executing, STATES_WAITING_FOR_EVENT );
     180
     181  if ( ticks ) {
     182    _Watchdog_Initialize(
     183      &executing->Timer,
     184      _Event_Timeout,
     185      executing->Object.id,
     186      NULL
     187    );
     188    _Watchdog_Insert_ticks(
     189      &executing->Timer,
     190      ticks,
     191      WATCHDOG_NO_ACTIVATE
     192    );
     193  }
     194
     195  _ISR_Disable( level );
     196  if ( _Event_Sync == TRUE ) {
     197    _Event_Sync = FALSE;
     198    if ( ticks )
     199      _Watchdog_Activate( &executing->Timer );
     200    _ISR_Enable( level );
     201    return;
     202  }
     203  _ISR_Enable( level );
     204  (void) _Watchdog_Remove( &executing->Timer );
     205  _Thread_Unblock( executing );
     206  return;
     207}
     208
     209/*PAGE
     210 *
     211 *  _Event_Surrender
     212 *
      213 *  This routine removes a thread from the specified threadq.
     214 *
     215 *  Input parameters:
     216 *    the_thread - pointer to thread to be dequeued
     217 *
     218 *  Output parameters: NONE
     219 *
     220 *  INTERRUPT LATENCY:
     221 *    before flash
     222 *    after flash
     223 *    check sync
     224 */
     225
     226void _Event_Surrender(
     227  Thread_Control *the_thread
     228)
     229{
     230  ISR_Level           level;
     231  rtems_event_set     pending_events;
     232  rtems_event_set     event_condition;
     233  rtems_event_set     seized_events;
     234  rtems_option        option_set;
     235  RTEMS_API_Control  *api;
     236
     237  api = the_thread->API_Extensions[ THREAD_API_RTEMS ];
     238
     239  option_set = (rtems_option) the_thread->Wait.option;
     240
     241  _ISR_Disable( level );
     242  pending_events  = api->pending_events;
     243  event_condition = (rtems_event_set) the_thread->Wait.count;
     244
     245  seized_events = _Event_sets_Get( pending_events, event_condition );
     246
     247  if ( !_Event_sets_Is_empty( seized_events ) ) {
     248    if ( _States_Is_waiting_for_event( the_thread->current_state ) ) {
     249      if ( seized_events == event_condition || _Options_Is_any( option_set ) ) {
     250        api->pending_events =
     251           _Event_sets_Clear( pending_events, seized_events );
     252        *(rtems_event_set *)the_thread->Wait.return_argument = seized_events;
     253
     254        _ISR_Flash( level );
     255
     256        if ( !_Watchdog_Is_active( &the_thread->Timer ) ) {
     257          _ISR_Enable( level );
     258          _Thread_Unblock( the_thread );
     259        }
     260        else {
     261          _Watchdog_Deactivate( &the_thread->Timer );
     262          _ISR_Enable( level );
     263          (void) _Watchdog_Remove( &the_thread->Timer );
     264          _Thread_Unblock( the_thread );
     265        }
     266        return;
     267      }
     268    }
     269    else if ( _Thread_Is_executing( the_thread ) && _Event_Sync == TRUE ) {
     270      if ( seized_events == event_condition || _Options_Is_any( option_set ) ) {
     271        api->pending_events = _Event_sets_Clear( pending_events,seized_events );
     272        *(rtems_event_set *)the_thread->Wait.return_argument = seized_events;
     273        _Event_Sync = FALSE;
     274      }
     275    }
     276  }
     277  _ISR_Enable( level );
     278}
     279
     280/*PAGE
     281 *
     282 *  _Event_Timeout
     283 *
      284 *  This routine processes a thread which times out while waiting to
     285 *  receive an event_set. It is called by the watchdog handler.
     286 *
     287 *  Input parameters:
     288 *    id - thread id
     289 *
     290 *  Output parameters: NONE
     291 */
     292
     293void _Event_Timeout(
     294  Objects_Id  id,
     295  void       *ignored
     296)
     297{
     298  Thread_Control *the_thread;
     299  Objects_Locations      location;
     300
     301  the_thread = _Thread_Get( id, &location );
     302  switch ( location ) {
     303    case OBJECTS_ERROR:
     304    case OBJECTS_REMOTE:  /* impossible */
     305      break;
     306    case OBJECTS_LOCAL:
     307      the_thread->Wait.return_code = RTEMS_TIMEOUT;
     308      _Thread_Unblock( the_thread );
     309      _Thread_Unnest_dispatch();
     310      break;
     311  }
     312}
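
_Event_Seize, _Event_Surrender, and _Event_Timeout added above are the engine behind the Classic API directives rtems_event_receive and rtems_event_send: the receiver blocks in _Event_Seize (optionally with a watchdog for the timeout), and a send from a task or ISR runs _Event_Surrender on the waiting thread. A hedged caller-side sketch, where the event meanings and the consumer's task id are assumptions:

    #include <rtems.h>

    #define EVENT_RX_READY  RTEMS_EVENT_0   /* application-chosen meanings (assumed) */
    #define EVENT_TX_DONE   RTEMS_EVENT_1

    /* Waiting side: blocks in _Event_Seize until either event arrives or 100
     * ticks elapse, in which case _Event_Timeout posts RTEMS_TIMEOUT. */
    rtems_task consumer_task( rtems_task_argument arg )
    {
      rtems_event_set   out;
      rtems_status_code sc;

      for ( ;; ) {
        sc = rtems_event_receive(
          EVENT_RX_READY | EVENT_TX_DONE,
          RTEMS_EVENT_ANY | RTEMS_WAIT,
          100,                          /* ticks to wait before timing out */
          &out
        );
        if ( sc == RTEMS_TIMEOUT )
          continue;                     /* nothing arrived in time */
        if ( out & EVENT_RX_READY ) {
          /* consume the received data ... */
        }
      }
    }

    /* Sending side (task or ISR): this ends up in _Event_Surrender for the
     * thread identified by consumer_id (assumed to be a valid task id). */
    void notify_rx_ready( rtems_id consumer_id )
    {
      (void) rtems_event_send( consumer_id, EVENT_RX_READY );
    }
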
  • cpukit/score/cpu/sparc/asm.h

    rea74482 r9700578  
    2929
    3030#define ASM
     31
    3132#include <rtems/score/sparc.h>
     33#include <rtems/score/cpu.h>
    3234
    3335/*
     
    3840 */
    3941
    40 /* XXX This does not appear to work on gcc 2.7.0 on the sparc */
     42/* XXX __USER_LABEL_PREFIX__ and __REGISTER_PREFIX__ do not work on gcc 2.7.0 */
     43/* XXX The following ifdef magic fixes the problem but results in a warning   */
     44/* XXX when compiling assembly code.                                          */
    4145#undef  __USER_LABEL_PREFIX__
    4246#ifndef __USER_LABEL_PREFIX__
     
    9296#define EXTERN(sym) .globl SYM (sym)
    9397
     98/*
     99 *  Entry for traps which jump to a programmer-specified trap handler.
     100 */
     101 
     102#define TRAP(_vector, _handler)  \
     103  mov   %psr, %l0 ; \
     104  sethi %hi(_handler), %l4 ; \
     105  jmp   %l4+%lo(_handler); \
     106  mov   _vector, %l3
     107
    94108#endif
    95109/* end of include file */
  • cpukit/score/cpu/sparc/cpu.c

    rea74482 r9700578  
    88#include <rtems/score/isr.h>
    99
    10 /*  _CPU_Initialize
     10#if defined(erc32)
     11#include <erc32.h>
     12#endif
     13
     14/*
     15 *  This initializes the set of opcodes placed in each trap
     16 *  table entry.  The routine which installs a handler is responsible
     17 *  for filling in the fields for the _handler address and the _vector
     18 *  trap type.
     19 *
     20 *  The constants following this structure are masks for the fields which
     21 *  must be filled in when the handler is installed.
     22 */
     23
     24const CPU_Trap_table_entry _CPU_Trap_slot_template = {
     25  0xa1480000,      /* mov   %psr, %l0           */
     26  0x29000000,      /* sethi %hi(_handler), %l4  */
     27  0x81c52000,      /* jmp   %l4 + %lo(_handler) */
     28  0xa6102000       /* mov   _vector, %l3        */
     29};
     30
     31/*PAGE
     32 *
     33 *  _CPU_Initialize
    1134 *
    1235 *  This routine performs processor dependent initialization.
    1336 *
    14  *  INPUT PARAMETERS:
     37 *  Input Parameters:
    1538 *    cpu_table       - CPU table to initialize
     1639 *    thread_dispatch - address of dispatching routine
    17  */
    18 
     40 *
     41 *  Output Parameters: NONE
     42 *
     43 *  NOTE: There is no need to save the pointer to the thread dispatch routine.
     44 *        The SPARC's assembly code can reference it directly with no problems.
     45 */
    1946
    2047void _CPU_Initialize(
    2148  rtems_cpu_table  *cpu_table,
    22   void      (*thread_dispatch)      /* ignored on this CPU */
     49  void            (*thread_dispatch)      /* ignored on this CPU */
    2350)
    2451{
    25   void *pointer;
    26 
    27   /*
    28    *  The thread_dispatch argument is the address of the entry point
    29    *  for the routine called at the end of an ISR once it has been
    30    *  decided a context switch is necessary.  On some compilation
    31    *  systems it is difficult to call a high-level language routine
    32    *  from assembly.  This allows us to trick these systems.
    33    *
    34    *  If you encounter this problem save the entry point in a CPU
    35    *  dependent variable.
    36    */
    37 
    38   _CPU_Thread_dispatch_pointer = thread_dispatch;
    39 
    40   /*
    41    *  If there is not an easy way to initialize the FP context
    42    *  during Context_Initialize, then it is usually easier to
    43    *  save an "uninitialized" FP context here and copy it to
    44    *  the task's during Context_Initialize.
     52  void                  *pointer;
     53  unsigned32             trap_table_start;
     54  unsigned32             tbr_value;
     55  CPU_Trap_table_entry  *old_tbr;
     56  CPU_Trap_table_entry  *trap_table;
     57
     58  /*
     59   *  Install the executive's trap table.  All entries from the original
     60   *  trap table are copied into the executive's trap table.  This is essential
     61   *  since this preserves critical trap handlers such as the window underflow
     62   *  and overflow handlers.  It is the responsibility of the BSP to provide
     63   *  install these in the initial trap table.
     64   */
     65 
     66  trap_table_start = (unsigned32) &_CPU_Trap_Table_area;
     67  if (trap_table_start & (SPARC_TRAP_TABLE_ALIGNMENT-1))
     68    trap_table_start = (trap_table_start + SPARC_TRAP_TABLE_ALIGNMENT) &
     69                       ~(SPARC_TRAP_TABLE_ALIGNMENT-1);
     70
     71  trap_table = (CPU_Trap_table_entry *) trap_table_start;
     72
     73  sparc_get_tbr( tbr_value );
     74
     75  old_tbr = (CPU_Trap_table_entry *) (tbr_value & 0xfffff000);
     76
     77  memcpy( trap_table, (void *) old_tbr, 256 * sizeof( CPU_Trap_table_entry ) );
     78
     79  sparc_set_tbr( trap_table_start );
     80
     81  /*
     82   *  This seems to be the most appropriate way to obtain an initial
      83   *  FP context on the SPARC.  The NULL fp context is copied to
     84   *  the task's FP context during Context_Initialize.
    4585   */
    4686
     
    4888  _CPU_Context_save_fp( &pointer );
    4989
     90  /*
     91   *  Grab our own copy of the user's CPU table.
     92   */
     93
    5094  _CPU_Table = *cpu_table;
     95
     96#if defined(erc32)
     97
     98  /*
     99   *  ERC32 specific initialization
     100   */
     101
     102  _ERC32_MEC_Timer_Control_Mirror = 0;
     103  ERC32_MEC.Timer_Control = 0;
     104
     105  ERC32_MEC.Control |= ERC32_CONFIGURATION_POWER_DOWN_ALLOWED;
     106
     107#endif
     108
    51109}
    52110
     
    54112 *
    55113 *  _CPU_ISR_Get_level
     114 *
     115 *  Input Parameters: NONE
     116 *
     117 *  Output Parameters:
     118 *    returns the current interrupt level (PIL field of the PSR)
    56119 */
    57120 
     
    65128}
    66129
    67 /*  _CPU_ISR_install_vector
     130/*PAGE
     131 *
     132 *  _CPU_ISR_install_raw_handler
     133 *
     134 *  This routine installs the specified handler as a "raw" non-executive
     135 *  supported trap handler (a.k.a. interrupt service routine).
     136 *
     137 *  Input Parameters:
     138 *    vector      - trap table entry number plus synchronous
     139 *                    vs. asynchronous information
     140 *    new_handler - address of the handler to be installed
     141 *    old_handler - pointer to an address of the handler previously installed
     142 *
     143 *  Output Parameters: NONE
     144 *    *new_handler - address of the handler previously installed
     145 *
     146 *  NOTE:
     147 *
     148 *  On the SPARC, there are really only 256 vectors.  However, the executive
     149 *  has no easy, fast, reliable way to determine which traps are synchronous
     150 *  and which are asynchronous.  By default, synchronous traps return to the
     151 *  instruction which caused the interrupt.  So if you install a software
     152 *  trap handler as an executive interrupt handler (which is desirable since
     153 *  RTEMS takes care of window and register issues), then the executive needs
     154 *  to know that the return address is to the trap rather than the instruction
     155 *  following the trap.
     156 *
     157 *  So vectors 0 through 255 are treated as regular asynchronous traps which
     158 *  provide the "correct" return address.  Vectors 256 through 512 are assumed
     159 *  by the executive to be synchronous and to require that the return address
     160 *  be fudged.
     161 *
     162 *  If you use this mechanism to install a trap handler which must reexecute
     163 *  the instruction which caused the trap, then it should be installed as
     164 *  an asynchronous trap.  This will avoid the executive changing the return
     165 *  address.
     166 */
     167 
     168void _CPU_ISR_install_raw_handler(
     169  unsigned32  vector,
     170  proc_ptr    new_handler,
     171  proc_ptr   *old_handler
     172)
     173{
     174  unsigned32             real_vector;
     175  CPU_Trap_table_entry  *tbr;
     176  CPU_Trap_table_entry  *slot;
     177  unsigned32             u32_tbr;
     178  unsigned32             u32_handler;
     179
     180  /*
     181   *  Get the "real" trap number for this vector ignoring the synchronous
     182   *  versus asynchronous indicator included with our vector numbers.
     183   */
     184
     185  real_vector = SPARC_REAL_TRAP_NUMBER( vector );
     186
     187  /*
     188   *  Get the current base address of the trap table and calculate a pointer
     189   *  to the slot we are interested in.
     190   */
     191
     192  sparc_get_tbr( u32_tbr );
     193
     194  u32_tbr &= 0xfffff000;
     195
     196  tbr = (CPU_Trap_table_entry *) u32_tbr;
     197
     198  slot = &tbr[ real_vector ];
     199
     200  /*
     201   *  Get the address of the old_handler from the trap table.
     202   *
     203   *  NOTE: The old_handler returned will be bogus if it does not follow
     204   *        the RTEMS model.
     205   */
     206
     207#define HIGH_BITS_MASK   0xFFFFFC00
     208#define HIGH_BITS_SHIFT  10
     209#define LOW_BITS_MASK    0x000003FF
     210
     211  if ( slot->mov_psr_l0 == _CPU_Trap_slot_template.mov_psr_l0 ) {
     212    u32_handler =
     213      ((slot->sethi_of_handler_to_l4 & HIGH_BITS_MASK) << HIGH_BITS_SHIFT) |
     214      (slot->jmp_to_low_of_handler_plus_l4 & LOW_BITS_MASK);
     215    *old_handler = (proc_ptr) u32_handler;
     216  } else
     217    *old_handler = 0;
     218
     219  /*
     220   *  Copy the template to the slot and then fix it.
     221   */
     222
     223  *slot = _CPU_Trap_slot_template;
     224
     225  u32_handler = (unsigned32) new_handler;
     226
     227  slot->mov_vector_l3 |= vector;
     228  slot->sethi_of_handler_to_l4 |=
     229    (u32_handler & HIGH_BITS_MASK) >> HIGH_BITS_SHIFT;
     230  slot->jmp_to_low_of_handler_plus_l4 |= (u32_handler & LOW_BITS_MASK);
     231}
     232
     233/*PAGE
     234 *
     235 *  _CPU_ISR_install_vector
    68236 *
    69237 *  This kernel routine installs the RTEMS handler for the
     
    71239 *
    72240 *  Input parameters:
    73  *    vector      - interrupt vector number
    74  *    old_handler - former ISR for this vector number
    75  *    new_handler - replacement ISR for this vector number
    76  *
    77  *  Output parameters:  NONE
    78  *
    79  */
    80 
     241 *    vector       - interrupt vector number
     242 *    new_handler  - replacement ISR for this vector number
     243 *    old_handler  - pointer to former ISR for this vector number
     244 *
     245 *  Output parameters:
     246 *    *old_handler - former ISR for this vector number
     247 *
     248 */
    81249
    82250void _CPU_ISR_install_vector(
     
    86254)
    87255{
    88    *old_handler = _ISR_Vector_table[ vector ];
     256   unsigned32 real_vector;
     257   proc_ptr   ignored;
     258
     259  /*
     260   *  Get the "real" trap number for this vector ignoring the synchronous
     261   *  versus asynchronous indicator included with our vector numbers.
     262   */
     263
     264   real_vector = SPARC_REAL_TRAP_NUMBER( vector );
    89265
    90266   /*
    91     *  If the interrupt vector table is a table of pointer to isr entry
    92     *  points, then we need to install the appropriate RTEMS interrupt
    93     *  handler for this vector number.
     267    *  Return the previous ISR handler.
    94268    */
     269
     270   *old_handler = _ISR_Vector_table[ real_vector ];
     271
     272   /*
     273    *  Install the wrapper so this ISR can be invoked properly.
     274    */
     275
     276   _CPU_ISR_install_raw_handler( vector, _ISR_Handler, &ignored );
    95277
    96278   /*
     
    99281    */
    100282
    101     _ISR_Vector_table[ vector ] = new_handler;
    102 }
    103 
    104 /*PAGE
    105  *
    106  *  _CPU_Install_interrupt_stack
    107  */
    108 
    109 void _CPU_Install_interrupt_stack( void )
    110 {
     283    _ISR_Vector_table[ real_vector ] = new_handler;
    111284}
    112285
     
    114287 *
    115288 *  _CPU_Context_Initialize
    116  */
    117 
    118 /*
    119  *  The following constants assist in building a thread's initial context.
    120  */
    121 
    122 #define CPU_FRAME_SIZE  (112)   /* based on disassembled test code */
    123 #define ADDR_ADJ_OFFSET  -8
     289 *
     290 *  This kernel routine initializes the basic non-FP context area associated
     291 *  with each thread.
     292 *
     293 *  Input parameters:
     294 *    the_context  - pointer to the context area
     295 *    stack_base   - address of memory for the SPARC
     296 *    size         - size in bytes of the stack area
     297 *    new_level    - interrupt level for this context area
      298 *    entry_point  - the starting execution point for this context
     299 *    is_fp        - TRUE if this context is associated with an FP thread
     300 *
     301 *  Output parameters: NONE
     302 */
    124303
    125304void _CPU_Context_Initialize(
    126   Context_Control  *_the_context,
    127   unsigned32       *_stack_base,
    128   unsigned32        _size,
    129   unsigned32        _new_level,
    130   void             *_entry_point
     305  Context_Control  *the_context,
     306  unsigned32       *stack_base,
     307  unsigned32        size,
     308  unsigned32        new_level,
     309  void             *entry_point,
     310  boolean           is_fp
    131311)
    132312{
    133     unsigned32   jmp_addr;
    134     unsigned32   _stack_high;  /* highest "stack aligned" address */
    135     unsigned32   _the_size;
     313    unsigned32   stack_high;  /* highest "stack aligned" address */
     314    unsigned32   the_size;
    136315    unsigned32   tmp_psr;
    137  
    138     jmp_addr = (unsigned32) _entry_point;
    139316 
    140317    /*
    141318     *  On CPUs with stacks which grow down (i.e. SPARC), we build the stack
    142      *  based on the _stack_high address. 
     319     *  based on the stack_high address. 
    143320     */
    144321 
    145     _stack_high = ((unsigned32)(_stack_base) + _size);
    146     _stack_high &= ~(CPU_STACK_ALIGNMENT - 1);
    147  
    148     _the_size = _size & ~(CPU_STACK_ALIGNMENT - 1);
    149  
    150 /* XXX following code is based on unix port */
     322    stack_high = ((unsigned32)(stack_base) + size);
     323    stack_high &= ~(CPU_STACK_ALIGNMENT - 1);
     324 
     325    the_size = size & ~(CPU_STACK_ALIGNMENT - 1);
     326 
    151327    /*
    152      *  XXX SPARC port needs a diagram like this one...
    153      *  See /usr/include/sys/stack.h in Solaris 2.3 for a nice
    154      *  diagram of the stack.
     328     *  See the README in this directory for a diagram of the stack.
    155329     */
    156330 
    157     _the_context->o7 = jmp_addr + ADDR_ADJ_OFFSET;
    158     _the_context->o6 = (unsigned32)(_stack_high - CPU_FRAME_SIZE);
    159     _the_context->i6 = (unsigned32)(_stack_high);
    160 #if 0
    161     _the_context->rp = jmp_addr + ADDR_ADJ_OFFSET;
    162     _the_context->sp = (unsigned32)(_stack_high - CPU_FRAME_SIZE);
    163     _the_context->fp = (unsigned32)(_stack_high);
     331    the_context->o7    = ((unsigned32) entry_point) - 8;
     332    the_context->o6_sp = stack_high - CPU_MINIMUM_STACK_FRAME_SIZE;
     333    the_context->i6_fp = stack_high;
     334
     335    /*
     336     *  Build the PSR for the task.  Most everything can be 0 and the
     337     *  CWP is corrected during the context switch.
     338     *
     339     *  The EF bit determines if the floating point unit is available.
     340     *  The FPU is ONLY enabled if the context is associated with an FP task
     341     *  and this SPARC model has an FPU.
     342     */
     343
     344    sparc_get_psr( tmp_psr );
     345    tmp_psr &= ~SPARC_PSR_PIL_MASK;
     346    tmp_psr |= (new_level << 8) & SPARC_PSR_PIL_MASK;
     347    tmp_psr &= ~SPARC_PSR_EF_MASK;      /* disabled by default */
     348   
     349#if (SPARC_HAS_FPU == 1)
     350    /*
     351     *  If this bit is not set, then a task gets a fault when it accesses
     352     *  a floating point register.  This is a nice way to detect floating
     353     *  point tasks which are not currently declared as such.
     354     */
     355
     356    if ( is_fp )
     357      tmp_psr |= SPARC_PSR_EF_MASK;
    164358#endif
    165 
    166     _the_context->wim = 0x01;
    167 
    168     sparc_get_psr( tmp_psr );
    169     tmp_psr &= ~SPARC_PIL_MASK;
    170     tmp_psr |= (((_new_level) << 8) & SPARC_PIL_MASK);
    171     tmp_psr  = (tmp_psr & ~0x07) | 0x07;  /* XXX should use num windows */
    172     _the_context->psr = tmp_psr;
     359    the_context->psr = tmp_psr;
    173360}
    174361
     
    177364 *  _CPU_Internal_threads_Idle_thread_body
    178365 *
    179  *  NOTES:
    180  *
    181  *  1. This is the same as the regular CPU independent algorithm.
    182  *
    183  *  2. If you implement this using a "halt", "idle", or "shutdown"
    184  *     instruction, then don't forget to put it in an infinite loop.
    185  *
    186  *  3. Be warned. Some processors with onboard DMA have been known
    187  *     to stop the DMA if the CPU were put in IDLE mode.  This might
    188  *     also be a problem with other on-chip peripherals.  So use this
    189  *     hook with caution.
    190  */
     366 *  Some SPARC implementations have low power, sleep, or idle modes.  This
      367 *  tries to take advantage of those modes.
     368 */
     369 
     370#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)
     371
     372/*
     373 *  This is the implementation for the erc32.
     374 *
     375 *  NOTE: Low power mode was enabled at initialization time.
     376 */
     377
     378#if defined(erc32)
    191379
    192380void _CPU_Internal_threads_Idle_thread_body( void )
    193381{
    194 
    195   for( ; ; )
    196     /* insert your "halt" instruction here */ ;
    197 }
     382  while (1) {
     383    ERC32_MEC.Power_Down = 0;   /* value is irrelevant */
     384  }
     385}
     386
     387#endif
     388
     389#endif /* CPU_PROVIDES_IDLE_THREAD_BODY */
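
The NOTE, the _CPU_Trap_slot_template, and the HIGH_BITS/LOW_BITS masks above all rely on the SPARC convention that sethi carries the upper 22 bits of a 32-bit value and the jmp immediate carries the remaining low 10 bits, which is how _CPU_ISR_install_raw_handler splits and later recovers a handler address. A small host-side check of just that arithmetic, using a made-up handler address:

    #include <assert.h>
    #include <stdio.h>

    #define HIGH_BITS_MASK   0xFFFFFC00u   /* upper 22 bits: what sethi %hi(_handler) encodes */
    #define HIGH_BITS_SHIFT  10
    #define LOW_BITS_MASK    0x000003FFu   /* lower 10 bits: what jmp ... + %lo(_handler) encodes */

    int main( void )
    {
      unsigned int handler = 0x40001234u;  /* hypothetical handler address */

      /* The immediates OR'd into the trap slot's sethi and jmp instructions. */
      unsigned int imm22 = (handler & HIGH_BITS_MASK) >> HIGH_BITS_SHIFT;
      unsigned int imm10 =  handler & LOW_BITS_MASK;

      /* Recombining the two immediates must give back the original address. */
      unsigned int rebuilt = (imm22 << HIGH_BITS_SHIFT) | imm10;

      assert( rebuilt == handler );
      printf( "handler=0x%08x imm22=0x%06x imm10=0x%03x\n", handler, imm22, imm10 );
      return 0;
    }
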
  • cpukit/score/cpu/sparc/rtems/asm.h

    rea74482 r9700578  
    2929
    3030#define ASM
     31
    3132#include <rtems/score/sparc.h>
     33#include <rtems/score/cpu.h>
    3234
    3335/*
     
    3840 */
    3941
    40 /* XXX This does not appear to work on gcc 2.7.0 on the sparc */
     42/* XXX __USER_LABEL_PREFIX__ and __REGISTER_PREFIX__ do not work on gcc 2.7.0 */
     43/* XXX The following ifdef magic fixes the problem but results in a warning   */
     44/* XXX when compiling assembly code.                                          */
    4145#undef  __USER_LABEL_PREFIX__
    4246#ifndef __USER_LABEL_PREFIX__
     
    9296#define EXTERN(sym) .globl SYM (sym)
    9397
     98/*
     99 *  Entry for traps which jump to a programmer-specified trap handler.
     100 */
     101 
     102#define TRAP(_vector, _handler)  \
     103  mov   %psr, %l0 ; \
     104  sethi %hi(_handler), %l4 ; \
     105  jmp   %l4+%lo(_handler); \
     106  mov   _vector, %l3
     107
    94108#endif
    95109/* end of include file */
  • cpukit/score/cpu/unix/cpu.c

    rea74482 r9700578  
    342342 *  _CPU_Internal_threads_Idle_thread_body
    343343 *
    344  *  NOTES:
    345  *
    346  *  1. This is the same as the regular CPU independent algorithm.
    347  *
    348  *  2. If you implement this using a "halt", "idle", or "shutdown"
    349  *     instruction, then don't forget to put it in an infinite loop.
    350  *
    351  *  3. Be warned. Some processors with onboard DMA have been known
    352  *     to stop the DMA if the CPU were put in IDLE mode.  This might
    353  *     also be a problem with other on-chip peripherals.  So use this
    354  *     hook with caution.
      344 *  Stop until we get a signal, which is logically the same thing as
      345 *  entering low-power or sleep mode on a real processor and waiting for
     346 *  an interrupt.  This significantly reduces the consumption of host
     347 *  CPU cycles which is again similar to low power mode.
    355348 */
    356349
     
    371364  unsigned32        _size,
    372365  unsigned32        _new_level,
    373   void             *_entry_point
     366  void             *_entry_point,
     367  boolean           _is_fp
    374368)
    375369{
     
    698692}
    699693
    700 /*PAGE
    701  *
    702  *  _CPU_ffs
    703  */
    704 
    705 int _CPU_ffs(unsigned32 value)
    706 {
    707   int output;
    708   extern int ffs( int );
    709 
    710   output = ffs(value);
    711   output = output - 1;
    712 
    713   return output;
    714 }
    715 
    716 
    717694/*
    718695 *  Special Purpose Routines to hide the use of UNIX system calls.
    719696 */
    720697
    721 #if 0
    722 /* XXX clock had this set of #define's */
    723 
    724 /*
    725  *  In order to get the types and prototypes used in this file under
    726  *  Solaris 2.3, it is necessary to pull the following magic.
    727  */
    728  
    729 #if defined(solaris)
    730 #warning "Ignore the undefining __STDC__ warning"
    731 #undef __STDC__
    732 #define __STDC__ 0
    733 #undef  _POSIX_C_SOURCE
    734 #endif
    735 #endif
    736 
    737698int _CPU_Get_clock_vector( void )
    738699{
    739700  return SIGALRM;
    740701}
    741 
    742702
    743703void _CPU_Start_clock(
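
The rewritten comment in _CPU_Internal_threads_Idle_thread_body above describes stopping until a signal arrives; the hunk shows only the comment, not the new body. Purely as an illustration of the idea on a POSIX host (not the port's actual code), such an idle body reduces to something like:

    #include <unistd.h>

    /* Hypothetical sketch only: block the process until any signal handler runs,
     * e.g. the port's clock tick signal, instead of spinning and burning host CPU. */
    void idle_body_sketch( void )
    {
      for ( ;; )
        (void) pause();
    }
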
  • cpukit/score/include/rtems/score/bitfield.h

    rea74482 r9700578  
    3939 */
    4040
     41#if ( CPU_USE_GENERIC_BITFIELD_DATA == TRUE )
     42
     43#ifndef INIT
     44  extern const unsigned char __log2table[256];
     45#else
     46const unsigned char __log2table[256] = {
     47    7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,
     48    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
     49    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
     50    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
     51    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
     52    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
     53    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
     54    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
     55    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
     56    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
     57    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
     58    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
     59    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
     60    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
     61    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
     62    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
     63};
     64#endif
     65 
     66#endif
     67
     68#if ( CPU_USE_GENERIC_BITFIELD_CODE == FALSE )
     69
    4170#define _Bitfield_Find_first_bit( _value, _bit_number ) \
    4271        _CPU_Bitfield_Find_first_bit( _value, _bit_number )
     72
     73#else
     74
     75/*
     76 *  The following must be a macro because if a CPU specific version
     77 *  is used it will most likely use inline assembly.
     78 */
     79
     80#define _Bitfield_Find_first_bit( _value, _bit_number ) \
     81  { \
     82    register __value = (_value); \
     83    register const unsigned char *__p = __log2table; \
     84    \
     85    if ( __value < 0x100 ) \
     86      (_bit_number) = __p[ __value ] + 8; \
     87    else \
     88      (_bit_number) = __p[ __value >> 8 ]; \
     89  }
     90
     91#endif
    4392
    4493#ifdef __cplusplus
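
The generic __log2table and _Bitfield_Find_first_bit macro added above give ports without a native find-first-bit instruction a table-driven scan of the 16-bit priority bit map. Bit numbering runs from the most significant side, so 0x8000 maps to bit 0 and 0x0001 to bit 15; the macro looks at the high byte first and only falls back to the low byte (adding 8) when the high byte is zero. A standalone sketch of the same lookup, rebuilding the table with a loop rather than spelling out all 256 entries:

    #include <assert.h>
    #include <stdio.h>

    /* Mirrors the generic _Bitfield_Find_first_bit above: for a 16-bit value,
     * return the index of the most significant set bit counted from the top,
     * so 0x8000 -> 0 and 0x0001 -> 15.  The table holds the same 256 entries
     * as __log2table but is rebuilt here by a loop instead of being written out. */
    static unsigned char log2table[ 256 ];

    static void build_table( void )
    {
      int i, bit;

      for ( i = 1; i < 256; i++ ) {
        for ( bit = 7; bit >= 0; bit-- )     /* highest set bit of the byte */
          if ( i & (1 << bit) )
            break;
        log2table[ i ] = (unsigned char) (7 - bit);
      }
      log2table[ 0 ] = 7;                    /* entry for zero, as in the patch */
    }

    static int find_first_bit( unsigned int value )
    {
      if ( value < 0x100 )
        return log2table[ value ] + 8;       /* high byte empty: answer lies in bits 8..15 */
      return log2table[ value >> 8 ];        /* otherwise only the high byte matters */
    }

    int main( void )
    {
      build_table();
      assert( find_first_bit( 0x8000 ) == 0 );
      assert( find_first_bit( 0x0001 ) == 15 );
      assert( find_first_bit( 0x0240 ) == 6 );   /* highest set bit is 0x0200 */
      printf( "ok\n" );
      return 0;
    }
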
  • cpukit/score/include/rtems/score/context.h

    rea74482 r9700578  
    4848 */
    4949
    50 #define _Context_Initialize( _the_context, _stack, _size, _isr, _entry ) \
    51    _CPU_Context_Initialize( _the_context, _stack, _size, _isr, _entry )
     50#define \
     51   _Context_Initialize( _the_context, _stack, _size, _isr, _entry, _is_fp ) \
     52   _CPU_Context_Initialize( _the_context, _stack, _size, _isr, _entry, _is_fp )
    5253
    5354/*
  • cpukit/score/include/rtems/score/isr.h

    rea74482 r9700578  
    5151             );
    5252/*
    53  *  This constant promotes out the number of vectors supported by
    54  *  the current CPU being used.
     53 *  This constant promotes out the number of vectors truly supported by
     54 *  the current CPU being used.  This is usually the number of distinct vectors
     55 *  the cpu can vector.
    5556 */
    5657 
    57 #define ISR_NUMBER_OF_VECTORS    CPU_INTERRUPT_NUMBER_OF_VECTORS
     58#define ISR_NUMBER_OF_VECTORS                CPU_INTERRUPT_NUMBER_OF_VECTORS
     59
     60/*
     61 *  This constant promotes out the highest valid interrupt vector number.
     62 */
     63
     64#define ISR_INTERRUPT_MAXIMUM_VECTOR_NUMBER  CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER
    5865
    5966/*
     
    7683 */
    7784
    78 EXTERN ISR_Handler_entry _ISR_Vector_table[CPU_INTERRUPT_NUMBER_OF_VECTORS];
     85EXTERN ISR_Handler_entry _ISR_Vector_table[ ISR_NUMBER_OF_VECTORS ];
    7986
    8087/*
  • cpukit/score/include/rtems/score/priority.h

    rea74482 r9700578  
    115115
    116116/*
     117 *  _Priority_Mask
     118 *
     119 *  DESCRIPTION:
     120 *
     121 *  This function returns the mask associated with the major or minor
     122 *  number passed to it.
     123 */
     124 
     125#if ( CPU_USE_GENERIC_BITFIELD_CODE == TRUE )
     126
     127STATIC INLINE unsigned32 _Priority_Mask (
     128  unsigned32 bit_number
     129);
     130
     131#else
     132
     133#define _Priority_Mask( _bit_number ) \
     134  _CPU_Priority_Mask( _bit_number )
     135 
     136#endif
     137
     138/*
     139 *  _Priority_Bits_index
     140 *
     141 *  DESCRIPTION:
     142 *
     143 *  This function translates the bit numbers returned by the bit scan
     144 *  of a priority bit field into something suitable for use as
     145 *  a major or minor component of a priority.
     146 */
     147 
     148#if ( CPU_USE_GENERIC_BITFIELD_CODE == TRUE )
     149
     150STATIC INLINE unsigned32 _Priority_Bits_index (
     151  unsigned32 bit_number
     152);
     153
     154#else
     155
     156#define _Priority_Bits_index( _priority ) \
     157  _CPU_Priority_bits_index( _priority )
     158
     159#endif
     160 
     161/*
    117162 *  _Priority_Add_to_bit_map
    118163 *
  • cpukit/score/inline/rtems/score/isr.inl

    rea74482 r9700578  
    3939)
    4040{
    41   return ( vector < CPU_INTERRUPT_NUMBER_OF_VECTORS );
     41  return ( vector <= CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER );
    4242}
    4343
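
The validity test above changes from comparing against the vector count to comparing against a maximum vector number. That matters because a port may define more vector numbers than it has real trap table entries, as the SPARC port does with the synchronous trap aliases described in cpu.c. A sketch of the distinction, with illustrative values that are assumptions rather than copies of the port's cpu.h:

    #include <stdio.h>

    /* Illustrative values only (assumed): the SPARC port encodes twice as many
     * vector numbers as there are real trap table slots, because vectors above
     * 255 are the synchronous aliases described in cpu.c.  A "vector < number
     * of vectors" test would reject them, so the check compares against the
     * highest valid vector number instead. */
    #define DEMO_NUMBER_OF_VECTORS        256
    #define DEMO_MAXIMUM_VECTOR_NUMBER    511
    #define DEMO_REAL_TRAP_NUMBER( _v )   ( (_v) % 256 )

    static int demo_is_vector_valid( unsigned int vector )
    {
      return vector <= DEMO_MAXIMUM_VECTOR_NUMBER;
    }

    int main( void )
    {
      unsigned int v = 0x109;   /* synchronous alias of real trap 9 */

      printf( "vector 0x%x: valid=%d, real trap=%u\n",
              v, demo_is_vector_valid( v ), DEMO_REAL_TRAP_NUMBER( v ) );
      return 0;
    }
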
  • cpukit/score/inline/rtems/score/priority.inl

    rea74482 r9700578  
    7979}
    8080
     81#if ( CPU_USE_GENERIC_BITFIELD_CODE == TRUE )
     82 
     83/*PAGE
     84 *
     85 *  _Priority_Mask
     86 *
     87 */
     88 
     89STATIC INLINE unsigned32 _Priority_Mask (
     90  unsigned32 bit_number
     91)
     92{
     93  return (0x8000 >> bit_number);
     94}
     95 
     96 
     97/*PAGE
     98 *
     99 *  _Priority_Bits_index
     100 *
     101 */
     102 
     103STATIC INLINE unsigned32 _Priority_Bits_index (
     104  unsigned32 bit_number
     105)
     106{
     107  return bit_number;
     108}
     109
     110#endif
     111
    81112/*PAGE
    82113 *
     
    122153  _Bitfield_Find_first_bit( _Priority_Bit_map[major], minor );
    123154
    124   return (_CPU_Priority_Bits_index( major ) << 4) +
    125           _CPU_Priority_Bits_index( minor );
     155  return (_Priority_Bits_index( major ) << 4) +
     156          _Priority_Bits_index( minor );
    126157}
    127158
     
    145176
    146177  the_priority_map->minor =
    147     &_Priority_Bit_map[ _CPU_Priority_Bits_index(major) ];
    148 
    149   mask = _CPU_Priority_Mask( major );
     178    &_Priority_Bit_map[ _Priority_Bits_index(major) ];
     179
     180  mask = _Priority_Mask( major );
    150181  the_priority_map->ready_major = mask;
    151182  the_priority_map->block_major = ~mask;
    152183
    153   mask = _CPU_Priority_Mask( minor );
     184  mask = _Priority_Mask( minor );
    154185  the_priority_map->ready_minor = mask;
    155186  the_priority_map->block_minor = ~mask;
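
The generic _Priority_Mask and _Priority_Bits_index bodies above pair with the bit map handling: an RTEMS priority splits into a major part (priority / 16) selecting a word of _Priority_Bit_map and a minor part (priority % 16) selecting a bit within it, and each bit number becomes a mask via 0x8000 >> bit. A standalone sketch of that arithmetic, using renamed stand-in macros:

    #include <stdio.h>

    /* Stand-ins for the generic helpers added in this changeset. */
    #define DEMO_PRIORITY_MASK( _bit )        ( 0x8000u >> (_bit) )
    #define DEMO_PRIORITY_BITS_INDEX( _bit )  ( _bit )

    int main( void )
    {
      unsigned int priority = 37;               /* example RTEMS priority, 0..255 */
      unsigned int major    = priority / 16;    /* selects the word in _Priority_Bit_map */
      unsigned int minor    = priority % 16;    /* selects the bit within that word */

      printf( "priority %u -> major %u (mask 0x%04x), minor %u (mask 0x%04x)\n",
              priority,
              major, DEMO_PRIORITY_MASK( DEMO_PRIORITY_BITS_INDEX( major ) ),
              minor, DEMO_PRIORITY_MASK( DEMO_PRIORITY_BITS_INDEX( minor ) ) );

      /* Reversing the split, as _Priority_Get_highest does with the scan results. */
      printf( "rebuilt priority: %u\n", (major << 4) + minor );
      return 0;
    }
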
  • cpukit/score/macros/rtems/score/isr.inl

    rea74482 r9700578  
    3434
    3535#define _ISR_Is_vector_number_valid( _vector ) \
    36   ( (_vector) < CPU_INTERRUPT_NUMBER_OF_VECTORS )
     36  ( (_vector) <= CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER )
    3737
    3838/*PAGE
  • cpukit/score/macros/rtems/score/priority.inl

    rea74482 r9700578  
    6565#define _Priority_Minor( _the_priority ) ( (_the_priority) % 16 )
    6666
     67#if ( CPU_USE_GENERIC_BITFIELD_CODE == TRUE )
     68 
     69/*PAGE
     70 *
     71 *  _Priority_Mask
     72 *
     73 */
     74 
     75#define _Priority_Mask( _bit_number ) \
     76  (0x8000 >> _bit_number)
     77 
     78/*PAGE
     79 *
     80 *  _Priority_Bits_index
     81 *
     82 */
     83 
     84#define _Priority_Bits_index( _bit_number ) \
     85  (_bit_number)
     86 
     87#endif
     88 
    6789/*PAGE
    6890 *
     
    104126    _Bitfield_Find_first_bit( _Priority_Bit_map[major], minor ); \
    105127    \
    106     (_high_priority) = (_CPU_Priority_Bits_index( major ) * 16) +  \
    107                        _CPU_Priority_Bits_index( minor ); \
     128    (_high_priority) = (_Priority_Bits_index( major ) * 16) +  \
     129                        _Priority_Bits_index( minor ); \
    108130  }
    109131
     
    125147    \
    126148    (_the_priority_map)->minor =  \
    127       &_Priority_Bit_map[ _CPU_Priority_Bits_index(_major) ]; \
     149      &_Priority_Bit_map[ _Priority_Bits_index(_major) ]; \
    128150    \
    129     _mask = _CPU_Priority_Mask( _major ); \
     151    _mask = _Priority_Mask( _major ); \
    130152    (_the_priority_map)->ready_major = _mask; \
    131153    (_the_priority_map)->block_major = ~_mask; \
    132154    \
    133     _mask = _CPU_Priority_Mask( _minor ); \
     155    _mask = _Priority_Mask( _minor ); \
    134156    (_the_priority_map)->ready_minor = _mask; \
    135157    (_the_priority_map)->block_minor = ~_mask; \
  • cpukit/score/src/thread.c

    rea74482 r9700578  
    806806)
    807807{
     808  boolean is_fp = FALSE;
     809
    808810  if ( the_thread->Start.fp_context ) {
    809811    the_thread->fp_context = the_thread->Start.fp_context;
    810812    _Context_Initialize_fp( &the_thread->fp_context );
     813    is_fp = TRUE;
    811814  }
    812815
     
    819822    the_thread->Start.Initial_stack.size,
    820823    the_thread->Start.isr_level,
    821     _Thread_Handler
     824    _Thread_Handler,
     825    is_fp
    822826  );
    823827