Changeset 9700578 in rtems
- Timestamp:
- 10/30/95 21:54:45 (28 years ago)
- Branches:
- 4.10, 4.11, 4.8, 4.9, 5, master
- Children:
- c4808ca
- Parents:
- ea74482
- Files:
-
- 4 added
- 108 edited
Legend:
- Unmodified
- Added
- Removed
-
c/ACKNOWLEDGEMENTS
rea74482 r9700578 69 69 radiation-hardened CPU. Jiri Gaisler (jgais@wd.estec.esa.nl) deserves 70 70 special thanks for championing this port within the ESA as well as 71 for developing the SPARC Instruction Simulator used to test this port. 71 for developing and supporting the SPARC Instruction Simulator used to 72 develop and test this port. 72 73 73 74 Finally, the RTEMS project would like to thank those who have contributed -
c/src/exec/libcsupport/include/ringbuf.h
rea74482 r9700578 11 11 12 12 #ifndef RINGBUF_QUEUE_LENGTH 13 #define RINGBUF_QUEUE_LENGTH 20013 #define RINGBUF_QUEUE_LENGTH 128 14 14 #endif 15 15 16 16 typedef struct { 17 17 char buffer[RINGBUF_QUEUE_LENGTH]; 18 int head;19 int tail;18 volatile int head; 19 volatile int tail; 20 20 } Ring_buffer_t; 21 21 … … 28 28 ( (_buffer)->head == (_buffer)->tail ) 29 29 30 #define Ring_buffer_Is_full( _buffer ) \ 31 ( (_buffer)->head == ((_buffer)->tail + 1) % RINGBUF_QUEUE_LENGTH ) 32 30 33 #define Ring_buffer_Add_character( _buffer, _ch ) \ 31 34 do { \ 32 (_buffer)->buffer[ (_buffer)->tail ] = (_ch); \ 33 (_buffer)->tail = ((_buffer)->tail+1) % RINGBUF_QUEUE_LENGTH; \ 35 rtems_unsigned32 isrlevel; \ 36 \ 37 rtems_interrupt_disable( isrlevel ); \ 38 (_buffer)->tail = ((_buffer)->tail+1) % RINGBUF_QUEUE_LENGTH; \ 39 (_buffer)->buffer[ (_buffer)->tail ] = (_ch); \ 40 rtems_interrupt_enable( isrlevel ); \ 34 41 } while ( 0 ) 35 42 36 43 #define Ring_buffer_Remove_character( _buffer, _ch ) \ 37 44 do { \ 38 (_ch) = (_buffer)->buffer[ (_buffer)->head ]; \ 39 (_buffer)->head = ((_buffer)->head+1) % RINGBUF_QUEUE_LENGTH; \ 45 rtems_unsigned32 isrlevel; \ 46 \ 47 rtems_interrupt_disable( isrlevel ); \ 48 (_buffer)->head = ((_buffer)->head+1) % RINGBUF_QUEUE_LENGTH; \ 49 (_ch) = (_buffer)->buffer[ (_buffer)->head ]; \ 50 rtems_interrupt_enable( isrlevel ); \ 40 51 } while ( 0 ) 41 52 -
c/src/exec/posix/headers/intr.h
rea74482 r9700578 61 61 */ 62 62 63 EXTERN POSIX_Interrupt_Control 64 _POSIX_Interrupt_Information[ ISR_NUMBER_OF_VECTORS ]; 63 EXTERN POSIX_Interrupt_Control _POSIX_Interrupt_Information[ ISR_NUMBER_OF_VECTORS ]; 65 64 66 65 /* -
c/src/exec/posix/include/rtems/posix/intr.h
rea74482 r9700578 61 61 */ 62 62 63 EXTERN POSIX_Interrupt_Control 64 _POSIX_Interrupt_Information[ ISR_NUMBER_OF_VECTORS ]; 63 EXTERN POSIX_Interrupt_Control _POSIX_Interrupt_Information[ ISR_NUMBER_OF_VECTORS ]; 65 64 66 65 /* -
c/src/exec/rtems/src/event.c
rea74482 r9700578 109 109 return( _Thread_Executing->Wait.return_code ); 110 110 } 111 112 113 /*PAGE 114 * 115 * _Event_Seize 116 * 117 * This routine attempts to satisfy the requested event condition 118 * for the running thread. 119 * 120 * Input parameters: 121 * event_in - the event condition to satisfy 122 * option_set - acquire event options 123 * ticks - interval to wait 124 * event_out - pointer to event set output area 125 * 126 * Output parameters: NONE 127 * *event_out - event set output area filled in 128 * 129 * INTERRUPT LATENCY: 130 * available 131 * wait 132 * check sync 133 */ 134 135 void _Event_Seize( 136 rtems_event_set event_in, 137 rtems_option option_set, 138 rtems_interval ticks, 139 rtems_event_set *event_out 140 ) 141 { 142 Thread_Control *executing; 143 rtems_event_set seized_events; 144 rtems_event_set pending_events; 145 ISR_Level level; 146 RTEMS_API_Control *api; 147 148 executing = _Thread_Executing; 149 executing->Wait.return_code = RTEMS_SUCCESSFUL; 150 151 api = executing->API_Extensions[ THREAD_API_RTEMS ]; 152 153 _ISR_Disable( level ); 154 pending_events = api->pending_events; 155 seized_events = _Event_sets_Get( pending_events, event_in ); 156 157 if ( !_Event_sets_Is_empty( seized_events ) && 158 (seized_events == event_in || _Options_Is_any( option_set )) ) { 159 api->pending_events = 160 _Event_sets_Clear( pending_events, seized_events ); 161 _ISR_Enable( level ); 162 *event_out = seized_events; 163 return; 164 } 165 166 if ( _Options_Is_no_wait( option_set ) ) { 167 _ISR_Enable( level ); 168 executing->Wait.return_code = RTEMS_UNSATISFIED; 169 *event_out = seized_events; 170 return; 171 } 172 173 _Event_Sync = TRUE; 174 executing->Wait.option = (unsigned32) option_set; 175 executing->Wait.count = (unsigned32) event_in; 176 executing->Wait.return_argument = event_out; 177 178 _ISR_Enable( level ); 179 _Thread_Set_state( executing, STATES_WAITING_FOR_EVENT ); 180 181 if ( ticks ) { 182 _Watchdog_Initialize( 183 
&executing->Timer, 184 _Event_Timeout, 185 executing->Object.id, 186 NULL 187 ); 188 _Watchdog_Insert_ticks( 189 &executing->Timer, 190 ticks, 191 WATCHDOG_NO_ACTIVATE 192 ); 193 } 194 195 _ISR_Disable( level ); 196 if ( _Event_Sync == TRUE ) { 197 _Event_Sync = FALSE; 198 if ( ticks ) 199 _Watchdog_Activate( &executing->Timer ); 200 _ISR_Enable( level ); 201 return; 202 } 203 _ISR_Enable( level ); 204 (void) _Watchdog_Remove( &executing->Timer ); 205 _Thread_Unblock( executing ); 206 return; 207 } 208 209 /*PAGE 210 * 211 * _Event_Surrender 212 * 213 * This routines remove a thread from the specified threadq. 214 * 215 * Input parameters: 216 * the_thread - pointer to thread to be dequeued 217 * 218 * Output parameters: NONE 219 * 220 * INTERRUPT LATENCY: 221 * before flash 222 * after flash 223 * check sync 224 */ 225 226 void _Event_Surrender( 227 Thread_Control *the_thread 228 ) 229 { 230 ISR_Level level; 231 rtems_event_set pending_events; 232 rtems_event_set event_condition; 233 rtems_event_set seized_events; 234 rtems_option option_set; 235 RTEMS_API_Control *api; 236 237 api = the_thread->API_Extensions[ THREAD_API_RTEMS ]; 238 239 option_set = (rtems_option) the_thread->Wait.option; 240 241 _ISR_Disable( level ); 242 pending_events = api->pending_events; 243 event_condition = (rtems_event_set) the_thread->Wait.count; 244 245 seized_events = _Event_sets_Get( pending_events, event_condition ); 246 247 if ( !_Event_sets_Is_empty( seized_events ) ) { 248 if ( _States_Is_waiting_for_event( the_thread->current_state ) ) { 249 if ( seized_events == event_condition || _Options_Is_any( option_set ) ) { 250 api->pending_events = 251 _Event_sets_Clear( pending_events, seized_events ); 252 *(rtems_event_set *)the_thread->Wait.return_argument = seized_events; 253 254 _ISR_Flash( level ); 255 256 if ( !_Watchdog_Is_active( &the_thread->Timer ) ) { 257 _ISR_Enable( level ); 258 _Thread_Unblock( the_thread ); 259 } 260 else { 261 _Watchdog_Deactivate( &the_thread->Timer 
); 262 _ISR_Enable( level ); 263 (void) _Watchdog_Remove( &the_thread->Timer ); 264 _Thread_Unblock( the_thread ); 265 } 266 return; 267 } 268 } 269 else if ( _Thread_Is_executing( the_thread ) && _Event_Sync == TRUE ) { 270 if ( seized_events == event_condition || _Options_Is_any( option_set ) ) { 271 api->pending_events = _Event_sets_Clear( pending_events,seized_events ); 272 *(rtems_event_set *)the_thread->Wait.return_argument = seized_events; 273 _Event_Sync = FALSE; 274 } 275 } 276 } 277 _ISR_Enable( level ); 278 } 279 280 /*PAGE 281 * 282 * _Event_Timeout 283 * 284 * This routine processes a thread which timeouts while waiting to 285 * receive an event_set. It is called by the watchdog handler. 286 * 287 * Input parameters: 288 * id - thread id 289 * 290 * Output parameters: NONE 291 */ 292 293 void _Event_Timeout( 294 Objects_Id id, 295 void *ignored 296 ) 297 { 298 Thread_Control *the_thread; 299 Objects_Locations location; 300 301 the_thread = _Thread_Get( id, &location ); 302 switch ( location ) { 303 case OBJECTS_ERROR: 304 case OBJECTS_REMOTE: /* impossible */ 305 break; 306 case OBJECTS_LOCAL: 307 the_thread->Wait.return_code = RTEMS_TIMEOUT; 308 _Thread_Unblock( the_thread ); 309 _Thread_Unnest_dispatch(); 310 break; 311 } 312 } -
c/src/exec/score/cpu/hppa1.1/cpu.h
rea74482 r9700578 279 279 */ 280 280 281 #define CPU_INTERRUPT_NUMBER_OF_VECTORS (HPPA_INTERRUPT_MAX) 281 #define CPU_INTERRUPT_NUMBER_OF_VECTORS (HPPA_INTERRUPT_MAX) 282 #define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1) 282 283 283 284 /* … … 388 389 389 390 #define _CPU_Context_Initialize( _the_context, _stack_base, _size, \ 390 _new_level, _entry_point ) \391 _new_level, _entry_point, _is_fp ) \ 391 392 do { \ 392 393 unsigned32 _stack; \ … … 457 458 */ 458 459 460 #define CPU_USE_GENERIC_BITFIELD_CODE FALSE 461 #define CPU_USE_GENERIC_BITFIELD_DATA FALSE 462 459 463 int hppa_rtems_ffs(unsigned int value); 460 464 #define _CPU_Bitfield_Find_first_bit( _value, _output ) \ … … 478 482 ( 1 << (_bit_number) ) 479 483 480 #define _CPU_Priority_ Bits_index( _priority ) \484 #define _CPU_Priority_bits_index( _priority ) \ 481 485 (_priority) 482 486 -
c/src/exec/score/cpu/i386/cpu.h
rea74482 r9700578 144 144 */ 145 145 146 #define CPU_INTERRUPT_NUMBER_OF_VECTORS 256 146 #define CPU_INTERRUPT_NUMBER_OF_VECTORS 256 147 #define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1) 147 148 148 149 /* … … 209 210 210 211 #define _CPU_Context_Initialize( _the_context, _stack_base, _size, \ 211 _isr, _entry_point ) \212 _isr, _entry_point, _is_fp ) \ 212 213 do { \ 213 214 unsigned32 _stack; \ … … 266 267 */ 267 268 269 #define CPU_USE_GENERIC_BITFIELD_CODE FALSE 270 #define CPU_USE_GENERIC_BITFIELD_DATA FALSE 271 268 272 #define _CPU_Bitfield_Find_first_bit( _value, _output ) \ 269 273 { \ … … 293 297 ( 1 << (_bit_number) ) 294 298 295 #define _CPU_Priority_ Bits_index( _priority ) \299 #define _CPU_Priority_bits_index( _priority ) \ 296 300 (_priority) 297 301 -
c/src/exec/score/cpu/i960/cpu.h
rea74482 r9700578 186 186 */ 187 187 188 #define CPU_INTERRUPT_NUMBER_OF_VECTORS 256 188 #define CPU_INTERRUPT_NUMBER_OF_VECTORS 256 189 #define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1) 189 190 190 191 /* … … 253 254 254 255 #define _CPU_Context_Initialize( _the_context, _stack_base, _size, \ 255 _isr, _entry ) \256 _isr, _entry, _is_fp ) \ 256 257 { CPU_Call_frame *_texit_frame; \ 257 258 unsigned32 _mask; \ … … 319 320 */ 320 321 322 #define CPU_USE_GENERIC_BITFIELD_CODE FALSE 323 #define CPU_USE_GENERIC_BITFIELD_DATA FALSE 324 321 325 #define _CPU_Bitfield_Find_first_bit( _value, _output ) \ 322 326 { unsigned32 _search = (_value); \ … … 342 346 ( 0x8000 >> (_bit_number) ) 343 347 344 #define _CPU_Priority_ Bits_index( _priority ) \348 #define _CPU_Priority_bits_index( _priority ) \ 345 349 ( 15 - (_priority) ) 346 350 -
c/src/exec/score/cpu/m68k/cpu.h
rea74482 r9700578 176 176 */ 177 177 178 #define CPU_INTERRUPT_NUMBER_OF_VECTORS 256 178 #define CPU_INTERRUPT_NUMBER_OF_VECTORS 256 179 #define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1) 179 180 180 181 /* … … 238 239 239 240 #define _CPU_Context_Initialize( _the_context, _stack_base, _size, \ 240 _isr, _entry_point ) \241 _isr, _entry_point, _is_fp ) \ 241 242 do { \ 242 243 void *_stack; \ … … 302 303 */ 303 304 305 #define CPU_USE_GENERIC_BITFIELD_CODE FALSE 306 #define CPU_USE_GENERIC_BITFIELD_DATA FALSE 307 304 308 #if ( M68K_HAS_BFFFO == 1 ) 305 309 #ifdef NO_UNINITIALIZED_WARNINGS … … 328 332 329 333 /* duplicates BFFFO results for 16 bits (i.e., 15-(_priority) in 330 _CPU_Priority_ Bits_index is not needed), handles the 0 case, and334 _CPU_Priority_bits_index is not needed), handles the 0 case, and 331 335 does not molest _value -- jsg */ 332 336 #ifndef m68000 … … 387 391 ( 0x8000 >> (_bit_number) ) 388 392 389 #define _CPU_Priority_ Bits_index( _priority ) \393 #define _CPU_Priority_bits_index( _priority ) \ 390 394 (_priority) 391 395 -
c/src/exec/score/cpu/m68k/cpu_asm.s
rea74482 r9700578 175 175 */ 176 176 177 .global SYM (_ISR_Exit)178 SYM (_ISR_Exit):179 180 177 subql #1,SYM (_ISR_Nest_level) | one less nest level 181 178 subql #1,SYM (_Thread_Dispatch_disable_level) -
c/src/exec/score/cpu/no_cpu/cpu.h
rea74482 r9700578 413 413 */ 414 414 415 #define CPU_INTERRUPT_NUMBER_OF_VECTORS 32 415 #define CPU_INTERRUPT_NUMBER_OF_VECTORS 32 416 #define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1) 416 417 417 418 /* … … 537 538 538 539 #define _CPU_Context_Initialize( _the_context, _stack_base, _size, \ 539 _isr, _entry_point ) \540 _isr, _entry_point, _is_fp ) \ 540 541 { \ 541 542 } … … 622 623 * RTEMS guarantees that (1) will never happen so it is not a concern. 623 624 * (2),(3), (4) are handled by the macros _CPU_Priority_mask() and 624 * _CPU_Priority_ Bits_index(). These three form a set of routines625 * _CPU_Priority_bits_index(). These three form a set of routines 625 626 * which must logically operate together. Bits in the _value are 626 627 * set and cleared based on masks built by _CPU_Priority_mask(). 627 628 * The basic major and minor values calculated by _Priority_Major() 628 * and _Priority_Minor() are "massaged" by _CPU_Priority_ Bits_index()629 * and _Priority_Minor() are "massaged" by _CPU_Priority_bits_index() 629 630 * to properly range between the values returned by the "find first bit" 630 631 * instruction. 
This makes it possible for _Priority_Get_highest() to … … 661 662 */ 662 663 664 #define CPU_USE_GENERIC_BITFIELD_CODE TRUE 665 #define CPU_USE_GENERIC_BITFIELD_DATA TRUE 666 667 #if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE) 668 663 669 #define _CPU_Bitfield_Find_first_bit( _value, _output ) \ 664 670 { \ … … 666 672 } 667 673 674 #endif 675 668 676 /* end of Bitfield handler macros */ 669 677 … … 674 682 */ 675 683 684 #if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE) 685 676 686 #define _CPU_Priority_Mask( _bit_number ) \ 677 687 ( 1 << (_bit_number) ) 688 689 #endif 678 690 679 691 /* … … 684 696 */ 685 697 686 #define _CPU_Priority_Bits_index( _priority ) \ 698 #if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE) 699 700 #define _CPU_Priority_bits_index( _priority ) \ 687 701 (_priority) 702 703 #endif 688 704 689 705 /* end of Priority handler macros */ -
c/src/exec/score/cpu/powerpc/cpu.h
rea74482 r9700578 542 542 */ 543 543 544 #define CPU_INTERRUPT_NUMBER_OF_VECTORS (PPC_INTERRUPT_MAX) 544 #define CPU_INTERRUPT_NUMBER_OF_VECTORS (PPC_INTERRUPT_MAX) 545 #define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1) 545 546 546 547 /* … … 683 684 #if PPC_ABI == PPC_ABI_POWEROPEN 684 685 #define _CPU_Context_Initialize( _the_context, _stack_base, _size, \ 685 _isr, _entry_point ) \686 _isr, _entry_point, _is_fp ) \ 686 687 { \ 687 688 unsigned32 sp, *desc; \ … … 817 818 * RTEMS guarantees that (1) will never happen so it is not a concern. 818 819 * (2),(3), (4) are handled by the macros _CPU_Priority_mask() and 819 * _CPU_Priority_ Bits_index(). These three form a set of routines820 * _CPU_Priority_bits_index(). These three form a set of routines 820 821 * which must logically operate together. Bits in the _value are 821 822 * set and cleared based on masks built by _CPU_Priority_mask(). 822 823 * The basic major and minor values calculated by _Priority_Major() 823 * and _Priority_Minor() are "massaged" by _CPU_Priority_ Bits_index()824 * and _Priority_Minor() are "massaged" by _CPU_Priority_bits_index() 824 825 * to properly range between the values returned by the "find first bit" 825 826 * instruction. This makes it possible for _Priority_Get_highest() to … … 856 857 */ 857 858 859 #define CPU_USE_GENERIC_BITFIELD_CODE FALSE 860 #define CPU_USE_GENERIC_BITFIELD_DATA FALSE 861 858 862 #define _CPU_Bitfield_Find_first_bit( _value, _output ) \ 859 863 { \ … … 880 884 */ 881 885 882 #define _CPU_Priority_ Bits_index( _priority ) \886 #define _CPU_Priority_bits_index( _priority ) \ 883 887 (_priority) 884 888 -
c/src/exec/score/cpu/sparc/asm.h
rea74482 r9700578 29 29 30 30 #define ASM 31 31 32 #include <rtems/score/sparc.h> 33 #include <rtems/score/cpu.h> 32 34 33 35 /* … … 38 40 */ 39 41 40 /* XXX This does not appear to work on gcc 2.7.0 on the sparc */ 42 /* XXX __USER_LABEL_PREFIX__ and __REGISTER_PREFIX__ do not work on gcc 2.7.0 */ 43 /* XXX The following ifdef magic fixes the problem but results in a warning */ 44 /* XXX when compiling assembly code. */ 41 45 #undef __USER_LABEL_PREFIX__ 42 46 #ifndef __USER_LABEL_PREFIX__ … … 92 96 #define EXTERN(sym) .globl SYM (sym) 93 97 98 /* 99 * Entry for traps which jump to a programmer-specified trap handler. 100 */ 101 102 #define TRAP(_vector, _handler) \ 103 mov %psr, %l0 ; \ 104 sethi %hi(_handler), %l4 ; \ 105 jmp %l4+%lo(_handler); \ 106 mov _vector, %l3 107 94 108 #endif 95 109 /* end of include file */ -
c/src/exec/score/cpu/sparc/cpu.c
rea74482 r9700578 8 8 #include <rtems/score/isr.h> 9 9 10 /* _CPU_Initialize 10 #if defined(erc32) 11 #include <erc32.h> 12 #endif 13 14 /* 15 * This initializes the set of opcodes placed in each trap 16 * table entry. The routine which installs a handler is responsible 17 * for filling in the fields for the _handler address and the _vector 18 * trap type. 19 * 20 * The constants following this structure are masks for the fields which 21 * must be filled in when the handler is installed. 22 */ 23 24 const CPU_Trap_table_entry _CPU_Trap_slot_template = { 25 0xa1480000, /* mov %psr, %l0 */ 26 0x29000000, /* sethi %hi(_handler), %l4 */ 27 0x81c52000, /* jmp %l4 + %lo(_handler) */ 28 0xa6102000 /* mov _vector, %l3 */ 29 }; 30 31 /*PAGE 32 * 33 * _CPU_Initialize 11 34 * 12 35 * This routine performs processor dependent initialization. 13 36 * 14 * I NPUT PARAMETERS:37 * Input Parameters: 15 38 * cpu_table - CPU table to initialize 16 39 * thread_dispatch - address of disptaching routine 17 */ 18 40 * 41 * Output Parameters: NONE 42 * 43 * NOTE: There is no need to save the pointer to the thread dispatch routine. 44 * The SPARC's assembly code can reference it directly with no problems. 45 */ 19 46 20 47 void _CPU_Initialize( 21 48 rtems_cpu_table *cpu_table, 22 void (*thread_dispatch) /* ignored on this CPU */49 void (*thread_dispatch) /* ignored on this CPU */ 23 50 ) 24 51 { 25 void *pointer; 26 27 /* 28 * The thread_dispatch argument is the address of the entry point 29 * for the routine called at the end of an ISR once it has been 30 * decided a context switch is necessary. On some compilation 31 * systems it is difficult to call a high-level language routine 32 * from assembly. This allows us to trick these systems. 33 * 34 * If you encounter this problem save the entry point in a CPU 35 * dependent variable. 
36 */ 37 38 _CPU_Thread_dispatch_pointer = thread_dispatch; 39 40 /* 41 * If there is not an easy way to initialize the FP context 42 * during Context_Initialize, then it is usually easier to 43 * save an "uninitialized" FP context here and copy it to 44 * the task's during Context_Initialize. 52 void *pointer; 53 unsigned32 trap_table_start; 54 unsigned32 tbr_value; 55 CPU_Trap_table_entry *old_tbr; 56 CPU_Trap_table_entry *trap_table; 57 58 /* 59 * Install the executive's trap table. All entries from the original 60 * trap table are copied into the executive's trap table. This is essential 61 * since this preserves critical trap handlers such as the window underflow 62 * and overflow handlers. It is the responsibility of the BSP to provide 63 * install these in the initial trap table. 64 */ 65 66 trap_table_start = (unsigned32) &_CPU_Trap_Table_area; 67 if (trap_table_start & (SPARC_TRAP_TABLE_ALIGNMENT-1)) 68 trap_table_start = (trap_table_start + SPARC_TRAP_TABLE_ALIGNMENT) & 69 ~(SPARC_TRAP_TABLE_ALIGNMENT-1); 70 71 trap_table = (CPU_Trap_table_entry *) trap_table_start; 72 73 sparc_get_tbr( tbr_value ); 74 75 old_tbr = (CPU_Trap_table_entry *) (tbr_value & 0xfffff000); 76 77 memcpy( trap_table, (void *) old_tbr, 256 * sizeof( CPU_Trap_table_entry ) ); 78 79 sparc_set_tbr( trap_table_start ); 80 81 /* 82 * This seems to be the most appropriate way to obtain an initial 83 * FP context on the SPARC. The NULL fp context is copied it to 84 * the task's FP context during Context_Initialize. 45 85 */ 46 86 … … 48 88 _CPU_Context_save_fp( &pointer ); 49 89 90 /* 91 * Grab our own copy of the user's CPU table. 
92 */ 93 50 94 _CPU_Table = *cpu_table; 95 96 #if defined(erc32) 97 98 /* 99 * ERC32 specific initialization 100 */ 101 102 _ERC32_MEC_Timer_Control_Mirror = 0; 103 ERC32_MEC.Timer_Control = 0; 104 105 ERC32_MEC.Control |= ERC32_CONFIGURATION_POWER_DOWN_ALLOWED; 106 107 #endif 108 51 109 } 52 110 … … 54 112 * 55 113 * _CPU_ISR_Get_level 114 * 115 * Input Parameters: NONE 116 * 117 * Output Parameters: 118 * returns the current interrupt level (PIL field of the PSR) 56 119 */ 57 120 … … 65 128 } 66 129 67 /* _CPU_ISR_install_vector 130 /*PAGE 131 * 132 * _CPU_ISR_install_raw_handler 133 * 134 * This routine installs the specified handler as a "raw" non-executive 135 * supported trap handler (a.k.a. interrupt service routine). 136 * 137 * Input Parameters: 138 * vector - trap table entry number plus synchronous 139 * vs. asynchronous information 140 * new_handler - address of the handler to be installed 141 * old_handler - pointer to an address of the handler previously installed 142 * 143 * Output Parameters: NONE 144 * *new_handler - address of the handler previously installed 145 * 146 * NOTE: 147 * 148 * On the SPARC, there are really only 256 vectors. However, the executive 149 * has no easy, fast, reliable way to determine which traps are synchronous 150 * and which are asynchronous. By default, synchronous traps return to the 151 * instruction which caused the interrupt. So if you install a software 152 * trap handler as an executive interrupt handler (which is desirable since 153 * RTEMS takes care of window and register issues), then the executive needs 154 * to know that the return address is to the trap rather than the instruction 155 * following the trap. 156 * 157 * So vectors 0 through 255 are treated as regular asynchronous traps which 158 * provide the "correct" return address. Vectors 256 through 512 are assumed 159 * by the executive to be synchronous and to require that the return address 160 * be fudged. 
161 * 162 * If you use this mechanism to install a trap handler which must reexecute 163 * the instruction which caused the trap, then it should be installed as 164 * an asynchronous trap. This will avoid the executive changing the return 165 * address. 166 */ 167 168 void _CPU_ISR_install_raw_handler( 169 unsigned32 vector, 170 proc_ptr new_handler, 171 proc_ptr *old_handler 172 ) 173 { 174 unsigned32 real_vector; 175 CPU_Trap_table_entry *tbr; 176 CPU_Trap_table_entry *slot; 177 unsigned32 u32_tbr; 178 unsigned32 u32_handler; 179 180 /* 181 * Get the "real" trap number for this vector ignoring the synchronous 182 * versus asynchronous indicator included with our vector numbers. 183 */ 184 185 real_vector = SPARC_REAL_TRAP_NUMBER( vector ); 186 187 /* 188 * Get the current base address of the trap table and calculate a pointer 189 * to the slot we are interested in. 190 */ 191 192 sparc_get_tbr( u32_tbr ); 193 194 u32_tbr &= 0xfffff000; 195 196 tbr = (CPU_Trap_table_entry *) u32_tbr; 197 198 slot = &tbr[ real_vector ]; 199 200 /* 201 * Get the address of the old_handler from the trap table. 202 * 203 * NOTE: The old_handler returned will be bogus if it does not follow 204 * the RTEMS model. 205 */ 206 207 #define HIGH_BITS_MASK 0xFFFFFC00 208 #define HIGH_BITS_SHIFT 10 209 #define LOW_BITS_MASK 0x000003FF 210 211 if ( slot->mov_psr_l0 == _CPU_Trap_slot_template.mov_psr_l0 ) { 212 u32_handler = 213 ((slot->sethi_of_handler_to_l4 & HIGH_BITS_MASK) << HIGH_BITS_SHIFT) | 214 (slot->jmp_to_low_of_handler_plus_l4 & LOW_BITS_MASK); 215 *old_handler = (proc_ptr) u32_handler; 216 } else 217 *old_handler = 0; 218 219 /* 220 * Copy the template to the slot and then fix it. 
221 */ 222 223 *slot = _CPU_Trap_slot_template; 224 225 u32_handler = (unsigned32) new_handler; 226 227 slot->mov_vector_l3 |= vector; 228 slot->sethi_of_handler_to_l4 |= 229 (u32_handler & HIGH_BITS_MASK) >> HIGH_BITS_SHIFT; 230 slot->jmp_to_low_of_handler_plus_l4 |= (u32_handler & LOW_BITS_MASK); 231 } 232 233 /*PAGE 234 * 235 * _CPU_ISR_install_vector 68 236 * 69 237 * This kernel routine installs the RTEMS handler for the … … 71 239 * 72 240 * Input parameters: 73 * vector - interrupt vector number74 * old_handler - formerISR for this vector number75 * new_handler - replacementISR for this vector number76 * 77 * Output parameters: NONE78 * 79 * /80 241 * vector - interrupt vector number 242 * new_handler - replacement ISR for this vector number 243 * old_handler - pointer to former ISR for this vector number 244 * 245 * Output parameters: 246 * *old_handler - former ISR for this vector number 247 * 248 */ 81 249 82 250 void _CPU_ISR_install_vector( … … 86 254 ) 87 255 { 88 *old_handler = _ISR_Vector_table[ vector ]; 256 unsigned32 real_vector; 257 proc_ptr ignored; 258 259 /* 260 * Get the "real" trap number for this vector ignoring the synchronous 261 * versus asynchronous indicator included with our vector numbers. 262 */ 263 264 real_vector = SPARC_REAL_TRAP_NUMBER( vector ); 89 265 90 266 /* 91 * If the interrupt vector table is a table of pointer to isr entry 92 * points, then we need to install the appropriate RTEMS interrupt 93 * handler for this vector number. 267 * Return the previous ISR handler. 94 268 */ 269 270 *old_handler = _ISR_Vector_table[ real_vector ]; 271 272 /* 273 * Install the wrapper so this ISR can be invoked properly. 
274 */ 275 276 _CPU_ISR_install_raw_handler( vector, _ISR_Handler, &ignored ); 95 277 96 278 /* … … 99 281 */ 100 282 101 _ISR_Vector_table[ vector ] = new_handler; 102 } 103 104 /*PAGE 105 * 106 * _CPU_Install_interrupt_stack 107 */ 108 109 void _CPU_Install_interrupt_stack( void ) 110 { 283 _ISR_Vector_table[ real_vector ] = new_handler; 111 284 } 112 285 … … 114 287 * 115 288 * _CPU_Context_Initialize 116 */ 117 118 /* 119 * The following constants assist in building a thread's initial context. 120 */ 121 122 #define CPU_FRAME_SIZE (112) /* based on disassembled test code */ 123 #define ADDR_ADJ_OFFSET -8 289 * 290 * This kernel routine initializes the basic non-FP context area associated 291 * with each thread. 292 * 293 * Input parameters: 294 * the_context - pointer to the context area 295 * stack_base - address of memory for the SPARC 296 * size - size in bytes of the stack area 297 * new_level - interrupt level for this context area 298 * entry_point - the starting execution point for this this context 299 * is_fp - TRUE if this context is associated with an FP thread 300 * 301 * Output parameters: NONE 302 */ 124 303 125 304 void _CPU_Context_Initialize( 126 Context_Control *_the_context, 127 unsigned32 *_stack_base, 128 unsigned32 _size, 129 unsigned32 _new_level, 130 void *_entry_point 305 Context_Control *the_context, 306 unsigned32 *stack_base, 307 unsigned32 size, 308 unsigned32 new_level, 309 void *entry_point, 310 boolean is_fp 131 311 ) 132 312 { 133 unsigned32 jmp_addr; 134 unsigned32 _stack_high; /* highest "stack aligned" address */ 135 unsigned32 _the_size; 313 unsigned32 stack_high; /* highest "stack aligned" address */ 314 unsigned32 the_size; 136 315 unsigned32 tmp_psr; 137 138 jmp_addr = (unsigned32) _entry_point;139 316 140 317 /* 141 318 * On CPUs with stacks which grow down (i.e. SPARC), we build the stack 142 * based on the _stack_high address.319 * based on the stack_high address. 
143 320 */ 144 321 145 _stack_high = ((unsigned32)(_stack_base) + _size); 146 _stack_high &= ~(CPU_STACK_ALIGNMENT - 1); 147 148 _the_size = _size & ~(CPU_STACK_ALIGNMENT - 1); 149 150 /* XXX following code is based on unix port */ 322 stack_high = ((unsigned32)(stack_base) + size); 323 stack_high &= ~(CPU_STACK_ALIGNMENT - 1); 324 325 the_size = size & ~(CPU_STACK_ALIGNMENT - 1); 326 151 327 /* 152 * XXX SPARC port needs a diagram like this one... 153 * See /usr/include/sys/stack.h in Solaris 2.3 for a nice 154 * diagram of the stack. 328 * See the README in this directory for a diagram of the stack. 155 329 */ 156 330 157 _the_context->o7 = jmp_addr + ADDR_ADJ_OFFSET; 158 _the_context->o6 = (unsigned32)(_stack_high - CPU_FRAME_SIZE); 159 _the_context->i6 = (unsigned32)(_stack_high); 160 #if 0 161 _the_context->rp = jmp_addr + ADDR_ADJ_OFFSET; 162 _the_context->sp = (unsigned32)(_stack_high - CPU_FRAME_SIZE); 163 _the_context->fp = (unsigned32)(_stack_high); 331 the_context->o7 = ((unsigned32) entry_point) - 8; 332 the_context->o6_sp = stack_high - CPU_MINIMUM_STACK_FRAME_SIZE; 333 the_context->i6_fp = stack_high; 334 335 /* 336 * Build the PSR for the task. Most everything can be 0 and the 337 * CWP is corrected during the context switch. 338 * 339 * The EF bit determines if the floating point unit is available. 340 * The FPU is ONLY enabled if the context is associated with an FP task 341 * and this SPARC model has an FPU. 342 */ 343 344 sparc_get_psr( tmp_psr ); 345 tmp_psr &= ~SPARC_PSR_PIL_MASK; 346 tmp_psr |= (new_level << 8) & SPARC_PSR_PIL_MASK; 347 tmp_psr &= ~SPARC_PSR_EF_MASK; /* disabled by default */ 348 349 #if (SPARC_HAS_FPU == 1) 350 /* 351 * If this bit is not set, then a task gets a fault when it accesses 352 * a floating point register. This is a nice way to detect floating 353 * point tasks which are not currently declared as such. 
354 */ 355 356 if ( is_fp ) 357 tmp_psr |= SPARC_PSR_EF_MASK; 164 358 #endif 165 166 _the_context->wim = 0x01; 167 168 sparc_get_psr( tmp_psr ); 169 tmp_psr &= ~SPARC_PIL_MASK; 170 tmp_psr |= (((_new_level) << 8) & SPARC_PIL_MASK); 171 tmp_psr = (tmp_psr & ~0x07) | 0x07; /* XXX should use num windows */ 172 _the_context->psr = tmp_psr; 359 the_context->psr = tmp_psr; 173 360 } 174 361 … … 177 364 * _CPU_Internal_threads_Idle_thread_body 178 365 * 179 * NOTES: 180 * 181 * 1. This is the same as the regular CPU independent algorithm. 182 * 183 * 2. If you implement this using a "halt", "idle", or "shutdown" 184 * instruction, then don't forget to put it in an infinite loop. 185 * 186 * 3. Be warned. Some processors with onboard DMA have been known 187 * to stop the DMA if the CPU were put in IDLE mode. This might 188 * also be a problem with other on-chip peripherals. So use this 189 * hook with caution. 190 */ 366 * Some SPARC implementations have low power, sleep, or idle modes. This 367 * tries to take advantage of those models. 368 */ 369 370 #if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE) 371 372 /* 373 * This is the implementation for the erc32. 374 * 375 * NOTE: Low power mode was enabled at initialization time. 376 */ 377 378 #if defined(erc32) 191 379 192 380 void _CPU_Internal_threads_Idle_thread_body( void ) 193 381 { 194 195 for( ; ; ) 196 /* insert your "halt" instruction here */ ; 197 } 382 while (1) { 383 ERC32_MEC.Power_Down = 0; /* value is irrelevant */ 384 } 385 } 386 387 #endif 388 389 #endif /* CPU_PROVIDES_IDLE_THREAD_BODY */ -
c/src/exec/score/cpu/sparc/cpu.h
rea74482 r9700578 1 1 /* cpu.h 2 2 * 3 * This include file contains information pertaining to the XXX4 * processor.3 * This include file contains information pertaining to the port of 4 * the executive to the SPARC processor. 5 5 * 6 6 * $Id$ … … 26 26 * If TRUE, then they are inlined. 27 27 * If FALSE, then a subroutine call is made. 28 *29 * Basically this is an example of the classic trade-off of size30 * versus speed. Inlining the call (TRUE) typically increases the31 * size of the executive while speeding up the enabling of dispatching.32 * [NOTE: In general, the _Thread_Dispatch_disable_level will33 * only be 0 or 1 unless you are in an interrupt handler and that34 * interrupt handler invokes the executive.] When not inlined35 * something calls _Thread_Enable_dispatch which in turns calls36 * _Thread_Dispatch. If the enable dispatch is inlined, then37 * one subroutine call is avoided entirely.]38 28 */ 39 29 … … 49 39 * If FALSE, then the loops are not unrolled. 50 40 * 51 * The primary factor in making this decision is the cost of disabling 52 * and enabling interrupts (_ISR_Flash) versus the cost of rest of the 53 * body of the loop. On some CPUs, the flash is more expensive than 54 * one iteration of the loop body. In this case, it might be desirable 55 * to unroll the loop. It is important to note that on some CPUs, this 56 * code is the longest interrupt disable period in the executive. So it is 57 * necessary to strike a balance when setting this parameter. 41 * This parameter could go either way on the SPARC. The interrupt flash 42 * code is relatively lengthy given the requirements for nops following 43 * writes to the psr. But if the clock speed were high enough, this would 44 * not represent a great deal of time. 58 45 */ 59 46 … … 66 53 * If FALSE, nothing is done. 67 54 * 68 * If the CPU supports a dedicated interrupt stack in hardware, 69 * then it is generally the responsibility of the BSP to allocate it 70 * and set it up. 
71 * 72 * If the CPU does not support a dedicated interrupt stack, then 73 * the porter has two options: (1) execute interrupts on the stack of 74 * the interrupted task, and (2) have the executive manage a dedicated 75 * interrupt stack. 76 * 77 * If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE. 78 * 79 * Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and 80 * CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE. It is 81 * possible that both are FALSE for a particular CPU. Although it 82 * is unclear what that would imply about the interrupt processing 83 * procedure on that CPU. 84 */ 85 86 #define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE /* XXX */ 55 * The SPARC does not have a dedicated HW interrupt stack and one has 56 * been implemented in SW. 57 */ 58 59 #define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE 87 60 88 61 /* … … 92 65 * If FALSE, then no installation is performed. 93 66 * 94 * If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE. 95 * 96 * Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and 97 * CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE. It is 98 * possible that both are FALSE for a particular CPU. Although it 99 * is unclear what that would imply about the interrupt processing 100 * procedure on that CPU. 101 */ 102 103 #define CPU_HAS_HARDWARE_INTERRUPT_STACK TRUE /* XXX */ 67 * The SPARC does not have a dedicated HW interrupt stack. 68 */ 69 70 #define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE 104 71 105 72 /* … … 108 75 * If TRUE, then the memory is allocated during initialization. 109 76 * If FALSE, then the memory is allocated during initialization. 110 *111 * This should be TRUE is CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE112 * or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE.113 77 */ 114 78 … … 120 84 * If TRUE, then the FLOATING_POINT task attribute is supported. 121 85 * If FALSE, then the FLOATING_POINT task attribute is ignored. 
122 *123 * If there is a FP coprocessor such as the i387 or mc68881, then124 * the answer is TRUE.125 *126 * The macro name "SPARC_HAS_FPU" should be made CPU specific.127 * It indicates whether or not this CPU model has FP support. For128 * example, it would be possible to have an i386_nofp CPU model129 * which set this to false to indicate that you have an i386 without130 * an i387 and wish to leave floating point support out.131 86 */ 132 87 … … 142 97 * If TRUE, then the FLOATING_POINT task attribute is assumed. 143 98 * If FALSE, then the FLOATING_POINT task attribute is followed. 144 *145 * So far, the only CPU in which this option has been used is the146 * HP PA-RISC. The HP C compiler and gcc both implicitly use the147 * floating point registers to perform integer multiplies. If148 * a function which you would not think utilize the FP unit DOES,149 * then one can not easily predict which tasks will use the FP hardware.150 * In this case, this option should be TRUE.151 *152 * If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.153 99 */ 154 100 … … 161 107 * and it has a floating point context which is switched in and out. 162 108 * If FALSE, then the IDLE task does not have a floating point context. 163 *164 * Setting this to TRUE negatively impacts the time required to preempt165 * the IDLE task from an interrupt because the floating point context166 * must be saved as part of the preemption.167 109 */ 168 110 … … 182 124 * task is restored. The state of the floating point registers between 183 125 * those two operations is not specified. 184 *185 * If the floating point context does NOT have to be saved as part of186 * interrupt dispatching, then it should be safe to set this to TRUE.187 *188 * Setting this flag to TRUE results in using a different algorithm189 * for deciding when to save and restore the floating point context.190 * The deferred FP switch algorithm minimizes the number of times191 * the FP context is saved and restored. 
The FP context is not saved192 * until a context switch is made to another, different FP task.193 * Thus in a system with only one FP task, the FP context will never194 * be saved or restored.195 126 */ 196 127 … … 206 137 * If FALSE, then use the generic IDLE thread body if the BSP does 207 138 * not provide one. 208 * 209 * This is intended to allow for supporting processors which have 210 * a low power or idle mode. When the IDLE thread is executed, then 211 * the CPU can be powered down. 212 * 213 * The order of precedence for selecting the IDLE thread body is: 214 * 215 * 1. BSP provided 216 * 2. CPU dependent (if provided) 217 * 3. generic (if no BSP and no CPU dependent) 218 */ 219 139 */ 140 141 #if (SPARC_HAS_LOW_POWER_MODE == 1) 142 #define CPU_PROVIDES_IDLE_THREAD_BODY TRUE 143 #else 220 144 #define CPU_PROVIDES_IDLE_THREAD_BODY FALSE 145 #endif 221 146 222 147 /* … … 226 151 * If TRUE, then the grows upward. 227 152 * If FALSE, then the grows toward smaller addresses. 153 * 154 * The stack grows to lower addresses on the SPARC. 228 155 */ 229 156 … … 237 164 * much of the critical data area as possible in a cache line. 238 165 * 239 * The placement of this macro in the declaration of the variables 240 * is based on the syntactically requirements of the GNU C 241 * "__attribute__" extension. For example with GNU C, use 242 * the following to force a structures to a 32 byte boundary. 243 * 244 * __attribute__ ((aligned (32))) 245 * 246 * NOTE: Currently only the Priority Bit Map table uses this feature. 247 * To benefit from using this, the data must be heavily 248 * used so it will stay in the cache and used frequently enough 249 * in the executive to justify turning this on. 166 * The SPARC does not appear to have particularly strict alignment 167 * requirements. This value was chosen to take advantages of caches. 250 168 */ 251 169 … … 256 174 * interrupt field of the task mode. 
How those bits map to the 257 175 * CPU interrupt levels is defined by the routine _CPU_ISR_Set_level(). 176 * 177 * The SPARC has 16 interrupt levels in the PIL field of the PSR. 258 178 */ 259 179 … … 261 181 262 182 /* 263 * Processor defined structures 264 * 265 * Examples structures include the descriptor tables from the i386 266 * and the processor control structure on the i960ca. 267 */ 268 269 /* XXX may need to put some structures here. */ 183 * This structure represents the organization of the minimum stack frame 184 * for the SPARC. More framing information is required in certain situaions 185 * such as when there are a large number of out parameters or when the callee 186 * must save floating point registers. 187 */ 188 189 #ifndef ASM 190 191 typedef struct { 192 unsigned32 l0; 193 unsigned32 l1; 194 unsigned32 l2; 195 unsigned32 l3; 196 unsigned32 l4; 197 unsigned32 l5; 198 unsigned32 l6; 199 unsigned32 l7; 200 unsigned32 i0; 201 unsigned32 i1; 202 unsigned32 i2; 203 unsigned32 i3; 204 unsigned32 i4; 205 unsigned32 i5; 206 unsigned32 i6_fp; 207 unsigned32 i7; 208 void *structure_return_address; 209 /* 210 * The following are for the callee to save the register arguments in 211 * should this be necessary. 
212 */ 213 unsigned32 saved_arg0; 214 unsigned32 saved_arg1; 215 unsigned32 saved_arg2; 216 unsigned32 saved_arg3; 217 unsigned32 saved_arg4; 218 unsigned32 saved_arg5; 219 unsigned32 pad0; 220 } CPU_Minimum_stack_frame; 221 222 #endif /* ASM */ 223 224 #define CPU_STACK_FRAME_L0_OFFSET 0x00 225 #define CPU_STACK_FRAME_L1_OFFSET 0x04 226 #define CPU_STACK_FRAME_L2_OFFSET 0x08 227 #define CPU_STACK_FRAME_L3_OFFSET 0x0c 228 #define CPU_STACK_FRAME_L4_OFFSET 0x10 229 #define CPU_STACK_FRAME_L5_OFFSET 0x14 230 #define CPU_STACK_FRAME_L6_OFFSET 0x18 231 #define CPU_STACK_FRAME_L7_OFFSET 0x1c 232 #define CPU_STACK_FRAME_I0_OFFSET 0x20 233 #define CPU_STACK_FRAME_I1_OFFSET 0x24 234 #define CPU_STACK_FRAME_I2_OFFSET 0x28 235 #define CPU_STACK_FRAME_I3_OFFSET 0x2c 236 #define CPU_STACK_FRAME_I4_OFFSET 0x30 237 #define CPU_STACK_FRAME_I5_OFFSET 0x34 238 #define CPU_STACK_FRAME_I6_FP_OFFSET 0x38 239 #define CPU_STACK_FRAME_I7_OFFSET 0x3c 240 #define CPU_STRUCTURE_RETURN_ADDRESS_OFFSET 0x40 241 #define CPU_STACK_FRAME_SAVED_ARG0_OFFSET 0x44 242 #define CPU_STACK_FRAME_SAVED_ARG1_OFFSET 0x48 243 #define CPU_STACK_FRAME_SAVED_ARG2_OFFSET 0x4c 244 #define CPU_STACK_FRAME_SAVED_ARG3_OFFSET 0x50 245 #define CPU_STACK_FRAME_SAVED_ARG4_OFFSET 0x54 246 #define CPU_STACK_FRAME_SAVED_ARG5_OFFSET 0x58 247 #define CPU_STACK_FRAME_PAD0_OFFSET 0x5c 248 249 #define CPU_MINIMUM_STACK_FRAME_SIZE 0x60 270 250 271 251 /* … … 281 261 * 3. special interrupt level context :: Context_Control_interrupt 282 262 * 283 * On some processors, it is cost-effective to save only the callee 284 * preserved registers during a task context switch. This means 285 * that the ISR code needs to save those registers which do not 286 * persist across function calls. It is not mandatory to make this 287 * distinctions between the caller/callee saves registers for the 288 * purpose of minimizing context saved during task switch and on interrupts. 
289 * If the cost of saving extra registers is minimal, simplicity is the 290 * choice. Save the same context on interrupt entry as for tasks in 291 * this case. 292 * 293 * Additionally, if gdb is to be made aware of tasks for this CPU, then 294 * care should be used in designing the context area. 295 * 296 * On some CPUs with hardware floating point support, the Context_Control_fp 297 * structure will not be used or it simply consist of an array of a 298 * fixed number of bytes. This is done when the floating point context 299 * is dumped by a "FP save context" type instruction and the format 300 * is not really defined by the CPU. In this case, there is no need 301 * to figure out the exact format -- only the size. Of course, although 302 * this is enough information for context switches, it is probably not 303 * enough for a debugger such as gdb. But that is another problem. 263 * On the SPARC, we are relatively conservative in that we save most 264 * of the CPU state in the context area. The ET (enable trap) bit and 265 * the CWP (current window pointer) fields of the PSR are considered 266 * system wide resources and are not maintained on a per-thread basis. 304 267 */ 305 268 306 269 #ifndef ASM 307 270 308 /* XXX */309 271 typedef struct { 310 unsigned32 g0; 311 unsigned32 g1; 272 /* 273 * Using a double g0_g1 will put everything in this structure on a 274 * double word boundary which allows us to use double word loads 275 * and stores safely in the context switch. 
276 */ 277 double g0_g1; 312 278 unsigned32 g2; 313 279 unsigned32 g3; … … 332 298 unsigned32 i4; 333 299 unsigned32 i5; 334 unsigned32 i6 ;300 unsigned32 i6_fp; 335 301 unsigned32 i7; 336 302 … … 341 307 unsigned32 o4; 342 308 unsigned32 o5; 343 unsigned32 o6 ;309 unsigned32 o6_sp; 344 310 unsigned32 o7; 345 311 346 unsigned32 wim;347 312 unsigned32 psr; 348 313 } Context_Control; … … 378 343 #define I4_OFFSET 0x50 379 344 #define I5_OFFSET 0x54 380 #define I6_ OFFSET0x58345 #define I6_FP_OFFSET 0x58 381 346 #define I7_OFFSET 0x5C 382 347 … … 387 352 #define O4_OFFSET 0x70 388 353 #define O5_OFFSET 0x74 389 #define O6_ OFFSET0x78354 #define O6_SP_OFFSET 0x78 390 355 #define O7_OFFSET 0x7C 391 356 392 #define WIM_OFFSET 0x80 393 #define PSR_OFFSET 0x84 357 #define PSR_OFFSET 0x80 358 359 #define CONTEXT_CONTROL_SIZE 0x84 360 361 /* 362 * The floating point context area. 363 */ 394 364 395 365 #ifndef ASM 396 366 397 /* XXX */398 367 typedef struct { 399 368 double f0_f1; … … 440 409 #define FSR_OFFSET 0x80 441 410 411 #define CONTEXT_CONTROL_FP_SIZE 0x84 412 442 413 #ifndef ASM 443 414 415 /* 416 * Context saved on stack for an interrupt. 417 * 418 * NOTE: The PSR, PC, and NPC are only saved in this structure for the 419 * benefit of the user's handler. 
420 */ 421 444 422 typedef struct { 445 unsigned32 special_interrupt_register_XXX; 423 CPU_Minimum_stack_frame Stack_frame; 424 unsigned32 psr; 425 unsigned32 pc; 426 unsigned32 npc; 427 unsigned32 g1; 428 unsigned32 g2; 429 unsigned32 g3; 430 unsigned32 g4; 431 unsigned32 g5; 432 unsigned32 g6; 433 unsigned32 g7; 434 unsigned32 i0; 435 unsigned32 i1; 436 unsigned32 i2; 437 unsigned32 i3; 438 unsigned32 i4; 439 unsigned32 i5; 440 unsigned32 i6_fp; 441 unsigned32 i7; 442 unsigned32 y; 443 unsigned32 pad0_offset; 446 444 } CPU_Interrupt_frame; 447 445 … … 452 450 */ 453 451 452 #define ISF_STACK_FRAME_OFFSET 0x00 453 #define ISF_PSR_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x00 454 #define ISF_PC_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x04 455 #define ISF_NPC_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x08 456 #define ISF_G1_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x0c 457 #define ISF_G2_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x10 458 #define ISF_G3_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x14 459 #define ISF_G4_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x18 460 #define ISF_G5_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x1c 461 #define ISF_G6_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x20 462 #define ISF_G7_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x24 463 #define ISF_I0_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x28 464 #define ISF_I1_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x2c 465 #define ISF_I2_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x30 466 #define ISF_I3_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x34 467 #define ISF_I4_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x38 468 #define ISF_I5_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x3c 469 #define ISF_I6_FP_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x40 470 #define ISF_I7_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x44 471 #define ISF_Y_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x48 472 #define ISF_PAD0_OFFSET CPU_MINIMUM_STACK_FRAME_SIZE + 0x4c 473 474 #define CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE CPU_MINIMUM_STACK_FRAME_SIZE + 0x50 454 475 #ifndef ASM 455 476 456 477 /* 457 
478 * The following table contains the information required to configure 458 * the XXXprocessor specific parameters.479 * the processor specific parameters. 459 480 * 460 481 * NOTE: The interrupt_stack_size field is required if … … 473 494 unsigned32 interrupt_stack_size; 474 495 unsigned32 extra_system_initialization_stack; 475 unsigned32 some_other_cpu_dependent_info_XXX;476 496 } rtems_cpu_table; 477 497 478 498 /* 479 * This variable is optional. It is used on CPUs on which it is difficult 480 * to generate an "uninitialized" FP context. It is filled in by 481 * _CPU_Initialize and copied into the task's FP context area during 482 * _CPU_Context_Initialize. 499 * This variable is contains the initialize context for the FP unit. 500 * It is filled in by _CPU_Initialize and copied into the task's FP 501 * context area during _CPU_Context_Initialize. 483 502 */ 484 503 … … 486 505 487 506 /* 488 * On some CPUs, software managed interrupt stack is supported.489 507 * This stack is allocated by the Interrupt Manager and the switch 490 508 * is performed in _ISR_Handler. These variables contain pointers … … 492 510 * for the interrupt stack. Since it is unknown whether the stack 493 511 * grows up or down (in general), this give the CPU dependent 494 * code the option of picking the version it wants to use. 495 * 496 * NOTE: These two variables are required if the macro 497 * CPU_HAS_SOFTWARE_INTERRUPT_STACK is defined as TRUE. 498 */ 499 500 EXTERN void *_CPU_Interrupt_stack_low; 501 EXTERN void *_CPU_Interrupt_stack_high; 502 503 /* 504 * With some compilation systems, it is difficult if not impossible to 505 * call a high-level language routine from assembly language. This 506 * is especially true of commercial Ada compilers and name mangling 507 * C++ ones. This variable can be optionally defined by the CPU porter 508 * and contains the address of the routine _Thread_Dispatch. 
This 509 * can make it easier to invoke that routine at the end of the interrupt 510 * sequence (if a dispatch is necessary). 511 */ 512 513 EXTERN void (*_CPU_Thread_dispatch_pointer)(); 514 515 /* 516 * Nothing prevents the porter from declaring more CPU specific variables. 517 */ 518 519 /* XXX: if needed, put more variables here */ 520 521 /* 522 * The size of the floating point context area. On some CPUs this 523 * will not be a "sizeof" because the format of the floating point 524 * area is not defined -- only the size is. This is usually on 525 * CPUs with a "floating point save context" instruction. 512 * code the option of picking the version it wants to use. Thus 513 * both must be present if either is. 514 * 515 * The SPARC supports a software based interrupt stack and these 516 * are required. 517 */ 518 519 EXTERN void *_CPU_Interrupt_stack_low; 520 EXTERN void *_CPU_Interrupt_stack_high; 521 522 #if defined(erc32) 523 524 /* 525 * ERC32 Specific Variables 526 */ 527 528 EXTERN unsigned32 _ERC32_MEC_Timer_Control_Mirror; 529 530 #endif 531 532 /* 533 * The following type defines an entry in the SPARC's trap table. 534 * 535 * NOTE: The instructions chosen are RTEMS dependent although one is 536 * obligated to use two of the four instructions to perform a 537 * long jump. The other instructions load one register with the 538 * trap type (a.k.a. vector) and another with the psr. 539 */ 540 541 typedef struct { 542 unsigned32 mov_psr_l0; /* mov %psr, %l0 */ 543 unsigned32 sethi_of_handler_to_l4; /* sethi %hi(_handler), %l4 */ 544 unsigned32 jmp_to_low_of_handler_plus_l4; /* jmp %l4 + %lo(_handler) */ 545 unsigned32 mov_vector_l3; /* mov _vector, %l3 */ 546 } CPU_Trap_table_entry; 547 548 /* 549 * This is the set of opcodes for the instructions loaded into a trap 550 * table entry. The routine which installs a handler is responsible 551 * for filling in the fields for the _handler address and the _vector 552 * trap type. 
553 * 554 * The constants following this structure are masks for the fields which 555 * must be filled in when the handler is installed. 556 */ 557 558 extern const CPU_Trap_table_entry _CPU_Trap_slot_template; 559 560 /* 561 * This is the executive's trap table which is installed into the TBR 562 * register. 563 * 564 * NOTE: Unfortunately, this must be aligned on a 4096 byte boundary. 565 * The GNU tools as of binutils 2.5.2 and gcc 2.7.0 would not 566 * align an entity to anything greater than a 512 byte boundary. 567 * 568 * Because of this, we pull a little bit of a trick. We allocate 569 * enough memory so we can grab an address on a 4096 byte boundary 570 * from this area. 571 */ 572 573 #define SPARC_TRAP_TABLE_ALIGNMENT 4096 574 575 EXTERN unsigned8 _CPU_Trap_Table_area[ 8192 ] 576 __attribute__ ((aligned (SPARC_TRAP_TABLE_ALIGNMENT))); 577 578 579 /* 580 * The size of the floating point context area. 526 581 */ 527 582 528 583 #define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp ) 584 585 #endif 529 586 530 587 /* … … 539 596 * This defines the number of entries in the ISR_Vector_table managed 540 597 * by the executive. 541 */ 542 543 #define CPU_INTERRUPT_NUMBER_OF_VECTORS 255 598 * 599 * On the SPARC, there are really only 256 vectors. However, the executive 600 * has no easy, fast, reliable way to determine which traps are synchronous 601 * and which are asynchronous. By default, synchronous traps return to the 602 * instruction which caused the interrupt. So if you install a software 603 * trap handler as an executive interrupt handler (which is desirable since 604 * RTEMS takes care of window and register issues), then the executive needs 605 * to know that the return address is to the trap rather than the instruction 606 * following the trap. 607 * 608 * So vectors 0 through 255 are treated as regular asynchronous traps which 609 * provide the "correct" return address. 
Vectors 256 through 512 are assumed 610 * by the executive to be synchronous and to require that the return address 611 * be fudged. 612 * 613 * If you use this mechanism to install a trap handler which must reexecute 614 * the instruction which caused the trap, then it should be installed as 615 * an asynchronous trap. This will avoid the executive changing the return 616 * address. 617 */ 618 619 #define CPU_INTERRUPT_NUMBER_OF_VECTORS 256 620 #define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER 511 621 622 #define SPARC_SYNCHRONOUS_TRAP_BIT_MASK 0x100 623 #define SPARC_ASYNCHRONOUS_TRAP( _trap ) (_trap) 624 #define SPARC_SYNCHRONOUS_TRAP( _trap ) ((_trap) + 256 ) 625 626 #define SPARC_REAL_TRAP_NUMBER( _trap ) ((_trap) % 256) 544 627 545 628 /* 546 629 * Should be large enough to run all tests. This insures 547 630 * that a "reasonable" small application should not have any problems. 548 */ 549 550 #define CPU_STACK_MINIMUM_SIZE (1024*2) 631 * 632 * This appears to be a fairly generous number for the SPARC since 633 * represents a call depth of about 20 routines based on the minimum 634 * stack frame. 635 */ 636 637 #define CPU_STACK_MINIMUM_SIZE (1024*2) 551 638 552 639 /* 553 640 * CPU's worst alignment requirement for data types on a byte boundary. This 554 641 * alignment does not take into account the requirements for the stack. 555 */ 556 557 #define CPU_ALIGNMENT 8 642 * 643 * On the SPARC, this is required for double word loads and stores. 644 */ 645 646 #define CPU_ALIGNMENT 8 558 647 559 648 /* … … 592 681 * 593 682 * NOTE: This must be a power of 2 either 0 or greater than CPU_ALIGNMENT. 683 * 684 * The alignment restrictions for the SPARC are not that strict but this 685 * should unsure that the stack is always sufficiently alignment that the 686 * window overflow, underflow, and flush routines can use double word loads 687 * and stores. 
594 688 */ 595 689 596 690 #define CPU_STACK_ALIGNMENT 16 597 598 #endif /* ASM */599 691 600 692 #ifndef ASM … … 632 724 * Map interrupt level in task mode onto the hardware that the CPU 633 725 * actually provides. Currently, interrupt levels which do not 634 * map onto the CPU in a generic fashion are undefined. Someday, 635 * it would be nice if these were "mapped" by the application 636 * via a callout. For example, m68k has 8 levels 0 - 7, levels 637 * 8 - 255 would be available for bsp/application specific meaning. 638 * This could be used to manage a programmable interrupt controller 639 * via the rtems_task_mode directive. 726 * map onto the CPU in a straight fashion are undefined. 640 727 */ 641 728 … … 660 747 * - initializing the floating point context 661 748 * 662 * This routine generally does not set any unnecessary register663 * in the context. The state of the "general data" registers is664 * undefined at task start time.665 *666 749 * NOTE: Implemented as a subroutine for the SPARC port. 667 750 */ 668 751 669 752 void _CPU_Context_Initialize( 670 Context_Control *_the_context, 671 unsigned32 *_stack_base, 672 unsigned32 _size, 673 unsigned32 _new_level, 674 void *_entry_point 753 Context_Control *the_context, 754 unsigned32 *stack_base, 755 unsigned32 size, 756 unsigned32 new_level, 757 void *entry_point, 758 boolean is_fp 675 759 ); 676 760 677 761 /* 678 762 * This routine is responsible for somehow restarting the currently 679 * executing task. If you are lucky, then all that is necessary 680 * is restoring the context. Otherwise, there will need to be 681 * a special assembly routine which does something special in this 682 * case. Context_Restore should work most of the time. It will 683 * not work if restarting self conflicts with the stack frame 684 * assumptions of restoring a context. 763 * executing task. 
764 * 765 * On the SPARC, this is is relatively painless but requires a small 766 * amount of wrapper code before using the regular restore code in 767 * of the context switch. 685 768 */ 686 769 … … 689 772 690 773 /* 691 * The purpose of this macro is to allow the initial pointer into 692 * a floating point context area (used to save the floating point 693 * context) to be at an arbitrary place in the floating point 694 * context area. 695 * 696 * This is necessary because some FP units are designed to have 697 * their context saved as a stack which grows into lower addresses. 698 * Other FP units can be saved by simply moving registers into offsets 699 * from the base of the context area. Finally some FP units provide 700 * a "dump context" instruction which could fill in from high to low 701 * or low to high based on the whim of the CPU designers. 774 * The FP context area for the SPARC is a simple structure and nothing 775 * special is required to find the "starting load point" 702 776 */ 703 777 … … 707 781 /* 708 782 * This routine initializes the FP context area passed to it to. 709 * There are a few standard ways in which to initialize the 710 * floating point context. The code included for this macro assumes 711 * that this is a CPU in which a "initial" FP context was saved into 712 * _CPU_Null_fp_context and it simply copies it to the destination 713 * context passed to it. 714 * 715 * Other models include (1) not doing anything, and (2) putting 716 * a "null FP status word" in the correct place in the FP context. 783 * 784 * The SPARC allows us to use the simple initialization model 785 * in which an "initial" FP context was saved into _CPU_Null_fp_context 786 * at CPU initialization and it is simply copied into the destination 787 * context. 
717 788 */ 718 789 719 790 #define _CPU_Context_Initialize_fp( _destination ) \ 720 { \791 do { \ 721 792 *((Context_Control_fp *) *((void **) _destination)) = _CPU_Null_fp_context; \ 722 } 793 } while (0) 723 794 724 795 /* end of Context handler macros */ … … 733 804 734 805 #define _CPU_Fatal_halt( _error ) \ 735 { \ 736 } 806 do { \ 807 unsigned32 level; \ 808 \ 809 sparc_disable_interrupts( level ); \ 810 asm volatile ( "mov %0, %%g1 " : "=r" (level) : "0" (level) ); \ 811 while (1); /* loop forever */ \ 812 } while (0) 737 813 738 814 /* end of Fatal Error manager macros */ … … 741 817 742 818 /* 743 * This routine sets _output to the bit number of the first bit 744 * set in _value. _value is of CPU dependent type Priority_Bit_map_control. 745 * This type may be either 16 or 32 bits wide although only the 16 746 * least significant bits will be used. 747 * 748 * There are a number of variables in using a "find first bit" type 749 * instruction. 750 * 751 * (1) What happens when run on a value of zero? 752 * (2) Bits may be numbered from MSB to LSB or vice-versa. 753 * (3) The numbering may be zero or one based. 754 * (4) The "find first bit" instruction may search from MSB or LSB. 755 * 756 * The executive guarantees that (1) will never happen so it is not a concern. 757 * (2),(3), (4) are handled by the macros _CPU_Priority_mask() and 758 * _CPU_Priority_Bits_index(). These three form a set of routines 759 * which must logically operate together. Bits in the _value are 760 * set and cleared based on masks built by _CPU_Priority_mask(). 761 * The basic major and minor values calculated by _Priority_Major() 762 * and _Priority_Minor() are "massaged" by _CPU_Priority_Bits_index() 763 * to properly range between the values returned by the "find first bit" 764 * instruction. This makes it possible for _Priority_Get_highest() to 765 * calculate the major and directly index into the minor table. 
766 * This mapping is necessary to ensure that 0 (a high priority major/minor) 767 * is the first bit found. 768 * 769 * This entire "find first bit" and mapping process depends heavily 770 * on the manner in which a priority is broken into a major and minor 771 * components with the major being the 4 MSB of a priority and minor 772 * the 4 LSB. Thus (0 << 4) + 0 corresponds to priority 0 -- the highest 773 * priority. And (15 << 4) + 14 corresponds to priority 254 -- the next 774 * to the lowest priority. 775 * 776 * If your CPU does not have a "find first bit" instruction, then 777 * there are ways to make do without it. Here are a handful of ways 778 * to implement this in software: 779 * 780 * - a series of 16 bit test instructions 781 * - a "binary search using if's" 782 * - _number = 0 783 * if _value > 0x00ff 784 * _value >>=8 785 * _number = 8; 786 * 787 * if _value > 0x0000f 788 * _value >=8 789 * _number += 4 790 * 791 * _number += bit_set_table[ _value ] 792 * 793 * where bit_set_table[ 16 ] has values which indicate the first 794 * bit set 795 */ 796 797 #ifndef INIT 798 extern const unsigned char __log2table[256]; 819 * The SPARC port uses the generic C algorithm for bitfield scan if the 820 * CPU model does not have a scan instruction. 
821 */ 822 823 #if ( SPARC_HAS_BITSCAN == 0 ) 824 #define CPU_USE_GENERIC_BITFIELD_CODE TRUE 825 #define CPU_USE_GENERIC_BITFIELD_DATA TRUE 799 826 #else 800 const unsigned char __log2table[256] = { 801 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 802 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 803 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 804 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 805 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 806 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 807 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 808 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 809 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 810 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 811 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 812 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 813 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 814 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 815 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 816 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 817 }; 827 #error "scan instruction not currently supported by RTEMS!!" 818 828 #endif 819 829 820 #define _CPU_Bitfield_Find_first_bit( _value, _output ) \821 { \822 register __value = (_value); \823 \824 if ( !(__value & 0xff00) ) \825 (_output) = __log2table[ __value ]; \826 else \827 (_output) = __log2table[ __value >> 8 ] + 8; \828 }829 830 831 830 /* end of Bitfield handler macros */ 832 831 833 /* 834 * This routine builds the mask which corresponds to the bit fields 835 * as searched by _CPU_Bitfield_Find_first_bit(). See the discussion 836 * for that routine. 837 */ 838 839 #define _CPU_Priority_Mask( _bit_number ) \ 840 ( 0x8000 >> (_bit_number) ) 841 842 /* 843 * This routine translates the bit numbers returned by 844 * _CPU_Bitfield_Find_first_bit() into something suitable for use as 845 * a major or minor component of a priority. See the discussion 846 * for that routine. 
847 */ 848 849 #define _CPU_Priority_Bits_index( _priority ) \ 850 (15 - (_priority)) 832 /* Priority handler handler macros */ 833 834 /* 835 * The SPARC port uses the generic C algorithm for bitfield scan if the 836 * CPU model does not have a scan instruction. 837 */ 838 839 #if ( SPARC_HAS_BITSCAN == 1 ) 840 #error "scan instruction not currently supported by RTEMS!!" 841 #endif 851 842 852 843 /* end of Priority handler macros */ … … 863 854 rtems_cpu_table *cpu_table, 864 855 void (*thread_dispatch) 856 ); 857 858 /* 859 * _CPU_ISR_install_raw_handler 860 * 861 * This routine installs new_handler to be directly called from the trap 862 * table. 863 */ 864 865 void _CPU_ISR_install_raw_handler( 866 unsigned32 vector, 867 proc_ptr new_handler, 868 proc_ptr *old_handler 865 869 ); 866 870 … … 877 881 ); 878 882 879 /* 880 * _CPU_Install_interrupt_stack 881 * 882 * This routine installs the hardware interrupt stack pointer. 883 * 884 * NOTE: It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK 885 * is TRUE. 886 */ 887 888 void _CPU_Install_interrupt_stack( void ); 889 883 #if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE) 884 890 885 /* 891 886 * _CPU_Internal_threads_Idle_thread_body 892 887 * 893 * This routine is the CPU dependent IDLE thread body. 894 * 895 * NOTE: It need only be provided if CPU_PROVIDES_IDLE_THREAD_BODY 896 * is TRUE. 897 */ 898 888 * Some SPARC implementations have low power, sleep, or idle modes. This 889 * tries to take advantage of those models. 890 */ 891 899 892 void _CPU_Internal_threads_Idle_thread_body( void ); 893 894 #endif /* CPU_PROVIDES_IDLE_THREAD_BODY */ 900 895 901 896 /* … … 914 909 * 915 910 * This routine is generallu used only to restart self in an 916 * efficient manner. It may simply be a label in _CPU_Context_switch. 917 * 918 * NOTE: May be unnecessary to reload some registers. 911 * efficient manner. 919 912 */ 920 913 … … 943 936 ); 944 937 945 /* The following routine swaps the endian format of an unsigned int. 
938 /* 939 * CPU_swap_u32 940 * 941 * The following routine swaps the endian format of an unsigned int. 946 942 * It must be static because it is referenced indirectly. 947 943 * 948 * This version will work on any processor, but if there is a better 949 * way for your CPU PLEASE use it. The most common way to do this is to: 944 * This version will work on any processor, but if you come across a better 945 * way for the SPARC PLEASE use it. The most common way to swap a 32-bit 946 * entity as shown below is not any more efficient on the SPARC. 950 947 * 951 948 * swap least significant two bytes with 16-bit rotate … … 953 950 * swap most significant two bytes with 16-bit rotate 954 951 * 955 * Some CPUs have special instructions which swap a 32-bit quantity in 956 * a single instruction (e.g. i486). It is probably best to avoid 957 * an "endian swapping control bit" in the CPU. One good reason is 958 * that interrupts would probably have to be disabled to insure that 959 * an interrupt does not try to access the same "chunk" with the wrong 960 * endian. Another good reason is that on some CPUs, the endian bit 961 * endianness for ALL fetches -- both code and data -- so the code 962 * will be fetched incorrectly. 952 * It is not obvious how the SPARC can do significantly better than the 953 * generic code. gcc 2.7.0 only generates about 12 instructions for the 954 * following code at optimization level four (i.e. -O4). 963 955 */ 964 956 -
c/src/exec/score/cpu/sparc/cpu_asm.s
rea74482 r9700578 3 3 * This file contains the basic algorithms for all assembly code used 4 4 * in an specific CPU port of RTEMS. These algorithms must be implemented 5 * in assembly language 5 * in assembly language. 6 6 * 7 7 * $Id$ … … 11 11 #include <rtems/score/cpu.h> 12 12 13 #if (SPARC_HAS_FPU == 1) 14 13 15 /* 14 * _CPU_Context_save_fp 16 * void _CPU_Context_save_fp( 17 * void **fp_context_ptr 18 * ) 15 19 * 16 20 * This routine is responsible for saving the FP context … … 18 22 * from is changed then the pointer is modified by this routine. 19 23 * 20 * Sometimes a macro implementation of this is in cpu.h which dereferences 21 * the ** and a similarly named routine in this file is passed something 22 * like a (Context_Control_fp *). The general rule on making this decision 23 * is to avoid writing assembly language. 24 * 25 * void _CPU_Context_save_fp( 26 * void **fp_context_ptr 27 * ) 28 * { 29 * } 24 * NOTE: See the README in this directory for information on the 25 * management of the "EF" bit in the PSR. 30 26 */ 31 27 … … 33 29 PUBLIC(_CPU_Context_save_fp) 34 30 SYM(_CPU_Context_save_fp): 35 save %sp,-104,%sp 36 ld [%i0],%l0 37 std %f0,[%l0+FO_F1_OFFSET] 38 std %f2,[%l0+F2_F3_OFFSET] 39 std %f4,[%l0+F4_F5_OFFSET] 40 std %f6,[%l0+F6_F7_OFFSET] 41 std %f8,[%l0+F8_F9_OFFSET] 42 std %f10,[%l0+F1O_F11_OFFSET] 43 std %f12,[%l0+F12_F13_OFFSET] 44 std %f14,[%l0+F14_F15_OFFSET] 45 std %f16,[%l0+F16_F17_OFFSET] 46 std %f18,[%l0+F18_F19_OFFSET] 47 std %f20,[%l0+F2O_F21_OFFSET] 48 std %f22,[%l0+F22_F23_OFFSET] 49 std %f24,[%l0+F24_F25_OFFSET] 50 std %f26,[%l0+F26_F27_OFFSET] 51 std %f28,[%l0+F28_F29_OFFSET] 52 std %f30,[%l0+F3O_F31_OFFSET] 53 st %fsr,[%l0+FSR_OFFSET] 31 save %sp, -CPU_MINIMUM_STACK_FRAME_SIZE, %sp 32 33 /* 34 * The following enables the floating point unit. 35 */ 36 37 mov %psr, %l0 38 sethi %hi(SPARC_PSR_EF_MASK), %l1 39 or %l1, %lo(SPARC_PSR_EF_MASK), %l1 40 or %l0, %l1, %l0 41 mov %l0, %psr ! 
**** ENABLE FLOAT ACCESS **** 42 43 ld [%i0], %l0 44 std %f0, [%l0 + FO_F1_OFFSET] 45 std %f2, [%l0 + F2_F3_OFFSET] 46 std %f4, [%l0 + F4_F5_OFFSET] 47 std %f6, [%l0 + F6_F7_OFFSET] 48 std %f8, [%l0 + F8_F9_OFFSET] 49 std %f10, [%l0 + F1O_F11_OFFSET] 50 std %f12, [%l0 + F12_F13_OFFSET] 51 std %f14, [%l0 + F14_F15_OFFSET] 52 std %f16, [%l0 + F16_F17_OFFSET] 53 std %f18, [%l0 + F18_F19_OFFSET] 54 std %f20, [%l0 + F2O_F21_OFFSET] 55 std %f22, [%l0 + F22_F23_OFFSET] 56 std %f24, [%l0 + F24_F25_OFFSET] 57 std %f26, [%l0 + F26_F27_OFFSET] 58 std %f28, [%l0 + F28_F29_OFFSET] 59 std %f30, [%l0 + F3O_F31_OFFSET] 60 st %fsr, [%l0 + FSR_OFFSET] 54 61 ret 55 62 restore 56 63 57 64 /* 58 * _CPU_Context_restore_fp 65 * void _CPU_Context_restore_fp( 66 * void **fp_context_ptr 67 * ) 59 68 * 60 69 * This routine is responsible for restoring the FP context … … 62 71 * from is changed then the pointer is modified by this routine. 63 72 * 64 * Sometimes a macro implementation of this is in cpu.h which dereferences 65 * the ** and a similarly named routine in this file is passed something 66 * like a (Context_Control_fp *). The general rule on making this decision 67 * is to avoid writing assembly language. 68 * 69 * void _CPU_Context_restore_fp( 70 * void **fp_context_ptr 71 * ) 72 * { 73 * } 73 * NOTE: See the README in this directory for information on the 74 * management of the "EF" bit in the PSR. 
74 75 */ 75 76 … … 77 78 PUBLIC(_CPU_Context_restore_fp) 78 79 SYM(_CPU_Context_restore_fp): 79 save %sp,-104,%sp 80 ld [%o0],%l0 81 ldd [%l0+FO_F1_OFFSET],%f0 82 ldd [%l0+F2_F3_OFFSET],%f2 83 ldd [%l0+F4_F5_OFFSET],%f4 84 ldd [%l0+F6_F7_OFFSET],%f6 85 ldd [%l0+F8_F9_OFFSET],%f8 86 ldd [%l0+F1O_F11_OFFSET],%f10 87 ldd [%l0+F12_F13_OFFSET],%f12 88 ldd [%l0+F14_F15_OFFSET],%f14 89 ldd [%l0+F16_F17_OFFSET],%f16 90 ldd [%l0+F18_F19_OFFSET],%f18 91 ldd [%l0+F2O_F21_OFFSET],%f20 92 ldd [%l0+F22_F23_OFFSET],%f22 93 ldd [%l0+F24_F25_OFFSET],%f24 94 ldd [%l0+F26_F27_OFFSET],%f26 95 ldd [%l0+F28_F29_OFFSET],%f28 96 ldd [%l0+F3O_F31_OFFSET],%f30 97 ld [%l0+FSR_OFFSET],%fsr 80 save %sp, -CPU_MINIMUM_STACK_FRAME_SIZE , %sp 81 82 /* 83 * The following enables the floating point unit. 84 */ 85 86 mov %psr, %l0 87 sethi %hi(SPARC_PSR_EF_MASK), %l1 88 or %l1, %lo(SPARC_PSR_EF_MASK), %l1 89 or %l0, %l1, %l0 90 mov %l0, %psr ! **** ENABLE FLOAT ACCESS **** 91 92 ld [%i0], %l0 93 ldd [%l0 + FO_F1_OFFSET], %f0 94 ldd [%l0 + F2_F3_OFFSET], %f2 95 ldd [%l0 + F4_F5_OFFSET], %f4 96 ldd [%l0 + F6_F7_OFFSET], %f6 97 ldd [%l0 + F8_F9_OFFSET], %f8 98 ldd [%l0 + F1O_F11_OFFSET], %f10 99 ldd [%l0 + F12_F13_OFFSET], %f12 100 ldd [%l0 + F14_F15_OFFSET], %f14 101 ldd [%l0 + F16_F17_OFFSET], %f16 102 ldd [%l0 + F18_F19_OFFSET], %f18 103 ldd [%l0 + F2O_F21_OFFSET], %f20 104 ldd [%l0 + F22_F23_OFFSET], %f22 105 ldd [%l0 + F24_F25_OFFSET], %f24 106 ldd [%l0 + F26_F27_OFFSET], %f26 107 ldd [%l0 + F28_F29_OFFSET], %f28 108 ldd [%l0 + F3O_F31_OFFSET], %f30 109 ld [%l0 + FSR_OFFSET], %fsr 98 110 ret 99 111 restore 100 112 101 /* _CPU_Context_switch 102 * 103 * This routine performs a normal non-FP context switch. 104 * 113 #endif /* SPARC_HAS_FPU */ 114 115 /* 105 116 * void _CPU_Context_switch( 106 117 * Context_Control *run, 107 118 * Context_Control *heir 108 119 * ) 109 * {110 * }120 * 121 * This routine performs a normal non-FP context switch. 
111 122 */ 112 113 /* from gcc-2.7.0/config/sparc/sparc.h on register usage */114 115 /* 1 for registers that have pervasive standard uses116 and are not available for the register allocator.117 g0 is used for the condition code and not to represent %g0, which is118 hardwired to 0, so reg 0 is *not* fixed.119 On non-v9 systems:120 g1 is free to use as temporary.121 g2-g4 are reserved for applications. Gcc normally uses them as122 temporaries, but this can be disabled via the -mno-app-regs option.123 g5 through g7 are reserved for the operating system.124 On v9 systems:125 g1 and g5 are free to use as temporaries.126 g2-g4 are reserved for applications (the compiler will not normally use127 them, but they can be used as temporaries with -mapp-regs).128 g6-g7 are reserved for the operating system.129 ??? Register 1 is used as a temporary by the 64 bit sethi pattern, so must130 currently be a fixed register until this pattern is rewritten.131 Register 1 is also used when restoring call-preserved registers in large132 stack frames. 
*/133 134 123 135 124 .align 4 136 125 PUBLIC(_CPU_Context_switch) 137 126 SYM(_CPU_Context_switch): 138 ta 0x03 /* flush registers */ 139 140 /* skip g0 */ 141 st %g1,[%o0+G1_OFFSET] /* globals */ 142 st %g2,[%o0+G2_OFFSET] 143 st %g3,[%o0+G3_OFFSET] 144 st %g4,[%o0+G4_OFFSET] 145 st %g5,[%o0+G5_OFFSET] 146 st %g6,[%o0+G6_OFFSET] 147 st %g7,[%o0+G7_OFFSET] 148 149 st %l0,[%o0+L0_OFFSET] 150 st %l1,[%o0+L1_OFFSET] 151 st %l2,[%o0+L2_OFFSET] 152 st %l3,[%o0+L3_OFFSET] 153 st %l4,[%o0+L4_OFFSET] 154 st %l5,[%o0+L5_OFFSET] 155 st %l6,[%o0+L6_OFFSET] 156 st %l7,[%o0+L7_OFFSET] 157 158 st %i0,[%o0+I0_OFFSET] 159 st %i1,[%o0+I1_OFFSET] 160 st %i2,[%o0+I2_OFFSET] 161 st %i3,[%o0+I3_OFFSET] 162 st %i4,[%o0+I4_OFFSET] 163 st %i5,[%o0+I5_OFFSET] 164 st %i6,[%o0+I6_OFFSET] 165 st %i7,[%o0+I7_OFFSET] 166 167 st %o0,[%o0+O0_OFFSET] 168 st %o1,[%o0+O1_OFFSET] 169 st %o2,[%o0+O2_OFFSET] 170 st %o3,[%o0+O3_OFFSET] 171 st %o4,[%o0+O4_OFFSET] 172 st %o5,[%o0+O5_OFFSET] 173 st %o6,[%o0+O6_OFFSET] 174 st %o7,[%o0+O7_OFFSET] 175 176 rd %psr,%o2 177 st %o2,[%o0+PSR_OFFSET] /* save status register */ 178 179 /* enter here with o1 = context to restore */ 180 /* o2 = psr */ 181 restore: 182 183 ld [%o1+PSR_OFFSET],%o0 184 and %o2,31,%o2 /* g1 = cwp */ 185 and %o0,-32,%o0 /* o0 = psr w/o cwp */ 186 or %o0,%o2,%o2 /* o2 = new psr */ 187 wr %o2,0,%psr /* restore status register */ 188 189 /* skip g0 */ 190 ld [%o1+G1_OFFSET],%g1 191 ld [%o1+G2_OFFSET],%g2 192 ld [%o1+G3_OFFSET],%g3 193 ld [%o1+G4_OFFSET],%g4 194 ld [%o1+G5_OFFSET],%g5 195 ld [%o1+G6_OFFSET],%g6 196 ld [%o1+G7_OFFSET],%g7 197 198 ld [%o1+L0_OFFSET],%l0 199 ld [%o1+L1_OFFSET],%l1 200 ld [%o1+L2_OFFSET],%l2 201 ld [%o1+L3_OFFSET],%l3 202 ld [%o1+L4_OFFSET],%l4 203 ld [%o1+L5_OFFSET],%l5 204 ld [%o1+L6_OFFSET],%l6 205 ld [%o1+L7_OFFSET],%l7 206 207 ld [%o1+I0_OFFSET],%i0 208 ld [%o1+I1_OFFSET],%i1 209 ld [%o1+I2_OFFSET],%i2 210 ld [%o1+I3_OFFSET],%i3 211 ld [%o1+I4_OFFSET],%i4 212 ld [%o1+I5_OFFSET],%i5 213 ld [%o1+I6_OFFSET],%i6 
214 ld [%o1+I7_OFFSET],%i7 215 216 ld [%o1+O0_OFFSET],%o0 217 /* do o1 last to avoid destroying heir context pointer */ 218 ld [%o1+O2_OFFSET],%o2 219 ld [%o1+O3_OFFSET],%o3 220 ld [%o1+O4_OFFSET],%o4 221 ld [%o1+O5_OFFSET],%o5 222 ld [%o1+O6_OFFSET],%o6 223 ld [%o1+O7_OFFSET],%o7 224 225 ld [%o1+O1_OFFSET],%o1 /* overwrite heir pointer */ 226 227 jmp %o7 + 8 /* return */ 228 nop /* delay slot */ 229 127 ! skip g0 128 st %g1, [%o0 + G1_OFFSET] ! save the global registers 129 std %g2, [%o0 + G2_OFFSET] 130 std %g4, [%o0 + G4_OFFSET] 131 std %g6, [%o0 + G6_OFFSET] 132 133 std %l0, [%o0 + L0_OFFSET] ! save the local registers 134 std %l2, [%o0 + L2_OFFSET] 135 std %l4, [%o0 + L4_OFFSET] 136 std %l6, [%o0 + L6_OFFSET] 137 138 std %i0, [%o0 + I0_OFFSET] ! save the input registers 139 std %i2, [%o0 + I2_OFFSET] 140 std %i4, [%o0 + I4_OFFSET] 141 std %i6, [%o0 + I6_FP_OFFSET] 142 143 std %o0, [%o0 + O0_OFFSET] ! save the output registers 144 std %o2, [%o0 + O2_OFFSET] 145 std %o4, [%o0 + O4_OFFSET] 146 std %o6, [%o0 + O6_SP_OFFSET] 147 148 rd %psr, %o2 149 st %o2, [%o0 + PSR_OFFSET] ! save status register 150 151 /* 152 * This is entered from _CPU_Context_restore with: 153 * o1 = context to restore 154 * o2 = psr 155 */ 156 157 PUBLIC(_CPU_Context_restore_heir) 158 SYM(_CPU_Context_restore_heir): 159 /* 160 * Flush all windows with valid contents except the current one. 161 * In examining the set register windows, one may logically divide 162 * the windows into sets (some of which may be empty) based on their 163 * current status: 164 * 165 * + current (i.e. in use), 166 * + used (i.e. a restore would not trap) 167 * + invalid (i.e. 1 in corresponding bit in WIM) 168 * + unused 169 * 170 * Either the used or unused set of windows may be empty. 171 * 172 * NOTE: We assume only one bit is set in the WIM at a time. 
173 * 174 * Given a CWP of 5 and a WIM of 0x1, the registers are divided 175 * into sets as follows: 176 * 177 * + 0 - invalid 178 * + 1-4 - unused 179 * + 5 - current 180 * + 6-7 - used 181 * 182 * In this case, we only would save the used windows -- 6 and 7. 183 * 184 * Traps are disabled for the same logical period as in a 185 * flush all windows trap handler. 186 * 187 * Register Usage while saving the windows: 188 * g1 = current PSR 189 * g2 = current wim 190 * g3 = CWP 191 * g4 = wim scratch 192 * g5 = scratch 193 */ 194 195 ld [%o1 + PSR_OFFSET], %g1 ! g1 = saved psr 196 197 and %o2, SPARC_PSR_CWP_MASK, %g3 ! g3 = CWP 198 ! g1 = psr w/o cwp 199 andn %g1, SPARC_PSR_ET_MASK | SPARC_PSR_CWP_MASK, %g1 200 or %g1, %g3, %g1 ! g1 = heirs psr 201 mov %g1, %psr ! restore status register and 202 ! **** DISABLE TRAPS **** 203 mov %wim, %g2 ! g2 = wim 204 mov 1, %g4 205 sll %g4, %g3, %g4 ! g4 = WIM mask for CW invalid 206 207 save_frame_loop: 208 sll %g4, 1, %g5 ! rotate the "wim" left 1 209 srl %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g4 210 or %g4, %g5, %g4 ! g4 = wim if we do one restore 211 212 /* 213 * If a restore would not underflow, then continue. 214 */ 215 216 andcc %g4, %g2, %g0 ! Any windows to flush? 217 bnz done_flushing ! No, then continue 218 nop 219 220 restore ! back one window 221 222 /* 223 * Now save the window just as if we overflowed to it. 224 */ 225 226 std %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET] 227 std %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET] 228 std %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET] 229 std %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET] 230 231 std %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET] 232 std %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET] 233 std %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET] 234 std %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET] 235 236 ba save_frame_loop 237 nop 238 239 done_flushing: 240 241 add %g3, 1, %g3 ! calculate desired WIM 242 and %g3, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g3 243 mov 1, %g4 244 sll %g4, %g3, %g4 ! 
g4 = new WIM 245 mov %g4, %wim 246 247 or %g1, SPARC_PSR_ET_MASK, %g1 248 mov %g1, %psr ! **** ENABLE TRAPS **** 249 ! and restore CWP 250 nop 251 nop 252 nop 253 254 ! skip g0 255 ld [%o1 + G1_OFFSET], %g1 ! restore the global registers 256 ldd [%o1 + G2_OFFSET], %g2 257 ldd [%o1 + G4_OFFSET], %g4 258 ldd [%o1 + G6_OFFSET], %g6 259 260 ldd [%o1 + L0_OFFSET], %l0 ! restore the local registers 261 ldd [%o1 + L2_OFFSET], %l2 262 ldd [%o1 + L4_OFFSET], %l4 263 ldd [%o1 + L6_OFFSET], %l6 264 265 ldd [%o1 + I0_OFFSET], %i0 ! restore the output registers 266 ldd [%o1 + I2_OFFSET], %i2 267 ldd [%o1 + I4_OFFSET], %i4 268 ldd [%o1 + I6_FP_OFFSET], %i6 269 270 ldd [%o1 + O2_OFFSET], %o2 ! restore the output registers 271 ldd [%o1 + O4_OFFSET], %o4 272 ldd [%o1 + O6_SP_OFFSET], %o6 273 ! do o0/o1 last to avoid destroying heir context pointer 274 ldd [%o1 + O0_OFFSET], %o0 ! overwrite heir pointer 275 276 jmp %o7 + 8 ! return 277 nop ! delay slot 230 278 231 279 /* 232 * _CPU_Context_restore233 *234 * This routine is generallu used only to restart self in an235 * efficient manner. It may simply be a label in _CPU_Context_switch.236 *237 * NOTE: May be unnecessary to reload some registers.238 *239 280 * void _CPU_Context_restore( 240 281 * Context_Control *new_context 241 282 * ) 242 * { 243 * } 283 * 284 * This routine is generally used only to perform restart self. 285 * 286 * NOTE: It is unnecessary to reload some registers. 244 287 */ 245 288 … … 247 290 PUBLIC(_CPU_Context_restore) 248 291 SYM(_CPU_Context_restore): 249 save %sp, - 104, %sp /* save a stack frame */250 ta 0x03 /* flush registers */251 rd %psr,%o2252 ba restore253 mov %i0,%o1 /* in the delay slot */ 254 255 /* void _ISR_Handler()292 save %sp, -CPU_MINIMUM_STACK_FRAME_SIZE, %sp 293 rd %psr, %o2 294 ba SYM(_CPU_Context_restore_heir) 295 mov %i0, %o1 ! in the delay slot 296 297 /* 298 * void _ISR_Handler() 256 299 * 257 300 * This routine provides the RTEMS interrupt management. 
258 301 * 259 * void _ISR_Handler() 260 * { 261 * } 302 * We enter this handler from the 4 instructions in the trap table with 303 * the following registers assumed to be set as shown: 304 * 305 * l0 = PSR 306 * l1 = PC 307 * l2 = nPC 308 * l3 = trap type 309 * 310 * NOTE: By an executive defined convention, trap type is between 0 and 255 if 311 * it is an asynchonous trap and 256 and 511 if it is synchronous. 262 312 */ 263 313 … … 265 315 PUBLIC(_ISR_Handler) 266 316 SYM(_ISR_Handler): 267 ret 268 269 /* 270 * This discussion ignores a lot of the ugly details in a real 271 * implementation such as saving enough registers/state to be 272 * able to do something real. Keep in mind that the goal is 273 * to invoke a user's ISR handler which is written in C and 274 * uses a certain set of registers. 275 * 276 * Also note that the exact order is to a large extent flexible. 277 * Hardware will dictate a sequence for a certain subset of 278 * _ISR_Handler while requirements for setting 279 */ 280 281 /* 282 * At entry to "common" _ISR_Handler, the vector number must be 283 * available. On some CPUs the hardware puts either the vector 284 * number or the offset into the vector table for this ISR in a 285 * known place. If the hardware does not give us this information, 286 * then the assembly portion of RTEMS for this port will contain 287 * a set of distinct interrupt entry points which somehow place 288 * the vector number in a known place (which is safe if another 289 * interrupt nests this one) and branches to _ISR_Handler. 
290 * 291 * save some or all context on stack 292 * may need to save some special interrupt information for exit 293 * 294 * #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE ) 295 * if ( _ISR_Nest_level == 0 ) 296 * switch to software interrupt stack 297 * #endif 298 * 299 * _ISR_Nest_level++; 300 * 301 * _Thread_Dispatch_disable_level++; 302 * 303 * (*_ISR_Vector_table[ vector ])( vector ); 304 * 305 * --_ISR_Nest_level; 306 * 307 * if ( _ISR_Nest_level ) 308 * goto the label "exit interrupt (simple case)" 309 * 310 * #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE ) 311 * restore stack 312 * #endif 313 * 314 * if ( !_Context_Switch_necessary ) 315 * goto the label "exit interrupt (simple case)" 316 * 317 * if ( !_ISR_Signals_to_thread_executing ) 318 * goto the label "exit interrupt (simple case)" 319 * 320 * call _Thread_Dispatch() or prepare to return to _ISR_Dispatch 321 * 322 * prepare to get out of interrupt 323 * return from interrupt (maybe to _ISR_Dispatch) 324 * 325 * LABEL "exit interrupt (simple case): 326 * prepare to get out of interrupt 327 * return from interrupt 328 */ 317 /* 318 * Fix the return address for synchronous traps. 319 */ 320 321 andcc %l3, SPARC_SYNCHRONOUS_TRAP_BIT_MASK, %g0 322 ! Is this a synchronous trap? 323 be,a win_ovflow ! No, then skip the adjustment 324 nop ! DELAY 325 mov %l2, %l1 ! do not return to the instruction 326 add %l2, 4, %l2 ! indicated 327 328 win_ovflow: 329 /* 330 * Save the globals this block uses. 331 * 332 * These registers are not restored from the locals. Their contents 333 * are saved directly from the locals into the ISF below. 334 */ 335 336 mov %g4, %l4 ! save the globals this block uses 337 mov %g5, %l5 338 339 /* 340 * When at a "window overflow" trap, (wim == (1 << cwp)). 341 * If we get here like that, then process a window overflow. 342 */ 343 344 rd %wim, %g4 345 srl %g4, %l0, %g5 ! g5 = win >> cwp ; shift count and CWP 346 ! are LS 5 bits ; how convenient :) 347 cmp %g5, 1 ! 
Is this an invalid window? 348 bne dont_do_the_window ! No, then skip all this stuff 349 ! we are using the delay slot 350 351 /* 352 * The following is same as a 1 position right rotate of WIM 353 */ 354 355 srl %g4, 1, %g5 ! g5 = WIM >> 1 356 sll %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %g4 357 ! g4 = WIM << (Number Windows - 1) 358 or %g4, %g5, %g4 ! g4 = (WIM >> 1) | 359 ! (WIM << (Number Windows - 1)) 360 361 /* 362 * At this point: 363 * 364 * g4 = the new WIM 365 * g5 is free 366 */ 367 368 /* 369 * Since we are tinkering with the register windows, we need to 370 * make sure that all the required information is in global registers. 371 */ 372 373 save ! Save into the window 374 wr %g4, 0, %wim ! WIM = new WIM 375 nop ! delay slots 376 nop 377 nop 378 379 /* 380 * Now save the window just as if we overflowed to it. 381 */ 382 383 std %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET] 384 std %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET] 385 std %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET] 386 std %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET] 387 388 std %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET] 389 std %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET] 390 std %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET] 391 std %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET] 392 393 restore 394 nop 395 396 dont_do_the_window: 397 /* 398 * Global registers %g4 and %g5 are saved directly from %l4 and 399 * %l5 directly into the ISF below. 400 */ 401 402 save_isf: 403 404 /* 405 * Save the state of the interrupted task -- especially the global 406 * registers -- in the Interrupt Stack Frame. Note that the ISF 407 * includes a regular minimum stack frame which will be used if 408 * needed by register window overflow and underflow handlers. 409 * 410 * REGISTERS SAME AS AT _ISR_Handler 411 */ 412 413 sub %fp, CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp 414 ! make space for ISF 415 416 std %l0, [%sp + ISF_PSR_OFFSET] ! save psr, PC 417 st %l2, [%sp + ISF_NPC_OFFSET] ! save nPC 418 st %g1, [%sp + ISF_G1_OFFSET] ! 
save g1 419 std %g2, [%sp + ISF_G2_OFFSET] ! save g2, g3 420 std %l4, [%sp + ISF_G4_OFFSET] ! save g4, g5 -- see above 421 std %g6, [%sp + ISF_G6_OFFSET] ! save g6, g7 422 423 std %i0, [%sp + ISF_I0_OFFSET] ! save i0, i1 424 std %i2, [%sp + ISF_I2_OFFSET] ! save i2, i3 425 std %i4, [%sp + ISF_I4_OFFSET] ! save i4, i5 426 std %i6, [%sp + ISF_I6_FP_OFFSET] ! save i6/fp, i7 427 428 rd %y, %g1 429 st %g1, [%sp + ISF_Y_OFFSET] ! save y 430 431 mov %sp, %o1 ! 2nd arg to ISR Handler 432 433 /* 434 * Increment ISR nest level and Thread dispatch disable level. 435 * 436 * Register usage for this section: 437 * 438 * l4 = _Thread_Dispatch_disable_level pointer 439 * l5 = _ISR_Nest_level pointer 440 * l6 = _Thread_Dispatch_disable_level value 441 * l7 = _ISR_Nest_level value 442 * 443 * NOTE: It is assumed that l4 - l7 will be preserved until the ISR 444 * nest and thread dispatch disable levels are unnested. 445 */ 446 447 sethi %hi(SYM(_Thread_Dispatch_disable_level)), %l4 448 ld [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))], %l6 449 sethi %hi(SYM(_ISR_Nest_level)), %l5 450 ld [%l5 + %lo(SYM(_ISR_Nest_level))], %l7 451 452 add %l6, 1, %l6 453 st %l6, [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))] 454 455 add %l7, 1, %l7 456 st %l7, [%l5 + %lo(SYM(_ISR_Nest_level))] 457 458 /* 459 * If ISR nest level was zero (now 1), then switch stack. 460 */ 461 462 mov %sp, %fp 463 subcc %l7, 1, %l7 ! outermost interrupt handler? 464 bnz dont_switch_stacks ! No, then do not switch stacks 465 466 sethi %hi(SYM(_CPU_Interrupt_stack_high)), %g4 467 ld [%g4 + %lo(SYM(_CPU_Interrupt_stack_high))], %sp 468 469 dont_switch_stacks: 470 /* 471 * Make sure we have a place on the stack for the window overflow 472 * trap handler to write into. At this point it is safe to 473 * enable traps again. 474 */ 475 476 sub %sp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp 477 478 wr %l0, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS **** 479 480 /* 481 * Vector to user's handler. 
482 * 483 * NOTE: TBR may no longer have vector number in it since 484 * we just enabled traps. It is definitely in l3. 485 */ 486 487 sethi %hi(SYM(_ISR_Vector_table)), %g4 488 or %g4, %lo(SYM(_ISR_Vector_table)), %g4 489 and %l3, 0xFF, %g5 ! remove synchronous trap indicator 490 sll %g5, 2, %g5 ! g5 = offset into table 491 ld [%g4 + %g5], %g4 ! g4 = _ISR_Vector_table[ vector ] 492 493 494 ! o1 = 2nd arg = address of the ISF 495 ! WAS LOADED WHEN ISF WAS SAVED!!! 496 mov %l3, %o0 ! o0 = 1st arg = vector number 497 call %g4, 0 498 nop ! delay slot 499 500 /* 501 * Redisable traps so we can finish up the interrupt processing. 502 * This is a VERY conservative place to do this. 503 * 504 * NOTE: %l0 has the PSR which was in place when we took the trap. 505 */ 506 507 mov %l0, %psr ! **** DISABLE TRAPS **** 508 509 /* 510 * Decrement ISR nest level and Thread dispatch disable level. 511 * 512 * Register usage for this section: 513 * 514 * l4 = _Thread_Dispatch_disable_level pointer 515 * l5 = _ISR_Nest_level pointer 516 * l6 = _Thread_Dispatch_disable_level value 517 * l7 = _ISR_Nest_level value 518 */ 519 520 sub %l6, 1, %l6 521 st %l6, [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))] 522 523 st %l7, [%l5 + %lo(SYM(_ISR_Nest_level))] 524 525 /* 526 * If dispatching is disabled (includes nested interrupt case), 527 * then do a "simple" exit. 528 */ 529 530 orcc %l6, %g0, %g0 ! Is dispatching disabled? 531 bnz simple_return ! Yes, then do a "simple" exit 532 nop ! delay slot 533 534 /* 535 * If a context switch is necessary, then do fudge stack to 536 * return to the interrupt dispatcher. 537 */ 538 539 sethi %hi(SYM(_Context_Switch_necessary)), %l4 540 ld [%l4 + %lo(SYM(_Context_Switch_necessary))], %l5 541 542 orcc %l5, %g0, %g0 ! Is thread switch necessary? 543 bnz SYM(_ISR_Dispatch) ! yes, then invoke the dispatcher 544 nop ! delay slot 545 546 /* 547 * Finally, check to see if signals were sent to the currently 548 * executing task. 
If so, we need to invoke the interrupt dispatcher. 549 */ 550 551 sethi %hi(SYM(_ISR_Signals_to_thread_executing)), %l6 552 ld [%l6 + %lo(SYM(_ISR_Signals_to_thread_executing))], %l7 553 554 orcc %l7, %g0, %g0 ! Were signals sent to the currently 555 ! executing thread? 556 bz simple_return ! yes, then invoke the dispatcher 557 nop ! delay slot 558 559 /* 560 * Invoke interrupt dispatcher. 561 */ 562 563 PUBLIC(_ISR_Dispatch) 564 SYM(_ISR_Dispatch): 565 566 /* 567 * The following subtract should get us back on the interrupted 568 * tasks stack and add enough room to invoke the dispatcher. 569 * When we enable traps, we are mostly back in the context 570 * of the task and subsequent interrupts can operate normally. 571 */ 572 573 sub %fp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp 574 575 or %l0, SPARC_PSR_ET_MASK, %l7 ! l7 = PSR with ET=1 576 mov %l7, %psr ! **** ENABLE TRAPS **** 577 nop 578 nop 579 nop 580 581 call SYM(_Thread_Dispatch), 0 582 nop 583 584 /* 585 * The CWP in place at this point may be different from 586 * that which was in effect at the beginning of the ISR if we 587 * have been context switched between the beginning of this invocation 588 * of _ISR_Handler and this point. Thus the CWP and WIM should 589 * not be changed back to their values at ISR entry time. Any 590 * changes to the PSR must preserve the CWP. 591 */ 592 593 simple_return: 594 ld [%fp + ISF_Y_OFFSET], %l5 ! restore y 595 wr %l5, 0, %y 596 597 ldd [%fp + ISF_PSR_OFFSET], %l0 ! restore psr, PC 598 ld [%fp + ISF_NPC_OFFSET], %l2 ! restore nPC 599 rd %psr, %l3 600 and %l3, SPARC_PSR_CWP_MASK, %l3 ! want "current" CWP 601 andn %l0, SPARC_PSR_CWP_MASK, %l0 ! want rest from task 602 or %l3, %l0, %l0 ! install it later... 603 andn %l0, SPARC_PSR_ET_MASK, %l0 604 605 /* 606 * Restore tasks global and out registers 607 */ 608 609 mov %fp, %g1 610 611 ! g1 is restored later 612 ldd [%fp + ISF_G2_OFFSET], %g2 ! restore g2, g3 613 ldd [%fp + ISF_G4_OFFSET], %g4 ! 
restore g4, g5 614 ldd [%fp + ISF_G6_OFFSET], %g6 ! restore g6, g7 615 616 ldd [%fp + ISF_I0_OFFSET], %i0 ! restore i0, i1 617 ldd [%fp + ISF_I2_OFFSET], %i2 ! restore i2, i3 618 ldd [%fp + ISF_I4_OFFSET], %i4 ! restore i4, i5 619 ldd [%fp + ISF_I6_FP_OFFSET], %i6 ! restore i6/fp, i7 620 621 /* 622 * Registers: 623 * 624 * ALL global registers EXCEPT G1 and the input registers have 625 * already been restored and thuse off limits. 626 * 627 * The following is the contents of the local registers: 628 * 629 * l0 = original psr 630 * l1 = return address (i.e. PC) 631 * l2 = nPC 632 * l3 = CWP 633 */ 634 635 /* 636 * if (CWP + 1) is an invalid window then we need to reload it. 637 * 638 * WARNING: Traps should now be disabled 639 */ 640 641 mov %l0, %psr ! **** DISABLE TRAPS **** 642 nop 643 nop 644 nop 645 rd %wim, %l4 646 add %l0, 1, %l6 ! l6 = cwp + 1 647 and %l6, SPARC_PSR_CWP_MASK, %l6 ! do the modulo on it 648 srl %l4, %l6, %l5 ! l5 = win >> cwp + 1 ; shift count 649 ! and CWP are conveniently LS 5 bits 650 cmp %l5, 1 ! Is tasks window invalid? 651 bne good_task_window 652 653 /* 654 * The following code is the same as a 1 position left rotate of WIM. 655 */ 656 657 sll %l4, 1, %l5 ! l5 = WIM << 1 658 srl %l4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %l4 659 ! l4 = WIM >> (Number Windows - 1) 660 or %l4, %l5, %l4 ! l4 = (WIM << 1) | 661 ! (WIM >> (Number Windows - 1)) 662 663 /* 664 * Now restore the window just as if we underflowed to it. 665 */ 666 667 wr %l4, 0, %wim ! WIM = new WIM 668 restore ! now into the tasks window 669 670 ldd [%g1 + CPU_STACK_FRAME_L0_OFFSET], %l0 671 ldd [%g1 + CPU_STACK_FRAME_L2_OFFSET], %l2 672 ldd [%g1 + CPU_STACK_FRAME_L4_OFFSET], %l4 673 ldd [%g1 + CPU_STACK_FRAME_L6_OFFSET], %l6 674 ldd [%g1 + CPU_STACK_FRAME_I0_OFFSET], %i0 675 ldd [%g1 + CPU_STACK_FRAME_I2_OFFSET], %i2 676 ldd [%g1 + CPU_STACK_FRAME_I4_OFFSET], %i4 677 ldd [%g1 + CPU_STACK_FRAME_I6_FP_OFFSET], %i6 678 ! reload of sp clobbers ISF 679 save ! 
Back to ISR dispatch window 680 681 good_task_window: 682 683 mov %l0, %psr ! **** DISABLE TRAPS **** 684 ! and restore condition codes. 685 ld [%g1 + ISF_G1_OFFSET], %g1 ! restore g1 686 jmp %l1 ! transfer control and 687 rett %l2 ! go back to tasks window 688 689 /* end of file */ -
c/src/exec/score/cpu/sparc/rtems.s
rea74482 r9700578 23 23 * 24 24 * void RTEMS() 25 * {26 * }27 25 */ 28 26 … … 30 28 PUBLIC(RTEMS) 31 29 SYM(RTEMS): 32 ret 30 /* 31 * g2 was chosen because gcc uses it as a scratch register in 32 * similar code scenarios and the other locals, ins, and outs 33 * are off limits to this routine unless it does a "save" and 34 * copies its in registers to the outs which only works up until 35 * 6 parameters. Best to take the simple approach in this case. 36 */ 37 sethi SYM(_Entry_points), %g2 38 or %g2, %lo(SYM(_Entry_points)), %g2 39 sll %g1, 2, %g1 40 add %g1, %g2, %g2 41 jmp %g2 42 nop 33 43 -
c/src/exec/score/cpu/sparc/sparc.h
rea74482 r9700578 1 1 /* sparc.h 2 2 * 3 * This include file contains information pertaining to the Motorola4 * SPARCprocessor family.3 * This include file contains information pertaining to the SPARC 4 * processor family. 5 5 * 6 6 * $Id$ … … 38 38 /* 39 39 * This file contains the information required to build 40 * RTEMS for a particular member of the "sparc" 41 * family when executing in protected mode. It does 40 * RTEMS for a particular member of the "sparc" family. It does 42 41 * this by setting variables to indicate which implementation 43 42 * dependent features are present in a particular member … … 52 51 * + SPARC_HAS_BITSCAN 53 52 * 0 - does not have scan instructions 54 * 1 - has scan instruction (no supportimplemented)53 * 1 - has scan instruction (not currently implemented) 55 54 * 55 * + SPARC_NUMBER_OF_REGISTER_WINDOWS 56 * 8 is the most common number supported by SPARC implementations. 57 * SPARC_PSR_CWP_MASK is derived from this value. 58 * 59 * + SPARC_HAS_LOW_POWER_MODE 60 * 0 - does not have low power mode support (or not supported) 61 * 1 - has low power mode and thus a CPU model dependent idle task. 62 * 56 63 */ 57 64 58 65 #if defined(erc32) 59 66 60 #define CPU_MODEL_NAME "erc32" 61 #define SPARC_HAS_FPU 1 62 #define SPARC_HAS_BITSCAN 0 67 #define CPU_MODEL_NAME "erc32" 68 #define SPARC_HAS_FPU 1 69 #define SPARC_HAS_BITSCAN 0 70 #define SPARC_NUMBER_OF_REGISTER_WINDOWS 8 71 #define SPARC_HAS_LOW_POWER_MODE 1 63 72 64 73 #else … … 75 84 76 85 /* 86 * Miscellaneous constants 87 */ 88 89 /* 90 * PSR masks and starting bit positions 91 * 92 * NOTE: Reserved bits are ignored. 
93 */ 94 95 #if (SPARC_NUMBER_OF_REGISTER_WINDOWS == 8) 96 #define SPARC_PSR_CWP_MASK 0x07 /* bits 0 - 4 */ 97 #elif (SPARC_NUMBER_OF_REGISTER_WINDOWS == 16) 98 #define SPARC_PSR_CWP_MASK 0x0F /* bits 0 - 4 */ 99 #elif (SPARC_NUMBER_OF_REGISTER_WINDOWS == 32) 100 #define SPARC_PSR_CWP_MASK 0x1F /* bits 0 - 4 */ 101 #else 102 #error "Unsupported number of register windows for this cpu" 103 #endif 104 105 #define SPARC_PSR_ET_MASK 0x00000020 /* bit 5 */ 106 #define SPARC_PSR_PS_MASK 0x00000040 /* bit 6 */ 107 #define SPARC_PSR_S_MASK 0x00000080 /* bit 7 */ 108 #define SPARC_PSR_PIL_MASK 0x00000F00 /* bits 8 - 11 */ 109 #define SPARC_PSR_EF_MASK 0x00001000 /* bit 12 */ 110 #define SPARC_PSR_EC_MASK 0x00002000 /* bit 13 */ 111 #define SPARC_PSR_ICC_MASK 0x00F00000 /* bits 20 - 23 */ 112 #define SPARC_PSR_VER_MASK 0x0F000000 /* bits 24 - 27 */ 113 #define SPARC_PSR_IMPL_MASK 0xF0000000 /* bits 28 - 31 */ 114 115 #define SPARC_PSR_CWP_BIT_POSITION 0 /* bits 0 - 4 */ 116 #define SPARC_PSR_ET_BIT_POSITION 5 /* bit 5 */ 117 #define SPARC_PSR_PS_BIT_POSITION 6 /* bit 6 */ 118 #define SPARC_PSR_S_BIT_POSITION 7 /* bit 7 */ 119 #define SPARC_PSR_PIL_BIT_POSITION 8 /* bits 8 - 11 */ 120 #define SPARC_PSR_EF_BIT_POSITION 12 /* bit 12 */ 121 #define SPARC_PSR_EC_BIT_POSITION 13 /* bit 13 */ 122 #define SPARC_PSR_ICC_BIT_POSITION 20 /* bits 20 - 23 */ 123 #define SPARC_PSR_VER_BIT_POSITION 24 /* bits 24 - 27 */ 124 #define SPARC_PSR_IMPL_BIT_POSITION 28 /* bits 28 - 31 */ 125 126 #ifndef ASM 127 128 /* 77 129 * Standard nop 78 130 */ … … 84 136 85 137 /* 86 * Some macros to aid in accessing special registers.138 * Get and set the PSR 87 139 */ 88 140 … … 95 147 #define sparc_set_psr( _psr ) \ 96 148 do { \ 97 asm volatile ( "wr %%g0,%0,%%psr " : "=r" ((_psr)) : "0" ((_psr)) ); \ 98 nop(); nop(); nop(); \ 99 } while ( 0 ) 149 asm volatile ( "mov %0, %%psr " : "=r" ((_psr)) : "0" ((_psr)) ); \ 150 nop(); \ 151 nop(); \ 152 nop(); \ 153 } while ( 0 ) 154 155 /* 156 * Get and set the 
TBR 157 */ 100 158 101 159 #define sparc_get_tbr( _tbr ) \ 102 160 do { \ 161 (_tbr) = 0; /* to avoid uninitialized warnings */ \ 103 162 asm volatile( "rd %%tbr, %0" : "=r" (_tbr) : "0" (_tbr) ); \ 104 163 } while ( 0 ) … … 106 165 #define sparc_set_tbr( _tbr ) \ 107 166 do { \ 108 167 asm volatile( "wr %0, 0, %%tbr" : "=r" (_tbr) : "0" (_tbr) ); \ 168 } while ( 0 ) 169 170 /* 171 * Get and set the WIM 172 */ 109 173 110 174 #define sparc_get_wim( _wim ) \ 111 175 do { \ 112 176 asm volatile( "rd %%wim, %0" : "=r" (_wim) : "0" (_wim) ); \ 113 177 } while ( 0 ) 114 178 115 179 #define sparc_set_wim( _wim ) \ 116 180 do { \ 181 asm volatile( "wr %0, %%wim" : "=r" (_wim) : "0" (_wim) ); \ 182 nop(); \ 183 nop(); \ 184 nop(); \ 185 } while ( 0 ) 186 187 /* 188 * Get and set the Y 189 */ 190 191 #define sparc_get_y( _y ) \ 192 do { \ 193 asm volatile( "rd %%y, %0" : "=r" (_y) : "0" (_y) ); \ 194 } while ( 0 ) 195 196 #define sparc_set_y( _y ) \ 197 do { \ 198 asm volatile( "wr %0, %%y" : "=r" (_y) : "0" (_y) ); \ 117 199 } while ( 0 ) 118 200 … … 122 204 */ 123 205 124 #define SPARC_PIL_MASK 0x00000F00125 126 206 #define sparc_disable_interrupts( _level ) \ 127 do { register unsigned int _mask = SPARC_PIL_MASK; \ 128 (_level) = 0; \ 129 \ 130 asm volatile ( "rd %%psr,%0 ; \ 131 wr %0,%1,%%psr " \ 132 : "=r" ((_level)), "=r" (_mask) \ 133 : "0" ((_level)), "1" (_mask) \ 134 ); \ 135 nop(); nop(); nop(); \ 207 do { \ 208 register unsigned int _newlevel; \ 209 \ 210 sparc_get_psr( _level ); \ 211 (_newlevel) = (_level) | SPARC_PSR_PIL_MASK; \ 212 sparc_set_psr( _newlevel ); \ 136 213 } while ( 0 ) 137 214 138 215 #define sparc_enable_interrupts( _level ) \ 139 do { unsigned int _tmp; \ 216 do { \ 217 unsigned int _tmp; \ 218 \ 140 219 sparc_get_psr( _tmp ); \ 141 _tmp &= ~SPARC_P IL_MASK; \142 _tmp |= (_level) & SPARC_P IL_MASK; \220 _tmp &= ~SPARC_PSR_PIL_MASK; \ 221 _tmp |= (_level) & SPARC_PSR_PIL_MASK; \ 143 222 sparc_set_psr( _tmp ); \ 144 223 } while ( 0 
) 145 224 146 147 225 #define sparc_flash_interrupts( _level ) \ 148 226 do { \ 149 register unsigned32 _ignored = 0; \ 150 sparc_enable_interrupts( (_level) ); \ 151 sparc_disable_interrupts( _ignored ); \ 227 register unsigned32 _ignored = 0; \ 228 \ 229 sparc_enable_interrupts( (_level) ); \ 230 sparc_disable_interrupts( _ignored ); \ 152 231 } while ( 0 ) 153 232 154 233 #define sparc_set_interrupt_level( _new_level ) \ 155 do { register unsigned32 _new_psr_level = 0; \ 234 do { \ 235 register unsigned32 _new_psr_level = 0; \ 156 236 \ 157 237 sparc_get_psr( _new_psr_level ); \ 158 _new_psr_level &= ~SPARC_PIL_MASK; \ 159 _new_psr_level |= (((_new_level) << 8) & SPARC_PIL_MASK); \ 238 _new_psr_level &= ~SPARC_PSR_PIL_MASK; \ 239 _new_psr_level |= \ 240 (((_new_level) << SPARC_PSR_PIL_BIT_POSITION) & SPARC_PSR_PIL_MASK); \ 160 241 sparc_set_psr( _new_psr_level ); \ 161 242 } while ( 0 ) … … 166 247 \ 167 248 sparc_get_psr( _psr_level ); \ 168 (_level) = (_psr_level & SPARC_PIL_MASK) >> 8; \ 169 } while ( 0 ) 249 (_level) = \ 250 (_psr_level & SPARC_PSR_PIL_MASK) >> SPARC_PSR_PIL_BIT_POSITION; \ 251 } while ( 0 ) 252 253 #endif 170 254 171 255 #ifdef __cplusplus -
c/src/exec/score/cpu/sparc/sparctypes.h
rea74482 r9700578 1 1 /* sparctypes.h 2 2 * 3 * This include file contains type definitions pertaining to the Intel3 * This include file contains type definitions pertaining to the 4 4 * SPARC processor family. 5 5 * … … 20 20 */ 21 21 22 typedef unsigned char unsigned8;/* unsigned 8-bit integer */23 typedef unsigned short unsigned16;/* unsigned 16-bit integer */24 typedef unsigned int unsigned32;/* unsigned 32-bit integer */25 typedef unsigned long long unsigned64; /* unsigned 64-bit integer */22 typedef unsigned char unsigned8; /* unsigned 8-bit integer */ 23 typedef unsigned short unsigned16; /* unsigned 16-bit integer */ 24 typedef unsigned int unsigned32; /* unsigned 32-bit integer */ 25 typedef unsigned long long unsigned64; /* unsigned 64-bit integer */ 26 26 27 typedef unsigned16 Priority_Bit_map_control;27 typedef unsigned16 Priority_Bit_map_control; 28 28 29 typedef signed char signed8;/* 8-bit signed integer */30 typedef signed short signed16;/* 16-bit signed integer */31 typedef signed int signed32;/* 32-bit signed integer */32 typedef signed long long signed64;/* 64 bit signed integer */29 typedef signed char signed8; /* 8-bit signed integer */ 30 typedef signed short signed16; /* 16-bit signed integer */ 31 typedef signed int signed32; /* 32-bit signed integer */ 32 typedef signed long long signed64; /* 64 bit signed integer */ 33 33 34 typedef unsigned32 boolean;/* Boolean value */34 typedef unsigned32 boolean; /* Boolean value */ 35 35 36 typedef float single_precision; /* single precision float */37 typedef double double_precision; /* double precision float */36 typedef float single_precision; /* single precision float */ 37 typedef double double_precision; /* double precision float */ 38 38 39 39 typedef void sparc_isr; -
c/src/exec/score/cpu/unix/cpu.c
rea74482 r9700578 342 342 * _CPU_Internal_threads_Idle_thread_body 343 343 * 344 * NOTES: 345 * 346 * 1. This is the same as the regular CPU independent algorithm. 347 * 348 * 2. If you implement this using a "halt", "idle", or "shutdown" 349 * instruction, then don't forget to put it in an infinite loop. 350 * 351 * 3. Be warned. Some processors with onboard DMA have been known 352 * to stop the DMA if the CPU were put in IDLE mode. This might 353 * also be a problem with other on-chip peripherals. So use this 354 * hook with caution. 344 * Stop until we get a signal which is logically the same thing as 345 * entering low-power or sleep mode on a real processor and waiting for 346 * an interrupt. This significantly reduces the consumption of host 347 * CPU cycles which is again similar to low power mode. 355 348 */ 356 349 … … 371 364 unsigned32 _size, 372 365 unsigned32 _new_level, 373 void *_entry_point 366 void *_entry_point, 367 boolean _is_fp 374 368 ) 375 369 { … … 698 692 } 699 693 700 /*PAGE 701 * 702 * _CPU_ffs 703 */ 704 705 int _CPU_ffs(unsigned32 value) 706 { 707 int output; 708 extern int ffs( int ); 709 710 output = ffs(value); 711 output = output - 1; 712 713 return output; 714 } 715 716 717 694 /* 718 695 * Special Purpose Routines to hide the use of UNIX system calls. 719 696 */ 720 697 721 #if 0 722 /* XXX clock had this set of #define's */ 723 724 /* 725 * In order to get the types and prototypes used in this file under 726 * Solaris 2.3, it is necessary to pull the following magic. 727 */ 728 729 #if defined(solaris) 730 #warning "Ignore the undefining __STDC__ warning" 731 #undef __STDC__ 732 #define __STDC__ 0 733 #undef _POSIX_C_SOURCE 734 #endif 735 #endif 736 737 698 int _CPU_Get_clock_vector( void ) 738 699 { 739 700 return SIGALRM; 740 701 } 741 742 702 743 703 void _CPU_Start_clock( -
c/src/exec/score/cpu/unix/cpu.h
rea74482 r9700578 550 550 */ 551 551 552 #define CPU_INTERRUPT_NUMBER_OF_VECTORS 64 552 #define CPU_INTERRUPT_NUMBER_OF_VECTORS 64 553 #define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1) 553 554 554 555 /* … … 722 723 unsigned32 _size, 723 724 unsigned32 _new_level, 724 void *_entry_point 725 void *_entry_point, 726 boolean _is_fp 725 727 ); 726 728 … … 758 760 * RTEMS guarantees that (1) will never happen so it is not a concern. 759 761 * (2),(3), (4) are handled by the macros _CPU_Priority_mask() and 760 * _CPU_Priority_ Bits_index(). These three form a set of routines762 * _CPU_Priority_bits_index(). These three form a set of routines 761 763 * which must logically operate together. Bits in the _value are 762 764 * set and cleared based on masks built by _CPU_Priority_mask(). 763 765 * The basic major and minor values calculated by _Priority_Major() 764 * and _Priority_Minor() are "massaged" by _CPU_Priority_ Bits_index()766 * and _Priority_Minor() are "massaged" by _CPU_Priority_bits_index() 765 767 * to properly range between the values returned by the "find first bit" 766 768 * instruction. This makes it possible for _Priority_Get_highest() to … … 797 799 */ 798 800 799 #define _CPU_Bitfield_Find_first_bit( _value, _output ) \ 800 _output = _CPU_ffs( _value ) 801 801 /* 802 * The UNIX port uses the generic C algorithm for bitfield scan to avoid 803 * dependencies on either a native bitscan instruction or an ffs() in the 804 * C library. 805 */ 806 807 #define CPU_USE_GENERIC_BITFIELD_CODE TRUE 808 #define CPU_USE_GENERIC_BITFIELD_DATA TRUE 809 802 810 /* end of Bitfield handler macros */ 803 804 /* 805 * This routine builds the mask which corresponds to the bit fields 806 * as searched by _CPU_Bitfield_Find_first_bit(). See the discussion 807 * for that routine. 
808 */ 809 810 #define _CPU_Priority_Mask( _bit_number ) \ 811 ( 1 << (_bit_number) ) 812 813 /* 814 * This routine translates the bit numbers returned by 815 * _CPU_Bitfield_Find_first_bit() into something suitable for use as 816 * a major or minor component of a priority. See the discussion 817 * for that routine. 818 */ 819 820 #define _CPU_Priority_Bits_index( _priority ) \ 821 (_priority) 822 811 812 /* Priority handler macros */ 813 814 /* 815 * The UNIX port uses the generic C algorithm for bitfield scan to avoid 816 * dependencies on either a native bitscan instruction or an ffs() in the 817 * C library. 818 */ 819 823 820 /* end of Priority handler macros */ 824 821 … … 934 931 void _CPU_Fatal_error( 935 932 unsigned32 _error 936 );937 938 int _CPU_ffs(939 unsigned32 _value940 933 ); 941 934 -
c/src/exec/score/headers/bitfield.h
rea74482 r9700578 39 39 */ 40 40 41 #if ( CPU_USE_GENERIC_BITFIELD_DATA == TRUE ) 42 43 #ifndef INIT 44 extern const unsigned char __log2table[256]; 45 #else 46 const unsigned char __log2table[256] = { 47 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 48 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 49 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 50 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 51 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 52 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 53 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 54 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 55 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 56 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 57 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 58 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 59 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 60 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 61 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 62 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 63 }; 64 #endif 65 66 #endif 67 68 #if ( CPU_USE_GENERIC_BITFIELD_CODE == FALSE ) 69 41 70 #define _Bitfield_Find_first_bit( _value, _bit_number ) \ 42 71 _CPU_Bitfield_Find_first_bit( _value, _bit_number ) 72 73 #else 74 75 /* 76 * The following must be a macro because if a CPU specific version 77 * is used it will most likely use inline assembly. 78 */ 79 80 #define _Bitfield_Find_first_bit( _value, _bit_number ) \ 81 { \ 82 register __value = (_value); \ 83 register const unsigned char *__p = __log2table; \ 84 \ 85 if ( __value < 0x100 ) \ 86 (_bit_number) = __p[ __value ] + 8; \ 87 else \ 88 (_bit_number) = __p[ __value >> 8 ]; \ 89 } 90 91 #endif 43 92 44 93 #ifdef __cplusplus -
c/src/exec/score/headers/context.h
rea74482 r9700578 48 48 */ 49 49 50 #define _Context_Initialize( _the_context, _stack, _size, _isr, _entry ) \ 51 _CPU_Context_Initialize( _the_context, _stack, _size, _isr, _entry ) 50 #define \ 51 _Context_Initialize( _the_context, _stack, _size, _isr, _entry, _is_fp ) \ 52 _CPU_Context_Initialize( _the_context, _stack, _size, _isr, _entry, _is_fp ) 52 53 53 54 /* -
c/src/exec/score/headers/isr.h
rea74482 r9700578 51 51 ); 52 52 /* 53 * This constant promotes out the number of vectors supported by 54 * the current CPU being used. 53 * This constant promotes out the number of vectors truly supported by 54 * the current CPU being used. This is usually the number of distinct vectors 55 * the cpu can vector. 55 56 */ 56 57 57 #define ISR_NUMBER_OF_VECTORS CPU_INTERRUPT_NUMBER_OF_VECTORS 58 #define ISR_NUMBER_OF_VECTORS CPU_INTERRUPT_NUMBER_OF_VECTORS 59 60 /* 61 * This constant promotes out the highest valid interrupt vector number. 62 */ 63 64 #define ISR_INTERRUPT_MAXIMUM_VECTOR_NUMBER CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER 58 65 59 66 /* … … 76 83 */ 77 84 78 EXTERN ISR_Handler_entry _ISR_Vector_table[ CPU_INTERRUPT_NUMBER_OF_VECTORS];85 EXTERN ISR_Handler_entry _ISR_Vector_table[ ISR_NUMBER_OF_VECTORS ]; 79 86 80 87 /* -
c/src/exec/score/headers/priority.h
rea74482 r9700578 115 115 116 116 /* 117 * _Priority_Mask 118 * 119 * DESCRIPTION: 120 * 121 * This function returns the mask associated with the major or minor 122 * number passed to it. 123 */ 124 125 #if ( CPU_USE_GENERIC_BITFIELD_CODE == TRUE ) 126 127 STATIC INLINE unsigned32 _Priority_Mask ( 128 unsigned32 bit_number 129 ); 130 131 #else 132 133 #define _Priority_Mask( _bit_number ) \ 134 _CPU_Priority_Mask( _bit_number ) 135 136 #endif 137 138 /* 139 * _Priority_Bits_index 140 * 141 * DESCRIPTION: 142 * 143 * This function translates the bit numbers returned by the bit scan 144 * of a priority bit field into something suitable for use as 145 * a major or minor component of a priority. 146 */ 147 148 #if ( CPU_USE_GENERIC_BITFIELD_CODE == TRUE ) 149 150 STATIC INLINE unsigned32 _Priority_Bits_index ( 151 unsigned32 bit_number 152 ); 153 154 #else 155 156 #define _Priority_Bits_index( _priority ) \ 157 _CPU_Priority_bits_index( _priority ) 158 159 #endif 160 161 /* 117 162 * _Priority_Add_to_bit_map 118 163 * -
c/src/exec/score/include/rtems/score/bitfield.h
rea74482 r9700578 39 39 */ 40 40 41 #if ( CPU_USE_GENERIC_BITFIELD_DATA == TRUE ) 42 43 #ifndef INIT 44 extern const unsigned char __log2table[256]; 45 #else 46 const unsigned char __log2table[256] = { 47 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 48 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 49 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 50 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 51 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 52 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 53 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 54 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 55 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 56 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 57 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 58 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 59 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 60 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 61 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 62 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 63 }; 64 #endif 65 66 #endif 67 68 #if ( CPU_USE_GENERIC_BITFIELD_CODE == FALSE ) 69 41 70 #define _Bitfield_Find_first_bit( _value, _bit_number ) \ 42 71 _CPU_Bitfield_Find_first_bit( _value, _bit_number ) 72 73 #else 74 75 /* 76 * The following must be a macro because if a CPU specific version 77 * is used it will most likely use inline assembly. 78 */ 79 80 #define _Bitfield_Find_first_bit( _value, _bit_number ) \ 81 { \ 82 register __value = (_value); \ 83 register const unsigned char *__p = __log2table; \ 84 \ 85 if ( __value < 0x100 ) \ 86 (_bit_number) = __p[ __value ] + 8; \ 87 else \ 88 (_bit_number) = __p[ __value >> 8 ]; \ 89 } 90 91 #endif 43 92 44 93 #ifdef __cplusplus -
c/src/exec/score/include/rtems/score/context.h
rea74482 r9700578 48 48 */ 49 49 50 #define _Context_Initialize( _the_context, _stack, _size, _isr, _entry ) \ 51 _CPU_Context_Initialize( _the_context, _stack, _size, _isr, _entry ) 50 #define \ 51 _Context_Initialize( _the_context, _stack, _size, _isr, _entry, _is_fp ) \ 52 _CPU_Context_Initialize( _the_context, _stack, _size, _isr, _entry, _is_fp ) 52 53 53 54 /* -
c/src/exec/score/include/rtems/score/isr.h
rea74482 r9700578 51 51 ); 52 52 /* 53 * This constant promotes out the number of vectors supported by 54 * the current CPU being used. 53 * This constant promotes out the number of vectors truly supported by 54 * the current CPU being used. This is usually the number of distinct vectors 55 * the cpu can vector. 55 56 */ 56 57 57 #define ISR_NUMBER_OF_VECTORS CPU_INTERRUPT_NUMBER_OF_VECTORS 58 #define ISR_NUMBER_OF_VECTORS CPU_INTERRUPT_NUMBER_OF_VECTORS 59 60 /* 61 * This constant promotes out the highest valid interrupt vector number. 62 */ 63 64 #define ISR_INTERRUPT_MAXIMUM_VECTOR_NUMBER CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER 58 65 59 66 /* … … 76 83 */ 77 84 78 EXTERN ISR_Handler_entry _ISR_Vector_table[ CPU_INTERRUPT_NUMBER_OF_VECTORS];85 EXTERN ISR_Handler_entry _ISR_Vector_table[ ISR_NUMBER_OF_VECTORS ]; 79 86 80 87 /* -
c/src/exec/score/include/rtems/score/priority.h
rea74482 r9700578 115 115 116 116 /* 117 * _Priority_Mask 118 * 119 * DESCRIPTION: 120 * 121 * This function returns the mask associated with the major or minor 122 * number passed to it. 123 */ 124 125 #if ( CPU_USE_GENERIC_BITFIELD_CODE == TRUE ) 126 127 STATIC INLINE unsigned32 _Priority_Mask ( 128 unsigned32 bit_number 129 ); 130 131 #else 132 133 #define _Priority_Mask( _bit_number ) \ 134 _CPU_Priority_Mask( _bit_number ) 135 136 #endif 137 138 /* 139 * _Priority_Bits_index 140 * 141 * DESCRIPTION: 142 * 143 * This function translates the bit numbers returned by the bit scan 144 * of a priority bit field into something suitable for use as 145 * a major or minor component of a priority. 146 */ 147 148 #if ( CPU_USE_GENERIC_BITFIELD_CODE == TRUE ) 149 150 STATIC INLINE unsigned32 _Priority_Bits_index ( 151 unsigned32 bit_number 152 ); 153 154 #else 155 156 #define _Priority_Bits_index( _priority ) \ 157 _CPU_Priority_bits_index( _priority ) 158 159 #endif 160 161 /* 117 162 * _Priority_Add_to_bit_map 118 163 * -
c/src/exec/score/inline/isr.inl
rea74482 r9700578 39 39 ) 40 40 { 41 return ( vector < CPU_INTERRUPT_NUMBER_OF_VECTORS);41 return ( vector <= CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER ); 42 42 } 43 43 -
c/src/exec/score/inline/priority.inl
rea74482 r9700578 79 79 } 80 80 81 #if ( CPU_USE_GENERIC_BITFIELD_CODE == TRUE ) 82 83 /*PAGE 84 * 85 * _Priority_Mask 86 * 87 */ 88 89 STATIC INLINE unsigned32 _Priority_Mask ( 90 unsigned32 bit_number 91 ) 92 { 93 return (0x8000 >> bit_number); 94 } 95 96 97 /*PAGE 98 * 99 * _Priority_Bits_index 100 * 101 */ 102 103 STATIC INLINE unsigned32 _Priority_Bits_index ( 104 unsigned32 bit_number 105 ) 106 { 107 return bit_number; 108 } 109 110 #endif 111 81 112 /*PAGE 82 113 * … … 122 153 _Bitfield_Find_first_bit( _Priority_Bit_map[major], minor ); 123 154 124 return (_ CPU_Priority_Bits_index( major ) << 4) +125 _ CPU_Priority_Bits_index( minor );155 return (_Priority_Bits_index( major ) << 4) + 156 _Priority_Bits_index( minor ); 126 157 } 127 158 … … 145 176 146 177 the_priority_map->minor = 147 &_Priority_Bit_map[ _ CPU_Priority_Bits_index(major) ];148 149 mask = _ CPU_Priority_Mask( major );178 &_Priority_Bit_map[ _Priority_Bits_index(major) ]; 179 180 mask = _Priority_Mask( major ); 150 181 the_priority_map->ready_major = mask; 151 182 the_priority_map->block_major = ~mask; 152 183 153 mask = _ CPU_Priority_Mask( minor );184 mask = _Priority_Mask( minor ); 154 185 the_priority_map->ready_minor = mask; 155 186 the_priority_map->block_minor = ~mask; -
c/src/exec/score/inline/rtems/score/isr.inl
rea74482 r9700578 39 39 ) 40 40 { 41 return ( vector < CPU_INTERRUPT_NUMBER_OF_VECTORS);41 return ( vector <= CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER ); 42 42 } 43 43 -
c/src/exec/score/inline/rtems/score/priority.inl
rea74482 r9700578 79 79 } 80 80 81 #if ( CPU_USE_GENERIC_BITFIELD_CODE == TRUE ) 82 83 /*PAGE 84 * 85 * _Priority_Mask 86 * 87 */ 88 89 STATIC INLINE unsigned32 _Priority_Mask ( 90 unsigned32 bit_number 91 ) 92 { 93 return (0x8000 >> bit_number); 94 } 95 96 97 /*PAGE 98 * 99 * _Priority_Bits_index 100 * 101 */ 102 103 STATIC INLINE unsigned32 _Priority_Bits_index ( 104 unsigned32 bit_number 105 ) 106 { 107 return bit_number; 108 } 109 110 #endif 111 81 112 /*PAGE 82 113 * … … 122 153 _Bitfield_Find_first_bit( _Priority_Bit_map[major], minor ); 123 154 124 return (_ CPU_Priority_Bits_index( major ) << 4) +125 _ CPU_Priority_Bits_index( minor );155 return (_Priority_Bits_index( major ) << 4) + 156 _Priority_Bits_index( minor ); 126 157 } 127 158 … … 145 176 146 177 the_priority_map->minor = 147 &_Priority_Bit_map[ _ CPU_Priority_Bits_index(major) ];148 149 mask = _ CPU_Priority_Mask( major );178 &_Priority_Bit_map[ _Priority_Bits_index(major) ]; 179 180 mask = _Priority_Mask( major ); 150 181 the_priority_map->ready_major = mask; 151 182 the_priority_map->block_major = ~mask; 152 183 153 mask = _ CPU_Priority_Mask( minor );184 mask = _Priority_Mask( minor ); 154 185 the_priority_map->ready_minor = mask; 155 186 the_priority_map->block_minor = ~mask; -
c/src/exec/score/macros/isr.inl
rea74482 r9700578 34 34 35 35 #define _ISR_Is_vector_number_valid( _vector ) \ 36 ( (_vector) < CPU_INTERRUPT_NUMBER_OF_VECTORS)36 ( (_vector) <= CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER ) 37 37 38 38 /*PAGE -
c/src/exec/score/macros/priority.inl
rea74482 r9700578 65 65 #define _Priority_Minor( _the_priority ) ( (_the_priority) % 16 ) 66 66 67 #if ( CPU_USE_GENERIC_BITFIELD_CODE == TRUE ) 68 69 /*PAGE 70 * 71 * _Priority_Mask 72 * 73 */ 74 75 #define _Priority_Mask( _bit_number ) \ 76 (0x8000 >> _bit_number) 77 78 /*PAGE 79 * 80 * _Priority_Bits_index 81 * 82 */ 83 84 #define _Priority_Bits_index( _bit_number ) \ 85 (_bit_number) 86 87 #endif 88 67 89 /*PAGE 68 90 * … … 104 126 _Bitfield_Find_first_bit( _Priority_Bit_map[major], minor ); \ 105 127 \ 106 (_high_priority) = (_ CPU_Priority_Bits_index( major ) * 16) + \107 _CPU_Priority_Bits_index( minor ); \128 (_high_priority) = (_Priority_Bits_index( major ) * 16) + \ 129 _Priority_Bits_index( minor ); \ 108 130 } 109 131 … … 125 147 \ 126 148 (_the_priority_map)->minor = \ 127 &_Priority_Bit_map[ _ CPU_Priority_Bits_index(_major) ]; \149 &_Priority_Bit_map[ _Priority_Bits_index(_major) ]; \ 128 150 \ 129 _mask = _ CPU_Priority_Mask( _major ); \151 _mask = _Priority_Mask( _major ); \ 130 152 (_the_priority_map)->ready_major = _mask; \ 131 153 (_the_priority_map)->block_major = ~_mask; \ 132 154 \ 133 _mask = _ CPU_Priority_Mask( _minor ); \155 _mask = _Priority_Mask( _minor ); \ 134 156 (_the_priority_map)->ready_minor = _mask; \ 135 157 (_the_priority_map)->block_minor = ~_mask; \ -
c/src/exec/score/macros/rtems/score/isr.inl
rea74482 r9700578 34 34 35 35 #define _ISR_Is_vector_number_valid( _vector ) \ 36 ( (_vector) < CPU_INTERRUPT_NUMBER_OF_VECTORS)36 ( (_vector) <= CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER ) 37 37 38 38 /*PAGE -
c/src/exec/score/macros/rtems/score/priority.inl
rea74482 r9700578 65 65 #define _Priority_Minor( _the_priority ) ( (_the_priority) % 16 ) 66 66 67 #if ( CPU_USE_GENERIC_BITFIELD_CODE == TRUE ) 68 69 /*PAGE 70 * 71 * _Priority_Mask 72 * 73 */ 74 75 #define _Priority_Mask( _bit_number ) \ 76 (0x8000 >> _bit_number) 77 78 /*PAGE 79 * 80 * _Priority_Bits_index 81 * 82 */ 83 84 #define _Priority_Bits_index( _bit_number ) \ 85 (_bit_number) 86 87 #endif 88 67 89 /*PAGE 68 90 * … … 104 126 _Bitfield_Find_first_bit( _Priority_Bit_map[major], minor ); \ 105 127 \ 106 (_high_priority) = (_ CPU_Priority_Bits_index( major ) * 16) + \107 _CPU_Priority_Bits_index( minor ); \128 (_high_priority) = (_Priority_Bits_index( major ) * 16) + \ 129 _Priority_Bits_index( minor ); \ 108 130 } 109 131 … … 125 147 \ 126 148 (_the_priority_map)->minor = \ 127 &_Priority_Bit_map[ _ CPU_Priority_Bits_index(_major) ]; \149 &_Priority_Bit_map[ _Priority_Bits_index(_major) ]; \ 128 150 \ 129 _mask = _ CPU_Priority_Mask( _major ); \151 _mask = _Priority_Mask( _major ); \ 130 152 (_the_priority_map)->ready_major = _mask; \ 131 153 (_the_priority_map)->block_major = ~_mask; \ 132 154 \ 133 _mask = _ CPU_Priority_Mask( _minor ); \155 _mask = _Priority_Mask( _minor ); \ 134 156 (_the_priority_map)->ready_minor = _mask; \ 135 157 (_the_priority_map)->block_minor = ~_mask; \ -
c/src/exec/score/src/thread.c
rea74482 r9700578 806 806 ) 807 807 { 808 boolean is_fp = FALSE; 809 808 810 if ( the_thread->Start.fp_context ) { 809 811 the_thread->fp_context = the_thread->Start.fp_context; 810 812 _Context_Initialize_fp( &the_thread->fp_context ); 813 is_fp = TRUE; 811 814 } 812 815 … … 819 822 the_thread->Start.Initial_stack.size, 820 823 the_thread->Start.isr_level, 821 _Thread_Handler 824 _Thread_Handler, 825 is_fp 822 826 ); 823 827 -
c/src/lib/include/ringbuf.h
rea74482 r9700578 11 11 12 12 #ifndef RINGBUF_QUEUE_LENGTH 13 #define RINGBUF_QUEUE_LENGTH 20013 #define RINGBUF_QUEUE_LENGTH 128 14 14 #endif 15 15 16 16 typedef struct { 17 17 char buffer[RINGBUF_QUEUE_LENGTH]; 18 int head;19 int tail;18 volatile int head; 19 volatile int tail; 20 20 } Ring_buffer_t; 21 21 … … 28 28 ( (_buffer)->head == (_buffer)->tail ) 29 29 30 #define Ring_buffer_Is_full( _buffer ) \ 31 ( (_buffer)->head == ((_buffer)->tail + 1) % RINGBUF_QUEUE_LENGTH ) 32 30 33 #define Ring_buffer_Add_character( _buffer, _ch ) \ 31 34 do { \ 32 (_buffer)->buffer[ (_buffer)->tail ] = (_ch); \ 33 (_buffer)->tail = ((_buffer)->tail+1) % RINGBUF_QUEUE_LENGTH; \ 35 rtems_unsigned32 isrlevel; \ 36 \ 37 rtems_interrupt_disable( isrlevel ); \ 38 (_buffer)->tail = ((_buffer)->tail+1) % RINGBUF_QUEUE_LENGTH; \ 39 (_buffer)->buffer[ (_buffer)->tail ] = (_ch); \ 40 rtems_interrupt_enable( isrlevel ); \ 34 41 } while ( 0 ) 35 42 36 43 #define Ring_buffer_Remove_character( _buffer, _ch ) \ 37 44 do { \ 38 (_ch) = (_buffer)->buffer[ (_buffer)->head ]; \ 39 (_buffer)->head = ((_buffer)->head+1) % RINGBUF_QUEUE_LENGTH; \ 45 rtems_unsigned32 isrlevel; \ 46 \ 47 rtems_interrupt_disable( isrlevel ); \ 48 (_buffer)->head = ((_buffer)->head+1) % RINGBUF_QUEUE_LENGTH; \ 49 (_ch) = (_buffer)->buffer[ (_buffer)->head ]; \ 50 rtems_interrupt_enable( isrlevel ); \ 40 51 } while ( 0 ) 41 52 -
c/src/lib/libbsp/hppa1.1/simhppa/startup/bspstart.c
rea74482 r9700578 30 30 #include <bsp.h> 31 31 #include <rtems/libio.h> 32 #include <rtems/ score/intthrd.h>32 #include <rtems/intthrd.h> 33 33 34 34 #include <libcsupport.h> … … 244 244 bsp_postdriver_hook(void) 245 245 { 246 int stdin_fd, stdout_fd, stderr_fd; 247 248 if ((stdin_fd = __open("/dev/tty00", O_RDONLY, 0)) == -1) 249 rtems_fatal_error_occurred('STD0'); 250 251 if ((stdout_fd = __open("/dev/tty00", O_WRONLY, 0)) == -1) 252 rtems_fatal_error_occurred('STD1'); 253 254 if ((stderr_fd = __open("/dev/tty00", O_WRONLY, 0)) == -1) 255 rtems_fatal_error_occurred('STD2'); 256 257 if ((stdin_fd != 0) || (stdout_fd != 1) || (stderr_fd != 2)) 258 rtems_fatal_error_occurred('STIO'); 246 int stdin_fd, stdout_fd, stderr_fd; 247 int error_code; 248 249 error_code = 'S' << 24 | 'T' << 16; 250 251 if ((stdin_fd = __open("/dev/console", O_RDONLY, 0)) == -1) 252 rtems_fatal_error_occurred( error_code | 'D' << 8 | '0' ); 253 254 if ((stdout_fd = __open("/dev/console", O_WRONLY, 0)) == -1) 255 rtems_fatal_error_occurred( error_code | 'D' << 8 | '1' ); 256 257 if ((stderr_fd = __open("/dev/console", O_WRONLY, 0)) == -1) 258 rtems_fatal_error_occurred( error_code | 'D' << 8 | '2' ); 259 260 if ((stdin_fd != 0) || (stdout_fd != 1) || (stderr_fd != 2)) 261 rtems_fatal_error_occurred( error_code | 'I' << 8 | 'O' ); 259 262 } 260 263 -
c/src/lib/libbsp/i386/force386/clock/ckinit.c
rea74482 r9700578 76 76 } 77 77 78 void ReInstall_clock(79 rtems_isr_entry clock_isr80 )81 {82 rtems_unsigned32 isrlevel = 0;83 84 rtems_interrupt_disable( isrlevel );85 (void) set_vector( clock_isr, CLOCK_VECTOR, 1 );86 rtems_interrupt_enable( isrlevel );87 }88 89 78 void Clock_exit( void ) 90 79 { … … 120 109 ) 121 110 { 111 rtems_unsigned32 isrlevel; 122 112 rtems_libio_ioctl_args_t *args = pargp; 123 113 … … 136 126 else if (args->command == rtems_build_name('N', 'E', 'W', ' ')) 137 127 { 138 ReInstall_clock(args->buffer); 128 rtems_interrupt_disable( isrlevel ); 129 (void) set_vector( args->buffer, CLOCK_VECTOR, 1 ); 130 rtems_interrupt_enable( isrlevel ); 139 131 } 140 132 -
c/src/lib/libbsp/i386/force386/startup/bspstart.c
rea74482 r9700578 123 123 { 124 124 int stdin_fd, stdout_fd, stderr_fd; 125 int error_code; 126 127 error_code = 'S' << 24 | 'T' << 16; 125 128 126 129 if ((stdin_fd = __open("/dev/console", O_RDONLY, 0)) == -1) 127 rtems_fatal_error_occurred( 'STD0');130 rtems_fatal_error_occurred( error_code | 'D' << 8 | '0' ); 128 131 129 132 if ((stdout_fd = __open("/dev/console", O_WRONLY, 0)) == -1) 130 rtems_fatal_error_occurred( 'STD1');133 rtems_fatal_error_occurred( error_code | 'D' << 8 | '1' ); 131 134 132 135 if ((stderr_fd = __open("/dev/console", O_WRONLY, 0)) == -1) 133 rtems_fatal_error_occurred( 'STD2');136 rtems_fatal_error_occurred( error_code | 'D' << 8 | '2' ); 134 137 135 138 if ((stdin_fd != 0) || (stdout_fd != 1) || (stderr_fd != 2)) 136 rtems_fatal_error_occurred( 'STIO');139 rtems_fatal_error_occurred( error_code | 'I' << 8 | 'O' ); 137 140 } 138 141 -
c/src/lib/libbsp/i386/go32/clock/ckinit.c
rea74482 r9700578 109 109 } 110 110 111 void ReInstall_clock(112 rtems_isr_entry clock_isr113 )114 {115 rtems_unsigned32 isrlevel = 0;116 117 rtems_interrupt_disable( isrlevel );118 (void) set_vector( clock_isr, CLOCK_VECTOR, 1 );119 rtems_interrupt_enable( isrlevel );120 }121 122 111 void Clock_exit( void ) 123 112 { … … 162 151 ) 163 152 { 153 rtems_unsigned32 isrlevel; 164 154 rtems_libio_ioctl_args_t *args = pargp; 165 155 … … 178 168 else if (args->command == rtems_build_name('N', 'E', 'W', ' ')) 179 169 { 180 ReInstall_clock(args->buffer); 170 rtems_interrupt_disable( isrlevel ); 171 (void) set_vector( args->buffer, CLOCK_VECTOR, 1 ); 172 rtems_interrupt_enable( isrlevel ); 181 173 } 182 174 -
c/src/lib/libbsp/i386/go32/startup/bspstart.c
rea74482 r9700578 130 130 { 131 131 int stdin_fd, stdout_fd, stderr_fd; 132 int error_code; 133 134 error_code = 'S' << 24 | 'T' << 16; 132 135 133 136 if ((stdin_fd = __open("/dev/console", O_RDONLY, 0)) == -1) 134 rtems_fatal_error_occurred( 'STD0');137 rtems_fatal_error_occurred( error_code | 'D' << 8 | '0' ); 135 138 136 139 if ((stdout_fd = __open("/dev/console", O_WRONLY, 0)) == -1) 137 rtems_fatal_error_occurred( 'STD1');140 rtems_fatal_error_occurred( error_code | 'D' << 8 | '1' ); 138 141 139 142 if ((stderr_fd = __open("/dev/console", O_WRONLY, 0)) == -1) 140 rtems_fatal_error_occurred( 'STD2');143 rtems_fatal_error_occurred( error_code | 'D' << 8 | '2' ); 141 144 142 145 if ((stdin_fd != 0) || (stdout_fd != 1) || (stderr_fd != 2)) 143 rtems_fatal_error_occurred( 'STIO');146 rtems_fatal_error_occurred( error_code | 'I' << 8 | 'O' ); 144 147 } 145 148 -
c/src/lib/libbsp/i960/cvme961/clock/ckinit.c
rea74482 r9700578 61 61 } 62 62 63 void ReInstall_clock(64 rtems_isr_entry clock_isr65 )66 {67 (void) set_vector( clock_isr, CLOCK_VECTOR, 1 );68 }69 70 63 void Clock_exit() 71 64 { … … 106 99 ) 107 100 { 101 rtems_unsigned32 isrlevel; 108 102 rtems_libio_ioctl_args_t *args = pargp; 109 103 … … 122 116 else if (args->command == rtems_build_name('N', 'E', 'W', ' ')) 123 117 { 124 ReInstall_clock(args->buffer); 118 rtems_interrupt_disable( isrlevel ); 119 (void) set_vector( args->buffer, CLOCK_VECTOR, 1 ); 120 rtems_interrupt_enable( isrlevel ); 125 121 } 126 122 -
c/src/lib/libbsp/i960/cvme961/startup/bspstart.c
rea74482 r9700578 125 125 { 126 126 int stdin_fd, stdout_fd, stderr_fd; 127 int error_code; 128 129 error_code = 'S' << 24 | 'T' << 16; 127 130 128 131 if ((stdin_fd = __open("/dev/console", O_RDONLY, 0)) == -1) 129 rtems_fatal_error_occurred( 'STD0');132 rtems_fatal_error_occurred( error_code | 'D' << 8 | '0' ); 130 133 131 134 if ((stdout_fd = __open("/dev/console", O_WRONLY, 0)) == -1) 132 rtems_fatal_error_occurred( 'STD1');135 rtems_fatal_error_occurred( error_code | 'D' << 8 | '1' ); 133 136 134 137 if ((stderr_fd = __open("/dev/console", O_WRONLY, 0)) == -1) 135 rtems_fatal_error_occurred( 'STD2');138 rtems_fatal_error_occurred( error_code | 'D' << 8 | '2' ); 136 139 137 140 if ((stdin_fd != 0) || (stdout_fd != 1) || (stderr_fd != 2)) 138 rtems_fatal_error_occurred( 'STIO');141 rtems_fatal_error_occurred( error_code | 'I' << 8 | 'O' ); 139 142 } 140 143 -
c/src/lib/libbsp/m68k/dmv152/clock/ckinit.c
rea74482 r9700578 98 98 } 99 99 100 void ReInstall_clock(101 rtems_isr_entry clock_isr102 )103 {104 rtems_unsigned32 isrlevel = 0 ;105 106 rtems_interrupt_disable( isrlevel );107 (void) set_vector( clock_isr, CLOCK_VECTOR, 1 );108 rtems_interrupt_enable( isrlevel );109 }110 111 100 void Clock_exit( void ) 112 101 { … … 146 135 ) 147 136 { 137 rtems_unsigned32 isrlevel; 148 138 rtems_libio_ioctl_args_t *args = pargp; 149 139 … … 162 152 else if (args->command == rtems_build_name('N', 'E', 'W', ' ')) 163 153 { 164 ReInstall_clock(args->buffer); 154 rtems_interrupt_disable( isrlevel ); 155 (void) set_vector( args->buffer, CLOCK_VECTOR, 1 ); 156 rtems_interrupt_enable( isrlevel ); 165 157 } 166 158 -
c/src/lib/libbsp/m68k/dmv152/startup/bspstart.c
rea74482 r9700578 124 124 { 125 125 int stdin_fd, stdout_fd, stderr_fd; 126 int error_code; 127 128 error_code = 'S' << 24 | 'T' << 16; 126 129 127 130 if ((stdin_fd = __open("/dev/console", O_RDONLY, 0)) == -1) 128 rtems_fatal_error_occurred( 'STD0');131 rtems_fatal_error_occurred( error_code | 'D' << 8 | '0' ); 129 132 130 133 if ((stdout_fd = __open("/dev/console", O_WRONLY, 0)) == -1) 131 rtems_fatal_error_occurred( 'STD1');134 rtems_fatal_error_occurred( error_code | 'D' << 8 | '1' ); 132 135 133 136 if ((stderr_fd = __open("/dev/console", O_WRONLY, 0)) == -1) 134 rtems_fatal_error_occurred( 'STD2');137 rtems_fatal_error_occurred( error_code | 'D' << 8 | '2' ); 135 138 136 139 if ((stdin_fd != 0) || (stdout_fd != 1) || (stderr_fd != 2)) 137 rtems_fatal_error_occurred( 'STIO');140 rtems_fatal_error_occurred( error_code | 'I' << 8 | 'O' ); 138 141 } 139 142 -
c/src/lib/libbsp/m68k/efi332/clock/ckinit.c
rea74482 r9700578 71 71 } 72 72 73 void ReInstall_clock(74 rtems_isr_entry clock_isr75 )76 {77 rtems_unsigned32 isrlevel = 0 ;78 79 rtems_interrupt_disable( isrlevel );80 (void) set_vector( clock_isr, CLOCK_VECTOR, 1 );81 rtems_interrupt_enable( isrlevel );82 }83 84 73 void Clock_exit( void ) 85 74 { … … 121 110 ) 122 111 { 112 rtems_unsigned32 isrlevel; 123 113 rtems_libio_ioctl_args_t *args = pargp; 124 114 … … 137 127 else if (args->command == rtems_build_name('N', 'E', 'W', ' ')) 138 128 { 139 ReInstall_clock(args->buffer); 129 rtems_interrupt_disable( isrlevel ); 130 (void) set_vector( args->buffer, CLOCK_VECTOR, 1 ); 131 rtems_interrupt_enable( isrlevel ); 140 132 } 141 133 -
c/src/lib/libbsp/m68k/efi332/startup/bspstart.c
rea74482 r9700578 124 124 { 125 125 int stdin_fd, stdout_fd, stderr_fd; 126 int error_code; 127 128 error_code = 'S' << 24 | 'T' << 16; 126 129 127 130 if ((stdin_fd = __open("/dev/console", O_RDONLY, 0)) == -1) 128 rtems_fatal_error_occurred( 'STD0');131 rtems_fatal_error_occurred( error_code | 'D' << 8 | '0' ); 129 132 130 133 if ((stdout_fd = __open("/dev/console", O_WRONLY, 0)) == -1) 131 rtems_fatal_error_occurred( 'STD1');134 rtems_fatal_error_occurred( error_code | 'D' << 8 | '1' ); 132 135 133 136 if ((stderr_fd = __open("/dev/console", O_WRONLY, 0)) == -1) 134 rtems_fatal_error_occurred( 'STD2');137 rtems_fatal_error_occurred( error_code | 'D' << 8 | '2' ); 135 138 136 139 if ((stdin_fd != 0) || (stdout_fd != 1) || (stderr_fd != 2)) 137 rtems_fatal_error_occurred( 'STIO');140 rtems_fatal_error_occurred( error_code | 'I' << 8 | 'O' ); 138 141 } 139 142 -
c/src/lib/libbsp/m68k/efi68k/clock/ckinit.c
rea74482 r9700578 94 94 } 95 95 96 void ReInstall_clock(97 rtems_isr_entry clock_isr98 )99 {100 rtems_unsigned32 isrlevel = 0 ;101 102 rtems_interrupt_disable( isrlevel );103 (void) set_vector( clock_isr, CLOCK_VECTOR, 1 );104 rtems_interrupt_enable( isrlevel );105 }106 107 96 void Clock_exit( void ) 108 97 { … … 142 131 ) 143 132 { 133 rtems_unsigned32 isrlevel; 144 134 rtems_libio_ioctl_args_t *args = pargp; 145 135 … … 158 148 else if (args->command == rtems_build_name('N', 'E', 'W', ' ')) 159 149 { 160 ReInstall_clock(args->buffer); 150 rtems_interrupt_disable( isrlevel ); 151 (void) set_vector( args->buffer, CLOCK_VECTOR, 1 ); 152 rtems_interrupt_enable( isrlevel ); 161 153 } 162 154 -
c/src/lib/libbsp/m68k/efi68k/startup/bspstart.c
rea74482 r9700578 131 131 { 132 132 int stdin_fd, stdout_fd, stderr_fd; 133 int error_code; 134 135 error_code = 'S' << 24 | 'T' << 16; 133 136 134 137 if ((stdin_fd = __open("/dev/console", O_RDONLY, 0)) == -1) 135 rtems_fatal_error_occurred( 'STD0');138 rtems_fatal_error_occurred( error_code | 'D' << 8 | '0' ); 136 139 137 140 if ((stdout_fd = __open("/dev/console", O_WRONLY, 0)) == -1) 138 rtems_fatal_error_occurred( 'STD1');141 rtems_fatal_error_occurred( error_code | 'D' << 8 | '1' ); 139 142 140 143 if ((stderr_fd = __open("/dev/console", O_WRONLY, 0)) == -1) 141 rtems_fatal_error_occurred( 'STD2');144 rtems_fatal_error_occurred( error_code | 'D' << 8 | '2' ); 142 145 143 146 if ((stdin_fd != 0) || (stdout_fd != 1) || (stderr_fd != 2)) 144 rtems_fatal_error_occurred( 'STIO');147 rtems_fatal_error_occurred( error_code | 'I' << 8 | 'O' ); 145 148 } 146 149 -
c/src/lib/libbsp/m68k/gen68302/clock/ckinit.c
rea74482 r9700578 90 90 91 91 if ( BSP_Configuration.ticks_per_timeslice ) { 92 /* set_vector( clock_isr, CLOCK_VECTOR, 1 );*/ 92 set_vector( clock_isr, CLOCK_VECTOR, 1 ); 93 93 94 94 m302.reg.trr1 = TRR1_VAL; /* set timer reference register */ … … 101 101 atexit( Clock_exit ); 102 102 } 103 }104 105 void ReInstall_clock(106 rtems_isr_entry clock_isr107 )108 {109 rtems_unsigned32 isrlevel;110 111 rtems_interrupt_disable( isrlevel );112 /* (void) set_vector( clock_isr, CLOCK_VECTOR, 1 ); */113 rtems_interrupt_enable( isrlevel );114 103 } 115 104 … … 146 135 ) 147 136 { 137 rtems_unsigned32 isrlevel; 148 138 rtems_libio_ioctl_args_t *args = pargp; 149 139 … … 162 152 else if (args->command == rtems_build_name('N', 'E', 'W', ' ')) 163 153 { 164 ReInstall_clock(args->buffer); 154 rtems_interrupt_disable( isrlevel ); 155 (void) set_vector( args->buffer, CLOCK_VECTOR, 1 ); 156 rtems_interrupt_enable( isrlevel ); 165 157 } 166 158 -
c/src/lib/libbsp/m68k/gen68302/start/start302.s
rea74482 r9700578 194 194 | move.l #_cnsl_isr,vbase+0x028 | SCC2 195 195 move.l #timerisr,vbase+0x018 | Timer ISR 196 move.l #RTC_ISR,vbase+0x024 | Real Time Clock ISR197 196 198 197 | … … 237 236 238 237 nop 239 RTC_ISR:240 movem.l d0-d1/a0-a1,a7@- | save d0-d1,a0-a1241 addql #1,_ISR_Nest_level | one nest level deeper242 addql #1,_Thread_Dispatch_disable_level243 | disable multitasking244 245 jbsr Clock_isr | invoke the user ISR246 jmp _ISR_Exit247 238 END_CODE 248 239 -
c/src/lib/libbsp/m68k/gen68302/start302/start302.s
rea74482 r9700578 194 194 | move.l #_cnsl_isr,vbase+0x028 | SCC2 195 195 move.l #timerisr,vbase+0x018 | Timer ISR 196 move.l #RTC_ISR,vbase+0x024 | Real Time Clock ISR197 196 198 197 | … … 237 236 238 237 nop 239 RTC_ISR:240 movem.l d0-d1/a0-a1,a7@- | save d0-d1,a0-a1241 addql #1,_ISR_Nest_level | one nest level deeper242 addql #1,_Thread_Dispatch_disable_level243 | disable multitasking244 245 jbsr Clock_isr | invoke the user ISR246 jmp _ISR_Exit247 238 END_CODE 248 239 -
c/src/lib/libbsp/m68k/gen68302/startup/bspstart.c
rea74482 r9700578 131 131 { 132 132 int stdin_fd, stdout_fd, stderr_fd; 133 int error_code; 134 135 error_code = 'S' << 24 | 'T' << 16; 133 136 134 137 if ((stdin_fd = __open("/dev/console", O_RDONLY, 0)) == -1) 135 rtems_fatal_error_occurred( 'STD0');138 rtems_fatal_error_occurred( error_code | 'D' << 8 | '0' ); 136 139 137 140 if ((stdout_fd = __open("/dev/console", O_WRONLY, 0)) == -1) 138 rtems_fatal_error_occurred( 'STD1');141 rtems_fatal_error_occurred( error_code | 'D' << 8 | '1' ); 139 142 140 143 if ((stderr_fd = __open("/dev/console", O_WRONLY, 0)) == -1) 141 rtems_fatal_error_occurred( 'STD2');144 rtems_fatal_error_occurred( error_code | 'D' << 8 | '2' ); 142 145 143 146 if ((stdin_fd != 0) || (stdout_fd != 1) || (stderr_fd != 2)) 144 rtems_fatal_error_occurred( 'STIO');147 rtems_fatal_error_occurred( error_code | 'I' << 8 | 'O' ); 145 148 } 146 149 -
c/src/lib/libbsp/m68k/idp/clock/ckinit.c
rea74482 r9700578 131 131 } 132 132 133 void ReInstall_clock( clock_isr )134 rtems_isr_entry clock_isr;135 {136 rtems_unsigned32 isrlevel = 0 ;137 138 rtems_interrupt_disable( isrlevel );139 (void) set_vector( clock_isr, CLOCK_VECTOR, 1 );140 rtems_interrupt_enable( isrlevel );141 }142 143 133 /* The following was added for debugging purposes */ 144 134 void Clock_exit( void ) … … 182 172 ) 183 173 { 174 rtems_unsigned32 isrlevel; 184 175 rtems_libio_ioctl_args_t *args = pargp; 185 176 … … 198 189 else if (args->command == rtems_build_name('N', 'E', 'W', ' ')) 199 190 { 200 ReInstall_clock(args->buffer); 191 rtems_interrupt_disable( isrlevel ); 192 (void) set_vector( args->buffer, CLOCK_VECTOR, 1 ); 193 rtems_interrupt_enable( isrlevel ); 201 194 } 202 195 -
c/src/lib/libbsp/m68k/idp/console/duart.c
rea74482 r9700578 1 # 2 #$Id$3 # 1 /* 2 * $Id$ 3 */ 4 4 5 5 /*######################################################### -
c/src/lib/libbsp/m68k/idp/startup/bspstart.c
rea74482 r9700578 132 132 { 133 133 int stdin_fd, stdout_fd, stderr_fd; 134 int error_code; 135 136 error_code = 'S' << 24 | 'T' << 16; 134 137 135 138 if ((stdin_fd = __open("/dev/console", O_RDONLY, 0)) == -1) 136 rtems_fatal_error_occurred( 'STD0');139 rtems_fatal_error_occurred( error_code | 'D' << 8 | '0' ); 137 140 138 141 if ((stdout_fd = __open("/dev/console", O_WRONLY, 0)) == -1) 139 rtems_fatal_error_occurred( 'STD1');142 rtems_fatal_error_occurred( error_code | 'D' << 8 | '1' ); 140 143 141 144 if ((stderr_fd = __open("/dev/console", O_WRONLY, 0)) == -1) 142 rtems_fatal_error_occurred( 'STD2');145 rtems_fatal_error_occurred( error_code | 'D' << 8 | '2' ); 143 146 144 147 if ((stdin_fd != 0) || (stdout_fd != 1) || (stderr_fd != 2)) 145 rtems_fatal_error_occurred( 'STIO');148 rtems_fatal_error_occurred( error_code | 'I' << 8 | 'O' ); 146 149 } 147 150 -
c/src/lib/libbsp/m68k/mvme136/clock/ckinit.c
rea74482 r9700578 109 109 } 110 110 111 void ReInstall_clock(112 rtems_isr_entry clock_isr113 )114 {115 rtems_unsigned32 isrlevel;116 117 rtems_interrupt_disable( isrlevel );118 (void) set_vector( clock_isr, CLOCK_VECTOR, 1 );119 rtems_interrupt_enable( isrlevel );120 }121 122 111 void Clock_exit( void ) 123 112 { … … 158 147 ) 159 148 { 149 rtems_unsigned32 isrlevel; 160 150 rtems_libio_ioctl_args_t *args = pargp; 161 151 … … 174 164 else if (args->command == rtems_build_name('N', 'E', 'W', ' ')) 175 165 { 176 ReInstall_clock(args->buffer); 166 rtems_interrupt_disable( isrlevel ); 167 (void) set_vector( args->buffer, CLOCK_VECTOR, 1 ); 168 rtems_interrupt_enable( isrlevel ); 177 169 } 178 170 -
c/src/lib/libbsp/m68k/mvme136/startup/bspstart.c
rea74482 r9700578 125 125 { 126 126 int stdin_fd, stdout_fd, stderr_fd; 127 127 int error_code; 128 129 error_code = 'S' << 24 | 'T' << 16; 130 128 131 if ((stdin_fd = __open("/dev/console", O_RDONLY, 0)) == -1) 129 rtems_fatal_error_occurred( 'STD0');132 rtems_fatal_error_occurred( error_code | 'D' << 8 | '0' ); 130 133 131 134 if ((stdout_fd = __open("/dev/console", O_WRONLY, 0)) == -1) 132 rtems_fatal_error_occurred( 'STD1');135 rtems_fatal_error_occurred( error_code | 'D' << 8 | '1' ); 133 136 134 137 if ((stderr_fd = __open("/dev/console", O_WRONLY, 0)) == -1) 135 rtems_fatal_error_occurred( 'STD2');138 rtems_fatal_error_occurred( error_code | 'D' << 8 | '2' ); 136 139 137 140 if ((stdin_fd != 0) || (stdout_fd != 1) || (stderr_fd != 2)) 138 rtems_fatal_error_occurred( 'STIO');141 rtems_fatal_error_occurred( error_code | 'I' << 8 | 'O' ); 139 142 } 140 143 -
c/src/lib/libbsp/m68k/mvme162/clock/ckinit.c
rea74482 r9700578 90 90 } 91 91 92 void ReInstall_clock(rtems_isr_entry clock_isr)93 {94 rtems_unsigned32 isrlevel;95 96 rtems_interrupt_disable( isrlevel );97 (void) set_vector( clock_isr, CLOCK_VECTOR, 1 );98 rtems_interrupt_enable( isrlevel );99 }100 101 92 void Clock_exit( void ) 102 93 { … … 128 119 ) 129 120 { 121 rtems_unsigned32 isrlevel; 130 122 rtems_libio_ioctl_args_t *args = pargp; 131 123 … … 144 136 else if (args->command == rtems_build_name('N', 'E', 'W', ' ')) 145 137 { 146 ReInstall_clock(args->buffer); 138 rtems_interrupt_disable( isrlevel ); 139 (void) set_vector( args->buffer, CLOCK_VECTOR, 1 ); 140 rtems_interrupt_enable( isrlevel ); 147 141 } 148 142 -
c/src/lib/libbsp/m68k/mvme162/startup/bspstart.c
rea74482 r9700578 131 131 { 132 132 int stdin_fd, stdout_fd, stderr_fd; 133 int error_code; 134 135 error_code = 'S' << 24 | 'T' << 16; 133 136 134 137 if ((stdin_fd = __open("/dev/console", O_RDONLY, 0)) == -1) 135 rtems_fatal_error_occurred( 'STD0');138 rtems_fatal_error_occurred( error_code | 'D' << 8 | '0' ); 136 139 137 140 if ((stdout_fd = __open("/dev/console", O_WRONLY, 0)) == -1) 138 rtems_fatal_error_occurred( 'STD1');141 rtems_fatal_error_occurred( error_code | 'D' << 8 | '1' ); 139 142 140 143 if ((stderr_fd = __open("/dev/console", O_WRONLY, 0)) == -1) 141 rtems_fatal_error_occurred( 'STD2');144 rtems_fatal_error_occurred( error_code | 'D' << 8 | '2' ); 142 145 143 146 if ((stdin_fd != 0) || (stdout_fd != 1) || (stderr_fd != 2)) 144 rtems_fatal_error_occurred( 'STIO');147 rtems_fatal_error_occurred( error_code | 'I' << 8 | 'O' ); 145 148 } 146 149 -
c/src/lib/libbsp/no_cpu/no_bsp/clock/ckinit.c
rea74482 r9700578 106 106 107 107 if ( BSP_Configuration.ticks_per_timeslice ) { 108 Old_ticker = ( rtems_isr_entry) set_vector( clock_isr, CLOCK_VECTOR, 1 );108 Old_ticker = (rtems_isr_entry) set_vector( clock_isr, CLOCK_VECTOR, 1 ); 109 109 /* 110 110 * Hardware specific initialize goes here … … 119 119 120 120 atexit( Clock_exit ); 121 }122 123 /*124 * Reinstall_clock125 *126 * Install a clock tick handler without reprogramming the chip. This127 * is used by the polling shared memory device driver.128 */129 130 void ReInstall_clock(131 rtems_isr_entry clock_isr132 )133 {134 rtems_unsigned32 isrlevel = 0;135 136 /*137 * Disable interrupts and install the clock ISR vector using the138 * BSP dependent set_vector routine. In the below example, the clock139 * ISR is on vector 4 and is an RTEMS interrupt.140 */141 142 rtems_interrupt_disable( isrlevel );143 (void) set_vector( clock_isr, CLOCK_VECTOR, 1 );144 rtems_interrupt_enable( isrlevel );145 121 } 146 122 … … 189 165 ) 190 166 { 167 rtems_unsigned32 isrlevel; 191 168 rtems_libio_ioctl_args_t *args = pargp; 192 169 … … 205 182 else if (args->command == rtems_build_name('N', 'E', 'W', ' ')) 206 183 { 207 ReInstall_clock(args->buffer); 184 rtems_interrupt_disable( isrlevel ); 185 (void) set_vector( args->buffer, CLOCK_VECTOR, 1 ); 186 rtems_interrupt_enable( isrlevel ); 208 187 } 209 188 -
c/src/lib/libbsp/no_cpu/no_bsp/startup/bspstart.c
rea74482 r9700578 132 132 { 133 133 int stdin_fd, stdout_fd, stderr_fd; 134 int error_code; 135 136 error_code = 'S' << 24 | 'T' << 16; 134 137 135 138 if ((stdin_fd = __open("/dev/console", O_RDONLY, 0)) == -1) 136 rtems_fatal_error_occurred( 'STD0');139 rtems_fatal_error_occurred( error_code | 'D' << 8 | '0' ); 137 140 138 141 if ((stdout_fd = __open("/dev/console", O_WRONLY, 0)) == -1) 139 rtems_fatal_error_occurred( 'STD1');142 rtems_fatal_error_occurred( error_code | 'D' << 8 | '1' ); 140 143 141 144 if ((stderr_fd = __open("/dev/console", O_WRONLY, 0)) == -1) 142 rtems_fatal_error_occurred( 'STD2');145 rtems_fatal_error_occurred( error_code | 'D' << 8 | '2' ); 143 146 144 147 if ((stdin_fd != 0) || (stdout_fd != 1) || (stderr_fd != 2)) 145 rtems_fatal_error_occurred( 'STIO');148 rtems_fatal_error_occurred( error_code | 'I' << 8 | 'O' ); 146 149 } 147 150 -
c/src/lib/libbsp/powerpc/papyrus/startup/bspstart.c
rea74482 r9700578 150 150 { 151 151 int stdin_fd, stdout_fd, stderr_fd; 152 int error_code; 153 154 error_code = 'S' << 24 | 'T' << 16; 152 155 153 156 if ((stdin_fd = __open("/dev/console", O_RDONLY, 0)) == -1) 154 rtems_fatal_error_occurred( 'STD0');157 rtems_fatal_error_occurred( error_code | 'D' << 8 | '0' ); 155 158 156 159 if ((stdout_fd = __open("/dev/console", O_WRONLY, 0)) == -1) 157 rtems_fatal_error_occurred( 'STD1');160 rtems_fatal_error_occurred( error_code | 'D' << 8 | '1' ); 158 161 159 162 if ((stderr_fd = __open("/dev/console", O_WRONLY, 0)) == -1) 160 rtems_fatal_error_occurred( 'STD2');163 rtems_fatal_error_occurred( error_code | 'D' << 8 | '2' ); 161 164 162 165 if ((stdin_fd != 0) || (stdout_fd != 1) || (stderr_fd != 2)) 163 rtems_fatal_error_occurred( 'STIO');166 rtems_fatal_error_occurred( error_code | 'I' << 8 | 'O' ); 164 167 } 165 168 -
c/src/lib/libbsp/unix/posix/clock/clock.c
rea74482 r9700578 32 32 rtems_device_minor_number rtems_clock_minor; 33 33 34 void 35 Install_clock(rtems_isr_entry clock_isr) 34 void Install_clock(rtems_isr_entry clock_isr) 36 35 { 37 36 Clock_driver_ticks = 0; 38 37 39 (void) set_vector(clock_isr, Clock_driver_vector, 1);38 (void) set_vector( clock_isr, Clock_driver_vector, 1 ); 40 39 41 40 _CPU_Start_clock( BSP_Configuration.microseconds_per_tick ); … … 44 43 } 45 44 46 void 47 ReInstall_clock(rtems_isr_entry new_clock_isr) 48 { 49 rtems_unsigned32 isrlevel = 0; 50 51 rtems_interrupt_disable(isrlevel); 52 (void)set_vector(new_clock_isr, Clock_driver_vector, 1); 53 rtems_interrupt_enable(isrlevel); 54 } 55 56 void 57 Clock_isr(int vector) 45 void Clock_isr(int vector) 58 46 { 59 47 Clock_driver_ticks++; … … 66 54 */ 67 55 68 void 69 Clock_exit(void) 56 void Clock_exit(void) 70 57 { 71 58 _CPU_Stop_clock(); 72 59 73 (void) set_vector(0, Clock_driver_vector, 1);60 (void) set_vector( 0, Clock_driver_vector, 1 ); 74 61 } 75 62 76 rtems_device_driver 77 Clock_initialize( 63 rtems_device_driver Clock_initialize( 78 64 rtems_device_major_number major, 79 65 rtems_device_minor_number minor, … … 100 86 ) 101 87 { 88 rtems_unsigned32 isrlevel; 102 89 rtems_libio_ioctl_args_t *args = pargp; 103 90 … … 116 103 else if (args->command == rtems_build_name('N', 'E', 'W', ' ')) 117 104 { 118 ReInstall_clock(args->buffer); 105 rtems_interrupt_disable( isrlevel ); 106 (void) set_vector( args->buffer, Clock_driver_vector, 1 ); 107 rtems_interrupt_enable( isrlevel ); 119 108 } 120 109 -
c/src/lib/libbsp/unix/posix/startup/bspstart.c
rea74482 r9700578 183 183 #if 0 184 184 int stdin_fd, stdout_fd, stderr_fd; 185 int error_code; 186 187 error_code = 'S' << 24 | 'T' << 16; 185 188 186 189 if ((stdin_fd = __open("/dev/console", O_RDONLY, 0)) == -1) 187 rtems_fatal_error_occurred( 'STD0');190 rtems_fatal_error_occurred( error_code | 'D' << 8 | '0' ); 188 191 189 192 if ((stdout_fd = __open("/dev/console", O_WRONLY, 0)) == -1) 190 rtems_fatal_error_occurred( 'STD1');193 rtems_fatal_error_occurred( error_code | 'D' << 8 | '1' ); 191 194 192 195 if ((stderr_fd = __open("/dev/console", O_WRONLY, 0)) == -1) 193 rtems_fatal_error_occurred( 'STD2');196 rtems_fatal_error_occurred( error_code | 'D' << 8 | '2' ); 194 197 195 198 if ((stdin_fd != 0) || (stdout_fd != 1) || (stderr_fd != 2)) 196 rtems_fatal_error_occurred( 'STIO');199 rtems_fatal_error_occurred( error_code | 'I' << 8 | 'O' ); 197 200 #endif 198 201 -
c/src/tests/sptests/sp04/task1.c
rea74482 r9700578 31 31 rtems_time_of_day time; 32 32 rtems_status_code status; 33 rtems_unsigned32 start ;34 rtems_unsigned32 end ;33 rtems_unsigned32 start_time; 34 rtems_unsigned32 end_time; 35 35 36 36 puts( "TA1 - rtems_task_suspend - on Task 2" ); … … 42 42 directive_failed( status, "rtems_task_suspend of TA3" ); 43 43 44 status = rtems_clock_get( RTEMS_CLOCK_GET_SECONDS_SINCE_EPOCH, &start );44 status = rtems_clock_get( RTEMS_CLOCK_GET_SECONDS_SINCE_EPOCH, &start_time ); 45 45 directive_failed( status, "rtems_clock_get" ); 46 46 … … 48 48 49 49 for ( ; ; ) { 50 status = rtems_clock_get( RTEMS_CLOCK_GET_SECONDS_SINCE_EPOCH, &end );50 status = rtems_clock_get( RTEMS_CLOCK_GET_SECONDS_SINCE_EPOCH, &end_time ); 51 51 directive_failed( status, "rtems_clock_get" ); 52 52 53 if ( end > (start+ 2) )53 if ( end_time > (start_time + 2) ) 54 54 break; 55 55 } -
c/src/tests/sptests/sp09/screen09.c
rea74482 r9700578 26 26 rtems_isr_entry old_service_routine; 27 27 28 status = rtems_interrupt_catch( Service_routine, 500, &old_service_routine ); 28 status = rtems_interrupt_catch( 29 Service_routine, 30 ISR_INTERRUPT_MAXIMUM_VECTOR_NUMBER + 10, 31 &old_service_routine 32 ); 29 33 fatal_directive_status( 30 34 status, -
c/src/tests/sptests/sp11/sp11.scn
rea74482 r9700578 12 12 TA2 - rtems_event_send - send RTEMS_EVENT_14 to TA1 13 13 TA2 - rtems_clock_set - 08:15:00 02/12/1988 14 TA2 - rtems_event_send - sending RTEMS_EVENT_10 to self after 5seconds14 TA2 - rtems_event_send - sending RTEMS_EVENT_10 to self after 4 seconds 15 15 TA2 - rtems_event_receive - waiting forever on RTEMS_EVENT_10 16 16 TA1 - RTEMS_EVENT_14 received - eventout => 00004000 -
c/src/tests/sptests/sp11/task2.c
rea74482 r9700578 74 74 directive_failed( status, "TA2 rtems_clock_set" ); 75 75 76 time.second += 5;76 time.second += 4; 77 77 puts( 78 "TA2 - rtems_event_send - sending RTEMS_EVENT_10 to self after 5seconds"78 "TA2 - rtems_event_send - sending RTEMS_EVENT_10 to self after 4 seconds" 79 79 ); 80 80 status = rtems_timer_fire_when( … … 84 84 NULL 85 85 ); 86 directive_failed( status, "rtems_timer_fire_when after 5seconds" );86 directive_failed( status, "rtems_timer_fire_when after 4 seconds" ); 87 87 88 88 puts( "TA2 - rtems_event_receive - waiting forever on RTEMS_EVENT_10" ); -
c/src/tests/sptests/sp12/system.h
rea74482 r9700578 58 58 #define CONFIGURE_TEST_NEEDS_CLOCK_DRIVER 59 59 60 #define CONFIGURE_INIT_TASK_STACK_SIZE (RTEMS_MINIMUM_STACK_SIZE * 2) 60 61 #define CONFIGURE_MAXIMUM_SEMAPHORES 10 61 62 #define CONFIGURE_TICKS_PER_TIMESLICE 100 -
c/src/tests/sptests/sp19/system.h
rea74482 r9700578 39 39 #define CONFIGURE_SPTEST 40 40 41 #define CONFIGURE_INIT_TASK_ATTRIBUTES RTEMS_FLOATING_POINT 42 41 43 #define CONFIGURE_TEST_NEEDS_CONSOLE_DRIVER 42 44 #define CONFIGURE_TEST_NEEDS_CLOCK_DRIVER -
c/src/tests/sptests/sp20/init.c
rea74482 r9700578 45 45 Task_name[ index ], 46 46 Priorities[ index ], 47 RTEMS_MINIMUM_STACK_SIZE ,47 RTEMS_MINIMUM_STACK_SIZE * 4, 48 48 RTEMS_DEFAULT_MODES, 49 49 RTEMS_DEFAULT_ATTRIBUTES, -
c/src/tests/sptests/sp24/init.c
rea74482 r9700578 53 53 Task_name[ index ], 54 54 1, 55 RTEMS_MINIMUM_STACK_SIZE ,55 RTEMS_MINIMUM_STACK_SIZE * 2, 56 56 RTEMS_DEFAULT_MODES, 57 57 RTEMS_DEFAULT_ATTRIBUTES, -
c/src/tests/sptests/spsize/init.c
rea74482 r9700578 37 37 setvbuf(stdout, 0, _IONBF, 0); 38 38 39 puts( "\n*** RTEMS SIZE PROGRAM ***" ); 40 size_rtems( 1 ); 41 puts( "*** END OF RTEMS SIZE PROGRAM ***" ); 42 exit( 0 ); 43 #if 0 39 44 do { 40 puts( "\n*** RTEMS SIZE PROGRAM ***" );41 45 printf( "\n\nPlease select program mode:\n" ); 42 46 printf( " 1) Print Formulas\n" ); … … 53 57 } 54 58 } while ( FOREVER ); 59 #endif 55 60 } -
c/src/tests/sptests/spsize/size.c
rea74482 r9700578 391 391 #endif 392 392 393 #ifdef sparc 394 395 /* cpu.h */ 396 uninitialized += (sizeof _CPU_Interrupt_stack_low) + 397 (sizeof _CPU_Interrupt_stack_high) + 398 (sizeof _CPU_Null_fp_context) + 399 (sizeof _CPU_Trap_Table_area); 400 401 #ifdef erc32 402 uninitialized += (sizeof _ERC32_MEC_Timer_Control_Mirror); 403 #endif 404 405 406 #endif 407 408 393 409 #ifdef no_cpu 394 410 … … 422 438 (sizeof _TOD_Days_since_last_leap_year); 423 439 440 #ifdef sparc 441 442 initialized += (sizeof _CPU_Trap_slot_template); 443 444 #endif 445 424 446 puts( "" ); 425 #ifdef i960CA 426 print_formula(); 427 #else 447 428 448 if ( mode == 0 ) help_size(); 429 449 else print_formula(); 430 #endif431 450 432 451 printf( "\n" ); -
c/src/tests/tmtests/tm08/task1.c
rea74482 r9700578 199 199 200 200 put_time( 201 "rtems_task_ set_note",201 "rtems_task_get_note", 202 202 end_time, 203 203 OPERATION_COUNT, -
c/src/tests/tmtests/tm27/task1.c
rea74482 r9700578 10 10 * 11 11 * $Id$ 12 */ 13 14 /* 15 * WARNING!!!!!!!!! 16 * 17 * THIS TEST USES INTERNAL RTEMS VARIABLES!!! 12 18 */ 13 19 … … 33 39 rtems_vector_number vector 34 40 ); 35 36 /*37 * INTERNAL RTEMS VARIABLES!!!38 */39 40 extern rtems_unsigned32 _Thread_Dispatch_disable_level;41 extern rtems_unsigned32 _Context_Switch_necessary;42 extern Chain_Control *_Thread_Ready_chain;43 extern rtems_tcb *_Thread_Heir;44 41 45 42 rtems_task Init( … … 170 167 Timer_initialize(); 171 168 Cause_tm27_intr(); 172 /* goes to Isr_handler */ 173 } 169 170 /* 171 * goes to Isr_handler and then returns 172 */ 173 174 puts( "*** END OF TEST 27 ***" ); 175 exit( 0 ); 176 } 177 178 /* 179 * NOTE: When this task is executing, some of the assumptions made 180 * regarding the placement of the currently executing task's TCB 181 * on the ready chains have been violated. At least the assumption 182 * that this task is at the head of the chain for its priority 183 * has been violated. 184 */ 174 185 175 186 rtems_task Task_2( … … 198 209 ); 199 210 200 puts( "*** END OF TEST 27 ***" ); 201 exit( 0 ); 211 fflush( stdout ); 212 213 /* 214 * Switch back to the other task to exit the test. 215 */ 216 217 _Thread_Dispatch_disable_level = 0; 218 219 _Thread_Heir = (rtems_tcb *) _Thread_Ready_chain[254].first; 220 221 _Context_Switch_necessary = 1; 222 223 _Thread_Dispatch(); 224 202 225 } 203 226 -
cpukit/libcsupport/include/ringbuf.h
rea74482 r9700578 11 11 12 12 #ifndef RINGBUF_QUEUE_LENGTH 13 #define RINGBUF_QUEUE_LENGTH 20013 #define RINGBUF_QUEUE_LENGTH 128 14 14 #endif 15 15 16 16 typedef struct { 17 17 char buffer[RINGBUF_QUEUE_LENGTH]; 18 int head;19 int tail;18 volatile int head; 19 volatile int tail; 20 20 } Ring_buffer_t; 21 21 … … 28 28 ( (_buffer)->head == (_buffer)->tail ) 29 29 30 #define Ring_buffer_Is_full( _buffer ) \ 31 ( (_buffer)->head == ((_buffer)->tail + 1) % RINGBUF_QUEUE_LENGTH ) 32 30 33