source: rtems/cpukit/score/cpu/sparc/rtems/score/cpu.h @ b73e57b

Branches: 4.10, 4.11, 4.8, 4.9, 5
Last change on this file since b73e57b was b73e57b, checked in by Joel Sherrill <joel.sherrill@…>, on Jul 9, 1999 at 5:08:48 PM

Patch from Jiri Gaisler <jgais@…>:

+ interrupt masking correction
+ FPU rev.B workaround
+ minor erc32 related fixes

  • Property mode set to 100644
File size: 28.9 KB
Line 
1/*  cpu.h
2 *
3 *  This include file contains information pertaining to the port of
4 *  the executive to the SPARC processor.
5 *
6 *  COPYRIGHT (c) 1989-1998.
7 *  On-Line Applications Research Corporation (OAR).
8 *  Copyright assigned to U.S. Government, 1994.
9 *
10 *  The license and distribution terms for this file may be
11 *  found in the file LICENSE in this distribution or at
12 *  http://www.OARcorp.com/rtems/license.html.
13 *
14 *  Ported to ERC32 implementation of the SPARC by On-Line Applications
15 *  Research Corporation (OAR) under contract to the European Space
16 *  Agency (ESA).
17 *
18 *  ERC32 modifications of respective RTEMS file: COPYRIGHT (c) 1995.
19 *  European Space Agency.
20 *
21 *  $Id$
22 */
23
24#ifndef __CPU_h
25#define __CPU_h
26
27#ifdef __cplusplus
28extern "C" {
29#endif
30
31#include <rtems/score/sparc.h>               /* pick up machine definitions */
32#ifndef ASM
33#include <rtems/score/sparctypes.h>
34#endif
35
/* conditional compilation parameters */

/*
 *  Should the calls to _Thread_Enable_dispatch be inlined?
 *
 *  If TRUE, then they are inlined.
 *  If FALSE, then a subroutine call is made.
 */

#define CPU_INLINE_ENABLE_DISPATCH       TRUE

/*
 *  Should the body of the search loops in _Thread_queue_Enqueue_priority
 *  be unrolled one time?  If unrolled, each iteration of the loop examines
 *  two "nodes" on the chain being searched.  Otherwise, only one node
 *  is examined per iteration.
 *
 *  If TRUE, then the loops are unrolled.
 *  If FALSE, then the loops are not unrolled.
 *
 *  This parameter could go either way on the SPARC.  The interrupt flash
 *  code is relatively lengthy given the requirements for nops following
 *  writes to the psr.  But if the clock speed were high enough, this would
 *  not represent a great deal of time.
 */

#define CPU_UNROLL_ENQUEUE_PRIORITY      TRUE

/*
 *  Does the executive manage a dedicated interrupt stack in software?
 *
 *  If TRUE, then a stack is allocated in _Interrupt_Manager_initialization.
 *  If FALSE, nothing is done.
 *
 *  The SPARC does not have a dedicated HW interrupt stack and one has
 *  been implemented in SW.
 */

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK   TRUE

/*
 *  Does this CPU have hardware support for a dedicated interrupt stack?
 *
 *  If TRUE, then it must be installed during initialization.
 *  If FALSE, then no installation is performed.
 *
 *  The SPARC does not have a dedicated HW interrupt stack.
 */

#define CPU_HAS_HARDWARE_INTERRUPT_STACK  FALSE

/*
 *  Do we allocate a dedicated interrupt stack in the Interrupt Manager?
 *
 *  If TRUE, then the memory is allocated during initialization.
 *  If FALSE, then the memory is NOT allocated during initialization
 *  (the original comment repeated the TRUE case here; this is the
 *  intended reading given the other option descriptions in this file).
 */

#define CPU_ALLOCATE_INTERRUPT_STACK      TRUE

/*
 *  Does the RTEMS invoke the user's ISR with the vector number and
 *  a pointer to the saved interrupt frame (1) or just the vector
 *  number (0)?
 */

#define CPU_ISR_PASSES_FRAME_POINTER 0
103
/*
 *  Does the CPU have hardware floating point?
 *
 *  If TRUE, then the FLOATING_POINT task attribute is supported.
 *  If FALSE, then the FLOATING_POINT task attribute is ignored.
 */

#if ( SPARC_HAS_FPU == 1 )
#define CPU_HARDWARE_FP     TRUE
#else
#define CPU_HARDWARE_FP     FALSE
#endif

/*
 *  Are all tasks FLOATING_POINT tasks implicitly?
 *
 *  If TRUE, then the FLOATING_POINT task attribute is assumed.
 *  If FALSE, then the FLOATING_POINT task attribute is followed.
 */

#define CPU_ALL_TASKS_ARE_FP     FALSE

/*
 *  Should the IDLE task have a floating point context?
 *
 *  If TRUE, then the IDLE task is created as a FLOATING_POINT task
 *  and it has a floating point context which is switched in and out.
 *  If FALSE, then the IDLE task does not have a floating point context.
 */

#define CPU_IDLE_TASK_IS_FP      FALSE

/*
 *  Should the saving of the floating point registers be deferred
 *  until a context switch is made to another different floating point
 *  task?
 *
 *  If TRUE, then the floating point context will not be stored until
 *  necessary.  It will remain in the floating point registers and not
 *  disturbed until another floating point task is switched to.
 *
 *  If FALSE, then the floating point context is saved when a floating
 *  point task is switched out and restored when the next floating point
 *  task is restored.  The state of the floating point registers between
 *  those two operations is not specified.
 */

#define CPU_USE_DEFERRED_FP_SWITCH       TRUE

/*
 *  Does this port provide a CPU dependent IDLE task implementation?
 *
 *  If TRUE, then the routine _CPU_Thread_Idle_body
 *  must be provided and is the default IDLE thread body instead of
 *  the generic _Thread_Idle_body.  (The original comment named
 *  _CPU_Thread_Idle_body on both sides, which was circular.)
 *
 *  If FALSE, then use the generic IDLE thread body if the BSP does
 *  not provide one.
 */

#if (SPARC_HAS_LOW_POWER_MODE == 1)
#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE
#else
#define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE
#endif

/*
 *  Does the stack grow up (toward higher addresses) or down
 *  (toward lower addresses)?
 *
 *  If TRUE, then the stack grows upward.
 *  If FALSE, then the stack grows toward smaller addresses.
 *
 *  The stack grows to lower addresses on the SPARC.
 */

#define CPU_STACK_GROWS_UP               FALSE

/*
 *  The following is the variable attribute used to force alignment
 *  of critical data structures.  On some processors it may make
 *  sense to have these aligned on tighter boundaries than
 *  the minimum requirements of the compiler in order to have as
 *  much of the critical data area as possible in a cache line.
 *
 *  The SPARC does not appear to have particularly strict alignment
 *  requirements.  This value (16) was chosen to take advantage of caches.
 */

#define CPU_STRUCTURE_ALIGNMENT          __attribute__ ((aligned (16)))
194
/*
 *  Define what is required to specify how the network to host conversion
 *  routines are handled.
 *
 *  NOTE(review): the first macro name has a doubled "CPU_CPU_" prefix.
 *  The name is presumably consumed by score-level code as
 *  CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES -- verify against the generic
 *  byteorder support before renaming, since an unmatched name would
 *  silently evaluate as 0/undefined in #if checks elsewhere.
 */

#define CPU_CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES FALSE
#define CPU_BIG_ENDIAN                           TRUE
#define CPU_LITTLE_ENDIAN                        FALSE

/*
 *  The following defines the number of bits actually used in the
 *  interrupt field of the task mode.  How those bits map to the
 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
 *
 *  The SPARC has 16 interrupt levels in the PIL field of the PSR,
 *  hence the 4-bit (0x0F) mask.
 */

#define CPU_MODES_INTERRUPT_MASK   0x0000000F
213
/*
 *  This structure represents the organization of the minimum stack frame
 *  for the SPARC.  More framing information is required in certain situations
 *  such as when there are a large number of out parameters or when the callee
 *  must save floating point registers.
 *
 *  NOTE: The layout must match the CPU_STACK_FRAME_*_OFFSET constants
 *        below, which are used from assembly language.  Do not reorder
 *        or resize fields without updating the offsets and
 *        CPU_MINIMUM_STACK_FRAME_SIZE.
 */

#ifndef ASM

typedef struct {
  unsigned32  l0;                         /* local register save area    */
  unsigned32  l1;
  unsigned32  l2;
  unsigned32  l3;
  unsigned32  l4;
  unsigned32  l5;
  unsigned32  l6;
  unsigned32  l7;
  unsigned32  i0;                         /* input register save area    */
  unsigned32  i1;
  unsigned32  i2;
  unsigned32  i3;
  unsigned32  i4;
  unsigned32  i5;
  unsigned32  i6_fp;                      /* i6 doubles as frame pointer */
  unsigned32  i7;
  void       *structure_return_address;   /* aggregate return pointer    */
  /*
   *  The following are for the callee to save the register arguments in
   *  should this be necessary.
   */
  unsigned32  saved_arg0;
  unsigned32  saved_arg1;
  unsigned32  saved_arg2;
  unsigned32  saved_arg3;
  unsigned32  saved_arg4;
  unsigned32  saved_arg5;
  unsigned32  pad0;                       /* keeps frame size 8-byte aligned */
}  CPU_Minimum_stack_frame;

#endif /* ASM */

/*
 *  Offsets of the fields above for use by assembly routines.
 *  0x60 bytes total = 24 words (16 window registers, the structure
 *  return address, 6 saved arguments, and one pad word).
 */

#define CPU_STACK_FRAME_L0_OFFSET             0x00
#define CPU_STACK_FRAME_L1_OFFSET             0x04
#define CPU_STACK_FRAME_L2_OFFSET             0x08
#define CPU_STACK_FRAME_L3_OFFSET             0x0c
#define CPU_STACK_FRAME_L4_OFFSET             0x10
#define CPU_STACK_FRAME_L5_OFFSET             0x14
#define CPU_STACK_FRAME_L6_OFFSET             0x18
#define CPU_STACK_FRAME_L7_OFFSET             0x1c
#define CPU_STACK_FRAME_I0_OFFSET             0x20
#define CPU_STACK_FRAME_I1_OFFSET             0x24
#define CPU_STACK_FRAME_I2_OFFSET             0x28
#define CPU_STACK_FRAME_I3_OFFSET             0x2c
#define CPU_STACK_FRAME_I4_OFFSET             0x30
#define CPU_STACK_FRAME_I5_OFFSET             0x34
#define CPU_STACK_FRAME_I6_FP_OFFSET          0x38
#define CPU_STACK_FRAME_I7_OFFSET             0x3c
#define CPU_STRUCTURE_RETURN_ADDRESS_OFFSET   0x40
#define CPU_STACK_FRAME_SAVED_ARG0_OFFSET     0x44
#define CPU_STACK_FRAME_SAVED_ARG1_OFFSET     0x48
#define CPU_STACK_FRAME_SAVED_ARG2_OFFSET     0x4c
#define CPU_STACK_FRAME_SAVED_ARG3_OFFSET     0x50
#define CPU_STACK_FRAME_SAVED_ARG4_OFFSET     0x54
#define CPU_STACK_FRAME_SAVED_ARG5_OFFSET     0x58
#define CPU_STACK_FRAME_PAD0_OFFSET           0x5c

#define CPU_MINIMUM_STACK_FRAME_SIZE          0x60
282
/*
 * Contexts
 *
 *  Generally there are 2 types of context to save.
 *     1. Interrupt registers to save
 *     2. Task level registers to save
 *
 *  This means we have the following 3 context items:
 *     1. task level context stuff::  Context_Control
 *     2. floating point task stuff:: Context_Control_fp
 *     3. special interrupt level context :: Context_Control_interrupt
 *
 *  On the SPARC, we are relatively conservative in that we save most
 *  of the CPU state in the context area.  The ET (enable trap) bit and
 *  the CWP (current window pointer) fields of the PSR are considered
 *  system wide resources and are not maintained on a per-thread basis.
 */

#ifndef ASM

typedef struct {
    /*
     *  Using a double g0_g1 will put everything in this structure on a
     *  double word boundary which allows us to use double word loads
     *  and stores safely in the context switch.
     */
    double     g0_g1;      /* global registers g0 and g1 packed together */
    unsigned32 g2;         /* remaining global registers                 */
    unsigned32 g3;
    unsigned32 g4;
    unsigned32 g5;
    unsigned32 g6;
    unsigned32 g7;

    unsigned32 l0;         /* local registers                            */
    unsigned32 l1;
    unsigned32 l2;
    unsigned32 l3;
    unsigned32 l4;
    unsigned32 l5;
    unsigned32 l6;
    unsigned32 l7;

    unsigned32 i0;         /* input registers                            */
    unsigned32 i1;
    unsigned32 i2;
    unsigned32 i3;
    unsigned32 i4;
    unsigned32 i5;
    unsigned32 i6_fp;      /* i6 doubles as frame pointer                */
    unsigned32 i7;

    unsigned32 o0;         /* output registers                           */
    unsigned32 o1;
    unsigned32 o2;
    unsigned32 o3;
    unsigned32 o4;
    unsigned32 o5;
    unsigned32 o6_sp;      /* o6 doubles as stack pointer                */
    unsigned32 o7;

    unsigned32 psr;        /* processor status register                  */
} Context_Control;

#endif /* ASM */

/*
 *  Offsets of fields within Context_Control for assembly routines.
 *  These must track the structure layout above exactly.
 */

#define G0_OFFSET    0x00
#define G1_OFFSET    0x04
#define G2_OFFSET    0x08
#define G3_OFFSET    0x0C
#define G4_OFFSET    0x10
#define G5_OFFSET    0x14
#define G6_OFFSET    0x18
#define G7_OFFSET    0x1C

#define L0_OFFSET    0x20
#define L1_OFFSET    0x24
#define L2_OFFSET    0x28
#define L3_OFFSET    0x2C
#define L4_OFFSET    0x30
#define L5_OFFSET    0x34
#define L6_OFFSET    0x38
#define L7_OFFSET    0x3C

#define I0_OFFSET    0x40
#define I1_OFFSET    0x44
#define I2_OFFSET    0x48
#define I3_OFFSET    0x4C
#define I4_OFFSET    0x50
#define I5_OFFSET    0x54
#define I6_FP_OFFSET 0x58
#define I7_OFFSET    0x5C

#define O0_OFFSET    0x60
#define O1_OFFSET    0x64
#define O2_OFFSET    0x68
#define O3_OFFSET    0x6C
#define O4_OFFSET    0x70
#define O5_OFFSET    0x74
#define O6_SP_OFFSET 0x78
#define O7_OFFSET    0x7C

#define PSR_OFFSET   0x80

/* 33 words: 32 integer registers plus the psr */
#define CONTEXT_CONTROL_SIZE 0x84
392
/*
 *  The floating point context area: the 32 single-precision registers
 *  stored as 16 double-word pairs (so double word loads/stores can be
 *  used) plus the floating point status register.
 */

#ifndef ASM

typedef struct {
    double      f0_f1;
    double      f2_f3;
    double      f4_f5;
    double      f6_f7;
    double      f8_f9;
    double      f10_f11;
    double      f12_f13;
    double      f14_f15;
    double      f16_f17;
    double      f18_f19;
    double      f20_f21;
    double      f22_f23;
    double      f24_f25;
    double      f26_f27;
    double      f28_f29;
    double      f30_f31;
    unsigned32  fsr;        /* floating point status register */
} Context_Control_fp;

#endif /* ASM */

/*
 *  Offsets of fields within Context_Control_fp for assembly routines.
 *
 *  NOTE(review): FO_F1_OFFSET, F1O_F11_OFFSET, F2O_F21_OFFSET and
 *  F3O_F31_OFFSET spell some digits with the letter 'O' instead of
 *  zero.  These names are referenced from assembly sources, so they
 *  are kept as-is; confirm all consumers before renaming.
 */

#define FO_F1_OFFSET     0x00
#define F2_F3_OFFSET     0x08
#define F4_F5_OFFSET     0x10
#define F6_F7_OFFSET     0x18
#define F8_F9_OFFSET     0x20
#define F1O_F11_OFFSET   0x28
#define F12_F13_OFFSET   0x30
#define F14_F15_OFFSET   0x38
#define F16_F17_OFFSET   0x40
#define F18_F19_OFFSET   0x48
#define F2O_F21_OFFSET   0x50
#define F22_F23_OFFSET   0x58
#define F24_F25_OFFSET   0x60
#define F26_F27_OFFSET   0x68
#define F28_F29_OFFSET   0x70
#define F3O_F31_OFFSET   0x78
#define FSR_OFFSET       0x80

/* 16 doubles (0x80 bytes) plus the 4-byte fsr */
#define CONTEXT_CONTROL_FP_SIZE 0x84
444
#ifndef ASM

/*
 *  Context saved on stack for an interrupt.
 *
 *  NOTE:  The PSR, PC, and NPC are only saved in this structure for the
 *         benefit of the user's handler.
 *
 *  The layout must match the ISF_*_OFFSET constants below, which are
 *  used from the assembly language interrupt entry code.
 */

typedef struct {
  CPU_Minimum_stack_frame  Stack_frame;  /* standard register window frame */
  unsigned32               psr;          /* processor status at interrupt  */
  unsigned32               pc;           /* interrupted program counter    */
  unsigned32               npc;          /* next program counter           */
  unsigned32               g1;           /* volatile global registers      */
  unsigned32               g2;
  unsigned32               g3;
  unsigned32               g4;
  unsigned32               g5;
  unsigned32               g6;
  unsigned32               g7;
  unsigned32               i0;           /* input registers of interrupted window */
  unsigned32               i1;
  unsigned32               i2;
  unsigned32               i3;
  unsigned32               i4;
  unsigned32               i5;
  unsigned32               i6_fp;
  unsigned32               i7;
  unsigned32               y;            /* multiply/divide Y register     */
  unsigned32               tpc;          /* trapped pc                     */
} CPU_Interrupt_frame;

#endif /* ASM */
479
/*
 *  Offsets of fields within CPU_Interrupt_frame for assembly routines.
 *
 *  The register save area begins immediately after the minimum stack
 *  frame, so every offset is CPU_MINIMUM_STACK_FRAME_SIZE plus the
 *  field's position within the save area (20 words = 0x50 bytes).
 *
 *  Each expansion is parenthesized so the macros are safe inside
 *  larger expressions (e.g. "2 * ISF_PSR_OFFSET"); the original
 *  unparenthesized "SIZE + 0xNN" form would bind incorrectly there.
 */

#define ISF_STACK_FRAME_OFFSET 0x00
#define ISF_PSR_OFFSET         (CPU_MINIMUM_STACK_FRAME_SIZE + 0x00)
#define ISF_PC_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x04)
#define ISF_NPC_OFFSET         (CPU_MINIMUM_STACK_FRAME_SIZE + 0x08)
#define ISF_G1_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x0c)
#define ISF_G2_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x10)
#define ISF_G3_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x14)
#define ISF_G4_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x18)
#define ISF_G5_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x1c)
#define ISF_G6_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x20)
#define ISF_G7_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x24)
#define ISF_I0_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x28)
#define ISF_I1_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x2c)
#define ISF_I2_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x30)
#define ISF_I3_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x34)
#define ISF_I4_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x38)
#define ISF_I5_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x3c)
#define ISF_I6_FP_OFFSET       (CPU_MINIMUM_STACK_FRAME_SIZE + 0x40)
#define ISF_I7_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x44)
#define ISF_Y_OFFSET           (CPU_MINIMUM_STACK_FRAME_SIZE + 0x48)
#define ISF_TPC_OFFSET         (CPU_MINIMUM_STACK_FRAME_SIZE + 0x4c)

#define CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE \
        (CPU_MINIMUM_STACK_FRAME_SIZE + 0x50)
#ifndef ASM

/*
 *  The following table contains the information required to configure
 *  the processor specific parameters.
 */

typedef struct {
  void       (*pretasking_hook)( void );   /* called before task init       */
  void       (*predriver_hook)( void );    /* called before driver init     */
  void       (*postdriver_hook)( void );   /* called after driver init      */
  void       (*idle_task)( void );         /* user-provided idle body       */
  boolean      do_zero_of_workspace;       /* zero workspace at startup?    */
  unsigned32   idle_task_stack_size;
  unsigned32   interrupt_stack_size;
  unsigned32   extra_mpci_receive_server_stack;
  void *     (*stack_allocate_hook)( unsigned32 );
  void       (*stack_free_hook)( void* );
  /* end of fields required on all CPUs */

}   rtems_cpu_table;

/*
 *  This variable contains the initialize context for the FP unit.
 *  It is filled in by _CPU_Initialize and copied into the task's FP
 *  context area during _CPU_Context_Initialize.
 */

SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context CPU_STRUCTURE_ALIGNMENT;

/*
 *  This stack is allocated by the Interrupt Manager and the switch
 *  is performed in _ISR_Handler.  These variables contain pointers
 *  to the lowest and highest addresses in the chunk of memory allocated
 *  for the interrupt stack.  Since it is unknown whether the stack
 *  grows up or down (in general), this gives the CPU dependent
 *  code the option of picking the version it wants to use.  Thus
 *  both must be present if either is.
 *
 *  The SPARC supports a software based interrupt stack and these
 *  are required.
 */

SCORE_EXTERN void *_CPU_Interrupt_stack_low;
SCORE_EXTERN void *_CPU_Interrupt_stack_high;

#if defined(erc32)

/*
 *  ERC32 Specific Variables
 */

/* software mirror of the write-only MEC timer control register */
SCORE_EXTERN unsigned32 _ERC32_MEC_Timer_Control_Mirror;

#endif
562
/*
 *  The following type defines an entry in the SPARC's trap table.
 *
 *  NOTE: The instructions chosen are RTEMS dependent although one is
 *        obligated to use two of the four instructions to perform a
 *        long jump.  The other instructions load one register with the
 *        trap type (a.k.a. vector) and another with the psr.
 */

typedef struct {
  unsigned32   mov_psr_l0;                     /* mov   %psr, %l0           */
  unsigned32   sethi_of_handler_to_l4;         /* sethi %hi(_handler), %l4  */
  unsigned32   jmp_to_low_of_handler_plus_l4;  /* jmp   %l4 + %lo(_handler) */
  unsigned32   mov_vector_l3;                  /* mov   _vector, %l3        */
} CPU_Trap_table_entry;

/*
 *  This is the set of opcodes for the instructions loaded into a trap
 *  table entry.  The routine which installs a handler is responsible
 *  for filling in the fields for the _handler address and the _vector
 *  trap type.
 *
 *  The constants following this structure are masks for the fields which
 *  must be filled in when the handler is installed.
 */

extern const CPU_Trap_table_entry _CPU_Trap_slot_template;

/*
 *  This is the executive's trap table which is installed into the TBR
 *  register.
 *
 *  NOTE:  Unfortunately, this must be aligned on a 4096 byte boundary.
 *         The GNU tools as of binutils 2.5.2 and gcc 2.7.0 would not
 *         align an entity to anything greater than a 512 byte boundary.
 *
 *         Because of this, we pull a little bit of a trick.  We allocate
 *         enough memory (8192 bytes, twice the table size) so we can
 *         grab an address on a 4096 byte boundary from this area.
 */

#define SPARC_TRAP_TABLE_ALIGNMENT 4096

#ifndef NO_TABLE_MOVE

SCORE_EXTERN unsigned8 _CPU_Trap_Table_area[ 8192 ]
           __attribute__ ((aligned (SPARC_TRAP_TABLE_ALIGNMENT)));
#endif


/*
 *  The size of the floating point context area.
 */

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

#endif
620
/*
 *  Amount of extra stack (above minimum stack size) required by
 *  MPCI receive server thread.  Remember that in a multiprocessor
 *  system this thread must exist and be able to process all directives.
 */

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 1024

/*
 *  This defines the number of entries in the ISR_Vector_table managed
 *  by the executive.
 *
 *  On the SPARC, there are really only 256 vectors.  However, the executive
 *  has no easy, fast, reliable way to determine which traps are synchronous
 *  and which are asynchronous.  By default, synchronous traps return to the
 *  instruction which caused the interrupt.  So if you install a software
 *  trap handler as an executive interrupt handler (which is desirable since
 *  RTEMS takes care of window and register issues), then the executive needs
 *  to know that the return address is to the trap rather than the instruction
 *  following the trap.
 *
 *  So vectors 0 through 255 are treated as regular asynchronous traps which
 *  provide the "correct" return address.  Vectors 256 through 511 are assumed
 *  by the executive to be synchronous and to require that the return address
 *  be fudged.  (The original comment said "through 512"; the maximum vector
 *  number below is 511.)
 *
 *  If you use this mechanism to install a trap handler which must reexecute
 *  the instruction which caused the trap, then it should be installed as
 *  an asynchronous trap.  This will avoid the executive changing the return
 *  address.
 */

#define CPU_INTERRUPT_NUMBER_OF_VECTORS     256
#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER 511

/* bit 8 distinguishes the synchronous copy of a vector number */
#define SPARC_SYNCHRONOUS_TRAP_BIT_MASK     0x100
#define SPARC_ASYNCHRONOUS_TRAP( _trap )    (_trap)
#define SPARC_SYNCHRONOUS_TRAP( _trap )     ((_trap) + 256 )

#define SPARC_REAL_TRAP_NUMBER( _trap )     ((_trap) % 256)

/*
 *  Should be large enough to run all tests.  This ensures
 *  that a "reasonable" small application should not have any problems.
 *
 *  This appears to be a fairly generous number for the SPARC since it
 *  represents a call depth of about 20 routines based on the minimum
 *  stack frame.
 */

#define CPU_STACK_MINIMUM_SIZE  (1024*2 + 512)

/*
 *  CPU's worst alignment requirement for data types on a byte boundary.  This
 *  alignment does not take into account the requirements for the stack.
 *
 *  On the SPARC, this is required for double word loads and stores.
 */

#define CPU_ALIGNMENT      8

/*
 *  This number corresponds to the byte alignment requirement for the
 *  heap handler.  This alignment requirement may be stricter than that
 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
 *  common for the heap to follow the same alignment requirement as
 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
 *  then this should be set to CPU_ALIGNMENT.
 *
 *  NOTE:  This does not have to be a power of 2.  It does have to
 *         be greater or equal to than CPU_ALIGNMENT.
 */

#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT

/*
 *  This number corresponds to the byte alignment requirement for memory
 *  buffers allocated by the partition manager.  This alignment requirement
 *  may be stricter than that for the data types alignment specified by
 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
 *
 *  NOTE:  This does not have to be a power of 2.  It does have to
 *         be greater or equal to than CPU_ALIGNMENT.
 */

#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT

/*
 *  This number corresponds to the byte alignment requirement for the
 *  stack.  This alignment requirement may be stricter than that for the
 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
 *  is strict enough for the stack, then this should be set to 0.
 *
 *  NOTE:  This must be a power of 2 either 0 or greater than CPU_ALIGNMENT.
 *
 *  The alignment restrictions for the SPARC are not that strict but this
 *  should ensure that the stack is always sufficiently aligned so that the
 *  window overflow, underflow, and flush routines can use double word loads
 *  and stores.
 */

#define CPU_STACK_ALIGNMENT        16
725
726#ifndef ASM
727
728extern unsigned int sparc_disable_interrupts();
729extern void sparc_enable_interrupts();
730
731/* ISR handler macros */
732
733/*
734 *  Disable all interrupts for a critical section.  The previous
735 *  level is returned in _level.
736 */
737
738#define _CPU_ISR_Disable( _level ) \
739  (_level) = sparc_disable_interrupts()
740 
741/*
742 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
743 *  This indicates the end of a critical section.  The parameter
744 *  _level is not modified.
745 */
746
747#define _CPU_ISR_Enable( _level ) \
748  sparc_enable_interrupts( _level )
749/*
750 *  This temporarily restores the interrupt to _level before immediately
751 *  disabling them again.  This is used to divide long critical
752 *  sections into two or more parts.  The parameter _level is not
753 *  modified.
754 */
755
756#define _CPU_ISR_Flash( _level ) \
757  sparc_flash_interrupts( _level )
758 
759/*
760 *  Map interrupt level in task mode onto the hardware that the CPU
761 *  actually provides.  Currently, interrupt levels which do not
762 *  map onto the CPU in a straight fashion are undefined. 
763 */
764
765#define _CPU_ISR_Set_level( _newlevel ) \
766   sparc_enable_interrupts( _newlevel << 8)
767 
768unsigned32 _CPU_ISR_Get_level( void );
769 
770/* end of ISR handler macros */
771
/* Context handler macros */

/*
 *  Initialize the context to a state suitable for starting a
 *  task after a context restore operation.  Generally, this
 *  involves:
 *
 *     - setting a starting address
 *     - preparing the stack
 *     - preparing the stack and frame pointers
 *     - setting the proper interrupt level in the context
 *     - initializing the floating point context
 *
 *  NOTE:  Implemented as a subroutine for the SPARC port.
 */

void _CPU_Context_Initialize(
  Context_Control  *the_context,
  unsigned32       *stack_base,
  unsigned32        size,
  unsigned32        new_level,
  void             *entry_point,
  boolean           is_fp
);

/*
 *  This routine is responsible for somehow restarting the currently
 *  executing task.
 *
 *  On the SPARC, this is relatively painless but requires a small
 *  amount of wrapper code before using the regular restore code in
 *  of the context switch.
 *
 *  NOTE(review): the expansion ends in a semicolon, so callers writing
 *  "_CPU_Context_Restart_self(x);" produce a double semicolon; this is
 *  harmless as a statement but would break in an unbraced if/else --
 *  confirm callers before removing it.
 */

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

/*
 *  The FP context area for the SPARC is a simple structure and nothing
 *  special is required to find the "starting load point"
 */

#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

/*
 *  This routine initializes the FP context area passed to it.
 *
 *  The SPARC allows us to use the simple initialization model
 *  in which an "initial" FP context was saved into _CPU_Null_fp_context
 *  at CPU initialization and it is simply copied into the destination
 *  context.
 *
 *  _destination is a pointer to the location holding the FP context
 *  area pointer, hence the double dereference.
 */

#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
   *((Context_Control_fp *) *((void **) _destination)) = _CPU_Null_fp_context; \
  } while (0)

/* end of Context handler macros */
832
/* Fatal Error manager macros */

/*
 *  This routine copies _error into a known place -- typically a stack
 *  location or a register, optionally disables interrupts, and
 *  halts/stops the CPU.
 *
 *  NOTE(review): the macro body never references _error; the asm moves
 *  the saved interrupt level into %g1, not the error code.  Looks like
 *  the intent was to place _error in %g1 -- confirm against the
 *  documented fatal error protocol before changing.
 */

#define _CPU_Fatal_halt( _error ) \
  do { \
    unsigned32 level; \
    \
    level = sparc_disable_interrupts(); \
    asm volatile ( "mov  %0, %%g1 " : "=r" (level) : "0" (level) ); \
    while (1); /* loop forever */ \
  } while (0)

/* end of Fatal Error manager macros */

/* Bitfield handler macros */

/*
 *  The SPARC port uses the generic C algorithm for bitfield scan if the
 *  CPU model does not have a scan instruction.
 */

#if ( SPARC_HAS_BITSCAN == 0 )
#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
#else
#error "scan instruction not currently supported by RTEMS!!"
#endif

/* end of Bitfield handler macros */

/* Priority handler macros */

/*
 *  The SPARC port uses the generic C algorithm for bitfield scan if the
 *  CPU model does not have a scan instruction.
 */

#if ( SPARC_HAS_BITSCAN == 1 )
#error "scan instruction not currently supported by RTEMS!!"
#endif

/* end of Priority handler macros */
880
/* functions */

/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 *
 *  NOTE(review): the thread_dispatch parameter is declared without a
 *  parameter list ("void (*thread_dispatch)"), which the compiler reads
 *  as a pointer to void rather than a function pointer type -- confirm
 *  against the port's cpu.c definition.
 */

void _CPU_Initialize(
  rtems_cpu_table  *cpu_table,
  void            (*thread_dispatch)
);

/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs new_handler to be directly called from the trap
 *  table.  The previous handler is returned through old_handler.
 */

void _CPU_ISR_install_raw_handler(
  unsigned32  vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.  The previous handler is
 *  returned through old_handler.
 */

void _CPU_ISR_install_vector(
  unsigned32  vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)

/*
 *  _CPU_Thread_Idle_body
 *
 *  Some SPARC implementations have low power, sleep, or idle modes.  This
 *  tries to take advantage of those modes.
 */

void _CPU_Thread_Idle_body( void );

#endif /* CPU_PROVIDES_IDLE_THREAD_BODY */

/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.
 */

void _CPU_Context_restore(
  Context_Control *new_context
);

/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 */

void _CPU_Context_save_fp(
  void **fp_context_ptr
);

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 */

void _CPU_Context_restore_fp(
  void **fp_context_ptr
);

/*
 *  CPU_swap_u32
 *
 *  The following routine swaps the endian format of an unsigned int.
 *  It must be static because it is referenced indirectly.
 *
 *  This version will work on any processor, but if you come across a better
 *  way for the SPARC PLEASE use it.  The most common way to swap a 32-bit
 *  entity as shown below is not any more efficient on the SPARC.
 *
 *     swap least significant two bytes with 16-bit rotate
 *     swap upper and lower 16-bits
 *     swap most significant two bytes with 16-bit rotate
 *
 *  It is not obvious how the SPARC can do significantly better than the
 *  generic code.  gcc 2.7.0 only generates about 12 instructions for the
 *  following code at optimization level four (i.e. -O4).
 */
992 
/*
 *  Reverse the byte order of a 32-bit value.  Each byte is isolated
 *  with a mask and shifted directly to its mirrored position; the
 *  result is accumulated with bitwise OR.
 */
static inline unsigned int CPU_swap_u32(
  unsigned int value
)
{
  unsigned int swapped;

  swapped  = (value & 0x000000ff) << 24;   /* byte 0 -> byte 3 */
  swapped |= (value & 0x0000ff00) << 8;    /* byte 1 -> byte 2 */
  swapped |= (value & 0x00ff0000) >> 8;    /* byte 2 -> byte 1 */
  swapped |= (value & 0xff000000) >> 24;   /* byte 3 -> byte 0 */

  return swapped;
}
1007
/*
 *  Swap the byte order of a 16-bit value.
 *
 *  The argument is fully parenthesized so that expression arguments
 *  (e.g. CPU_swap_u16( a | b )) expand correctly; the original form
 *  applied & and >> to the raw argument text.  NOTE: the argument is
 *  evaluated twice -- do not pass expressions with side effects.
 */
#define CPU_swap_u16( _value ) \
  ( (((_value) & 0xff) << 8) | (((_value) >> 8) & 0xff) )
1010
1011#endif ASM
1012
1013#ifdef __cplusplus
1014}
1015#endif
1016
1017#endif
Note: See TracBrowser for help on using the repository browser.