source: rtems/c/src/exec/score/cpu/sparc/rtems/score/cpu.h @ 4159370

4.104.114.84.95
Last change on this file since 4159370 was 4159370, checked in by Joel Sherrill <joel.sherrill@…>, on Jul 11, 2000 at 9:16:53 PM

Reworked score/cpu/sparc so it can be safely compiled multilib. All
routines and structures that require CPU model specific information
are now in libcpu. This primarily required moving erc32 specific
information from score/cpu files to libcpu/sparc and the erc32 BSP.

  • Property mode set to 100644
File size: 28.6 KB
Line 
1/*  cpu.h
2 *
3 *  This include file contains information pertaining to the port of
4 *  the executive to the SPARC processor.
5 *
6 *  COPYRIGHT (c) 1989-1999.
7 *  On-Line Applications Research Corporation (OAR).
8 *
9 *  The license and distribution terms for this file may be
10 *  found in the file LICENSE in this distribution or at
11 *  http://www.OARcorp.com/rtems/license.html.
12 *
13 *  $Id$
14 */
15
16#ifndef __CPU_h
17#define __CPU_h
18
19#ifdef __cplusplus
20extern "C" {
21#endif
22
23#include <rtems/score/sparc.h>               /* pick up machine definitions */
24#ifndef ASM
25#include <rtems/score/sparctypes.h>
26#endif
27
28/* conditional compilation parameters */
29
30/*
31 *  Should the calls to _Thread_Enable_dispatch be inlined?
32 *
33 *  If TRUE, then they are inlined.
34 *  If FALSE, then a subroutine call is made.
35 */
36
37#define CPU_INLINE_ENABLE_DISPATCH       TRUE
38
39/*
40 *  Should the body of the search loops in _Thread_queue_Enqueue_priority
41 *  be unrolled one time?  In unrolled each iteration of the loop examines
42 *  two "nodes" on the chain being searched.  Otherwise, only one node
43 *  is examined per iteration.
44 *
45 *  If TRUE, then the loops are unrolled.
46 *  If FALSE, then the loops are not unrolled.
47 *
48 *  This parameter could go either way on the SPARC.  The interrupt flash
49 *  code is relatively lengthy given the requirements for nops following
50 *  writes to the psr.  But if the clock speed were high enough, this would
51 *  not represent a great deal of time.
52 */
53
54#define CPU_UNROLL_ENQUEUE_PRIORITY      TRUE
55
56/*
57 *  Does the executive manage a dedicated interrupt stack in software?
58 *
59 *  If TRUE, then a stack is allocated in _ISR_Handler_initialization.
60 *  If FALSE, nothing is done.
61 *
62 *  The SPARC does not have a dedicated HW interrupt stack and one has
63 *  been implemented in SW.
64 */
65
66#define CPU_HAS_SOFTWARE_INTERRUPT_STACK   TRUE
67
68/*
69 *  Does this CPU have hardware support for a dedicated interrupt stack?
70 *
71 *  If TRUE, then it must be installed during initialization.
72 *  If FALSE, then no installation is performed.
73 *
74 *  The SPARC does not have a dedicated HW interrupt stack.
75 */
76
77#define CPU_HAS_HARDWARE_INTERRUPT_STACK  FALSE
78
79/*
80 *  Do we allocate a dedicated interrupt stack in the Interrupt Manager?
81 *
82 *  If TRUE, then the memory is allocated during initialization.
 *  If FALSE, then the interrupt stack memory must be allocated and
 *  installed by the BSP or CPU dependent code.
84 */
85
86#define CPU_ALLOCATE_INTERRUPT_STACK      TRUE
87
88/*
89 *  Does the RTEMS invoke the user's ISR with the vector number and
90 *  a pointer to the saved interrupt frame (1) or just the vector
91 *  number (0)?
92 */
93
94#define CPU_ISR_PASSES_FRAME_POINTER 0
95
96/*
97 *  Does the CPU have hardware floating point?
98 *
99 *  If TRUE, then the FLOATING_POINT task attribute is supported.
100 *  If FALSE, then the FLOATING_POINT task attribute is ignored.
101 */
102
103#if ( SPARC_HAS_FPU == 1 )
104#define CPU_HARDWARE_FP     TRUE
105#else
106#define CPU_HARDWARE_FP     FALSE
107#endif
108
109/*
110 *  Are all tasks FLOATING_POINT tasks implicitly?
111 *
112 *  If TRUE, then the FLOATING_POINT task attribute is assumed.
113 *  If FALSE, then the FLOATING_POINT task attribute is followed.
114 */
115
116#define CPU_ALL_TASKS_ARE_FP     FALSE
117
118/*
119 *  Should the IDLE task have a floating point context?
120 *
121 *  If TRUE, then the IDLE task is created as a FLOATING_POINT task
122 *  and it has a floating point context which is switched in and out.
123 *  If FALSE, then the IDLE task does not have a floating point context.
124 */
125
126#define CPU_IDLE_TASK_IS_FP      FALSE
127
128/*
129 *  Should the saving of the floating point registers be deferred
130 *  until a context switch is made to another different floating point
131 *  task?
132 *
133 *  If TRUE, then the floating point context will not be stored until
134 *  necessary.  It will remain in the floating point registers and not
 *  disturbed until another floating point task is switched to.
136 *
137 *  If FALSE, then the floating point context is saved when a floating
138 *  point task is switched out and restored when the next floating point
139 *  task is restored.  The state of the floating point registers between
140 *  those two operations is not specified.
141 */
142
143#define CPU_USE_DEFERRED_FP_SWITCH       TRUE
144
145/*
146 *  Does this port provide a CPU dependent IDLE task implementation?
147 *
 *  If TRUE, then the routine _CPU_Thread_Idle_body
 *  must be provided and is the default IDLE thread body instead of
 *  _Thread_Idle_body.
151 *
152 *  If FALSE, then use the generic IDLE thread body if the BSP does
153 *  not provide one.
154 */
155
156#define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE
157
158/*
159 *  Does the stack grow up (toward higher addresses) or down
160 *  (toward lower addresses)?
161 *
 *  If TRUE, then the stack grows upward (toward higher addresses).
 *  If FALSE, then the stack grows toward smaller addresses.
164 *
165 *  The stack grows to lower addresses on the SPARC.
166 */
167
168#define CPU_STACK_GROWS_UP               FALSE
169
170/*
171 *  The following is the variable attribute used to force alignment
172 *  of critical data structures.  On some processors it may make
173 *  sense to have these aligned on tighter boundaries than
174 *  the minimum requirements of the compiler in order to have as
175 *  much of the critical data area as possible in a cache line.
176 *
177 *  The SPARC does not appear to have particularly strict alignment
178 *  requirements.  This value was chosen to take advantages of caches.
179 */
180
181#define CPU_STRUCTURE_ALIGNMENT          __attribute__ ((aligned (16)))
182
183/*
184 *  Define what is required to specify how the network to host conversion
185 *  routines are handled.
186 */
187
188#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES     FALSE
189#define CPU_BIG_ENDIAN                           TRUE
190#define CPU_LITTLE_ENDIAN                        FALSE
191
192/*
193 *  The following defines the number of bits actually used in the
194 *  interrupt field of the task mode.  How those bits map to the
195 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
196 *
197 *  The SPARC has 16 interrupt levels in the PIL field of the PSR.
198 */
199
200#define CPU_MODES_INTERRUPT_MASK   0x0000000F
201
202/*
203 *  This structure represents the organization of the minimum stack frame
 *  for the SPARC.  More framing information is required in certain situations
205 *  such as when there are a large number of out parameters or when the callee
206 *  must save floating point registers.
207 */
208
209#ifndef ASM
210
typedef struct {
  /*
   *  Save area for the local registers (%l0-%l7) of the register window.
   *
   *  NOTE:  Field order and sizes must be kept in sync with the
   *         CPU_STACK_FRAME_*_OFFSET constants defined below.
   */
  unsigned32  l0;
  unsigned32  l1;
  unsigned32  l2;
  unsigned32  l3;
  unsigned32  l4;
  unsigned32  l5;
  unsigned32  l6;
  unsigned32  l7;
  /* save area for the input registers (%i0-%i7); %i6 doubles as the frame pointer */
  unsigned32  i0;
  unsigned32  i1;
  unsigned32  i2;
  unsigned32  i3;
  unsigned32  i4;
  unsigned32  i5;
  unsigned32  i6_fp;
  unsigned32  i7;
  /* address where a callee returning an aggregate deposits its result */
  void       *structure_return_address;
  /*
   *  The following are for the callee to save the register arguments in
   *  should this be necessary.
   */
  unsigned32  saved_arg0;
  unsigned32  saved_arg1;
  unsigned32  saved_arg2;
  unsigned32  saved_arg3;
  unsigned32  saved_arg4;
  unsigned32  saved_arg5;
  /* pad so the total size (0x60) keeps the stack doubleword aligned */
  unsigned32  pad0;
}  CPU_Minimum_stack_frame;
241
242#endif /* ASM */
243
244#define CPU_STACK_FRAME_L0_OFFSET             0x00
245#define CPU_STACK_FRAME_L1_OFFSET             0x04
246#define CPU_STACK_FRAME_L2_OFFSET             0x08
247#define CPU_STACK_FRAME_L3_OFFSET             0x0c
248#define CPU_STACK_FRAME_L4_OFFSET             0x10
249#define CPU_STACK_FRAME_L5_OFFSET             0x14
250#define CPU_STACK_FRAME_L6_OFFSET             0x18
251#define CPU_STACK_FRAME_L7_OFFSET             0x1c
252#define CPU_STACK_FRAME_I0_OFFSET             0x20
253#define CPU_STACK_FRAME_I1_OFFSET             0x24
254#define CPU_STACK_FRAME_I2_OFFSET             0x28
255#define CPU_STACK_FRAME_I3_OFFSET             0x2c
256#define CPU_STACK_FRAME_I4_OFFSET             0x30
257#define CPU_STACK_FRAME_I5_OFFSET             0x34
258#define CPU_STACK_FRAME_I6_FP_OFFSET          0x38
259#define CPU_STACK_FRAME_I7_OFFSET             0x3c
260#define CPU_STRUCTURE_RETURN_ADDRESS_OFFSET   0x40
261#define CPU_STACK_FRAME_SAVED_ARG0_OFFSET     0x44
262#define CPU_STACK_FRAME_SAVED_ARG1_OFFSET     0x48
263#define CPU_STACK_FRAME_SAVED_ARG2_OFFSET     0x4c
264#define CPU_STACK_FRAME_SAVED_ARG3_OFFSET     0x50
265#define CPU_STACK_FRAME_SAVED_ARG4_OFFSET     0x54
266#define CPU_STACK_FRAME_SAVED_ARG5_OFFSET     0x58
267#define CPU_STACK_FRAME_PAD0_OFFSET           0x5c
268
269#define CPU_MINIMUM_STACK_FRAME_SIZE          0x60
270
271/*
272 * Contexts
273 *
274 *  Generally there are 2 types of context to save.
275 *     1. Interrupt registers to save
276 *     2. Task level registers to save
277 *
278 *  This means we have the following 3 context items:
279 *     1. task level context stuff::  Context_Control
280 *     2. floating point task stuff:: Context_Control_fp
281 *     3. special interrupt level context :: Context_Control_interrupt
282 *
283 *  On the SPARC, we are relatively conservative in that we save most
284 *  of the CPU state in the context area.  The ET (enable trap) bit and
285 *  the CWP (current window pointer) fields of the PSR are considered
286 *  system wide resources and are not maintained on a per-thread basis.
287 */
288
289#ifndef ASM
290
typedef struct {
    /*
     *  Using a double g0_g1 will put everything in this structure on a
     *  double word boundary which allows us to use double word loads
     *  and stores safely in the context switch.
     *
     *  NOTE:  Field order and sizes must be kept in sync with the
     *         *_OFFSET constants defined below for the assembly code.
     */
    double     g0_g1;
    /* remaining global registers (%g2-%g7) */
    unsigned32 g2;
    unsigned32 g3;
    unsigned32 g4;
    unsigned32 g5;
    unsigned32 g6;
    unsigned32 g7;

    /* local registers (%l0-%l7) */
    unsigned32 l0;
    unsigned32 l1;
    unsigned32 l2;
    unsigned32 l3;
    unsigned32 l4;
    unsigned32 l5;
    unsigned32 l6;
    unsigned32 l7;

    /* input registers (%i0-%i7); %i6 doubles as the frame pointer */
    unsigned32 i0;
    unsigned32 i1;
    unsigned32 i2;
    unsigned32 i3;
    unsigned32 i4;
    unsigned32 i5;
    unsigned32 i6_fp;
    unsigned32 i7;

    /* output registers (%o0-%o7); %o6 doubles as the stack pointer */
    unsigned32 o0;
    unsigned32 o1;
    unsigned32 o2;
    unsigned32 o3;
    unsigned32 o4;
    unsigned32 o5;
    unsigned32 o6_sp;
    unsigned32 o7;

    /* processor status register (ET and CWP fields are NOT per-thread,
       per the note above this structure) */
    unsigned32 psr;
} Context_Control;
334
335#endif /* ASM */
336
337/*
338 *  Offsets of fields with Context_Control for assembly routines.
339 */
340
341#define G0_OFFSET    0x00
342#define G1_OFFSET    0x04
343#define G2_OFFSET    0x08
344#define G3_OFFSET    0x0C
345#define G4_OFFSET    0x10
346#define G5_OFFSET    0x14
347#define G6_OFFSET    0x18
348#define G7_OFFSET    0x1C
349
350#define L0_OFFSET    0x20
351#define L1_OFFSET    0x24
352#define L2_OFFSET    0x28
353#define L3_OFFSET    0x2C
354#define L4_OFFSET    0x30
355#define L5_OFFSET    0x34
356#define L6_OFFSET    0x38
357#define L7_OFFSET    0x3C
358
359#define I0_OFFSET    0x40
360#define I1_OFFSET    0x44
361#define I2_OFFSET    0x48
362#define I3_OFFSET    0x4C
363#define I4_OFFSET    0x50
364#define I5_OFFSET    0x54
365#define I6_FP_OFFSET 0x58
366#define I7_OFFSET    0x5C
367
368#define O0_OFFSET    0x60
369#define O1_OFFSET    0x64
370#define O2_OFFSET    0x68
371#define O3_OFFSET    0x6C
372#define O4_OFFSET    0x70
373#define O5_OFFSET    0x74
374#define O6_SP_OFFSET 0x78
375#define O7_OFFSET    0x7C
376
377#define PSR_OFFSET   0x80
378
379#define CONTEXT_CONTROL_SIZE 0x84
380
381/*
382 *  The floating point context area.
383 */
384
385#ifndef ASM
386
typedef struct {
    /*
     *  The 32 single-precision FPU registers (%f0-%f31) saved as
     *  register pairs so double word loads and stores can be used.
     *
     *  NOTE:  Field order must be kept in sync with the F*_OFFSET
     *         constants defined below for the assembly code.
     */
    double      f0_f1;
    double      f2_f3;
    double      f4_f5;
    double      f6_f7;
    double      f8_f9;
    double      f10_f11;
    double      f12_f13;
    double      f14_f15;
    double      f16_f17;
    double      f18_f19;
    double      f20_f21;
    double      f22_f23;
    double      f24_f25;
    double      f26_f27;
    double      f28_f29;
    double      f30_f31;
    /* floating point status register */
    unsigned32  fsr;
} Context_Control_fp;
406
407#endif /* ASM */
408
409/*
410 *  Offsets of fields with Context_Control_fp for assembly routines.
411 */
412
413#define FO_F1_OFFSET     0x00
414#define F2_F3_OFFSET     0x08
415#define F4_F5_OFFSET     0x10
416#define F6_F7_OFFSET     0x18
417#define F8_F9_OFFSET     0x20
418#define F1O_F11_OFFSET   0x28
419#define F12_F13_OFFSET   0x30
420#define F14_F15_OFFSET   0x38
421#define F16_F17_OFFSET   0x40
422#define F18_F19_OFFSET   0x48
423#define F2O_F21_OFFSET   0x50
424#define F22_F23_OFFSET   0x58
425#define F24_F25_OFFSET   0x60
426#define F26_F27_OFFSET   0x68
427#define F28_F29_OFFSET   0x70
428#define F3O_F31_OFFSET   0x78
429#define FSR_OFFSET       0x80
430
431#define CONTEXT_CONTROL_FP_SIZE 0x84
432
433#ifndef ASM
434
435/*
436 *  Context saved on stack for an interrupt.
437 *
438 *  NOTE:  The PSR, PC, and NPC are only saved in this structure for the
439 *         benefit of the user's handler.
440 */
441
typedef struct {
  /* ABI minimum stack frame, presumably so the handler can call C code
     -- confirm against the assembly in cpu_asm */
  CPU_Minimum_stack_frame  Stack_frame;
  unsigned32               psr;    /* processor status register at interrupt */
  unsigned32               pc;     /* interrupted program counter */
  unsigned32               npc;    /* next program counter */
  /* global registers (%g1-%g7) of the interrupted context */
  unsigned32               g1;
  unsigned32               g2;
  unsigned32               g3;
  unsigned32               g4;
  unsigned32               g5;
  unsigned32               g6;
  unsigned32               g7;
  /* input registers (%i0-%i7); %i6 doubles as the frame pointer */
  unsigned32               i0;
  unsigned32               i1;
  unsigned32               i2;
  unsigned32               i3;
  unsigned32               i4;
  unsigned32               i5;
  unsigned32               i6_fp;
  unsigned32               i7;
  unsigned32               y;      /* Y register (multiply/divide step) */
  /* NOTE(review): tpc purpose is not evident from this header --
     confirm against the ISR assembly code */
  unsigned32               tpc;
} CPU_Interrupt_frame;
465
466#endif /* ASM */
467
468/*
469 *  Offsets of fields with CPU_Interrupt_frame for assembly routines.
470 */
471
472#define ISF_STACK_FRAME_OFFSET 0x00
473#define ISF_PSR_OFFSET         CPU_MINIMUM_STACK_FRAME_SIZE + 0x00
474#define ISF_PC_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x04
475#define ISF_NPC_OFFSET         CPU_MINIMUM_STACK_FRAME_SIZE + 0x08
476#define ISF_G1_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x0c
477#define ISF_G2_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x10
478#define ISF_G3_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x14
479#define ISF_G4_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x18
480#define ISF_G5_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x1c
481#define ISF_G6_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x20
482#define ISF_G7_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x24
483#define ISF_I0_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x28
484#define ISF_I1_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x2c
485#define ISF_I2_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x30
486#define ISF_I3_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x34
487#define ISF_I4_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x38
488#define ISF_I5_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x3c
489#define ISF_I6_FP_OFFSET       CPU_MINIMUM_STACK_FRAME_SIZE + 0x40
490#define ISF_I7_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x44
491#define ISF_Y_OFFSET           CPU_MINIMUM_STACK_FRAME_SIZE + 0x48
492#define ISF_TPC_OFFSET         CPU_MINIMUM_STACK_FRAME_SIZE + 0x4c
493
494#define CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE CPU_MINIMUM_STACK_FRAME_SIZE + 0x50
495#ifndef ASM
496
497/*
498 *  The following table contains the information required to configure
499 *  the processor specific parameters.
500 */
501
typedef struct {
  /* BSP/user hooks invoked during system initialization; exact timing is
     defined by the executive initialization code, not visible here */
  void       (*pretasking_hook)( void );
  void       (*predriver_hook)( void );
  void       (*postdriver_hook)( void );
  /* presumably a replacement IDLE task body; NULL selects the default */
  void       (*idle_task)( void );
  /* should the executive zero the workspace at startup? */
  boolean      do_zero_of_workspace;
  unsigned32   idle_task_stack_size;
  unsigned32   interrupt_stack_size;
  /* extra stack for the MPCI receive server thread (multiprocessing) */
  unsigned32   extra_mpci_receive_server_stack;
  /* optional custom allocate/free routines for task stacks */
  void *     (*stack_allocate_hook)( unsigned32 );
  void       (*stack_free_hook)( void* );
  /* end of fields required on all CPUs */

}   rtems_cpu_table;
516
517/*
518 *  Macros to access required entires in the CPU Table are in
519 *  the file rtems/system.h.
520 */
521
522/*
523 *  Macros to access SPARC specific additions to the CPU Table
524 */
525
526/* There are no CPU specific additions to the CPU Table for this port. */
527
528/*
 *  This variable contains the initial context for the FP unit.
530 *  It is filled in by _CPU_Initialize and copied into the task's FP
531 *  context area during _CPU_Context_Initialize.
532 */
533
534SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context CPU_STRUCTURE_ALIGNMENT;
535
536/*
537 *  This stack is allocated by the Interrupt Manager and the switch
538 *  is performed in _ISR_Handler.  These variables contain pointers
539 *  to the lowest and highest addresses in the chunk of memory allocated
540 *  for the interrupt stack.  Since it is unknown whether the stack
541 *  grows up or down (in general), this give the CPU dependent
542 *  code the option of picking the version it wants to use.  Thus
543 *  both must be present if either is.
544 *
545 *  The SPARC supports a software based interrupt stack and these
546 *  are required.
547 */
548
549SCORE_EXTERN void *_CPU_Interrupt_stack_low;
550SCORE_EXTERN void *_CPU_Interrupt_stack_high;
551
552/*
553 *  The following type defines an entry in the SPARC's trap table.
554 *
555 *  NOTE: The instructions chosen are RTEMS dependent although one is
556 *        obligated to use two of the four instructions to perform a
557 *        long jump.  The other instructions load one register with the
558 *        trap type (a.k.a. vector) and another with the psr.
559 */
560 
typedef struct {
  /*
   *  One four-instruction trap table slot.  The _handler address and
   *  _vector fields are patched in when a handler is installed (see the
   *  note and masks accompanying _CPU_Trap_slot_template below).
   */
  unsigned32   mov_psr_l0;                     /* mov   %psr, %l0           */
  unsigned32   sethi_of_handler_to_l4;         /* sethi %hi(_handler), %l4  */
  unsigned32   jmp_to_low_of_handler_plus_l4;  /* jmp   %l4 + %lo(_handler) */
  unsigned32   mov_vector_l3;                  /* mov   _vector, %l3        */
} CPU_Trap_table_entry;
567 
568/*
569 *  This is the set of opcodes for the instructions loaded into a trap
570 *  table entry.  The routine which installs a handler is responsible
571 *  for filling in the fields for the _handler address and the _vector
572 *  trap type.
573 *
574 *  The constants following this structure are masks for the fields which
575 *  must be filled in when the handler is installed.
576 */
577 
578extern const CPU_Trap_table_entry _CPU_Trap_slot_template;
579
580/*
581 *  This is the executive's trap table which is installed into the TBR
582 *  register.
583 *
584 *  NOTE:  Unfortunately, this must be aligned on a 4096 byte boundary.
585 *         The GNU tools as of binutils 2.5.2 and gcc 2.7.0 would not
586 *         align an entity to anything greater than a 512 byte boundary.
587 *
588 *         Because of this, we pull a little bit of a trick.  We allocate
589 *         enough memory so we can grab an address on a 4096 byte boundary
590 *         from this area.
591 */
592 
593#define SPARC_TRAP_TABLE_ALIGNMENT 4096
594 
595#ifndef NO_TABLE_MOVE
596
597SCORE_EXTERN unsigned8 _CPU_Trap_Table_area[ 8192 ]
598           __attribute__ ((aligned (SPARC_TRAP_TABLE_ALIGNMENT)));
599#endif
600 
601
602/*
603 *  The size of the floating point context area. 
604 */
605
606#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
607
608#endif
609
610/*
611 *  Amount of extra stack (above minimum stack size) required by
612 *  MPCI receive server thread.  Remember that in a multiprocessor
613 *  system this thread must exist and be able to process all directives.
614 */
615
616#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 1024
617
618/*
619 *  This defines the number of entries in the ISR_Vector_table managed
620 *  by the executive.
621 *
622 *  On the SPARC, there are really only 256 vectors.  However, the executive
623 *  has no easy, fast, reliable way to determine which traps are synchronous
624 *  and which are asynchronous.  By default, synchronous traps return to the
625 *  instruction which caused the interrupt.  So if you install a software
626 *  trap handler as an executive interrupt handler (which is desirable since
627 *  RTEMS takes care of window and register issues), then the executive needs
628 *  to know that the return address is to the trap rather than the instruction
629 *  following the trap.
630 *
631 *  So vectors 0 through 255 are treated as regular asynchronous traps which
 *  provide the "correct" return address.  Vectors 256 through 511 are assumed
633 *  by the executive to be synchronous and to require that the return address
634 *  be fudged.
635 *
636 *  If you use this mechanism to install a trap handler which must reexecute
637 *  the instruction which caused the trap, then it should be installed as
638 *  an asynchronous trap.  This will avoid the executive changing the return
639 *  address.
640 */
641
642#define CPU_INTERRUPT_NUMBER_OF_VECTORS     256
643#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER 511
644
645#define SPARC_SYNCHRONOUS_TRAP_BIT_MASK     0x100
646#define SPARC_ASYNCHRONOUS_TRAP( _trap )    (_trap)
647#define SPARC_SYNCHRONOUS_TRAP( _trap )     ((_trap) + 256 )
648
649#define SPARC_REAL_TRAP_NUMBER( _trap )     ((_trap) % 256)
650
651/*
652 *  Should be large enough to run all tests.  This insures
653 *  that a "reasonable" small application should not have any problems.
654 *
655 *  This appears to be a fairly generous number for the SPARC since
656 *  represents a call depth of about 20 routines based on the minimum
657 *  stack frame.
658 */
659
660#define CPU_STACK_MINIMUM_SIZE  (1024*4)
661
662/*
663 *  CPU's worst alignment requirement for data types on a byte boundary.  This
664 *  alignment does not take into account the requirements for the stack.
665 *
666 *  On the SPARC, this is required for double word loads and stores.
667 */
668
669#define CPU_ALIGNMENT      8
670
671/*
672 *  This number corresponds to the byte alignment requirement for the
673 *  heap handler.  This alignment requirement may be stricter than that
674 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
675 *  common for the heap to follow the same alignment requirement as
676 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
677 *  then this should be set to CPU_ALIGNMENT.
678 *
679 *  NOTE:  This does not have to be a power of 2.  It does have to
680 *         be greater or equal to than CPU_ALIGNMENT.
681 */
682
683#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT
684
685/*
686 *  This number corresponds to the byte alignment requirement for memory
687 *  buffers allocated by the partition manager.  This alignment requirement
688 *  may be stricter than that for the data types alignment specified by
689 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
690 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
691 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
692 *
693 *  NOTE:  This does not have to be a power of 2.  It does have to
694 *         be greater or equal to than CPU_ALIGNMENT.
695 */
696
697#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT
698
699/*
700 *  This number corresponds to the byte alignment requirement for the
701 *  stack.  This alignment requirement may be stricter than that for the
702 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
703 *  is strict enough for the stack, then this should be set to 0.
704 *
705 *  NOTE:  This must be a power of 2 either 0 or greater than CPU_ALIGNMENT.
706 *
707 *  The alignment restrictions for the SPARC are not that strict but this
 *  should ensure that the stack is always sufficiently aligned so that the
709 *  window overflow, underflow, and flush routines can use double word loads
710 *  and stores.
711 */
712
713#define CPU_STACK_ALIGNMENT        16
714
715#ifndef ASM
716
717extern unsigned int sparc_disable_interrupts();
718extern void sparc_enable_interrupts();
719
720/* ISR handler macros */
721
722/*
723 *  Disable all interrupts for a critical section.  The previous
724 *  level is returned in _level.
725 */
726
727#define _CPU_ISR_Disable( _level ) \
728  (_level) = sparc_disable_interrupts()
729 
730/*
731 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
732 *  This indicates the end of a critical section.  The parameter
733 *  _level is not modified.
734 */
735
736#define _CPU_ISR_Enable( _level ) \
737  sparc_enable_interrupts( _level )
738/*
739 *  This temporarily restores the interrupt to _level before immediately
740 *  disabling them again.  This is used to divide long critical
741 *  sections into two or more parts.  The parameter _level is not
742 *  modified.
743 */
744
745#define _CPU_ISR_Flash( _level ) \
746  sparc_flash_interrupts( _level )
747 
748/*
749 *  Map interrupt level in task mode onto the hardware that the CPU
750 *  actually provides.  Currently, interrupt levels which do not
751 *  map onto the CPU in a straight fashion are undefined. 
752 */
753
/*
 *  The interrupt level occupies the low order bits of the task mode while
 *  the PIL field is bits 8-11 of the PSR, hence the shift by eight.
 *
 *  NOTE:  The argument is parenthesized so that expression arguments
 *         (e.g. a conditional expression) expand with the intended
 *         precedence relative to the shift.
 */
#define _CPU_ISR_Set_level( _newlevel ) \
   sparc_enable_interrupts( (_newlevel) << 8 )
756 
757unsigned32 _CPU_ISR_Get_level( void );
758 
759/* end of ISR handler macros */
760
761/* Context handler macros */
762
763/*
764 *  Initialize the context to a state suitable for starting a
765 *  task after a context restore operation.  Generally, this
766 *  involves:
767 *
768 *     - setting a starting address
769 *     - preparing the stack
770 *     - preparing the stack and frame pointers
771 *     - setting the proper interrupt level in the context
772 *     - initializing the floating point context
773 *
774 *  NOTE:  Implemented as a subroutine for the SPARC port.
775 */
776
777void _CPU_Context_Initialize(
778  Context_Control  *the_context,
779  unsigned32       *stack_base,
780  unsigned32        size,
781  unsigned32        new_level,
782  void             *entry_point,
783  boolean           is_fp
784);
785
786/*
787 *  This routine is responsible for somehow restarting the currently
788 *  executing task. 
789 *
 *  On the SPARC, this is relatively painless but requires a small
791 *  amount of wrapper code before using the regular restore code in
792 *  of the context switch.
793 */
794
/*
 *  NOTE:  No trailing semicolon in the expansion -- the caller supplies
 *         it, so the macro behaves as a single statement inside
 *         unbraced if/else bodies.
 */
#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) )
797
798/*
799 *  The FP context area for the SPARC is a simple structure and nothing
800 *  special is required to find the "starting load point"
801 */
802
803#define _CPU_Context_Fp_start( _base, _offset ) \
804   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
805
806/*
 *  This routine initializes the FP context area passed to it.
808 *
809 *  The SPARC allows us to use the simple initialization model
810 *  in which an "initial" FP context was saved into _CPU_Null_fp_context
811 *  at CPU initialization and it is simply copied into the destination
812 *  context.
813 */
814
/* NOTE:  _destination is parenthesized so expression arguments cast
 *        correctly; it is evaluated once.
 */
#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
   *((Context_Control_fp *) *((void **) (_destination))) = _CPU_Null_fp_context; \
  } while (0)
819
820/* end of Context handler macros */
821
822/* Fatal Error manager macros */
823
824/*
825 *  This routine copies _error into a known place -- typically a stack
826 *  location or a register, optionally disables interrupts, and
827 *  halts/stops the CPU.
828 */
829
#define _CPU_Fatal_halt( _error ) \
  do { \
    unsigned32 level; \
    \
    level = sparc_disable_interrupts(); \
    /* copy _error (not level) into %g1 so a debugger can find it */ \
    asm volatile ( "mov  %0, %%g1 " : "=r" (level) : "0" (_error) ); \
    while (1); /* loop forever */ \
  } while (0)
838
839/* end of Fatal Error manager macros */
840
841/* Bitfield handler macros */
842
843/*
844 *  The SPARC port uses the generic C algorithm for bitfield scan if the
845 *  CPU model does not have a scan instruction.
846 */
847
848#if ( SPARC_HAS_BITSCAN == 0 )
849#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
850#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
851#else
852#error "scan instruction not currently supported by RTEMS!!"
853#endif
854
855/* end of Bitfield handler macros */
856
857/* Priority handler handler macros */
858
859/*
860 *  The SPARC port uses the generic C algorithm for bitfield scan if the
861 *  CPU model does not have a scan instruction.
862 */
863
864#if ( SPARC_HAS_BITSCAN == 1 )
865#error "scan instruction not currently supported by RTEMS!!"
866#endif
867
868/* end of Priority handler macros */
869
870/* functions */
871
872/*
873 *  _CPU_Initialize
874 *
875 *  This routine performs CPU dependent initialization.
876 */
877
878void _CPU_Initialize(
879  rtems_cpu_table  *cpu_table,
880  void            (*thread_dispatch)
881);
882
883/*
884 *  _CPU_ISR_install_raw_handler
885 *
886 *  This routine installs new_handler to be directly called from the trap
887 *  table.
888 */
889 
890void _CPU_ISR_install_raw_handler(
891  unsigned32  vector,
892  proc_ptr    new_handler,
893  proc_ptr   *old_handler
894);
895
896/*
897 *  _CPU_ISR_install_vector
898 *
899 *  This routine installs an interrupt vector.
900 */
901
902void _CPU_ISR_install_vector(
903  unsigned32  vector,
904  proc_ptr    new_handler,
905  proc_ptr   *old_handler
906);
907
908#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)
909 
910/*
911 *  _CPU_Thread_Idle_body
912 *
913 *  Some SPARC implementations have low power, sleep, or idle modes.  This
914 *  tries to take advantage of those models.
915 */
916 
917void _CPU_Thread_Idle_body( void );
918 
919#endif /* CPU_PROVIDES_IDLE_THREAD_BODY */
920
921/*
922 *  _CPU_Context_switch
923 *
924 *  This routine switches from the run context to the heir context.
925 */
926
927void _CPU_Context_switch(
928  Context_Control  *run,
929  Context_Control  *heir
930);
931
932/*
933 *  _CPU_Context_restore
934 *
935 *  This routine is generally used only to restart self in an
936 *  efficient manner.
937 */
938
939void _CPU_Context_restore(
940  Context_Control *new_context
941);
942
943/*
944 *  _CPU_Context_save_fp
945 *
946 *  This routine saves the floating point context passed to it.
947 */
948
949void _CPU_Context_save_fp(
950  void **fp_context_ptr
951);
952
953/*
954 *  _CPU_Context_restore_fp
955 *
956 *  This routine restores the floating point context passed to it.
957 */
958
959void _CPU_Context_restore_fp(
960  void **fp_context_ptr
961);
962
963/*
964 *  CPU_swap_u32
965 *
966 *  The following routine swaps the endian format of an unsigned int.
967 *  It must be static because it is referenced indirectly.
968 *
969 *  This version will work on any processor, but if you come across a better
970 *  way for the SPARC PLEASE use it.  The most common way to swap a 32-bit
971 *  entity as shown below is not any more efficient on the SPARC.
972 *
973 *     swap least significant two bytes with 16-bit rotate
974 *     swap upper and lower 16-bits
975 *     swap most significant two bytes with 16-bit rotate
976 *
977 *  It is not obvious how the SPARC can do significantly better than the
978 *  generic code.  gcc 2.7.0 only generates about 12 instructions for the
979 *  following code at optimization level four (i.e. -O4).
980 */
981 
static inline unsigned int CPU_swap_u32(
  unsigned int value
)
{
  /*
   *  Reverse the byte order of a 32-bit quantity with the portable
   *  mask-then-shift formulation.  Kept static because it is
   *  referenced indirectly.
   */
  unsigned int upper_half;
  unsigned int lower_half;

  upper_half = ((value & 0x000000ff) << 24) | ((value & 0x0000ff00) << 8);
  lower_half = ((value & 0x00ff0000) >> 8)  | ((value & 0xff000000) >> 24);

  return upper_half | lower_half;
}
996
/*
 *  CPU_swap_u16
 *
 *  Swap the two bytes of a 16-bit quantity.
 *
 *  NOTE:  The argument is fully parenthesized so expression arguments
 *         (e.g. CPU_swap_u16( a | b )) expand correctly.  The argument
 *         must be free of side effects since it is evaluated twice.
 */
#define CPU_swap_u16( value ) \
  ((((value) & 0xff) << 8) | (((value) >> 8) & 0xff))
999
#endif /* ASM */
1001
1002#ifdef __cplusplus
1003}
1004#endif
1005
1006#endif
Note: See TracBrowser for help on using the repository browser.