source: rtems/cpukit/score/cpu/sparc/rtems/score/cpu.h @ 22ed9d0f

Last change on this file since 22ed9d0f was 22ed9d0f, checked in by Joel Sherrill <joel.sherrill@…>, on 04/03/02 at 14:07:10

2001-04-03 Joel Sherrill <joel@…>

  • Per PR94, all rtems/score/CPUtypes.h are named rtems/score/types.h.
  • rtems/score/sparctypes.h: Removed.
  • rtems/score/types.h: New file via CVS magic.
  • Makefile.am, rtems/score/cpu.h: Account for name change.
  • Property mode set to 100644
File size: 28.9 KB
Line 
1/*  cpu.h
2 *
3 *  This include file contains information pertaining to the port of
4 *  the executive to the SPARC processor.
5 *
6 *  COPYRIGHT (c) 1989-1999.
7 *  On-Line Applications Research Corporation (OAR).
8 *
9 *  The license and distribution terms for this file may be
10 *  found in the file LICENSE in this distribution or at
11 *  http://www.OARcorp.com/rtems/license.html.
12 *
13 *  $Id$
14 */
15
16#ifndef __CPU_h
17#define __CPU_h
18
19#ifdef __cplusplus
20extern "C" {
21#endif
22
23#include <rtems/score/sparc.h>               /* pick up machine definitions */
24#ifndef ASM
25#include <rtems/score/types.h>
26#endif
27
28/* conditional compilation parameters */
29
30/*
31 *  Should the calls to _Thread_Enable_dispatch be inlined?
32 *
33 *  If TRUE, then they are inlined.
34 *  If FALSE, then a subroutine call is made.
35 */
36
37#define CPU_INLINE_ENABLE_DISPATCH       TRUE
38
39/*
40 *  Should the body of the search loops in _Thread_queue_Enqueue_priority
41 *  be unrolled one time?  If unrolled, each iteration of the loop examines
42 *  two "nodes" on the chain being searched.  Otherwise, only one node
43 *  is examined per iteration.
44 *
45 *  If TRUE, then the loops are unrolled.
46 *  If FALSE, then the loops are not unrolled.
47 *
48 *  This parameter could go either way on the SPARC.  The interrupt flash
49 *  code is relatively lengthy given the requirements for nops following
50 *  writes to the psr.  But if the clock speed were high enough, this would
51 *  not represent a great deal of time.
52 */
53
54#define CPU_UNROLL_ENQUEUE_PRIORITY      TRUE
55
56/*
57 *  Does the executive manage a dedicated interrupt stack in software?
58 *
59 *  If TRUE, then a stack is allocated in _ISR_Handler_initialization.
60 *  If FALSE, nothing is done.
61 *
62 *  The SPARC does not have a dedicated HW interrupt stack and one has
63 *  been implemented in SW.
64 */
65
66#define CPU_HAS_SOFTWARE_INTERRUPT_STACK   TRUE
67
68/*
69 *  Does this CPU have hardware support for a dedicated interrupt stack?
70 *
71 *  If TRUE, then it must be installed during initialization.
72 *  If FALSE, then no installation is performed.
73 *
74 *  The SPARC does not have a dedicated HW interrupt stack.
75 */
76
77#define CPU_HAS_HARDWARE_INTERRUPT_STACK  FALSE
78
79/*
80 *  Do we allocate a dedicated interrupt stack in the Interrupt Manager?
81 *
82 *  If TRUE, then the memory is allocated during initialization.
83 *  If FALSE, then the memory is not allocated by the executive.
84 */
85
86#define CPU_ALLOCATE_INTERRUPT_STACK      TRUE
87
88/*
89 *  Does RTEMS invoke the user's ISR with the vector number and
90 *  a pointer to the saved interrupt frame (1) or just the vector
91 *  number (0)?
92 */
93
94#define CPU_ISR_PASSES_FRAME_POINTER 0
95
96/*
97 *  Does the CPU have hardware floating point?
98 *
99 *  If TRUE, then the FLOATING_POINT task attribute is supported.
100 *  If FALSE, then the FLOATING_POINT task attribute is ignored.
101 */
102
103#if ( SPARC_HAS_FPU == 1 )
104#define CPU_HARDWARE_FP     TRUE
105#else
106#define CPU_HARDWARE_FP     FALSE
107#endif
108#define CPU_SOFTWARE_FP     FALSE
109
110/*
111 *  Are all tasks FLOATING_POINT tasks implicitly?
112 *
113 *  If TRUE, then the FLOATING_POINT task attribute is assumed.
114 *  If FALSE, then the FLOATING_POINT task attribute is followed.
115 */
116
117#define CPU_ALL_TASKS_ARE_FP     FALSE
118
119/*
120 *  Should the IDLE task have a floating point context?
121 *
122 *  If TRUE, then the IDLE task is created as a FLOATING_POINT task
123 *  and it has a floating point context which is switched in and out.
124 *  If FALSE, then the IDLE task does not have a floating point context.
125 */
126
127#define CPU_IDLE_TASK_IS_FP      FALSE
128
129/*
130 *  Should the saving of the floating point registers be deferred
131 *  until a context switch is made to another different floating point
132 *  task?
133 *
134 *  If TRUE, then the floating point context will not be stored until
135 *  necessary.  It will remain in the floating point registers and not
136 *  disturbed until another floating point task is switched to.
137 *
138 *  If FALSE, then the floating point context is saved when a floating
139 *  point task is switched out and restored when the next floating point
140 *  task is restored.  The state of the floating point registers between
141 *  those two operations is not specified.
142 */
143
144#define CPU_USE_DEFERRED_FP_SWITCH       TRUE
145
146/*
147 *  Does this port provide a CPU dependent IDLE task implementation?
148 *
149 *  If TRUE, then the routine _CPU_Thread_Idle_body
150 *  must be provided and is the default IDLE thread body instead of
151 *  _Thread_Idle_body.
152 *
153 *  If FALSE, then use the generic IDLE thread body if the BSP does
154 *  not provide one.
155 */
156
157#define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE
158
159/*
160 *  Does the stack grow up (toward higher addresses) or down
161 *  (toward lower addresses)?
162 *
163 *  If TRUE, then the stack grows upward.
164 *  If FALSE, then the stack grows toward smaller addresses.
165 *
166 *  The stack grows to lower addresses on the SPARC.
167 */
168
169#define CPU_STACK_GROWS_UP               FALSE
170
171/*
172 *  The following is the variable attribute used to force alignment
173 *  of critical data structures.  On some processors it may make
174 *  sense to have these aligned on tighter boundaries than
175 *  the minimum requirements of the compiler in order to have as
176 *  much of the critical data area as possible in a cache line.
177 *
178 *  The SPARC does not appear to have particularly strict alignment
179 *  requirements.  This value was chosen to take advantage of caches.
180 */
181
182#define CPU_STRUCTURE_ALIGNMENT          __attribute__ ((aligned (16)))
183
184/*
185 *  Define what is required to specify how the network to host conversion
186 *  routines are handled.
187 */
188
189#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES     FALSE
190#define CPU_BIG_ENDIAN                           TRUE
191#define CPU_LITTLE_ENDIAN                        FALSE
192
193/*
194 *  The following defines the number of bits actually used in the
195 *  interrupt field of the task mode.  How those bits map to the
196 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
197 *
198 *  The SPARC has 16 interrupt levels in the PIL field of the PSR.
199 */
200
201#define CPU_MODES_INTERRUPT_MASK   0x0000000F
202
203/*
204 *  This structure represents the organization of the minimum stack frame
205 *  for the SPARC.  More framing information is required in certain situations
206 *  such as when there are a large number of out parameters or when the callee
207 *  must save floating point registers.
208 */
209
210#ifndef ASM
211
212typedef struct {
213  unsigned32  l0;
214  unsigned32  l1;
215  unsigned32  l2;
216  unsigned32  l3;
217  unsigned32  l4;
218  unsigned32  l5;
219  unsigned32  l6;
220  unsigned32  l7;
221  unsigned32  i0;
222  unsigned32  i1;
223  unsigned32  i2;
224  unsigned32  i3;
225  unsigned32  i4;
226  unsigned32  i5;
227  unsigned32  i6_fp;
228  unsigned32  i7;
229  void       *structure_return_address;
230  /*
231   *  The following are for the callee to save the register arguments in
232   *  should this be necessary.
233   */
234  unsigned32  saved_arg0;
235  unsigned32  saved_arg1;
236  unsigned32  saved_arg2;
237  unsigned32  saved_arg3;
238  unsigned32  saved_arg4;
239  unsigned32  saved_arg5;
240  unsigned32  pad0;
241}  CPU_Minimum_stack_frame;
242
243#endif /* ASM */
244
245#define CPU_STACK_FRAME_L0_OFFSET             0x00
246#define CPU_STACK_FRAME_L1_OFFSET             0x04
247#define CPU_STACK_FRAME_L2_OFFSET             0x08
248#define CPU_STACK_FRAME_L3_OFFSET             0x0c
249#define CPU_STACK_FRAME_L4_OFFSET             0x10
250#define CPU_STACK_FRAME_L5_OFFSET             0x14
251#define CPU_STACK_FRAME_L6_OFFSET             0x18
252#define CPU_STACK_FRAME_L7_OFFSET             0x1c
253#define CPU_STACK_FRAME_I0_OFFSET             0x20
254#define CPU_STACK_FRAME_I1_OFFSET             0x24
255#define CPU_STACK_FRAME_I2_OFFSET             0x28
256#define CPU_STACK_FRAME_I3_OFFSET             0x2c
257#define CPU_STACK_FRAME_I4_OFFSET             0x30
258#define CPU_STACK_FRAME_I5_OFFSET             0x34
259#define CPU_STACK_FRAME_I6_FP_OFFSET          0x38
260#define CPU_STACK_FRAME_I7_OFFSET             0x3c
261#define CPU_STRUCTURE_RETURN_ADDRESS_OFFSET   0x40
262#define CPU_STACK_FRAME_SAVED_ARG0_OFFSET     0x44
263#define CPU_STACK_FRAME_SAVED_ARG1_OFFSET     0x48
264#define CPU_STACK_FRAME_SAVED_ARG2_OFFSET     0x4c
265#define CPU_STACK_FRAME_SAVED_ARG3_OFFSET     0x50
266#define CPU_STACK_FRAME_SAVED_ARG4_OFFSET     0x54
267#define CPU_STACK_FRAME_SAVED_ARG5_OFFSET     0x58
268#define CPU_STACK_FRAME_PAD0_OFFSET           0x5c
269
270#define CPU_MINIMUM_STACK_FRAME_SIZE          0x60
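
#ifndef ASM
/*
 *  Illustrative sketch, not part of the port: the offsets above are hand
 *  maintained for the assembly code, so a quick check that they agree with
 *  the C structure is cheap insurance.  The helper name below is
 *  hypothetical; it returns TRUE when the constants match the layout.
 */
#include <stddef.h>   /* offsetof -- assumed acceptable for this sketch */

static inline boolean _Example_Minimum_stack_frame_offsets_ok( void )
{
  return ( offsetof( CPU_Minimum_stack_frame, l0 )    == CPU_STACK_FRAME_L0_OFFSET    &&
           offsetof( CPU_Minimum_stack_frame, i6_fp ) == CPU_STACK_FRAME_I6_FP_OFFSET &&
           offsetof( CPU_Minimum_stack_frame, structure_return_address )
                                                       == CPU_STRUCTURE_RETURN_ADDRESS_OFFSET &&
           sizeof( CPU_Minimum_stack_frame )          == CPU_MINIMUM_STACK_FRAME_SIZE );
}
#endif /* ASM */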
271
272/*
273 * Contexts
274 *
275 *  Generally there are 2 types of context to save.
276 *     1. Interrupt registers to save
277 *     2. Task level registers to save
278 *
279 *  This means we have the following 3 context items:
280 *     1. task level context stuff::  Context_Control
281 *     2. floating point task stuff:: Context_Control_fp
282 *     3. special interrupt level context :: Context_Control_interrupt
283 *
284 *  On the SPARC, we are relatively conservative in that we save most
285 *  of the CPU state in the context area.  The ET (enable trap) bit and
286 *  the CWP (current window pointer) fields of the PSR are considered
287 *  system wide resources and are not maintained on a per-thread basis.
288 */
289
290#ifndef ASM
291
292typedef struct {
293    /*
294     *  Using a double g0_g1 will put everything in this structure on a
295     *  double word boundary which allows us to use double word loads
296     *  and stores safely in the context switch.
297     */
298    double     g0_g1;
299    unsigned32 g2;
300    unsigned32 g3;
301    unsigned32 g4;
302    unsigned32 g5;
303    unsigned32 g6;
304    unsigned32 g7;
305
306    unsigned32 l0;
307    unsigned32 l1;
308    unsigned32 l2;
309    unsigned32 l3;
310    unsigned32 l4;
311    unsigned32 l5;
312    unsigned32 l6;
313    unsigned32 l7;
314
315    unsigned32 i0;
316    unsigned32 i1;
317    unsigned32 i2;
318    unsigned32 i3;
319    unsigned32 i4;
320    unsigned32 i5;
321    unsigned32 i6_fp;
322    unsigned32 i7;
323
324    unsigned32 o0;
325    unsigned32 o1;
326    unsigned32 o2;
327    unsigned32 o3;
328    unsigned32 o4;
329    unsigned32 o5;
330    unsigned32 o6_sp;
331    unsigned32 o7;
332
333    unsigned32 psr;
334} Context_Control;
335
336#endif /* ASM */
337
338/*
339 *  Offsets of fields within Context_Control for assembly routines.
340 */
341
342#define G0_OFFSET    0x00
343#define G1_OFFSET    0x04
344#define G2_OFFSET    0x08
345#define G3_OFFSET    0x0C
346#define G4_OFFSET    0x10
347#define G5_OFFSET    0x14
348#define G6_OFFSET    0x18
349#define G7_OFFSET    0x1C
350
351#define L0_OFFSET    0x20
352#define L1_OFFSET    0x24
353#define L2_OFFSET    0x28
354#define L3_OFFSET    0x2C
355#define L4_OFFSET    0x30
356#define L5_OFFSET    0x34
357#define L6_OFFSET    0x38
358#define L7_OFFSET    0x3C
359
360#define I0_OFFSET    0x40
361#define I1_OFFSET    0x44
362#define I2_OFFSET    0x48
363#define I3_OFFSET    0x4C
364#define I4_OFFSET    0x50
365#define I5_OFFSET    0x54
366#define I6_FP_OFFSET 0x58
367#define I7_OFFSET    0x5C
368
369#define O0_OFFSET    0x60
370#define O1_OFFSET    0x64
371#define O2_OFFSET    0x68
372#define O3_OFFSET    0x6C
373#define O4_OFFSET    0x70
374#define O5_OFFSET    0x74
375#define O6_SP_OFFSET 0x78
376#define O7_OFFSET    0x7C
377
378#define PSR_OFFSET   0x80
379
380#define CONTEXT_CONTROL_SIZE 0x84
381
382/*
383 *  The floating point context area.
384 */
385
386#ifndef ASM
387
388typedef struct {
389    double      f0_f1;
390    double      f2_f3;
391    double      f4_f5;
392    double      f6_f7;
393    double      f8_f9;
394    double      f10_f11;
395    double      f12_f13;
396    double      f14_f15;
397    double      f16_f17;
398    double      f18_f19;
399    double      f20_f21;
400    double      f22_f23;
401    double      f24_f25;
402    double      f26_f27;
403    double      f28_f29;
404    double      f30_f31;
405    unsigned32  fsr;
406} Context_Control_fp;
407
408#endif /* ASM */
409
410/*
411 *  Offsets of fields within Context_Control_fp for assembly routines.
412 */
413
414#define FO_F1_OFFSET     0x00
415#define F2_F3_OFFSET     0x08
416#define F4_F5_OFFSET     0x10
417#define F6_F7_OFFSET     0x18
418#define F8_F9_OFFSET     0x20
419#define F1O_F11_OFFSET   0x28
420#define F12_F13_OFFSET   0x30
421#define F14_F15_OFFSET   0x38
422#define F16_F17_OFFSET   0x40
423#define F18_F19_OFFSET   0x48
424#define F2O_F21_OFFSET   0x50
425#define F22_F23_OFFSET   0x58
426#define F24_F25_OFFSET   0x60
427#define F26_F27_OFFSET   0x68
428#define F28_F29_OFFSET   0x70
429#define F3O_F31_OFFSET   0x78
430#define FSR_OFFSET       0x80
431
432#define CONTEXT_CONTROL_FP_SIZE 0x84
433
434#ifndef ASM
435
436/*
437 *  Context saved on stack for an interrupt.
438 *
439 *  NOTE:  The PSR, PC, and NPC are only saved in this structure for the
440 *         benefit of the user's handler.
441 */
442
443typedef struct {
444  CPU_Minimum_stack_frame  Stack_frame;
445  unsigned32               psr;
446  unsigned32               pc;
447  unsigned32               npc;
448  unsigned32               g1;
449  unsigned32               g2;
450  unsigned32               g3;
451  unsigned32               g4;
452  unsigned32               g5;
453  unsigned32               g6;
454  unsigned32               g7;
455  unsigned32               i0;
456  unsigned32               i1;
457  unsigned32               i2;
458  unsigned32               i3;
459  unsigned32               i4;
460  unsigned32               i5;
461  unsigned32               i6_fp;
462  unsigned32               i7;
463  unsigned32               y;
464  unsigned32               tpc;
465} CPU_Interrupt_frame;
466
467#endif /* ASM */
468
469/*
470 *  Offsets of fields within CPU_Interrupt_frame for assembly routines.
471 */
472
473#define ISF_STACK_FRAME_OFFSET 0x00
474#define ISF_PSR_OFFSET         CPU_MINIMUM_STACK_FRAME_SIZE + 0x00
475#define ISF_PC_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x04
476#define ISF_NPC_OFFSET         CPU_MINIMUM_STACK_FRAME_SIZE + 0x08
477#define ISF_G1_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x0c
478#define ISF_G2_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x10
479#define ISF_G3_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x14
480#define ISF_G4_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x18
481#define ISF_G5_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x1c
482#define ISF_G6_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x20
483#define ISF_G7_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x24
484#define ISF_I0_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x28
485#define ISF_I1_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x2c
486#define ISF_I2_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x30
487#define ISF_I3_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x34
488#define ISF_I4_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x38
489#define ISF_I5_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x3c
490#define ISF_I6_FP_OFFSET       CPU_MINIMUM_STACK_FRAME_SIZE + 0x40
491#define ISF_I7_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x44
492#define ISF_Y_OFFSET           CPU_MINIMUM_STACK_FRAME_SIZE + 0x48
493#define ISF_TPC_OFFSET         CPU_MINIMUM_STACK_FRAME_SIZE + 0x4c
494
495#define CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE CPU_MINIMUM_STACK_FRAME_SIZE + 0x50
496#ifndef ASM
497
498/*
499 *  The following table contains the information required to configure
500 *  the processor specific parameters.
501 */
502
503typedef struct {
504  void       (*pretasking_hook)( void );
505  void       (*predriver_hook)( void );
506  void       (*postdriver_hook)( void );
507  void       (*idle_task)( void );
508  boolean      do_zero_of_workspace;
509  unsigned32   idle_task_stack_size;
510  unsigned32   interrupt_stack_size;
511  unsigned32   extra_mpci_receive_server_stack;
512  void *     (*stack_allocate_hook)( unsigned32 );
513  void       (*stack_free_hook)( void* );
514  /* end of fields required on all CPUs */
515
516}   rtems_cpu_table;
517
518/*
519 *  Macros to access required entries in the CPU Table are in
520 *  the file rtems/system.h.
521 */
522
523/*
524 *  Macros to access SPARC specific additions to the CPU Table
525 */
526
527/* There are no CPU specific additions to the CPU Table for this port. */
528
529/*
530 *  This variable contains the initial context for the FP unit.
531 *  It is filled in by _CPU_Initialize and copied into the task's FP
532 *  context area during _CPU_Context_Initialize.
533 */
534
535SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context CPU_STRUCTURE_ALIGNMENT;
536
537/*
538 *  This stack is allocated by the Interrupt Manager and the switch
539 *  is performed in _ISR_Handler.  These variables contain pointers
540 *  to the lowest and highest addresses in the chunk of memory allocated
541 *  for the interrupt stack.  Since it is unknown whether the stack
542 *  grows up or down (in general), this gives the CPU dependent
543 *  code the option of picking the version it wants to use.  Thus
544 *  both must be present if either is.
545 *
546 *  The SPARC supports a software based interrupt stack and these
547 *  are required.
548 */
549
550SCORE_EXTERN void *_CPU_Interrupt_stack_low;
551SCORE_EXTERN void *_CPU_Interrupt_stack_high;
552
553/*
554 *  The following type defines an entry in the SPARC's trap table.
555 *
556 *  NOTE: The instructions chosen are RTEMS dependent although one is
557 *        obligated to use two of the four instructions to perform a
558 *        long jump.  The other instructions load one register with the
559 *        trap type (a.k.a. vector) and another with the psr.
560 */
561 
562typedef struct {
563  unsigned32   mov_psr_l0;                     /* mov   %psr, %l0           */
564  unsigned32   sethi_of_handler_to_l4;         /* sethi %hi(_handler), %l4  */
565  unsigned32   jmp_to_low_of_handler_plus_l4;  /* jmp   %l4 + %lo(_handler) */
566  unsigned32   mov_vector_l3;                  /* mov   _vector, %l3        */
567} CPU_Trap_table_entry;
568 
569/*
570 *  This is the set of opcodes for the instructions loaded into a trap
571 *  table entry.  The routine which installs a handler is responsible
572 *  for filling in the fields for the _handler address and the _vector
573 *  trap type.
574 *
575 *  The constants following this structure are masks for the fields which
576 *  must be filled in when the handler is installed.
577 */
578 
579extern const CPU_Trap_table_entry _CPU_Trap_slot_template;
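
/*
 *  Illustrative sketch, not part of the port: how a raw handler could be
 *  planted into a trap table slot by copying the template above and then
 *  OR-ing the handler address and trap type into the immediate fields.
 *  It assumes those fields are zero in the template, follows the SPARC
 *  V7/V8 encodings (22-bit sethi immediate, 13-bit jmp/or immediate), and
 *  omits the instruction cache handling the real install routine must do.
 *  The helper name is hypothetical.
 */
static inline void _Example_fill_trap_slot(
  CPU_Trap_table_entry *slot,       /* slot in the trap table          */
  proc_ptr              handler,    /* raw trap handler to dispatch to */
  unsigned32            real_trap   /* hardware trap number (0-255)    */
)
{
  unsigned32 address = (unsigned32) handler;

  *slot = _CPU_Trap_slot_template;

  slot->sethi_of_handler_to_l4        |= (address >> 10) & 0x003fffff;
  slot->jmp_to_low_of_handler_plus_l4 |= address & 0x000003ff;
  slot->mov_vector_l3                 |= real_trap & 0x000000ff;
}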
580
581/*
582 *  This is the executive's trap table which is installed into the TBR
583 *  register.
584 *
585 *  NOTE:  Unfortunately, this must be aligned on a 4096 byte boundary.
586 *         The GNU tools as of binutils 2.5.2 and gcc 2.7.0 would not
587 *         align an entity to anything greater than a 512 byte boundary.
588 *
589 *         Because of this, we pull a little bit of a trick.  We allocate
590 *         enough memory so we can grab an address on a 4096 byte boundary
591 *         from this area.
592 */
593 
594#define SPARC_TRAP_TABLE_ALIGNMENT 4096
595 
596#ifndef NO_TABLE_MOVE
597
598SCORE_EXTERN unsigned8 _CPU_Trap_Table_area[ 8192 ]
599           __attribute__ ((aligned (SPARC_TRAP_TABLE_ALIGNMENT)));
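
/*
 *  Illustrative sketch, not part of the port: the "trick" described above.
 *  Should the toolchain ignore the alignment attribute, a properly aligned
 *  table start can still be carved out of the oversized area by rounding
 *  its address up to the next 4096 byte boundary.  The helper name is
 *  hypothetical.
 */
static inline CPU_Trap_table_entry *_Example_aligned_trap_table( void )
{
  unsigned32 area = (unsigned32) _CPU_Trap_Table_area;

  area += SPARC_TRAP_TABLE_ALIGNMENT - 1;
  area &= ~(unsigned32) (SPARC_TRAP_TABLE_ALIGNMENT - 1);

  return (CPU_Trap_table_entry *) area;
}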
600#endif
601 
602
603/*
604 *  The size of the floating point context area. 
605 */
606
607#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
608
609#endif
610
611/*
612 *  Amount of extra stack (above minimum stack size) required by
613 *  MPCI receive server thread.  Remember that in a multiprocessor
614 *  system this thread must exist and be able to process all directives.
615 */
616
617#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 1024
618
619/*
620 *  This defines the number of entries in the ISR_Vector_table managed
621 *  by the executive.
622 *
623 *  On the SPARC, there are really only 256 vectors.  However, the executive
624 *  has no easy, fast, reliable way to determine which traps are synchronous
625 *  and which are asynchronous.  By default, synchronous traps return to the
626 *  instruction which caused the interrupt.  So if you install a software
627 *  trap handler as an executive interrupt handler (which is desirable since
628 *  RTEMS takes care of window and register issues), then the executive needs
629 *  to know that the return address is to the trap rather than the instruction
630 *  following the trap.
631 *
632 *  So vectors 0 through 255 are treated as regular asynchronous traps which
633 *  provide the "correct" return address.  Vectors 256 through 511 are assumed
634 *  by the executive to be synchronous and to require that the return address
635 *  be fudged.
636 *
637 *  If you use this mechanism to install a trap handler which must reexecute
638 *  the instruction which caused the trap, then it should be installed as
639 *  an asynchronous trap.  This will avoid the executive changing the return
640 *  address.
641 */
642
643#define CPU_INTERRUPT_NUMBER_OF_VECTORS     256
644#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER 511
645
646#define SPARC_SYNCHRONOUS_TRAP_BIT_MASK     0x100
647#define SPARC_ASYNCHRONOUS_TRAP( _trap )    (_trap)
648#define SPARC_SYNCHRONOUS_TRAP( _trap )     ((_trap) + 256 )
649
650#define SPARC_REAL_TRAP_NUMBER( _trap )     ((_trap) % 256)
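
/*
 *  Illustrative sketch (names hypothetical, not part of the port): choosing
 *  the executive vector number for hardware trap 0x89, i.e. the software
 *  trap entered by "ta 9".  Installing the synchronous form tells the
 *  executive to adjust the return address so the handler returns to the
 *  instruction after the trap.
 */
#define EXAMPLE_HW_TRAP       0x89
#define EXAMPLE_SYNC_VECTOR   SPARC_SYNCHRONOUS_TRAP( EXAMPLE_HW_TRAP )   /* 0x189 */
#define EXAMPLE_ASYNC_VECTOR  SPARC_ASYNCHRONOUS_TRAP( EXAMPLE_HW_TRAP )  /* 0x089 */
/*  SPARC_REAL_TRAP_NUMBER() recovers 0x89 from either vector number.  */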
651
652/*
653 *  This is defined if the port has a special way to report the ISR nesting
654 *  level.  Most ports maintain the variable _ISR_Nest_level.
655 */
656
657#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
658
659/*
660 *  Should be large enough to run all tests.  This ensures
661 *  that a "reasonable" small application should not have any problems.
662 *
663 *  This appears to be a fairly generous number for the SPARC since
664 *  it represents a call depth of about 20 routines based on the minimum
665 *  stack frame.
666 */
667
668#define CPU_STACK_MINIMUM_SIZE  (1024*4)
669
670/*
671 *  CPU's worst alignment requirement for data types on a byte boundary.  This
672 *  alignment does not take into account the requirements for the stack.
673 *
674 *  On the SPARC, this is required for double word loads and stores.
675 */
676
677#define CPU_ALIGNMENT      8
678
679/*
680 *  This number corresponds to the byte alignment requirement for the
681 *  heap handler.  This alignment requirement may be stricter than that
682 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
683 *  common for the heap to follow the same alignment requirement as
684 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
685 *  then this should be set to CPU_ALIGNMENT.
686 *
687 *  NOTE:  This does not have to be a power of 2.  It does have to
688 *         be greater than or equal to CPU_ALIGNMENT.
689 */
690
691#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT
692
693/*
694 *  This number corresponds to the byte alignment requirement for memory
695 *  buffers allocated by the partition manager.  This alignment requirement
696 *  may be stricter than that for the data types alignment specified by
697 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
698 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
699 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
700 *
701 *  NOTE:  This does not have to be a power of 2.  It does have to
702 *         be greater than or equal to CPU_ALIGNMENT.
703 */
704
705#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT
706
707/*
708 *  This number corresponds to the byte alignment requirement for the
709 *  stack.  This alignment requirement may be stricter than that for the
710 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
711 *  is strict enough for the stack, then this should be set to 0.
712 *
713 *  NOTE:  This must be a power of 2, either 0 or greater than CPU_ALIGNMENT.
714 *
715 *  The alignment restrictions for the SPARC are not that strict but this
716 *  should ensure that the stack is always sufficiently aligned that the
717 *  window overflow, underflow, and flush routines can use double word loads
718 *  and stores.
719 */
720
721#define CPU_STACK_ALIGNMENT        16
722
723#ifndef ASM
724
725extern unsigned int sparc_disable_interrupts();
726extern void sparc_enable_interrupts();
727
728/*
729 *  ISR handler macros
730 */
731
732/*
733 *  Support routine to initialize the RTEMS vector table after it is allocated.
734 */
735
736#define _CPU_Initialize_vectors()
737
738/*
739 *  Disable all interrupts for a critical section.  The previous
740 *  level is returned in _level.
741 */
742
743#define _CPU_ISR_Disable( _level ) \
744  (_level) = sparc_disable_interrupts()
745 
746/*
747 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
748 *  This indicates the end of a critical section.  The parameter
749 *  _level is not modified.
750 */
751
752#define _CPU_ISR_Enable( _level ) \
753  sparc_enable_interrupts( _level )
754/*
755 *  This temporarily restores interrupts to _level before immediately
756 *  disabling them again.  This is used to divide long critical
757 *  sections into two or more parts.  The parameter _level is not
758 *  modified.
759 */
760
761#define _CPU_ISR_Flash( _level ) \
762  sparc_flash_interrupts( _level )
763 
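/*
 *  Illustrative sketch, not part of the port: the usage pattern the three
 *  macros above are designed for.  A long critical section is entered with
 *  _CPU_ISR_Disable, briefly opened to pending interrupts in the middle
 *  with _CPU_ISR_Flash, and exited with _CPU_ISR_Enable.  The function
 *  name is hypothetical.
 */
static inline void _Example_critical_section( void )
{
  unsigned32 _level;

  _CPU_ISR_Disable( _level );
    /* first part of the critical section */
  _CPU_ISR_Flash( _level );
    /* second part of the critical section */
  _CPU_ISR_Enable( _level );
}
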
764/*
765 *  Map interrupt level in task mode onto the hardware that the CPU
766 *  actually provides.  Currently, interrupt levels which do not
767 *  map onto the CPU in a straight fashion are undefined. 
768 */
769
770#define _CPU_ISR_Set_level( _newlevel ) \
771   sparc_enable_interrupts( _newlevel << 8)
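
/*
 *  Note (sketch): the shift by 8 above places the 4-bit level into the PIL
 *  field of the PSR (bits 8 through 11).  For example, a task mode level of
 *  15 masks interrupt levels 1 through 14 (level 15 is non-maskable), while
 *  a level of 0 leaves all maskable interrupts enabled.
 */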
772 
773unsigned32 _CPU_ISR_Get_level( void );
774 
775/* end of ISR handler macros */
776
777/* Context handler macros */
778
779/*
780 *  Initialize the context to a state suitable for starting a
781 *  task after a context restore operation.  Generally, this
782 *  involves:
783 *
784 *     - setting a starting address
785 *     - preparing the stack
786 *     - preparing the stack and frame pointers
787 *     - setting the proper interrupt level in the context
788 *     - initializing the floating point context
789 *
790 *  NOTE:  Implemented as a subroutine for the SPARC port.
791 */
792
793void _CPU_Context_Initialize(
794  Context_Control  *the_context,
795  unsigned32       *stack_base,
796  unsigned32        size,
797  unsigned32        new_level,
798  void             *entry_point,
799  boolean           is_fp
800);
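
/*
 *  Illustrative sketch, not part of the port: a typical call.  Every name
 *  below is hypothetical; the executive normally issues this call from its
 *  generic thread start-up code.
 */
static inline void _Example_prepare_task_context(
  Context_Control  *context,
  unsigned32       *stack_area,
  unsigned32        stack_size,
  void            (*entry)( void )
)
{
  _CPU_Context_Initialize(
    context,            /* context area to fill in                   */
    stack_area,         /* lowest address of the task's stack        */
    stack_size,         /* size of that stack in bytes               */
    0,                  /* starting interrupt level (all enabled)    */
    (void *) entry,     /* address where execution begins            */
    FALSE               /* task does not use the floating point unit */
  );
}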
801
802/*
803 *  This routine is responsible for somehow restarting the currently
804 *  executing task. 
805 *
806 *  On the SPARC, this is relatively painless but requires a small
807 *  amount of wrapper code before using the regular restore code of
808 *  the context switch.
809 */
810
811#define _CPU_Context_Restart_self( _the_context ) \
812   _CPU_Context_restore( (_the_context) );
813
814/*
815 *  The FP context area for the SPARC is a simple structure and nothing
816 *  special is required to find the "starting load point"
817 */
818
819#define _CPU_Context_Fp_start( _base, _offset ) \
820   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
821
822/*
823 *  This routine initializes the FP context area passed to it.
824 *
825 *  The SPARC allows us to use the simple initialization model
826 *  in which an "initial" FP context was saved into _CPU_Null_fp_context
827 *  at CPU initialization and it is simply copied into the destination
828 *  context.
829 */
830
831#define _CPU_Context_Initialize_fp( _destination ) \
832  do { \
833   *((Context_Control_fp *) *((void **) _destination)) = _CPU_Null_fp_context; \
834  } while (0)
835
836/* end of Context handler macros */
837
838/* Fatal Error manager macros */
839
840/*
841 *  This routine copies _error into a known place -- typically a stack
842 *  location or a register, optionally disables interrupts, and
843 *  halts/stops the CPU.
844 */
845
846#define _CPU_Fatal_halt( _error ) \
847  do { \
848    unsigned32 level; \
849    \
850    level = sparc_disable_interrupts(); \
851    asm volatile ( "mov  %0, %%g1 " : "=r" (level) : "0" (level) ); \
852    while (1); /* loop forever */ \
853  } while (0)
854
855/* end of Fatal Error manager macros */
856
857/* Bitfield handler macros */
858
859/*
860 *  The SPARC port uses the generic C algorithm for bitfield scan if the
861 *  CPU model does not have a scan instruction.
862 */
863
864#if ( SPARC_HAS_BITSCAN == 0 )
865#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
866#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
867#else
868#error "scan instruction not currently supported by RTEMS!!"
869#endif
870
871/* end of Bitfield handler macros */
872
873/* Priority handler macros */
874
875/*
876 *  The SPARC port uses the generic C algorithm for bitfield scan if the
877 *  CPU model does not have a scan instruction.
878 */
879
880#if ( SPARC_HAS_BITSCAN == 1 )
881#error "scan instruction not currently supported by RTEMS!!"
882#endif
883
884/* end of Priority handler macros */
885
886/* functions */
887
888/*
889 *  _CPU_Initialize
890 *
891 *  This routine performs CPU dependent initialization.
892 */
893
894void _CPU_Initialize(
895  rtems_cpu_table  *cpu_table,
896  void            (*thread_dispatch)
897);
898
899/*
900 *  _CPU_ISR_install_raw_handler
901 *
902 *  This routine installs new_handler to be directly called from the trap
903 *  table.
904 */
905 
906void _CPU_ISR_install_raw_handler(
907  unsigned32  vector,
908  proc_ptr    new_handler,
909  proc_ptr   *old_handler
910);
911
912/*
913 *  _CPU_ISR_install_vector
914 *
915 *  This routine installs an interrupt vector.
916 */
917
918void _CPU_ISR_install_vector(
919  unsigned32  vector,
920  proc_ptr    new_handler,
921  proc_ptr   *old_handler
922);
923
924#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)
925 
926/*
927 *  _CPU_Thread_Idle_body
928 *
929 *  Some SPARC implementations have low power, sleep, or idle modes.  This
930 *  tries to take advantage of those modes.
931 */
932 
933void _CPU_Thread_Idle_body( void );
934 
935#endif /* CPU_PROVIDES_IDLE_THREAD_BODY */
936
937/*
938 *  _CPU_Context_switch
939 *
940 *  This routine switches from the run context to the heir context.
941 */
942
943void _CPU_Context_switch(
944  Context_Control  *run,
945  Context_Control  *heir
946);
947
948/*
949 *  _CPU_Context_restore
950 *
951 *  This routine is generally used only to restart self in an
952 *  efficient manner.
953 */
954
955void _CPU_Context_restore(
956  Context_Control *new_context
957);
958
959/*
960 *  _CPU_Context_save_fp
961 *
962 *  This routine saves the floating point context passed to it.
963 */
964
965void _CPU_Context_save_fp(
966  void **fp_context_ptr
967);
968
969/*
970 *  _CPU_Context_restore_fp
971 *
972 *  This routine restores the floating point context passed to it.
973 */
974
975void _CPU_Context_restore_fp(
976  void **fp_context_ptr
977);
978
979/*
980 *  CPU_swap_u32
981 *
982 *  The following routine swaps the endian format of an unsigned int.
983 *  It must be static because it is referenced indirectly.
984 *
985 *  This version will work on any processor, but if you come across a better
986 *  way for the SPARC PLEASE use it.  The most common way to swap a 32-bit
987 *  entity as shown below is not any more efficient on the SPARC.
988 *
989 *     swap least significant two bytes with 16-bit rotate
990 *     swap upper and lower 16-bits
991 *     swap most significant two bytes with 16-bit rotate
992 *
993 *  It is not obvious how the SPARC can do significantly better than the
994 *  generic code.  gcc 2.7.0 only generates about 12 instructions for the
995 *  following code at optimization level four (i.e. -O4).
996 */
997 
998static inline unsigned int CPU_swap_u32(
999  unsigned int value
1000)
1001{
1002  unsigned32 byte1, byte2, byte3, byte4, swapped;
1003 
1004  byte4 = (value >> 24) & 0xff;
1005  byte3 = (value >> 16) & 0xff;
1006  byte2 = (value >> 8)  & 0xff;
1007  byte1 =  value        & 0xff;
1008 
1009  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
1010  return( swapped );
1011}
1012
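/*
 *  Swaps the two bytes of a 16-bit value.  Note that, as a macro, the
 *  argument is evaluated twice and should therefore be free of side effects.
 */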
1013#define CPU_swap_u16( value ) \
1014  (((value&0xff) << 8) | ((value >> 8)&0xff))
1015
1016#endif /* ASM */
1017
1018#ifdef __cplusplus
1019}
1020#endif
1021
1022#endif