source: rtems/cpukit/score/cpu/sparc/rtems/score/cpu.h @ 20385957

Last change on this file since 20385957 was 20385957, checked in by Joel Sherrill <joel.sherrill@…>, on 01/03/00 at 14:01:02

Increased minimum stack size so all tests will run.

1/*  cpu.h
2 *
3 *  This include file contains information pertaining to the port of
4 *  the executive to the SPARC processor.
5 *
6 *  COPYRIGHT (c) 1989-1999.
7 *  On-Line Applications Research Corporation (OAR).
8 *
9 *  The license and distribution terms for this file may be
10 *  found in the file LICENSE in this distribution or at
11 *  http://www.OARcorp.com/rtems/license.html.
12 *
13 *  Ported to ERC32 implementation of the SPARC by On-Line Applications
14 *  Research Corporation (OAR) under contract to the European Space
15 *  Agency (ESA).
16 *
17 *  ERC32 modifications of respective RTEMS file: COPYRIGHT (c) 1995.
18 *  European Space Agency.
19 *
20 *  $Id$
21 */
22
23#ifndef __CPU_h
24#define __CPU_h
25
26#ifdef __cplusplus
27extern "C" {
28#endif
29
30#include <rtems/score/sparc.h>               /* pick up machine definitions */
31#ifndef ASM
32#include <rtems/score/sparctypes.h>
33#endif
34
35/* conditional compilation parameters */
36
37/*
38 *  Should the calls to _Thread_Enable_dispatch be inlined?
39 *
40 *  If TRUE, then they are inlined.
41 *  If FALSE, then a subroutine call is made.
42 */
43
44#define CPU_INLINE_ENABLE_DISPATCH       TRUE
45
46/*
47 *  Should the body of the search loops in _Thread_queue_Enqueue_priority
48 *  be unrolled one time?  If unrolled, each iteration of the loop examines
49 *  two "nodes" on the chain being searched.  Otherwise, only one node
50 *  is examined per iteration.
51 *
52 *  If TRUE, then the loops are unrolled.
53 *  If FALSE, then the loops are not unrolled.
54 *
55 *  This parameter could go either way on the SPARC.  The interrupt flash
56 *  code is relatively lengthy given the requirements for nops following
57 *  writes to the psr.  But if the clock speed were high enough, this would
58 *  not represent a great deal of time.
59 */
60
61#define CPU_UNROLL_ENQUEUE_PRIORITY      TRUE
62
63/*
64 *  Does the executive manage a dedicated interrupt stack in software?
65 *
66 *  If TRUE, then a stack is allocated in _Interrupt_Manager_initialization.
67 *  If FALSE, nothing is done.
68 *
69 *  The SPARC does not have a dedicated HW interrupt stack and one has
70 *  been implemented in SW.
71 */
72
73#define CPU_HAS_SOFTWARE_INTERRUPT_STACK   TRUE
74
75/*
76 *  Does this CPU have hardware support for a dedicated interrupt stack?
77 *
78 *  If TRUE, then it must be installed during initialization.
79 *  If FALSE, then no installation is performed.
80 *
81 *  The SPARC does not have a dedicated HW interrupt stack.
82 */
83
84#define CPU_HAS_HARDWARE_INTERRUPT_STACK  FALSE
85
86/*
87 *  Do we allocate a dedicated interrupt stack in the Interrupt Manager?
88 *
89 *  If TRUE, then the memory is allocated during initialization.
90 *  If FALSE, then the memory is not allocated during initialization.
91 */
92
93#define CPU_ALLOCATE_INTERRUPT_STACK      TRUE
94
95/*
96 *  Does RTEMS invoke the user's ISR with the vector number and
97 *  a pointer to the saved interrupt frame (1) or just the vector
98 *  number (0)?
99 */
100
101#define CPU_ISR_PASSES_FRAME_POINTER 0
102
103/*
104 *  Does the CPU have hardware floating point?
105 *
106 *  If TRUE, then the FLOATING_POINT task attribute is supported.
107 *  If FALSE, then the FLOATING_POINT task attribute is ignored.
108 */
109
110#if ( SPARC_HAS_FPU == 1 )
111#define CPU_HARDWARE_FP     TRUE
112#else
113#define CPU_HARDWARE_FP     FALSE
114#endif
115
116/*
117 *  Are all tasks FLOATING_POINT tasks implicitly?
118 *
119 *  If TRUE, then the FLOATING_POINT task attribute is assumed.
120 *  If FALSE, then the FLOATING_POINT task attribute is followed.
121 */
122
123#define CPU_ALL_TASKS_ARE_FP     FALSE
124
125/*
126 *  Should the IDLE task have a floating point context?
127 *
128 *  If TRUE, then the IDLE task is created as a FLOATING_POINT task
129 *  and it has a floating point context which is switched in and out.
130 *  If FALSE, then the IDLE task does not have a floating point context.
131 */
132
133#define CPU_IDLE_TASK_IS_FP      FALSE
134
135/*
136 *  Should the saving of the floating point registers be deferred
137 *  until a context switch is made to a different floating point
138 *  task?
139 *
140 *  If TRUE, then the floating point context will not be stored until
141 *  necessary.  It will remain in the floating point registers and not
142 *  disturbed until another floating point task is switched to.
143 *
144 *  If FALSE, then the floating point context is saved when a floating
145 *  point task is switched out and restored when the next floating point
146 *  task is restored.  The state of the floating point registers between
147 *  those two operations is not specified.
148 */
149
150#define CPU_USE_DEFERRED_FP_SWITCH       TRUE
151
152/*
153 *  Does this port provide a CPU dependent IDLE task implementation?
154 *
155 *  If TRUE, then the routine _CPU_Thread_Idle_body
156 *  must be provided and is the default IDLE thread body instead of
157 *  _Thread_Idle_body.
158 *
159 *  If FALSE, then use the generic IDLE thread body if the BSP does
160 *  not provide one.
161 */
162
163#if (SPARC_HAS_LOW_POWER_MODE == 1)
164#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE
165#else
166#define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE
167#endif
168
169/*
170 *  Does the stack grow up (toward higher addresses) or down
171 *  (toward lower addresses)?
172 *
173 *  If TRUE, then the stack grows upward.
174 *  If FALSE, then the stack grows toward smaller addresses.
175 *
176 *  The stack grows to lower addresses on the SPARC.
177 */
178
179#define CPU_STACK_GROWS_UP               FALSE
180
181/*
182 *  The following is the variable attribute used to force alignment
183 *  of critical data structures.  On some processors it may make
184 *  sense to have these aligned on tighter boundaries than
185 *  the minimum requirements of the compiler in order to have as
186 *  much of the critical data area as possible in a cache line.
187 *
188 *  The SPARC does not appear to have particularly strict alignment
189 *  requirements.  This value was chosen to take advantage of caches.
190 */
191
192#define CPU_STRUCTURE_ALIGNMENT          __attribute__ ((aligned (16)))
193
194/*
195 *  Define what is required to specify how the network to host conversion
196 *  routines are handled.
197 */
198
199#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES     FALSE
200#define CPU_BIG_ENDIAN                           TRUE
201#define CPU_LITTLE_ENDIAN                        FALSE
202
203/*
204 *  The following defines the number of bits actually used in the
205 *  interrupt field of the task mode.  How those bits map to the
206 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
207 *
208 *  The SPARC has 16 interrupt levels in the PIL field of the PSR.
209 */
210
211#define CPU_MODES_INTERRUPT_MASK   0x0000000F
212
213/*
214 *  This structure represents the organization of the minimum stack frame
215 *  for the SPARC.  More framing information is required in certain situations
216 *  such as when there are a large number of out parameters or when the callee
217 *  must save floating point registers.
218 */
219
220#ifndef ASM
221
222typedef struct {
223  unsigned32  l0;
224  unsigned32  l1;
225  unsigned32  l2;
226  unsigned32  l3;
227  unsigned32  l4;
228  unsigned32  l5;
229  unsigned32  l6;
230  unsigned32  l7;
231  unsigned32  i0;
232  unsigned32  i1;
233  unsigned32  i2;
234  unsigned32  i3;
235  unsigned32  i4;
236  unsigned32  i5;
237  unsigned32  i6_fp;
238  unsigned32  i7;
239  void       *structure_return_address;
240  /*
241   *  The following are for the callee to save the register arguments in
242   *  should this be necessary.
243   */
244  unsigned32  saved_arg0;
245  unsigned32  saved_arg1;
246  unsigned32  saved_arg2;
247  unsigned32  saved_arg3;
248  unsigned32  saved_arg4;
249  unsigned32  saved_arg5;
250  unsigned32  pad0;
251}  CPU_Minimum_stack_frame;
252
253#endif /* ASM */
254
255#define CPU_STACK_FRAME_L0_OFFSET             0x00
256#define CPU_STACK_FRAME_L1_OFFSET             0x04
257#define CPU_STACK_FRAME_L2_OFFSET             0x08
258#define CPU_STACK_FRAME_L3_OFFSET             0x0c
259#define CPU_STACK_FRAME_L4_OFFSET             0x10
260#define CPU_STACK_FRAME_L5_OFFSET             0x14
261#define CPU_STACK_FRAME_L6_OFFSET             0x18
262#define CPU_STACK_FRAME_L7_OFFSET             0x1c
263#define CPU_STACK_FRAME_I0_OFFSET             0x20
264#define CPU_STACK_FRAME_I1_OFFSET             0x24
265#define CPU_STACK_FRAME_I2_OFFSET             0x28
266#define CPU_STACK_FRAME_I3_OFFSET             0x2c
267#define CPU_STACK_FRAME_I4_OFFSET             0x30
268#define CPU_STACK_FRAME_I5_OFFSET             0x34
269#define CPU_STACK_FRAME_I6_FP_OFFSET          0x38
270#define CPU_STACK_FRAME_I7_OFFSET             0x3c
271#define CPU_STRUCTURE_RETURN_ADDRESS_OFFSET   0x40
272#define CPU_STACK_FRAME_SAVED_ARG0_OFFSET     0x44
273#define CPU_STACK_FRAME_SAVED_ARG1_OFFSET     0x48
274#define CPU_STACK_FRAME_SAVED_ARG2_OFFSET     0x4c
275#define CPU_STACK_FRAME_SAVED_ARG3_OFFSET     0x50
276#define CPU_STACK_FRAME_SAVED_ARG4_OFFSET     0x54
277#define CPU_STACK_FRAME_SAVED_ARG5_OFFSET     0x58
278#define CPU_STACK_FRAME_PAD0_OFFSET           0x5c
279
280#define CPU_MINIMUM_STACK_FRAME_SIZE          0x60
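
/*
 *  A hypothetical compile-time consistency check, shown only as an
 *  illustration: the offset constants above are meant to mirror the layout
 *  of CPU_Minimum_stack_frame.  Assuming 32-bit pointers (as on SPARC V7),
 *  a C89-style negative-array-size trick can catch a mismatch between the
 *  structure and CPU_MINIMUM_STACK_FRAME_SIZE.  The typedef name below is
 *  made up for this example.
 */

#ifndef ASM
typedef char Example_minimum_frame_size_check[
  (sizeof (CPU_Minimum_stack_frame) == CPU_MINIMUM_STACK_FRAME_SIZE) ? 1 : -1
];
#endif /* ASM */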
281
282/*
283 * Contexts
284 *
285 *  Generally there are 2 types of context to save.
286 *     1. Interrupt registers to save
287 *     2. Task level registers to save
288 *
289 *  This means we have the following 3 context items:
290 *     1. task level context stuff::  Context_Control
291 *     2. floating point task stuff:: Context_Control_fp
292 *     3. special interrupt level context :: Context_Control_interrupt
293 *
294 *  On the SPARC, we are relatively conservative in that we save most
295 *  of the CPU state in the context area.  The ET (enable trap) bit and
296 *  the CWP (current window pointer) field of the PSR are considered
297 *  system wide resources and are not maintained on a per-thread basis.
298 */
299
300#ifndef ASM
301
302typedef struct {
303    /*
304     *  Using a double g0_g1 will put everything in this structure on a
305     *  double word boundary which allows us to use double word loads
306     *  and stores safely in the context switch.
307     */
308    double     g0_g1;
309    unsigned32 g2;
310    unsigned32 g3;
311    unsigned32 g4;
312    unsigned32 g5;
313    unsigned32 g6;
314    unsigned32 g7;
315
316    unsigned32 l0;
317    unsigned32 l1;
318    unsigned32 l2;
319    unsigned32 l3;
320    unsigned32 l4;
321    unsigned32 l5;
322    unsigned32 l6;
323    unsigned32 l7;
324
325    unsigned32 i0;
326    unsigned32 i1;
327    unsigned32 i2;
328    unsigned32 i3;
329    unsigned32 i4;
330    unsigned32 i5;
331    unsigned32 i6_fp;
332    unsigned32 i7;
333
334    unsigned32 o0;
335    unsigned32 o1;
336    unsigned32 o2;
337    unsigned32 o3;
338    unsigned32 o4;
339    unsigned32 o5;
340    unsigned32 o6_sp;
341    unsigned32 o7;
342
343    unsigned32 psr;
344} Context_Control;
345
346#endif /* ASM */
347
348/*
349 *  Offsets of fields within Context_Control for assembly routines.
350 */
351
352#define G0_OFFSET    0x00
353#define G1_OFFSET    0x04
354#define G2_OFFSET    0x08
355#define G3_OFFSET    0x0C
356#define G4_OFFSET    0x10
357#define G5_OFFSET    0x14
358#define G6_OFFSET    0x18
359#define G7_OFFSET    0x1C
360
361#define L0_OFFSET    0x20
362#define L1_OFFSET    0x24
363#define L2_OFFSET    0x28
364#define L3_OFFSET    0x2C
365#define L4_OFFSET    0x30
366#define L5_OFFSET    0x34
367#define L6_OFFSET    0x38
368#define L7_OFFSET    0x3C
369
370#define I0_OFFSET    0x40
371#define I1_OFFSET    0x44
372#define I2_OFFSET    0x48
373#define I3_OFFSET    0x4C
374#define I4_OFFSET    0x50
375#define I5_OFFSET    0x54
376#define I6_FP_OFFSET 0x58
377#define I7_OFFSET    0x5C
378
379#define O0_OFFSET    0x60
380#define O1_OFFSET    0x64
381#define O2_OFFSET    0x68
382#define O3_OFFSET    0x6C
383#define O4_OFFSET    0x70
384#define O5_OFFSET    0x74
385#define O6_SP_OFFSET 0x78
386#define O7_OFFSET    0x7C
387
388#define PSR_OFFSET   0x80
389
390#define CONTEXT_CONTROL_SIZE 0x84
391
392/*
393 *  The floating point context area.
394 */
395
396#ifndef ASM
397
398typedef struct {
399    double      f0_f1;
400    double      f2_f3;
401    double      f4_f5;
402    double      f6_f7;
403    double      f8_f9;
404    double      f10_f11;
405    double      f12_f13;
406    double      f14_f15;
407    double      f16_f17;
408    double      f18_f19;
409    double      f20_f21;
410    double      f22_f23;
411    double      f24_f25;
412    double      f26_f27;
413    double      f28_f29;
414    double      f30_f31;
415    unsigned32  fsr;
416} Context_Control_fp;
417
418#endif /* ASM */
419
420/*
421 *  Offsets of fields within Context_Control_fp for assembly routines.
422 */
423
424#define FO_F1_OFFSET     0x00
425#define F2_F3_OFFSET     0x08
426#define F4_F5_OFFSET     0x10
427#define F6_F7_OFFSET     0x18
428#define F8_F9_OFFSET     0x20
429#define F1O_F11_OFFSET   0x28
430#define F12_F13_OFFSET   0x30
431#define F14_F15_OFFSET   0x38
432#define F16_F17_OFFSET   0x40
433#define F18_F19_OFFSET   0x48
434#define F2O_F21_OFFSET   0x50
435#define F22_F23_OFFSET   0x58
436#define F24_F25_OFFSET   0x60
437#define F26_F27_OFFSET   0x68
438#define F28_F29_OFFSET   0x70
439#define F3O_F31_OFFSET   0x78
440#define FSR_OFFSET       0x80
441
442#define CONTEXT_CONTROL_FP_SIZE 0x84
443
444#ifndef ASM
445
446/*
447 *  Context saved on stack for an interrupt.
448 *
449 *  NOTE:  The PSR, PC, and NPC are only saved in this structure for the
450 *         benefit of the user's handler.
451 */
452
453typedef struct {
454  CPU_Minimum_stack_frame  Stack_frame;
455  unsigned32               psr;
456  unsigned32               pc;
457  unsigned32               npc;
458  unsigned32               g1;
459  unsigned32               g2;
460  unsigned32               g3;
461  unsigned32               g4;
462  unsigned32               g5;
463  unsigned32               g6;
464  unsigned32               g7;
465  unsigned32               i0;
466  unsigned32               i1;
467  unsigned32               i2;
468  unsigned32               i3;
469  unsigned32               i4;
470  unsigned32               i5;
471  unsigned32               i6_fp;
472  unsigned32               i7;
473  unsigned32               y;
474  unsigned32               tpc;
475} CPU_Interrupt_frame;
476
477#endif /* ASM */
478
479/*
480 *  Offsets of fields within CPU_Interrupt_frame for assembly routines.
481 */
482
483#define ISF_STACK_FRAME_OFFSET 0x00
484#define ISF_PSR_OFFSET         CPU_MINIMUM_STACK_FRAME_SIZE + 0x00
485#define ISF_PC_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x04
486#define ISF_NPC_OFFSET         CPU_MINIMUM_STACK_FRAME_SIZE + 0x08
487#define ISF_G1_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x0c
488#define ISF_G2_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x10
489#define ISF_G3_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x14
490#define ISF_G4_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x18
491#define ISF_G5_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x1c
492#define ISF_G6_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x20
493#define ISF_G7_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x24
494#define ISF_I0_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x28
495#define ISF_I1_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x2c
496#define ISF_I2_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x30
497#define ISF_I3_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x34
498#define ISF_I4_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x38
499#define ISF_I5_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x3c
500#define ISF_I6_FP_OFFSET       CPU_MINIMUM_STACK_FRAME_SIZE + 0x40
501#define ISF_I7_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x44
502#define ISF_Y_OFFSET           CPU_MINIMUM_STACK_FRAME_SIZE + 0x48
503#define ISF_TPC_OFFSET         CPU_MINIMUM_STACK_FRAME_SIZE + 0x4c
504
505#define CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE CPU_MINIMUM_STACK_FRAME_SIZE + 0x50
506#ifndef ASM
507
508/*
509 *  The following table contains the information required to configure
510 *  the processor specific parameters.
511 */
512
513typedef struct {
514  void       (*pretasking_hook)( void );
515  void       (*predriver_hook)( void );
516  void       (*postdriver_hook)( void );
517  void       (*idle_task)( void );
518  boolean      do_zero_of_workspace;
519  unsigned32   idle_task_stack_size;
520  unsigned32   interrupt_stack_size;
521  unsigned32   extra_mpci_receive_server_stack;
522  void *     (*stack_allocate_hook)( unsigned32 );
523  void       (*stack_free_hook)( void* );
524  /* end of fields required on all CPUs */
525
526}   rtems_cpu_table;
527
528/*
529 *  Macros to access required entries in the CPU Table are in
530 *  the file rtems/system.h.
531 */
532
533/*
534 *  Macros to access SPARC specific additions to the CPU Table
535 */
536
537/* There are no CPU specific additions to the CPU Table for this port. */
538
539/*
540 *  This variable contains the initial context for the FP unit.
541 *  It is filled in by _CPU_Initialize and copied into the task's FP
542 *  context area during _CPU_Context_Initialize.
543 */
544
545SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context CPU_STRUCTURE_ALIGNMENT;
546
547/*
548 *  This stack is allocated by the Interrupt Manager and the switch
549 *  is performed in _ISR_Handler.  These variables contain pointers
550 *  to the lowest and highest addresses in the chunk of memory allocated
551 *  for the interrupt stack.  Since it is unknown whether the stack
552 *  grows up or down (in general), this gives the CPU dependent
553 *  code the option of picking the version it wants to use.  Thus
554 *  both must be present if either is.
555 *
556 *  The SPARC supports a software based interrupt stack, so these
557 *  variables are required.
558 */
559
560SCORE_EXTERN void *_CPU_Interrupt_stack_low;
561SCORE_EXTERN void *_CPU_Interrupt_stack_high;
562
563#if defined(erc32)
564
565/*
566 *  ERC32 Specific Variables
567 */
568
569SCORE_EXTERN unsigned32 _ERC32_MEC_Timer_Control_Mirror;
570
571#endif
572
573/*
574 *  The following type defines an entry in the SPARC's trap table.
575 *
576 *  NOTE: The instructions chosen are RTEMS dependent although one is
577 *        obligated to use two of the four instructions to perform a
578 *        long jump.  The other instructions load one register with the
579 *        trap type (a.k.a. vector) and another with the psr.
580 */
581 
582typedef struct {
583  unsigned32   mov_psr_l0;                     /* mov   %psr, %l0           */
584  unsigned32   sethi_of_handler_to_l4;         /* sethi %hi(_handler), %l4  */
585  unsigned32   jmp_to_low_of_handler_plus_l4;  /* jmp   %l4 + %lo(_handler) */
586  unsigned32   mov_vector_l3;                  /* mov   _vector, %l3        */
587} CPU_Trap_table_entry;
588 
589/*
590 *  This is the set of opcodes for the instructions loaded into a trap
591 *  table entry.  The routine which installs a handler is responsible
592 *  for filling in the fields for the _handler address and the _vector
593 *  trap type.
594 *
595 *  The constants following this structure are masks for the fields which
596 *  must be filled in when the handler is installed.
597 */
598 
599extern const CPU_Trap_table_entry _CPU_Trap_slot_template;
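
/*
 *  Purely illustrative sketch (not part of this header) of how an install
 *  routine might patch a trap table slot from the template above.  The
 *  EXAMPLE_* masks, the shift value, and the function name are assumptions
 *  made for this sketch; sethi takes the upper 22 bits of the handler
 *  address and the jmp immediate takes the lower 10 bits.
 */

#if 0
#define EXAMPLE_HIGH_BITS_MASK   0xFFFFFC00  /* upper 22 bits -> sethi imm22 */
#define EXAMPLE_HIGH_BITS_SHIFT  10
#define EXAMPLE_LOW_BITS_MASK    0x000003FF  /* lower 10 bits -> jmp simm13  */

static void Example_install_trap_slot(
  CPU_Trap_table_entry *slot,     /* trap table entry to patch        */
  unsigned32            handler,  /* address of the raw trap handler  */
  unsigned32            vector    /* trap type loaded into %l3        */
)
{
  *slot = _CPU_Trap_slot_template;
  slot->sethi_of_handler_to_l4 |=
    (handler & EXAMPLE_HIGH_BITS_MASK) >> EXAMPLE_HIGH_BITS_SHIFT;
  slot->jmp_to_low_of_handler_plus_l4 |= handler & EXAMPLE_LOW_BITS_MASK;
  slot->mov_vector_l3 |= vector;
}
#endif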
600
601/*
602 *  This is the executive's trap table which is installed into the TBR
603 *  register.
604 *
605 *  NOTE:  Unfortunately, this must be aligned on a 4096 byte boundary.
606 *         The GNU tools as of binutils 2.5.2 and gcc 2.7.0 would not
607 *         align an entity to anything greater than a 512 byte boundary.
608 *
609 *         Because of this, we pull a little bit of a trick.  We allocate
610 *         enough memory so we can grab an address on a 4096 byte boundary
611 *         from this area.
612 */
613 
614#define SPARC_TRAP_TABLE_ALIGNMENT 4096
615 
616#ifndef NO_TABLE_MOVE
617
618SCORE_EXTERN unsigned8 _CPU_Trap_Table_area[ 8192 ]
619           __attribute__ ((aligned (SPARC_TRAP_TABLE_ALIGNMENT)));
620#endif
621 
622
623/*
624 *  The size of the floating point context area. 
625 */
626
627#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
628
629#endif
630
631/*
632 *  Amount of extra stack (above minimum stack size) required by
633 *  MPCI receive server thread.  Remember that in a multiprocessor
634 *  system this thread must exist and be able to process all directives.
635 */
636
637#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 1024
638
639/*
640 *  This defines the number of entries in the ISR_Vector_table managed
641 *  by the executive.
642 *
643 *  On the SPARC, there are really only 256 vectors.  However, the executive
644 *  has no easy, fast, reliable way to determine which traps are synchronous
645 *  and which are asynchronous.  By default, synchronous traps return to the
646 *  instruction which caused the interrupt.  So if you install a software
647 *  trap handler as an executive interrupt handler (which is desirable since
648 *  RTEMS takes care of window and register issues), then the executive needs
649 *  to know that the return address is to the trap rather than the instruction
650 *  following the trap.
651 *
652 *  So vectors 0 through 255 are treated as regular asynchronous traps which
653 *  provide the "correct" return address.  Vectors 256 through 511 are assumed
654 *  by the executive to be synchronous and to require that the return address
655 *  be fudged.
656 *
657 *  If you use this mechanism to install a trap handler which must reexecute
658 *  the instruction which caused the trap, then it should be installed as
659 *  an asynchronous trap.  This will avoid the executive changing the return
660 *  address.
661 */
662
663#define CPU_INTERRUPT_NUMBER_OF_VECTORS     256
664#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER 511
665
666#define SPARC_SYNCHRONOUS_TRAP_BIT_MASK     0x100
667#define SPARC_ASYNCHRONOUS_TRAP( _trap )    (_trap)
668#define SPARC_SYNCHRONOUS_TRAP( _trap )     ((_trap) + 256 )
669
670#define SPARC_REAL_TRAP_NUMBER( _trap )     ((_trap) % 256)
671
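
/*
 *  Worked example of the trap numbering macros above (the handler name and
 *  the install call are hypothetical): a software trap such as ta 0x10
 *  arrives as hardware trap 0x90, so SPARC_SYNCHRONOUS_TRAP( 0x90 ) == 0x190
 *  is the vector to install if the executive should adjust the return
 *  address past the trap instruction, and SPARC_REAL_TRAP_NUMBER( 0x190 )
 *  == 0x90 recovers the hardware trap number.
 */

#if 0
  proc_ptr previous_isr;

  _CPU_ISR_install_vector(
    SPARC_SYNCHRONOUS_TRAP( 0x90 ),  /* vector 0x190          */
    example_software_trap_isr,       /* hypothetical handler  */
    &previous_isr
  );
#endif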
672/*
673 *  Should be large enough to run all tests.  This ensures
674 *  that a reasonably small application should not have any problems.
675 *
676 *  This appears to be a fairly generous number for the SPARC since
677 *  it represents a call depth of about 20 routines based on the minimum
678 *  stack frame.
679 */
680
681#define CPU_STACK_MINIMUM_SIZE  (1024*4)
682
683/*
684 *  CPU's worst alignment requirement for data types on a byte boundary.  This
685 *  alignment does not take into account the requirements for the stack.
686 *
687 *  On the SPARC, this is required for double word loads and stores.
688 */
689
690#define CPU_ALIGNMENT      8
691
692/*
693 *  This number corresponds to the byte alignment requirement for the
694 *  heap handler.  This alignment requirement may be stricter than that
695 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
696 *  common for the heap to follow the same alignment requirement as
697 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
698 *  then this should be set to CPU_ALIGNMENT.
699 *
700 *  NOTE:  This does not have to be a power of 2.  It does have to
701 *         be greater than or equal to CPU_ALIGNMENT.
702 */
703
704#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT
705
706/*
707 *  This number corresponds to the byte alignment requirement for memory
708 *  buffers allocated by the partition manager.  This alignment requirement
709 *  may be stricter than that for the data types alignment specified by
710 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
711 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
712 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
713 *
714 *  NOTE:  This does not have to be a power of 2.  It does have to
715 *         be greater than or equal to CPU_ALIGNMENT.
716 */
717
718#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT
719
720/*
721 *  This number corresponds to the byte alignment requirement for the
722 *  stack.  This alignment requirement may be stricter than that for the
723 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
724 *  is strict enough for the stack, then this should be set to 0.
725 *
726 *  NOTE:  This must be either 0 or a power of 2 greater than CPU_ALIGNMENT.
727 *
728 *  The alignment restrictions for the SPARC are not that strict but this
729 *  should ensure that the stack is always sufficiently aligned so that the
730 *  window overflow, underflow, and flush routines can use double word loads
731 *  and stores.
732 */
733
734#define CPU_STACK_ALIGNMENT        16
735
736#ifndef ASM
737
738extern unsigned int sparc_disable_interrupts();
739extern void sparc_enable_interrupts();
740
741/* ISR handler macros */
742
743/*
744 *  Disable all interrupts for a critical section.  The previous
745 *  level is returned in _level.
746 */
747
748#define _CPU_ISR_Disable( _level ) \
749  (_level) = sparc_disable_interrupts()
750 
751/*
752 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
753 *  This indicates the end of a critical section.  The parameter
754 *  _level is not modified.
755 */
756
757#define _CPU_ISR_Enable( _level ) \
758  sparc_enable_interrupts( _level )
759/*
760 *  This temporarily restores the interrupt to _level before immediately
761 *  disabling them again.  This is used to divide long critical
762 *  sections into two or more parts.  The parameter _level is not
763 *  modified.
764 */
765
766#define _CPU_ISR_Flash( _level ) \
767  sparc_flash_interrupts( _level )
768 
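/*
 *  Typical usage sketch of the three macros above (the variable and the
 *  work inside the critical section are placeholders): a long critical
 *  section may call _CPU_ISR_Flash in the middle to briefly let pending
 *  interrupts in at the previously saved level.
 */

#if 0
  unsigned32 level;

  _CPU_ISR_Disable( level );   /* enter critical section, save old level    */
    /* ... first part of the critical work ... */
  _CPU_ISR_Flash( level );     /* momentarily re-enable, then disable again */
    /* ... second part of the critical work ... */
  _CPU_ISR_Enable( level );    /* restore the previously saved level        */
#endif
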
769/*
770 *  Map interrupt level in task mode onto the hardware that the CPU
771 *  actually provides.  Currently, interrupt levels which do not
772 *  map onto the CPU in a straightforward fashion are undefined. 
773 */
774
775#define _CPU_ISR_Set_level( _newlevel ) \
776   sparc_enable_interrupts( _newlevel << 8)
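
/*
 *  The shift above places the 4-bit level from the task mode (masked by
 *  CPU_MODES_INTERRUPT_MASK) into the PIL field of the PSR, which occupies
 *  bits 8-11.  For example, level 15 is passed to sparc_enable_interrupts()
 *  as 0xF00.
 */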
777 
778unsigned32 _CPU_ISR_Get_level( void );
779 
780/* end of ISR handler macros */
781
782/* Context handler macros */
783
784/*
785 *  Initialize the context to a state suitable for starting a
786 *  task after a context restore operation.  Generally, this
787 *  involves:
788 *
789 *     - setting a starting address
790 *     - preparing the stack
791 *     - preparing the stack and frame pointers
792 *     - setting the proper interrupt level in the context
793 *     - initializing the floating point context
794 *
795 *  NOTE:  Implemented as a subroutine for the SPARC port.
796 */
797
798void _CPU_Context_Initialize(
799  Context_Control  *the_context,
800  unsigned32       *stack_base,
801  unsigned32        size,
802  unsigned32        new_level,
803  void             *entry_point,
804  boolean           is_fp
805);
806
807/*
808 *  This routine is responsible for somehow restarting the currently
809 *  executing task. 
810 *
811 *  On the SPARC, this is relatively painless but requires a small
812 *  amount of wrapper code before using the regular restore code
813 *  of the context switch.
814 */
815
816#define _CPU_Context_Restart_self( _the_context ) \
817   _CPU_Context_restore( (_the_context) );
818
819/*
820 *  The FP context area for the SPARC is a simple structure and nothing
821 *  special is required to find the "starting load point"
822 */
823
824#define _CPU_Context_Fp_start( _base, _offset ) \
825   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
826
827/*
828 *  This routine initializes the FP context area passed to it.
829 *
830 *  The SPARC allows us to use the simple initialization model
831 *  in which an "initial" FP context was saved into _CPU_Null_fp_context
832 *  at CPU initialization and it is simply copied into the destination
833 *  context.
834 */
835
836#define _CPU_Context_Initialize_fp( _destination ) \
837  do { \
838   *((Context_Control_fp *) *((void **) _destination)) = _CPU_Null_fp_context; \
839  } while (0)
840
841/* end of Context handler macros */
842
843/* Fatal Error manager macros */
844
845/*
846 *  This routine copies _error into a known place -- typically a stack
847 *  location or a register, optionally disables interrupts, and
848 *  halts/stops the CPU.
849 */
850
851#define _CPU_Fatal_halt( _error ) \
852  do { \
853    unsigned32 level; \
854    \
855    level = sparc_disable_interrupts(); \
856    asm volatile ( "mov  %0, %%g1 " : "=r" (level) : "0" (level) ); \
857    while (1); /* loop forever */ \
858  } while (0)
859
860/* end of Fatal Error manager macros */
861
862/* Bitfield handler macros */
863
864/*
865 *  The SPARC port uses the generic C algorithm for bitfield scan if the
866 *  CPU model does not have a scan instruction.
867 */
868
869#if ( SPARC_HAS_BITSCAN == 0 )
870#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
871#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
872#else
873#error "scan instruction not currently supported by RTEMS!!"
874#endif
875
876/* end of Bitfield handler macros */
877
878/* Priority handler macros */
879
880/*
881 *  The SPARC port uses the generic C algorithm for bitfield scan if the
882 *  CPU model does not have a scan instruction.
883 */
884
885#if ( SPARC_HAS_BITSCAN == 1 )
886#error "scan instruction not currently supported by RTEMS!!"
887#endif
888
889/* end of Priority handler macros */
890
891/* functions */
892
893/*
894 *  _CPU_Initialize
895 *
896 *  This routine performs CPU dependent initialization.
897 */
898
899void _CPU_Initialize(
900  rtems_cpu_table  *cpu_table,
901  void            (*thread_dispatch)
902);
903
904/*
905 *  _CPU_ISR_install_raw_handler
906 *
907 *  This routine installs new_handler to be directly called from the trap
908 *  table.
909 */
910 
911void _CPU_ISR_install_raw_handler(
912  unsigned32  vector,
913  proc_ptr    new_handler,
914  proc_ptr   *old_handler
915);
916
917/*
918 *  _CPU_ISR_install_vector
919 *
920 *  This routine installs an interrupt vector.
921 */
922
923void _CPU_ISR_install_vector(
924  unsigned32  vector,
925  proc_ptr    new_handler,
926  proc_ptr   *old_handler
927);
928
929#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)
930 
931/*
932 *  _CPU_Thread_Idle_body
933 *
934 *  Some SPARC implementations have low power, sleep, or idle modes.  This
935 *  tries to take advantage of those modes.
936 */
937 
938void _CPU_Thread_Idle_body( void );
939 
940#endif /* CPU_PROVIDES_IDLE_THREAD_BODY */
941
942/*
943 *  _CPU_Context_switch
944 *
945 *  This routine switches from the run context to the heir context.
946 */
947
948void _CPU_Context_switch(
949  Context_Control  *run,
950  Context_Control  *heir
951);
952
953/*
954 *  _CPU_Context_restore
955 *
956 *  This routine is generally used only to restart self in an
957 *  efficient manner.
958 */
959
960void _CPU_Context_restore(
961  Context_Control *new_context
962);
963
964/*
965 *  _CPU_Context_save_fp
966 *
967 *  This routine saves the floating point context passed to it.
968 */
969
970void _CPU_Context_save_fp(
971  void **fp_context_ptr
972);
973
974/*
975 *  _CPU_Context_restore_fp
976 *
977 *  This routine restores the floating point context passed to it.
978 */
979
980void _CPU_Context_restore_fp(
981  void **fp_context_ptr
982);
983
984/*
985 *  CPU_swap_u32
986 *
987 *  The following routine swaps the endian format of an unsigned int.
988 *  It must be static because it is referenced indirectly.
989 *
990 *  This version will work on any processor, but if you come across a better
991 *  way for the SPARC PLEASE use it.  The most common way to swap a 32-bit
992 *  entity as shown below is not any more efficient on the SPARC.
993 *
994 *     swap least significant two bytes with 16-bit rotate
995 *     swap upper and lower 16-bits
996 *     swap most significant two bytes with 16-bit rotate
997 *
998 *  It is not obvious how the SPARC can do significantly better than the
999 *  generic code.  gcc 2.7.0 only generates about 12 instructions for the
1000 *  following code at optimization level four (i.e. -O4).
1001 */
1002 
1003static inline unsigned int CPU_swap_u32(
1004  unsigned int value
1005)
1006{
1007  unsigned32 byte1, byte2, byte3, byte4, swapped;
1008 
1009  byte4 = (value >> 24) & 0xff;
1010  byte3 = (value >> 16) & 0xff;
1011  byte2 = (value >> 8)  & 0xff;
1012  byte1 =  value        & 0xff;
1013 
1014  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
1015  return( swapped );
1016}
1017
1018#define CPU_swap_u16( value ) \
1019  (((value&0xff) << 8) | ((value >> 8)&0xff))
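
/*
 *  Worked example (values chosen only to illustrate the two swap helpers):
 *  CPU_swap_u32( 0x12345678 ) yields 0x78563412 and
 *  CPU_swap_u16( 0x1234 ) yields 0x3412.
 */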
1020
1021#endif /* ASM */
1022
1023#ifdef __cplusplus
1024}
1025#endif
1026
1027#endif