source: rtems/c/src/exec/score/cpu/sparc/cpu.h @ 937a6f3c

4.104.114.84.95
Last change on this file since 937a6f3c was 937a6f3c, checked in by Joel Sherrill <joel.sherrill@…>, on 06/03/98 at 19:00:17

Added CPU_ISR_PASSES_FRAME_POINTER so some ports could pass just the
vector number to user ISR's and other ports could pass both the vector
number and a pointer to the ISF.

  • Property mode set to 100644
File size: 28.8 KB
Line 
1/*  cpu.h
2 *
3 *  This include file contains information pertaining to the port of
4 *  the executive to the SPARC processor.
5 *
6 *  COPYRIGHT (c) 1989-1998.
7 *  On-Line Applications Research Corporation (OAR).
8 *  Copyright assigned to U.S. Government, 1994.
9 *
10 *  The license and distribution terms for this file may be
11 *  found in the file LICENSE in this distribution or at
12 *  http://www.OARcorp.com/rtems/license.html.
13 *
14 *  Ported to ERC32 implementation of the SPARC by On-Line Applications
15 *  Research Corporation (OAR) under contract to the European Space
16 *  Agency (ESA).
17 *
18 *  ERC32 modifications of respective RTEMS file: COPYRIGHT (c) 1995.
19 *  European Space Agency.
20 *
21 *  $Id$
22 */
23
24#ifndef __CPU_h
25#define __CPU_h
26
27#ifdef __cplusplus
28extern "C" {
29#endif
30
31#include <rtems/score/sparc.h>               /* pick up machine definitions */
32#ifndef ASM
33#include <rtems/score/sparctypes.h>
34#endif
35
36/* conditional compilation parameters */
37
38/*
39 *  Should the calls to _Thread_Enable_dispatch be inlined?
40 *
41 *  If TRUE, then they are inlined.
42 *  If FALSE, then a subroutine call is made.
43 */
44
45#define CPU_INLINE_ENABLE_DISPATCH       TRUE
46
47/*
48 *  Should the body of the search loops in _Thread_queue_Enqueue_priority
49 *  be unrolled one time?  In unrolled each iteration of the loop examines
50 *  two "nodes" on the chain being searched.  Otherwise, only one node
51 *  is examined per iteration.
52 *
53 *  If TRUE, then the loops are unrolled.
54 *  If FALSE, then the loops are not unrolled.
55 *
56 *  This parameter could go either way on the SPARC.  The interrupt flash
57 *  code is relatively lengthy given the requirements for nops following
58 *  writes to the psr.  But if the clock speed were high enough, this would
59 *  not represent a great deal of time.
60 */
61
62#define CPU_UNROLL_ENQUEUE_PRIORITY      TRUE
63
64/*
65 *  Does the executive manage a dedicated interrupt stack in software?
66 *
67 *  If TRUE, then a stack is allocated in _Interrupt_Manager_initialization.
68 *  If FALSE, nothing is done.
69 *
70 *  The SPARC does not have a dedicated HW interrupt stack and one has
71 *  been implemented in SW.
72 */
73
74#define CPU_HAS_SOFTWARE_INTERRUPT_STACK   TRUE
75
76/*
77 *  Does this CPU have hardware support for a dedicated interrupt stack?
78 *
79 *  If TRUE, then it must be installed during initialization.
80 *  If FALSE, then no installation is performed.
81 *
82 *  The SPARC does not have a dedicated HW interrupt stack.
83 */
84
85#define CPU_HAS_HARDWARE_INTERRUPT_STACK  FALSE
86
87/*
88 *  Do we allocate a dedicated interrupt stack in the Interrupt Manager?
89 *
90 *  If TRUE, then the memory is allocated during initialization.
 *  If FALSE, then the memory is NOT allocated during initialization.
92 */
93
94#define CPU_ALLOCATE_INTERRUPT_STACK      TRUE
95
96/*
97 *  Does the RTEMS invoke the user's ISR with the vector number and
98 *  a pointer to the saved interrupt frame (1) or just the vector
99 *  number (0)?
100 */
101
102#define CPU_ISR_PASSES_FRAME_POINTER 0
103
104/*
105 *  Does the CPU have hardware floating point?
106 *
107 *  If TRUE, then the FLOATING_POINT task attribute is supported.
108 *  If FALSE, then the FLOATING_POINT task attribute is ignored.
109 */
110
111#if ( SPARC_HAS_FPU == 1 )
112#define CPU_HARDWARE_FP     TRUE
113#else
114#define CPU_HARDWARE_FP     FALSE
115#endif
116
117/*
118 *  Are all tasks FLOATING_POINT tasks implicitly?
119 *
120 *  If TRUE, then the FLOATING_POINT task attribute is assumed.
121 *  If FALSE, then the FLOATING_POINT task attribute is followed.
122 */
123
124#define CPU_ALL_TASKS_ARE_FP     FALSE
125
126/*
127 *  Should the IDLE task have a floating point context?
128 *
129 *  If TRUE, then the IDLE task is created as a FLOATING_POINT task
130 *  and it has a floating point context which is switched in and out.
131 *  If FALSE, then the IDLE task does not have a floating point context.
132 */
133
134#define CPU_IDLE_TASK_IS_FP      FALSE
135
136/*
137 *  Should the saving of the floating point registers be deferred
138 *  until a context switch is made to another different floating point
139 *  task?
140 *
141 *  If TRUE, then the floating point context will not be stored until
142 *  necessary.  It will remain in the floating point registers and not
 *  disturbed until another floating point task is switched to.
144 *
145 *  If FALSE, then the floating point context is saved when a floating
146 *  point task is switched out and restored when the next floating point
147 *  task is restored.  The state of the floating point registers between
148 *  those two operations is not specified.
149 */
150
151#define CPU_USE_DEFERRED_FP_SWITCH       TRUE
152
153/*
154 *  Does this port provide a CPU dependent IDLE task implementation?
155 *
156 *  If TRUE, then the routine _CPU_Thread_Idle_body
157 *  must be provided and is the default IDLE thread body instead of
 *  the generic _Thread_Idle_body.
159 *
160 *  If FALSE, then use the generic IDLE thread body if the BSP does
161 *  not provide one.
162 */
163
164#if (SPARC_HAS_LOW_POWER_MODE == 1)
165#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE
166#else
167#define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE
168#endif
169
170/*
171 *  Does the stack grow up (toward higher addresses) or down
172 *  (toward lower addresses)?
173 *
 *  If TRUE, then the stack grows upward.
 *  If FALSE, then the stack grows toward smaller addresses.
176 *
177 *  The stack grows to lower addresses on the SPARC.
178 */
179
180#define CPU_STACK_GROWS_UP               FALSE
181
182/*
183 *  The following is the variable attribute used to force alignment
184 *  of critical data structures.  On some processors it may make
185 *  sense to have these aligned on tighter boundaries than
186 *  the minimum requirements of the compiler in order to have as
187 *  much of the critical data area as possible in a cache line.
188 *
189 *  The SPARC does not appear to have particularly strict alignment
190 *  requirements.  This value was chosen to take advantages of caches.
191 */
192
193#define CPU_STRUCTURE_ALIGNMENT          __attribute__ ((aligned (16)))
194
195/*
196 *  Define what is required to specify how the network to host conversion
197 *  routines are handled.
198 */
199
200#define CPU_CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES FALSE
201#define CPU_BIG_ENDIAN                           TRUE
202#define CPU_LITTLE_ENDIAN                        FALSE
203
204/*
205 *  The following defines the number of bits actually used in the
206 *  interrupt field of the task mode.  How those bits map to the
207 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
208 *
209 *  The SPARC has 16 interrupt levels in the PIL field of the PSR.
210 */
211
212#define CPU_MODES_INTERRUPT_MASK   0x0000000F
213
214/*
215 *  This structure represents the organization of the minimum stack frame
 *  for the SPARC.  More framing information is required in certain situations
217 *  such as when there are a large number of out parameters or when the callee
218 *  must save floating point registers.
219 */
220
221#ifndef ASM
222
typedef struct {
  /*
   *  Save area for the local registers (%l0-%l7) of the register
   *  window owned by this frame.
   */
  unsigned32  l0;
  unsigned32  l1;
  unsigned32  l2;
  unsigned32  l3;
  unsigned32  l4;
  unsigned32  l5;
  unsigned32  l6;
  unsigned32  l7;
  /*
   *  Save area for the input registers (%i0-%i7).  %i6 doubles as the
   *  frame pointer; %i7 holds the caller's return address.
   */
  unsigned32  i0;
  unsigned32  i1;
  unsigned32  i2;
  unsigned32  i3;
  unsigned32  i4;
  unsigned32  i5;
  unsigned32  i6_fp;
  unsigned32  i7;
  /* Slot reserved by the SPARC ABI for returning aggregates (struct return). */
  void       *structure_return_address;
  /*
   *  The following are for the callee to save the register arguments in
   *  should this be necessary.
   */
  unsigned32  saved_arg0;
  unsigned32  saved_arg1;
  unsigned32  saved_arg2;
  unsigned32  saved_arg3;
  unsigned32  saved_arg4;
  unsigned32  saved_arg5;
  /* Padding keeping the total frame size (0x60) a doubleword multiple. */
  unsigned32  pad0;
}  CPU_Minimum_stack_frame;
253
254#endif /* ASM */
255
/*
 *  Offsets of the fields of CPU_Minimum_stack_frame for use by assembly
 *  routines.  These values must track the structure layout above.
 */
#define CPU_STACK_FRAME_L0_OFFSET             0x00
#define CPU_STACK_FRAME_L1_OFFSET             0x04
#define CPU_STACK_FRAME_L2_OFFSET             0x08
#define CPU_STACK_FRAME_L3_OFFSET             0x0c
#define CPU_STACK_FRAME_L4_OFFSET             0x10
#define CPU_STACK_FRAME_L5_OFFSET             0x14
#define CPU_STACK_FRAME_L6_OFFSET             0x18
#define CPU_STACK_FRAME_L7_OFFSET             0x1c
#define CPU_STACK_FRAME_I0_OFFSET             0x20
#define CPU_STACK_FRAME_I1_OFFSET             0x24
#define CPU_STACK_FRAME_I2_OFFSET             0x28
#define CPU_STACK_FRAME_I3_OFFSET             0x2c
#define CPU_STACK_FRAME_I4_OFFSET             0x30
#define CPU_STACK_FRAME_I5_OFFSET             0x34
#define CPU_STACK_FRAME_I6_FP_OFFSET          0x38
#define CPU_STACK_FRAME_I7_OFFSET             0x3c
#define CPU_STRUCTURE_RETURN_ADDRESS_OFFSET   0x40
#define CPU_STACK_FRAME_SAVED_ARG0_OFFSET     0x44
#define CPU_STACK_FRAME_SAVED_ARG1_OFFSET     0x48
#define CPU_STACK_FRAME_SAVED_ARG2_OFFSET     0x4c
#define CPU_STACK_FRAME_SAVED_ARG3_OFFSET     0x50
#define CPU_STACK_FRAME_SAVED_ARG4_OFFSET     0x54
#define CPU_STACK_FRAME_SAVED_ARG5_OFFSET     0x58
#define CPU_STACK_FRAME_PAD0_OFFSET           0x5c

/* Total size of CPU_Minimum_stack_frame (0x60 = 96 bytes). */
#define CPU_MINIMUM_STACK_FRAME_SIZE          0x60
282
283/*
284 * Contexts
285 *
286 *  Generally there are 2 types of context to save.
287 *     1. Interrupt registers to save
288 *     2. Task level registers to save
289 *
290 *  This means we have the following 3 context items:
291 *     1. task level context stuff::  Context_Control
292 *     2. floating point task stuff:: Context_Control_fp
293 *     3. special interrupt level context :: Context_Control_interrupt
294 *
295 *  On the SPARC, we are relatively conservative in that we save most
296 *  of the CPU state in the context area.  The ET (enable trap) bit and
297 *  the CWP (current window pointer) fields of the PSR are considered
298 *  system wide resources and are not maintained on a per-thread basis.
299 */
300
301#ifndef ASM
302
typedef struct {
    /*
     *  Using a double g0_g1 will put everything in this structure on a
     *  double word boundary which allows us to use double word loads
     *  and stores safely in the context switch.
     */
    double     g0_g1;
    unsigned32 g2;
    unsigned32 g3;
    unsigned32 g4;
    unsigned32 g5;
    unsigned32 g6;
    unsigned32 g7;

    /* Local registers (%l0-%l7) of the current register window. */
    unsigned32 l0;
    unsigned32 l1;
    unsigned32 l2;
    unsigned32 l3;
    unsigned32 l4;
    unsigned32 l5;
    unsigned32 l6;
    unsigned32 l7;

    /* Input registers (%i0-%i7); %i6 is the frame pointer. */
    unsigned32 i0;
    unsigned32 i1;
    unsigned32 i2;
    unsigned32 i3;
    unsigned32 i4;
    unsigned32 i5;
    unsigned32 i6_fp;
    unsigned32 i7;

    /* Output registers (%o0-%o7); %o6 is the stack pointer. */
    unsigned32 o0;
    unsigned32 o1;
    unsigned32 o2;
    unsigned32 o3;
    unsigned32 o4;
    unsigned32 o5;
    unsigned32 o6_sp;
    unsigned32 o7;

    /*
     *  Processor State Register.  The ET and CWP fields are treated as
     *  system-wide resources and are NOT maintained per-thread.
     */
    unsigned32 psr;
} Context_Control;
346
347#endif /* ASM */
348
349/*
 *  Offsets of fields within Context_Control for assembly routines.
351 */
352
353#define G0_OFFSET    0x00
354#define G1_OFFSET    0x04
355#define G2_OFFSET    0x08
356#define G3_OFFSET    0x0C
357#define G4_OFFSET    0x10
358#define G5_OFFSET    0x14
359#define G6_OFFSET    0x18
360#define G7_OFFSET    0x1C
361
362#define L0_OFFSET    0x20
363#define L1_OFFSET    0x24
364#define L2_OFFSET    0x28
365#define L3_OFFSET    0x2C
366#define L4_OFFSET    0x30
367#define L5_OFFSET    0x34
368#define L6_OFFSET    0x38
369#define L7_OFFSET    0x3C
370
371#define I0_OFFSET    0x40
372#define I1_OFFSET    0x44
373#define I2_OFFSET    0x48
374#define I3_OFFSET    0x4C
375#define I4_OFFSET    0x50
376#define I5_OFFSET    0x54
377#define I6_FP_OFFSET 0x58
378#define I7_OFFSET    0x5C
379
380#define O0_OFFSET    0x60
381#define O1_OFFSET    0x64
382#define O2_OFFSET    0x68
383#define O3_OFFSET    0x6C
384#define O4_OFFSET    0x70
385#define O5_OFFSET    0x74
386#define O6_SP_OFFSET 0x78
387#define O7_OFFSET    0x7C
388
389#define PSR_OFFSET   0x80
390
391#define CONTEXT_CONTROL_SIZE 0x84
392
393/*
394 *  The floating point context area.
395 */
396
397#ifndef ASM
398
typedef struct {
    /*
     *  The 32 single-precision f registers are saved as even/odd pairs
     *  typed as double so the save/restore code can use double word
     *  loads and stores.
     */
    double      f0_f1;
    double      f2_f3;
    double      f4_f5;
    double      f6_f7;
    double      f8_f9;
    double      f10_f11;
    double      f12_f13;
    double      f14_f15;
    double      f16_f17;
    double      f18_f19;
    double      f20_f21;
    double      f22_f23;
    double      f24_f25;
    double      f26_f27;
    double      f28_f29;
    double      f30_f31;
    /* Floating point status register (%fsr). */
    unsigned32  fsr;
} Context_Control_fp;
418
419#endif /* ASM */
420
421/*
 *  Offsets of fields within Context_Control_fp for assembly routines.
423 */
424
/*
 *  NOTE: FO_F1_OFFSET, F1O_F11_OFFSET, F2O_F21_OFFSET, and F3O_F31_OFFSET
 *  are spelled with the letter 'O' where a zero would be expected.  The
 *  historical spelling is preserved because assembly routines reference
 *  these names; renaming them would break those references.
 */
#define FO_F1_OFFSET     0x00
#define F2_F3_OFFSET     0x08
#define F4_F5_OFFSET     0x10
#define F6_F7_OFFSET     0x18
#define F8_F9_OFFSET     0x20
#define F1O_F11_OFFSET   0x28
#define F12_F13_OFFSET   0x30
#define F14_F15_OFFSET   0x38
#define F16_F17_OFFSET   0x40
#define F18_F19_OFFSET   0x48
#define F2O_F21_OFFSET   0x50
#define F22_F23_OFFSET   0x58
#define F24_F25_OFFSET   0x60
#define F26_F27_OFFSET   0x68
#define F28_F29_OFFSET   0x70
#define F3O_F31_OFFSET   0x78
#define FSR_OFFSET       0x80

/* Total size of Context_Control_fp (0x84 bytes). */
#define CONTEXT_CONTROL_FP_SIZE 0x84
444
445#ifndef ASM
446
447/*
448 *  Context saved on stack for an interrupt.
449 *
450 *  NOTE:  The PSR, PC, and NPC are only saved in this structure for the
451 *         benefit of the user's handler.
452 */
453
typedef struct {
  /* Minimum frame so nested traps/interrupts can build on this stack. */
  CPU_Minimum_stack_frame  Stack_frame;
  unsigned32               psr;   /* saved for the user's handler (see NOTE) */
  unsigned32               pc;    /* trapped instruction address */
  unsigned32               npc;   /* next PC (SPARC delay-slot architecture) */
  /* Global registers; %g0 is hardwired to zero and need not be saved. */
  unsigned32               g1;
  unsigned32               g2;
  unsigned32               g3;
  unsigned32               g4;
  unsigned32               g5;
  unsigned32               g6;
  unsigned32               g7;
  /* Input registers of the interrupted window; %i6 is the frame pointer. */
  unsigned32               i0;
  unsigned32               i1;
  unsigned32               i2;
  unsigned32               i3;
  unsigned32               i4;
  unsigned32               i5;
  unsigned32               i6_fp;
  unsigned32               i7;
  unsigned32               y;     /* the SPARC %y register */
  unsigned32               pad0_offset;  /* padding word to even the size */
} CPU_Interrupt_frame;
477
478#endif /* ASM */
479
480/*
 *  Offsets of fields within CPU_Interrupt_frame for assembly routines.
482 */
483
/*
 *  Each expansion is parenthesized so the macros stay correct when used
 *  inside a larger expression (e.g. multiplied or negated); the original
 *  unparenthesized "BASE + 0xNN" form misexpanded in such contexts.
 */
#define ISF_STACK_FRAME_OFFSET 0x00
#define ISF_PSR_OFFSET         (CPU_MINIMUM_STACK_FRAME_SIZE + 0x00)
#define ISF_PC_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x04)
#define ISF_NPC_OFFSET         (CPU_MINIMUM_STACK_FRAME_SIZE + 0x08)
#define ISF_G1_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x0c)
#define ISF_G2_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x10)
#define ISF_G3_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x14)
#define ISF_G4_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x18)
#define ISF_G5_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x1c)
#define ISF_G6_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x20)
#define ISF_G7_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x24)
#define ISF_I0_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x28)
#define ISF_I1_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x2c)
#define ISF_I2_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x30)
#define ISF_I3_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x34)
#define ISF_I4_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x38)
#define ISF_I5_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x3c)
#define ISF_I6_FP_OFFSET       (CPU_MINIMUM_STACK_FRAME_SIZE + 0x40)
#define ISF_I7_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x44)
#define ISF_Y_OFFSET           (CPU_MINIMUM_STACK_FRAME_SIZE + 0x48)
#define ISF_PAD0_OFFSET        (CPU_MINIMUM_STACK_FRAME_SIZE + 0x4c)

/* Total size of CPU_Interrupt_frame, including the minimum stack frame. */
#define CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE \
          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x50)
507#ifndef ASM
508
509/*
510 *  The following table contains the information required to configure
511 *  the processor specific parameters.
512 */
513
typedef struct {
  /*
   *  BSP/application supplied extension points invoked during system
   *  initialization.  NOTE(review): presumably called before tasking,
   *  before drivers, and after drivers respectively -- confirm against
   *  the initialization code.
   */
  void       (*pretasking_hook)( void );
  void       (*predriver_hook)( void );
  void       (*postdriver_hook)( void );
  /* Optional user-provided IDLE task body. */
  void       (*idle_task)( void );
  boolean      do_zero_of_workspace;   /* TRUE to clear the workspace at startup */
  unsigned32   interrupt_stack_size;   /* bytes reserved for the interrupt stack */
  unsigned32   extra_mpci_receive_server_stack;  /* extra MPCI server stack, in bytes */
  /* Optional overrides for task stack allocation and deallocation. */
  void *     (*stack_allocate_hook)( unsigned32 );
  void       (*stack_free_hook)( void* );
  /* end of fields required on all CPUs */

}   rtems_cpu_table;
527
528/*
 *  This variable contains the initial context for the FP unit.
530 *  It is filled in by _CPU_Initialize and copied into the task's FP
531 *  context area during _CPU_Context_Initialize.
532 */
533
534SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context CPU_STRUCTURE_ALIGNMENT;
535
536/*
537 *  This stack is allocated by the Interrupt Manager and the switch
538 *  is performed in _ISR_Handler.  These variables contain pointers
539 *  to the lowest and highest addresses in the chunk of memory allocated
540 *  for the interrupt stack.  Since it is unknown whether the stack
 *  grows up or down (in general), this gives the CPU dependent
542 *  code the option of picking the version it wants to use.  Thus
543 *  both must be present if either is.
544 *
545 *  The SPARC supports a software based interrupt stack and these
546 *  are required.
547 */
548
549SCORE_EXTERN void *_CPU_Interrupt_stack_low;
550SCORE_EXTERN void *_CPU_Interrupt_stack_high;
551
552#if defined(erc32)
553
554/*
555 *  ERC32 Specific Variables
556 */
557
558SCORE_EXTERN unsigned32 _ERC32_MEC_Timer_Control_Mirror;
559
560#endif
561
562/*
563 *  The following type defines an entry in the SPARC's trap table.
564 *
565 *  NOTE: The instructions chosen are RTEMS dependent although one is
566 *        obligated to use two of the four instructions to perform a
567 *        long jump.  The other instructions load one register with the
568 *        trap type (a.k.a. vector) and another with the psr.
569 */
570 
571typedef struct {
572  unsigned32   mov_psr_l0;                     /* mov   %psr, %l0           */
573  unsigned32   sethi_of_handler_to_l4;         /* sethi %hi(_handler), %l4  */
574  unsigned32   jmp_to_low_of_handler_plus_l4;  /* jmp   %l4 + %lo(_handler) */
575  unsigned32   mov_vector_l3;                  /* mov   _vector, %l3        */
576} CPU_Trap_table_entry;
577 
578/*
579 *  This is the set of opcodes for the instructions loaded into a trap
580 *  table entry.  The routine which installs a handler is responsible
581 *  for filling in the fields for the _handler address and the _vector
582 *  trap type.
583 *
584 *  The constants following this structure are masks for the fields which
585 *  must be filled in when the handler is installed.
586 */
587 
588extern const CPU_Trap_table_entry _CPU_Trap_slot_template;
589
590/*
591 *  This is the executive's trap table which is installed into the TBR
592 *  register.
593 *
594 *  NOTE:  Unfortunately, this must be aligned on a 4096 byte boundary.
595 *         The GNU tools as of binutils 2.5.2 and gcc 2.7.0 would not
596 *         align an entity to anything greater than a 512 byte boundary.
597 *
598 *         Because of this, we pull a little bit of a trick.  We allocate
599 *         enough memory so we can grab an address on a 4096 byte boundary
600 *         from this area.
601 */
602 
603#define SPARC_TRAP_TABLE_ALIGNMENT 4096
604 
605#ifndef NO_TABLE_MOVE
606
607SCORE_EXTERN unsigned8 _CPU_Trap_Table_area[ 8192 ]
608           __attribute__ ((aligned (SPARC_TRAP_TABLE_ALIGNMENT)));
609#endif
610 
611
612/*
613 *  The size of the floating point context area. 
614 */
615
616#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
617
618#endif
619
620/*
621 *  Amount of extra stack (above minimum stack size) required by
622 *  MPCI receive server thread.  Remember that in a multiprocessor
623 *  system this thread must exist and be able to process all directives.
624 */
625
626#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 1024
627
628/*
629 *  This defines the number of entries in the ISR_Vector_table managed
630 *  by the executive.
631 *
632 *  On the SPARC, there are really only 256 vectors.  However, the executive
633 *  has no easy, fast, reliable way to determine which traps are synchronous
634 *  and which are asynchronous.  By default, synchronous traps return to the
635 *  instruction which caused the interrupt.  So if you install a software
636 *  trap handler as an executive interrupt handler (which is desirable since
637 *  RTEMS takes care of window and register issues), then the executive needs
638 *  to know that the return address is to the trap rather than the instruction
639 *  following the trap.
640 *
641 *  So vectors 0 through 255 are treated as regular asynchronous traps which
 *  provide the "correct" return address.  Vectors 256 through 511 are assumed
643 *  by the executive to be synchronous and to require that the return address
644 *  be fudged.
645 *
646 *  If you use this mechanism to install a trap handler which must reexecute
647 *  the instruction which caused the trap, then it should be installed as
648 *  an asynchronous trap.  This will avoid the executive changing the return
649 *  address.
650 */
651
652#define CPU_INTERRUPT_NUMBER_OF_VECTORS     256
653#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER 511
654
655#define SPARC_SYNCHRONOUS_TRAP_BIT_MASK     0x100
656#define SPARC_ASYNCHRONOUS_TRAP( _trap )    (_trap)
657#define SPARC_SYNCHRONOUS_TRAP( _trap )     ((_trap) + 256 )
658
659#define SPARC_REAL_TRAP_NUMBER( _trap )     ((_trap) % 256)
660
661/*
 *  Should be large enough to run all tests.  This ensures
663 *  that a "reasonable" small application should not have any problems.
664 *
665 *  This appears to be a fairly generous number for the SPARC since
666 *  represents a call depth of about 20 routines based on the minimum
667 *  stack frame.
668 */
669
670#define CPU_STACK_MINIMUM_SIZE  (1024*2 + 512)
671
672/*
673 *  CPU's worst alignment requirement for data types on a byte boundary.  This
674 *  alignment does not take into account the requirements for the stack.
675 *
676 *  On the SPARC, this is required for double word loads and stores.
677 */
678
679#define CPU_ALIGNMENT      8
680
681/*
682 *  This number corresponds to the byte alignment requirement for the
683 *  heap handler.  This alignment requirement may be stricter than that
684 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
685 *  common for the heap to follow the same alignment requirement as
686 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
687 *  then this should be set to CPU_ALIGNMENT.
688 *
689 *  NOTE:  This does not have to be a power of 2.  It does have to
690 *         be greater or equal to than CPU_ALIGNMENT.
691 */
692
693#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT
694
695/*
696 *  This number corresponds to the byte alignment requirement for memory
697 *  buffers allocated by the partition manager.  This alignment requirement
698 *  may be stricter than that for the data types alignment specified by
699 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
700 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
701 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
702 *
703 *  NOTE:  This does not have to be a power of 2.  It does have to
704 *         be greater or equal to than CPU_ALIGNMENT.
705 */
706
707#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT
708
709/*
710 *  This number corresponds to the byte alignment requirement for the
711 *  stack.  This alignment requirement may be stricter than that for the
712 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
713 *  is strict enough for the stack, then this should be set to 0.
714 *
715 *  NOTE:  This must be a power of 2 either 0 or greater than CPU_ALIGNMENT.
716 *
717 *  The alignment restrictions for the SPARC are not that strict but this
 *  should ensure that the stack is always sufficiently aligned so that the
719 *  window overflow, underflow, and flush routines can use double word loads
720 *  and stores.
721 */
722
723#define CPU_STACK_ALIGNMENT        16
724
725#ifndef ASM
726
727/* ISR handler macros */
728
729/*
730 *  Disable all interrupts for a critical section.  The previous
731 *  level is returned in _level.
732 */
733
734#define _CPU_ISR_Disable( _level ) \
735  sparc_disable_interrupts( _level )
736 
737/*
738 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
739 *  This indicates the end of a critical section.  The parameter
740 *  _level is not modified.
741 */
742
743#define _CPU_ISR_Enable( _level ) \
744  sparc_enable_interrupts( _level )
745 
746/*
747 *  This temporarily restores the interrupt to _level before immediately
748 *  disabling them again.  This is used to divide long critical
749 *  sections into two or more parts.  The parameter _level is not
750 *  modified.
751 */
752
753#define _CPU_ISR_Flash( _level ) \
754  sparc_flash_interrupts( _level )
755 
756/*
757 *  Map interrupt level in task mode onto the hardware that the CPU
758 *  actually provides.  Currently, interrupt levels which do not
759 *  map onto the CPU in a straight fashion are undefined. 
760 */
761
762#define _CPU_ISR_Set_level( _newlevel ) \
763   sparc_set_interrupt_level( _newlevel )
764 
765unsigned32 _CPU_ISR_Get_level( void );
766 
767/* end of ISR handler macros */
768
769/* Context handler macros */
770
771/*
772 *  Initialize the context to a state suitable for starting a
773 *  task after a context restore operation.  Generally, this
774 *  involves:
775 *
776 *     - setting a starting address
777 *     - preparing the stack
778 *     - preparing the stack and frame pointers
779 *     - setting the proper interrupt level in the context
780 *     - initializing the floating point context
781 *
782 *  NOTE:  Implemented as a subroutine for the SPARC port.
783 */
784
785void _CPU_Context_Initialize(
786  Context_Control  *the_context,
787  unsigned32       *stack_base,
788  unsigned32        size,
789  unsigned32        new_level,
790  void             *entry_point,
791  boolean           is_fp
792);
793
794/*
795 *  This routine is responsible for somehow restarting the currently
796 *  executing task. 
797 *
 *  On the SPARC, this is relatively painless but requires a small
799 *  amount of wrapper code before using the regular restore code in
800 *  of the context switch.
801 */
802
/*
 *  Restart the currently executing task by re-running the context
 *  restore path.  NOTE: no trailing semicolon in the expansion -- the
 *  invocation site supplies it, so the macro behaves like an ordinary
 *  statement and is safe in unbraced if/else bodies.
 */
#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) )
805
806/*
807 *  The FP context area for the SPARC is a simple structure and nothing
808 *  special is required to find the "starting load point"
809 */
810
811#define _CPU_Context_Fp_start( _base, _offset ) \
812   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
813
814/*
 *  This routine initializes the FP context area passed to it.
816 *
817 *  The SPARC allows us to use the simple initialization model
818 *  in which an "initial" FP context was saved into _CPU_Null_fp_context
819 *  at CPU initialization and it is simply copied into the destination
820 *  context.
821 */
822
/*
 *  Copy the pristine FP context saved at CPU initialization into the
 *  destination context area.  The parameter is parenthesized so that
 *  expression arguments expand correctly (the original cast applied
 *  only to the first operand of an unparenthesized expression).
 */
#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
   *((Context_Control_fp *) *((void **) (_destination))) = _CPU_Null_fp_context; \
  } while (0)
827
828/* end of Context handler macros */
829
830/* Fatal Error manager macros */
831
832/*
833 *  This routine copies _error into a known place -- typically a stack
834 *  location or a register, optionally disables interrupts, and
835 *  halts/stops the CPU.
836 */
837
838#define _CPU_Fatal_halt( _error ) \
839  do { \
840    unsigned32 level; \
841    \
842    sparc_disable_interrupts( level ); \
843    asm volatile ( "mov  %0, %%g1 " : "=r" (level) : "0" (level) ); \
844    while (1); /* loop forever */ \
845  } while (0)
846
847/* end of Fatal Error manager macros */
848
849/* Bitfield handler macros */
850
851/*
852 *  The SPARC port uses the generic C algorithm for bitfield scan if the
853 *  CPU model does not have a scan instruction.
854 */
855
856#if ( SPARC_HAS_BITSCAN == 0 )
857#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
858#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
859#else
860#error "scan instruction not currently supported by RTEMS!!"
861#endif
862
863/* end of Bitfield handler macros */
864
/* Priority handler macros */
866
867/*
868 *  The SPARC port uses the generic C algorithm for bitfield scan if the
869 *  CPU model does not have a scan instruction.
870 */
871
872#if ( SPARC_HAS_BITSCAN == 1 )
873#error "scan instruction not currently supported by RTEMS!!"
874#endif
875
876/* end of Priority handler macros */
877
878/* functions */
879
880/*
881 *  _CPU_Initialize
882 *
883 *  This routine performs CPU dependent initialization.
884 */
885
886void _CPU_Initialize(
887  rtems_cpu_table  *cpu_table,
888  void            (*thread_dispatch)
889);
890
891/*
892 *  _CPU_ISR_install_raw_handler
893 *
894 *  This routine installs new_handler to be directly called from the trap
895 *  table.
896 */
897 
898void _CPU_ISR_install_raw_handler(
899  unsigned32  vector,
900  proc_ptr    new_handler,
901  proc_ptr   *old_handler
902);
903
904/*
905 *  _CPU_ISR_install_vector
906 *
907 *  This routine installs an interrupt vector.
908 */
909
910void _CPU_ISR_install_vector(
911  unsigned32  vector,
912  proc_ptr    new_handler,
913  proc_ptr   *old_handler
914);
915
916#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)
917 
918/*
919 *  _CPU_Thread_Idle_body
920 *
921 *  Some SPARC implementations have low power, sleep, or idle modes.  This
922 *  tries to take advantage of those models.
923 */
924 
925void _CPU_Thread_Idle_body( void );
926 
927#endif /* CPU_PROVIDES_IDLE_THREAD_BODY */
928
929/*
930 *  _CPU_Context_switch
931 *
932 *  This routine switches from the run context to the heir context.
933 */
934
935void _CPU_Context_switch(
936  Context_Control  *run,
937  Context_Control  *heir
938);
939
940/*
941 *  _CPU_Context_restore
942 *
943 *  This routine is generally used only to restart self in an
944 *  efficient manner.
945 */
946
947void _CPU_Context_restore(
948  Context_Control *new_context
949);
950
951/*
952 *  _CPU_Context_save_fp
953 *
954 *  This routine saves the floating point context passed to it.
955 */
956
957void _CPU_Context_save_fp(
958  void **fp_context_ptr
959);
960
961/*
962 *  _CPU_Context_restore_fp
963 *
964 *  This routine restores the floating point context passed to it.
965 */
966
967void _CPU_Context_restore_fp(
968  void **fp_context_ptr
969);
970
971/*
972 *  CPU_swap_u32
973 *
974 *  The following routine swaps the endian format of an unsigned int.
975 *  It must be static because it is referenced indirectly.
976 *
977 *  This version will work on any processor, but if you come across a better
978 *  way for the SPARC PLEASE use it.  The most common way to swap a 32-bit
979 *  entity as shown below is not any more efficient on the SPARC.
980 *
981 *     swap least significant two bytes with 16-bit rotate
982 *     swap upper and lower 16-bits
983 *     swap most significant two bytes with 16-bit rotate
984 *
985 *  It is not obvious how the SPARC can do significantly better than the
986 *  generic code.  gcc 2.7.0 only generates about 12 instructions for the
987 *  following code at optimization level four (i.e. -O4).
988 */
989 
990static inline unsigned int CPU_swap_u32(
991  unsigned int value
992)
993{
994  unsigned32 byte1, byte2, byte3, byte4, swapped;
995 
996  byte4 = (value >> 24) & 0xff;
997  byte3 = (value >> 16) & 0xff;
998  byte2 = (value >> 8)  & 0xff;
999  byte1 =  value        & 0xff;
1000 
1001  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
1002  return( swapped );
1003}
1004
/*
 *  Endian swap a 16-bit quantity.
 *
 *  NOTE: the argument is fully parenthesized so expression arguments
 *  expand correctly (the original bare "value" was subject to operator
 *  precedence surprises).  The argument is still evaluated twice, so
 *  avoid passing expressions with side effects.
 */
#define CPU_swap_u16( value ) \
  ( (((value) & 0xff) << 8) | (((value) >> 8) & 0xff) )
1007
1008#endif ASM
1009
1010#ifdef __cplusplus
1011}
1012#endif
1013
1014#endif
Note: See TracBrowser for help on using the repository browser.