source: rtems/c/src/exec/score/cpu/sparc/cpu.h @ e1a06d1b

4.104.114.84.95
Last change on this file since e1a06d1b was e1a06d1b, checked in by Joel Sherrill <joel.sherrill@…>, on 12/02/96 at 22:47:38

Changes to reflect new revision of erc32 per Jiri Gaisler's suggestions.
This is current as of sis 2.6.

  • Property mode set to 100644
File size: 28.3 KB
Line 
1/*  cpu.h
2 *
3 *  This include file contains information pertaining to the port of
4 *  the executive to the SPARC processor.
5 *
6 *  COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
7 *  On-Line Applications Research Corporation (OAR).
8 *  All rights assigned to U.S. Government, 1994.
9 *
10 *  This material may be reproduced by or for the U.S. Government pursuant
11 *  to the copyright license under the clause at DFARS 252.227-7013.  This
12 *  notice must appear in all copies of this file and its derivatives.
13 *
14 *  Ported to ERC32 implementation of the SPARC by On-Line Applications
15 *  Research Corporation (OAR) under contract to the European Space
16 *  Agency (ESA).
17 *
18 *  ERC32 modifications of respective RTEMS file: COPYRIGHT (c) 1995.
19 *  European Space Agency.
20 *
21 *  $Id$
22 */
23
24#ifndef __CPU_h
25#define __CPU_h
26
27#ifdef __cplusplus
28extern "C" {
29#endif
30
31#include <rtems/score/sparc.h>               /* pick up machine definitions */
32#ifndef ASM
33#include <rtems/score/sparctypes.h>
34#endif
35
36/* conditional compilation parameters */
37
38/*
39 *  Should the calls to _Thread_Enable_dispatch be inlined?
40 *
41 *  If TRUE, then they are inlined.
42 *  If FALSE, then a subroutine call is made.
43 */
44
45#define CPU_INLINE_ENABLE_DISPATCH       TRUE
46
47/*
48 *  Should the body of the search loops in _Thread_queue_Enqueue_priority
 *  be unrolled one time?  If unrolled, each iteration of the loop examines
50 *  two "nodes" on the chain being searched.  Otherwise, only one node
51 *  is examined per iteration.
52 *
53 *  If TRUE, then the loops are unrolled.
54 *  If FALSE, then the loops are not unrolled.
55 *
56 *  This parameter could go either way on the SPARC.  The interrupt flash
57 *  code is relatively lengthy given the requirements for nops following
58 *  writes to the psr.  But if the clock speed were high enough, this would
59 *  not represent a great deal of time.
60 */
61
62#define CPU_UNROLL_ENQUEUE_PRIORITY      TRUE
63
64/*
65 *  Does the executive manage a dedicated interrupt stack in software?
66 *
67 *  If TRUE, then a stack is allocated in _Interrupt_Manager_initialization.
68 *  If FALSE, nothing is done.
69 *
70 *  The SPARC does not have a dedicated HW interrupt stack and one has
71 *  been implemented in SW.
72 */
73
74#define CPU_HAS_SOFTWARE_INTERRUPT_STACK   TRUE
75
76/*
77 *  Does this CPU have hardware support for a dedicated interrupt stack?
78 *
79 *  If TRUE, then it must be installed during initialization.
80 *  If FALSE, then no installation is performed.
81 *
82 *  The SPARC does not have a dedicated HW interrupt stack.
83 */
84
85#define CPU_HAS_HARDWARE_INTERRUPT_STACK  FALSE
86
87/*
88 *  Do we allocate a dedicated interrupt stack in the Interrupt Manager?
89 *
90 *  If TRUE, then the memory is allocated during initialization.
 *  If FALSE, then the interrupt stack memory is not allocated by the
 *  Interrupt Manager and must be provided elsewhere (e.g. by the BSP).
92 */
93
94#define CPU_ALLOCATE_INTERRUPT_STACK      TRUE
95
96/*
97 *  Does the CPU have hardware floating point?
98 *
99 *  If TRUE, then the FLOATING_POINT task attribute is supported.
100 *  If FALSE, then the FLOATING_POINT task attribute is ignored.
101 */
102
103#if ( SPARC_HAS_FPU == 1 )
104#define CPU_HARDWARE_FP     TRUE
105#else
106#define CPU_HARDWARE_FP     FALSE
107#endif
108
109/*
110 *  Are all tasks FLOATING_POINT tasks implicitly?
111 *
112 *  If TRUE, then the FLOATING_POINT task attribute is assumed.
113 *  If FALSE, then the FLOATING_POINT task attribute is followed.
114 */
115
116#define CPU_ALL_TASKS_ARE_FP     FALSE
117
118/*
119 *  Should the IDLE task have a floating point context?
120 *
121 *  If TRUE, then the IDLE task is created as a FLOATING_POINT task
122 *  and it has a floating point context which is switched in and out.
123 *  If FALSE, then the IDLE task does not have a floating point context.
124 */
125
126#define CPU_IDLE_TASK_IS_FP      FALSE
127
128/*
129 *  Should the saving of the floating point registers be deferred
130 *  until a context switch is made to another different floating point
131 *  task?
132 *
133 *  If TRUE, then the floating point context will not be stored until
134 *  necessary.  It will remain in the floating point registers and not
 *  disturbed until another floating point task is switched to.
136 *
137 *  If FALSE, then the floating point context is saved when a floating
138 *  point task is switched out and restored when the next floating point
139 *  task is restored.  The state of the floating point registers between
140 *  those two operations is not specified.
141 */
142
143#define CPU_USE_DEFERRED_FP_SWITCH       TRUE
144
145/*
146 *  Does this port provide a CPU dependent IDLE task implementation?
147 *
148 *  If TRUE, then the routine _CPU_Thread_Idle_body
149 *  must be provided and is the default IDLE thread body instead of
 *  the generic _Thread_Idle_body.
151 *
152 *  If FALSE, then use the generic IDLE thread body if the BSP does
153 *  not provide one.
154 */
155
156#if (SPARC_HAS_LOW_POWER_MODE == 1)
157#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE
158#else
159#define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE
160#endif
161
162/*
163 *  Does the stack grow up (toward higher addresses) or down
164 *  (toward lower addresses)?
165 *
 *  If TRUE, then the stack grows upward.
 *  If FALSE, then the stack grows toward smaller addresses.
168 *
169 *  The stack grows to lower addresses on the SPARC.
170 */
171
172#define CPU_STACK_GROWS_UP               FALSE
173
174/*
175 *  The following is the variable attribute used to force alignment
176 *  of critical data structures.  On some processors it may make
177 *  sense to have these aligned on tighter boundaries than
178 *  the minimum requirements of the compiler in order to have as
179 *  much of the critical data area as possible in a cache line.
180 *
181 *  The SPARC does not appear to have particularly strict alignment
 *  requirements.  This value was chosen to take advantage of caches.
183 */
184
185#define CPU_STRUCTURE_ALIGNMENT          __attribute__ ((aligned (16)))
186
187/*
188 *  The following defines the number of bits actually used in the
189 *  interrupt field of the task mode.  How those bits map to the
190 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
191 *
192 *  The SPARC has 16 interrupt levels in the PIL field of the PSR.
193 */
194
195#define CPU_MODES_INTERRUPT_MASK   0x0000000F
196
197/*
198 *  This structure represents the organization of the minimum stack frame
 *  for the SPARC.  More framing information is required in certain situations
200 *  such as when there are a large number of out parameters or when the callee
201 *  must save floating point registers.
202 */
203
204#ifndef ASM
205
/*
 *  NOTE:  This layout must agree with the CPU_STACK_FRAME_*_OFFSET
 *         constants defined below; do not reorder or resize fields.
 */
typedef struct {
  /*
   *  Spill area for the local (%l0-%l7) and input (%i0-%i7) registers
   *  of the register window owned by this frame; the window overflow
   *  handler stores them here.
   */
  unsigned32  l0;
  unsigned32  l1;
  unsigned32  l2;
  unsigned32  l3;
  unsigned32  l4;
  unsigned32  l5;
  unsigned32  l6;
  unsigned32  l7;
  unsigned32  i0;
  unsigned32  i1;
  unsigned32  i2;
  unsigned32  i3;
  unsigned32  i4;
  unsigned32  i5;
  unsigned32  i6_fp;     /* %i6 doubles as the frame pointer (%fp) */
  unsigned32  i7;        /* %i7 conventionally holds the return address */
  /* where a callee returning a struct finds the caller's result area */
  void       *structure_return_address;
  /*
   *  The following are for the callee to save the register arguments in
   *  should this be necessary.
   */
  unsigned32  saved_arg0;
  unsigned32  saved_arg1;
  unsigned32  saved_arg2;
  unsigned32  saved_arg3;
  unsigned32  saved_arg4;
  unsigned32  saved_arg5;
  unsigned32  pad0;      /* pads the frame size to a double word multiple */
}  CPU_Minimum_stack_frame;
236
237#endif /* ASM */
238
/*
 *  Byte offsets of the CPU_Minimum_stack_frame fields for use by
 *  assembly routines.  These must track the structure defined above.
 */
#define CPU_STACK_FRAME_L0_OFFSET             0x00
#define CPU_STACK_FRAME_L1_OFFSET             0x04
#define CPU_STACK_FRAME_L2_OFFSET             0x08
#define CPU_STACK_FRAME_L3_OFFSET             0x0c
#define CPU_STACK_FRAME_L4_OFFSET             0x10
#define CPU_STACK_FRAME_L5_OFFSET             0x14
#define CPU_STACK_FRAME_L6_OFFSET             0x18
#define CPU_STACK_FRAME_L7_OFFSET             0x1c
#define CPU_STACK_FRAME_I0_OFFSET             0x20
#define CPU_STACK_FRAME_I1_OFFSET             0x24
#define CPU_STACK_FRAME_I2_OFFSET             0x28
#define CPU_STACK_FRAME_I3_OFFSET             0x2c
#define CPU_STACK_FRAME_I4_OFFSET             0x30
#define CPU_STACK_FRAME_I5_OFFSET             0x34
#define CPU_STACK_FRAME_I6_FP_OFFSET          0x38
#define CPU_STACK_FRAME_I7_OFFSET             0x3c
#define CPU_STRUCTURE_RETURN_ADDRESS_OFFSET   0x40
#define CPU_STACK_FRAME_SAVED_ARG0_OFFSET     0x44
#define CPU_STACK_FRAME_SAVED_ARG1_OFFSET     0x48
#define CPU_STACK_FRAME_SAVED_ARG2_OFFSET     0x4c
#define CPU_STACK_FRAME_SAVED_ARG3_OFFSET     0x50
#define CPU_STACK_FRAME_SAVED_ARG4_OFFSET     0x54
#define CPU_STACK_FRAME_SAVED_ARG5_OFFSET     0x58
#define CPU_STACK_FRAME_PAD0_OFFSET           0x5c

/* total frame size: 0x60 (96) bytes, a double word multiple */
#define CPU_MINIMUM_STACK_FRAME_SIZE          0x60
265
266/*
267 * Contexts
268 *
269 *  Generally there are 2 types of context to save.
270 *     1. Interrupt registers to save
271 *     2. Task level registers to save
272 *
273 *  This means we have the following 3 context items:
274 *     1. task level context stuff::  Context_Control
275 *     2. floating point task stuff:: Context_Control_fp
276 *     3. special interrupt level context :: Context_Control_interrupt
277 *
278 *  On the SPARC, we are relatively conservative in that we save most
279 *  of the CPU state in the context area.  The ET (enable trap) bit and
280 *  the CWP (current window pointer) fields of the PSR are considered
281 *  system wide resources and are not maintained on a per-thread basis.
282 */
283
284#ifndef ASM
285
/*
 *  The basic integer context of a thread: the global, local, input,
 *  and output registers plus the PSR.
 *
 *  NOTE:  The layout must agree with the G0_OFFSET through PSR_OFFSET
 *         constants defined below.
 */
typedef struct {
    /*
     *  Using a double g0_g1 will put everything in this structure on a
     *  double word boundary which allows us to use double word loads
     *  and stores safely in the context switch.
     */
    double     g0_g1;          /* overlays globals %g0 and %g1 */
    unsigned32 g2;
    unsigned32 g3;
    unsigned32 g4;
    unsigned32 g5;
    unsigned32 g6;
    unsigned32 g7;

    /* local registers %l0 - %l7 */
    unsigned32 l0;
    unsigned32 l1;
    unsigned32 l2;
    unsigned32 l3;
    unsigned32 l4;
    unsigned32 l5;
    unsigned32 l6;
    unsigned32 l7;

    /* input registers; %i6 doubles as the frame pointer */
    unsigned32 i0;
    unsigned32 i1;
    unsigned32 i2;
    unsigned32 i3;
    unsigned32 i4;
    unsigned32 i5;
    unsigned32 i6_fp;
    unsigned32 i7;

    /* output registers; %o6 doubles as the stack pointer */
    unsigned32 o0;
    unsigned32 o1;
    unsigned32 o2;
    unsigned32 o3;
    unsigned32 o4;
    unsigned32 o5;
    unsigned32 o6_sp;
    unsigned32 o7;

    /* processor status register */
    unsigned32 psr;
} Context_Control;
329
330#endif /* ASM */
331
332/*
 *  Offsets of fields within Context_Control for assembly routines.
334 */
335
336#define G0_OFFSET    0x00
337#define G1_OFFSET    0x04
338#define G2_OFFSET    0x08
339#define G3_OFFSET    0x0C
340#define G4_OFFSET    0x10
341#define G5_OFFSET    0x14
342#define G6_OFFSET    0x18
343#define G7_OFFSET    0x1C
344
345#define L0_OFFSET    0x20
346#define L1_OFFSET    0x24
347#define L2_OFFSET    0x28
348#define L3_OFFSET    0x2C
349#define L4_OFFSET    0x30
350#define L5_OFFSET    0x34
351#define L6_OFFSET    0x38
352#define L7_OFFSET    0x3C
353
354#define I0_OFFSET    0x40
355#define I1_OFFSET    0x44
356#define I2_OFFSET    0x48
357#define I3_OFFSET    0x4C
358#define I4_OFFSET    0x50
359#define I5_OFFSET    0x54
360#define I6_FP_OFFSET 0x58
361#define I7_OFFSET    0x5C
362
363#define O0_OFFSET    0x60
364#define O1_OFFSET    0x64
365#define O2_OFFSET    0x68
366#define O3_OFFSET    0x6C
367#define O4_OFFSET    0x70
368#define O5_OFFSET    0x74
369#define O6_SP_OFFSET 0x78
370#define O7_OFFSET    0x7C
371
372#define PSR_OFFSET   0x80
373
374#define CONTEXT_CONTROL_SIZE 0x84
375
376/*
377 *  The floating point context area.
378 */
379
380#ifndef ASM
381
/*
 *  The thirty-two single precision floating point registers stored
 *  pairwise as doubles (permitting double word loads and stores in
 *  the FP context switch code) plus the floating point status register.
 *
 *  NOTE:  The layout must agree with the *_OFFSET constants defined
 *         below for this structure.
 */
typedef struct {
    double      f0_f1;
    double      f2_f3;
    double      f4_f5;
    double      f6_f7;
    double      f8_f9;
    double      f10_f11;
    double      f12_f13;
    double      f14_f15;
    double      f16_f17;
    double      f18_f19;
    double      f20_f21;
    double      f22_f23;
    double      f24_f25;
    double      f26_f27;
    double      f28_f29;
    double      f30_f31;
    unsigned32  fsr;       /* floating point status register */
} Context_Control_fp;
401
402#endif /* ASM */
403
/*
 *  Offsets of fields within Context_Control_fp for assembly routines.
 *
 *  NOTE:  Four of the original names used the letter 'O' where the
 *         digit '0' was intended (e.g. FO_F1_OFFSET).  The correctly
 *         spelled names are now defined; the misspelled names are
 *         retained below as aliases so existing assembly code which
 *         references them continues to build.
 */

#define F0_F1_OFFSET     0x00
#define F2_F3_OFFSET     0x08
#define F4_F5_OFFSET     0x10
#define F6_F7_OFFSET     0x18
#define F8_F9_OFFSET     0x20
#define F10_F11_OFFSET   0x28
#define F12_F13_OFFSET   0x30
#define F14_F15_OFFSET   0x38
#define F16_F17_OFFSET   0x40
#define F18_F19_OFFSET   0x48
#define F20_F21_OFFSET   0x50
#define F22_F23_OFFSET   0x58
#define F24_F25_OFFSET   0x60
#define F26_F27_OFFSET   0x68
#define F28_F29_OFFSET   0x70
#define F30_F31_OFFSET   0x78
#define FSR_OFFSET       0x80

/* deprecated misspelled aliases (letter 'O' instead of digit '0') */
#define FO_F1_OFFSET     F0_F1_OFFSET
#define F1O_F11_OFFSET   F10_F11_OFFSET
#define F2O_F21_OFFSET   F20_F21_OFFSET
#define F3O_F31_OFFSET   F30_F31_OFFSET

/* size of Context_Control_fp: FSR_OFFSET plus one unsigned32 */
#define CONTEXT_CONTROL_FP_SIZE 0x84
427
428#ifndef ASM
429
430/*
431 *  Context saved on stack for an interrupt.
432 *
433 *  NOTE:  The PSR, PC, and NPC are only saved in this structure for the
434 *         benefit of the user's handler.
435 */
436
/*
 *  NOTE:  The layout must agree with the ISF_*_OFFSET constants
 *         defined below.
 */
typedef struct {
  /* register window save area at the base of the frame */
  CPU_Minimum_stack_frame  Stack_frame;
  unsigned32               psr;    /* PSR at the time of the trap */
  unsigned32               pc;     /* saved PC (for the user's handler) */
  unsigned32               npc;    /* saved next PC (for the user's handler) */
  unsigned32               g1;
  unsigned32               g2;
  unsigned32               g3;
  unsigned32               g4;
  unsigned32               g5;
  unsigned32               g6;
  unsigned32               g7;
  unsigned32               i0;
  unsigned32               i1;
  unsigned32               i2;
  unsigned32               i3;
  unsigned32               i4;
  unsigned32               i5;
  unsigned32               i6_fp;  /* %i6 is the interrupted frame pointer */
  unsigned32               i7;
  unsigned32               y;      /* Y register (multiply/divide) */
  unsigned32               pad0_offset;  /* pads size to a double word multiple */
} CPU_Interrupt_frame;
460
461#endif /* ASM */
462
/*
 *  Offsets of fields within CPU_Interrupt_frame for assembly routines.
 *
 *  Each expansion involving arithmetic is fully parenthesized so the
 *  macros expand safely inside larger expressions (e.g. scaling or
 *  address arithmetic in assembly or C).
 */

#define ISF_STACK_FRAME_OFFSET 0x00
#define ISF_PSR_OFFSET         (CPU_MINIMUM_STACK_FRAME_SIZE + 0x00)
#define ISF_PC_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x04)
#define ISF_NPC_OFFSET         (CPU_MINIMUM_STACK_FRAME_SIZE + 0x08)
#define ISF_G1_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x0c)
#define ISF_G2_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x10)
#define ISF_G3_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x14)
#define ISF_G4_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x18)
#define ISF_G5_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x1c)
#define ISF_G6_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x20)
#define ISF_G7_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x24)
#define ISF_I0_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x28)
#define ISF_I1_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x2c)
#define ISF_I2_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x30)
#define ISF_I3_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x34)
#define ISF_I4_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x38)
#define ISF_I5_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x3c)
#define ISF_I6_FP_OFFSET       (CPU_MINIMUM_STACK_FRAME_SIZE + 0x40)
#define ISF_I7_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x44)
#define ISF_Y_OFFSET           (CPU_MINIMUM_STACK_FRAME_SIZE + 0x48)
#define ISF_PAD0_OFFSET        (CPU_MINIMUM_STACK_FRAME_SIZE + 0x4c)

#define CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE \
        (CPU_MINIMUM_STACK_FRAME_SIZE + 0x50)
490#ifndef ASM
491
492/*
493 *  The following table contains the information required to configure
494 *  the processor specific parameters.
495 */
496
typedef struct {
  /* user hooks invoked during system initialization, in this order:
     before tasking starts, before drivers, after drivers — names
     suggest the sequencing; confirm against _CPU_Initialize usage */
  void       (*pretasking_hook)( void );
  void       (*predriver_hook)( void );
  void       (*postdriver_hook)( void );
  /* optional replacement body for the IDLE task (NULL = default) --
     presumably; verify against the Thread Handler initialization */
  void       (*idle_task)( void );
  /* TRUE if the workspace should be zeroed at startup */
  boolean      do_zero_of_workspace;
  /* size in bytes of the dedicated software interrupt stack */
  unsigned32   interrupt_stack_size;
  /* extra stack for the MPCI receive server thread (multiprocessing) */
  unsigned32   extra_mpci_receive_server_stack;
  /* optional overrides for task stack allocation/deallocation */
  void *     (*stack_allocate_hook)( unsigned32 );
  void       (*stack_free_hook)( void* );
  /* end of fields required on all CPUs */

}   rtems_cpu_table;
510
511/*
 *  This variable contains the initial context for the FP unit.
513 *  It is filled in by _CPU_Initialize and copied into the task's FP
514 *  context area during _CPU_Context_Initialize.
515 */
516
517SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context CPU_STRUCTURE_ALIGNMENT;
518
519/*
520 *  This stack is allocated by the Interrupt Manager and the switch
521 *  is performed in _ISR_Handler.  These variables contain pointers
522 *  to the lowest and highest addresses in the chunk of memory allocated
523 *  for the interrupt stack.  Since it is unknown whether the stack
524 *  grows up or down (in general), this give the CPU dependent
525 *  code the option of picking the version it wants to use.  Thus
526 *  both must be present if either is.
527 *
528 *  The SPARC supports a software based interrupt stack and these
529 *  are required.
530 */
531
532SCORE_EXTERN void *_CPU_Interrupt_stack_low;
533SCORE_EXTERN void *_CPU_Interrupt_stack_high;
534
535#if defined(erc32)
536
537/*
538 *  ERC32 Specific Variables
539 */
540
541SCORE_EXTERN unsigned32 _ERC32_MEC_Timer_Control_Mirror;
542
543#endif
544
545/*
546 *  The following type defines an entry in the SPARC's trap table.
547 *
548 *  NOTE: The instructions chosen are RTEMS dependent although one is
549 *        obligated to use two of the four instructions to perform a
550 *        long jump.  The other instructions load one register with the
551 *        trap type (a.k.a. vector) and another with the psr.
552 */
553 
/*
 *  A single sixteen byte (four instruction) SPARC trap table entry.
 *  The four opcodes capture the PSR in %l0, jump to the handler, and
 *  load the vector number into %l3.
 */
typedef struct {
  unsigned32   mov_psr_l0;                     /* mov   %psr, %l0           */
  unsigned32   sethi_of_handler_to_l4;         /* sethi %hi(_handler), %l4  */
  unsigned32   jmp_to_low_of_handler_plus_l4;  /* jmp   %l4 + %lo(_handler) */
  unsigned32   mov_vector_l3;                  /* mov   _vector, %l3        */
} CPU_Trap_table_entry;
560 
561/*
562 *  This is the set of opcodes for the instructions loaded into a trap
563 *  table entry.  The routine which installs a handler is responsible
564 *  for filling in the fields for the _handler address and the _vector
565 *  trap type.
566 *
567 *  The constants following this structure are masks for the fields which
568 *  must be filled in when the handler is installed.
569 */
570 
571extern const CPU_Trap_table_entry _CPU_Trap_slot_template;
572
573/*
574 *  This is the executive's trap table which is installed into the TBR
575 *  register.
576 *
577 *  NOTE:  Unfortunately, this must be aligned on a 4096 byte boundary.
578 *         The GNU tools as of binutils 2.5.2 and gcc 2.7.0 would not
579 *         align an entity to anything greater than a 512 byte boundary.
580 *
581 *         Because of this, we pull a little bit of a trick.  We allocate
582 *         enough memory so we can grab an address on a 4096 byte boundary
583 *         from this area.
584 */
585 
586#define SPARC_TRAP_TABLE_ALIGNMENT 4096
587 
588#ifndef NO_TABLE_MOVE
589
590SCORE_EXTERN unsigned8 _CPU_Trap_Table_area[ 8192 ]
591           __attribute__ ((aligned (SPARC_TRAP_TABLE_ALIGNMENT)));
592#endif
593 
594
595/*
596 *  The size of the floating point context area. 
597 */
598
599#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
600
601#endif
602
603/*
604 *  Amount of extra stack (above minimum stack size) required by
605 *  MPCI receive server thread.  Remember that in a multiprocessor
606 *  system this thread must exist and be able to process all directives.
607 */
608
609#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 1024
610
611/*
612 *  This defines the number of entries in the ISR_Vector_table managed
613 *  by the executive.
614 *
615 *  On the SPARC, there are really only 256 vectors.  However, the executive
616 *  has no easy, fast, reliable way to determine which traps are synchronous
617 *  and which are asynchronous.  By default, synchronous traps return to the
618 *  instruction which caused the interrupt.  So if you install a software
619 *  trap handler as an executive interrupt handler (which is desirable since
620 *  RTEMS takes care of window and register issues), then the executive needs
621 *  to know that the return address is to the trap rather than the instruction
622 *  following the trap.
623 *
624 *  So vectors 0 through 255 are treated as regular asynchronous traps which
625 *  provide the "correct" return address.  Vectors 256 through 512 are assumed
626 *  by the executive to be synchronous and to require that the return address
627 *  be fudged.
628 *
629 *  If you use this mechanism to install a trap handler which must reexecute
630 *  the instruction which caused the trap, then it should be installed as
631 *  an asynchronous trap.  This will avoid the executive changing the return
632 *  address.
633 */
634
635#define CPU_INTERRUPT_NUMBER_OF_VECTORS     256
636#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER 511
637
638#define SPARC_SYNCHRONOUS_TRAP_BIT_MASK     0x100
639#define SPARC_ASYNCHRONOUS_TRAP( _trap )    (_trap)
640#define SPARC_SYNCHRONOUS_TRAP( _trap )     ((_trap) + 256 )
641
642#define SPARC_REAL_TRAP_NUMBER( _trap )     ((_trap) % 256)
643
644/*
 *  Should be large enough to run all tests.  This ensures
 *  that a "reasonable" small application should not have any problems.
 *
 *  This appears to be a fairly generous number for the SPARC since it
 *  represents a call depth of about 20 routines based on the minimum
650 *  stack frame.
651 */
652
653#define CPU_STACK_MINIMUM_SIZE  (1024*2 + 512)
654
655/*
656 *  CPU's worst alignment requirement for data types on a byte boundary.  This
657 *  alignment does not take into account the requirements for the stack.
658 *
659 *  On the SPARC, this is required for double word loads and stores.
660 */
661
662#define CPU_ALIGNMENT      8
663
664/*
665 *  This number corresponds to the byte alignment requirement for the
666 *  heap handler.  This alignment requirement may be stricter than that
667 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
668 *  common for the heap to follow the same alignment requirement as
669 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
670 *  then this should be set to CPU_ALIGNMENT.
671 *
672 *  NOTE:  This does not have to be a power of 2.  It does have to
673 *         be greater or equal to than CPU_ALIGNMENT.
674 */
675
676#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT
677
678/*
679 *  This number corresponds to the byte alignment requirement for memory
680 *  buffers allocated by the partition manager.  This alignment requirement
681 *  may be stricter than that for the data types alignment specified by
682 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
683 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
684 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
685 *
686 *  NOTE:  This does not have to be a power of 2.  It does have to
687 *         be greater or equal to than CPU_ALIGNMENT.
688 */
689
690#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT
691
692/*
693 *  This number corresponds to the byte alignment requirement for the
694 *  stack.  This alignment requirement may be stricter than that for the
695 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
696 *  is strict enough for the stack, then this should be set to 0.
697 *
698 *  NOTE:  This must be a power of 2 either 0 or greater than CPU_ALIGNMENT.
699 *
700 *  The alignment restrictions for the SPARC are not that strict but this
 *  should ensure that the stack is always sufficiently aligned so that the
702 *  window overflow, underflow, and flush routines can use double word loads
703 *  and stores.
704 */
705
706#define CPU_STACK_ALIGNMENT        16
707
708#ifndef ASM
709
710/* ISR handler macros */
711
712/*
713 *  Disable all interrupts for a critical section.  The previous
714 *  level is returned in _level.
715 */
716
717#define _CPU_ISR_Disable( _level ) \
718  sparc_disable_interrupts( _level )
719 
720/*
721 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
722 *  This indicates the end of a critical section.  The parameter
723 *  _level is not modified.
724 */
725
726#define _CPU_ISR_Enable( _level ) \
727  sparc_enable_interrupts( _level )
728 
729/*
730 *  This temporarily restores the interrupt to _level before immediately
731 *  disabling them again.  This is used to divide long critical
732 *  sections into two or more parts.  The parameter _level is not
733 *  modified.
734 */
735
736#define _CPU_ISR_Flash( _level ) \
737  sparc_flash_interrupts( _level )
738 
739/*
740 *  Map interrupt level in task mode onto the hardware that the CPU
741 *  actually provides.  Currently, interrupt levels which do not
742 *  map onto the CPU in a straight fashion are undefined. 
743 */
744
745#define _CPU_ISR_Set_level( _newlevel ) \
746   sparc_set_interrupt_level( _newlevel )
747 
748unsigned32 _CPU_ISR_Get_level( void );
749 
750/* end of ISR handler macros */
751
752/* Context handler macros */
753
754/*
755 *  Initialize the context to a state suitable for starting a
756 *  task after a context restore operation.  Generally, this
757 *  involves:
758 *
759 *     - setting a starting address
760 *     - preparing the stack
761 *     - preparing the stack and frame pointers
762 *     - setting the proper interrupt level in the context
763 *     - initializing the floating point context
764 *
765 *  NOTE:  Implemented as a subroutine for the SPARC port.
766 */
767
768void _CPU_Context_Initialize(
769  Context_Control  *the_context,
770  unsigned32       *stack_base,
771  unsigned32        size,
772  unsigned32        new_level,
773  void             *entry_point,
774  boolean           is_fp
775);
776
777/*
778 *  This routine is responsible for somehow restarting the currently
779 *  executing task. 
780 *
 *  On the SPARC, this is relatively painless but requires a small
782 *  amount of wrapper code before using the regular restore code in
783 *  of the context switch.
784 */
785
786#define _CPU_Context_Restart_self( _the_context ) \
787   _CPU_Context_restore( (_the_context) );
788
789/*
790 *  The FP context area for the SPARC is a simple structure and nothing
791 *  special is required to find the "starting load point"
792 */
793
794#define _CPU_Context_Fp_start( _base, _offset ) \
795   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
796
797/*
 *  This routine initializes the FP context area passed to it.
799 *
800 *  The SPARC allows us to use the simple initialization model
801 *  in which an "initial" FP context was saved into _CPU_Null_fp_context
802 *  at CPU initialization and it is simply copied into the destination
803 *  context.
804 */
805
806#define _CPU_Context_Initialize_fp( _destination ) \
807  do { \
808   *((Context_Control_fp *) *((void **) _destination)) = _CPU_Null_fp_context; \
809  } while (0)
810
811/* end of Context handler macros */
812
813/* Fatal Error manager macros */
814
/*
 *  This routine copies _error into a known place -- on the SPARC, the
 *  global register %g1 -- disables interrupts, and halts the CPU by
 *  spinning forever.
 *
 *  NOTE:  The previous expansion passed "level" rather than "_error"
 *         to the asm statement, so the macro argument was unused and
 *         the error code never reached %g1.  The "0" matching
 *         constraint places _error in the same register the asm then
 *         copies into %g1.
 */

#define _CPU_Fatal_halt( _error ) \
  do { \
    unsigned32 level; \
    \
    sparc_disable_interrupts( level ); \
    asm volatile ( "mov  %0, %%g1 " : "=r" (level) : "0" (_error) ); \
    while (1); /* loop forever */ \
  } while (0)
829
830/* end of Fatal Error manager macros */
831
832/* Bitfield handler macros */
833
834/*
835 *  The SPARC port uses the generic C algorithm for bitfield scan if the
836 *  CPU model does not have a scan instruction.
837 */
838
839#if ( SPARC_HAS_BITSCAN == 0 )
840#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
841#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
842#else
843#error "scan instruction not currently supported by RTEMS!!"
844#endif
845
846/* end of Bitfield handler macros */
847
/* Priority handler macros */
849
/*
 *  The SPARC port uses the generic C algorithm for priority bit field
 *  lookup if the CPU model does not have a scan instruction.
 */
854
855#if ( SPARC_HAS_BITSCAN == 1 )
856#error "scan instruction not currently supported by RTEMS!!"
857#endif
858
859/* end of Priority handler macros */
860
861/* functions */
862
863/*
864 *  _CPU_Initialize
865 *
866 *  This routine performs CPU dependent initialization.
867 */
868
869void _CPU_Initialize(
870  rtems_cpu_table  *cpu_table,
871  void            (*thread_dispatch)
872);
873
874/*
875 *  _CPU_ISR_install_raw_handler
876 *
877 *  This routine installs new_handler to be directly called from the trap
878 *  table.
879 */
880 
881void _CPU_ISR_install_raw_handler(
882  unsigned32  vector,
883  proc_ptr    new_handler,
884  proc_ptr   *old_handler
885);
886
887/*
888 *  _CPU_ISR_install_vector
889 *
890 *  This routine installs an interrupt vector.
891 */
892
893void _CPU_ISR_install_vector(
894  unsigned32  vector,
895  proc_ptr    new_handler,
896  proc_ptr   *old_handler
897);
898
899#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)
900 
901/*
902 *  _CPU_Thread_Idle_body
903 *
904 *  Some SPARC implementations have low power, sleep, or idle modes.  This
905 *  tries to take advantage of those models.
906 */
907 
908void _CPU_Thread_Idle_body( void );
909 
910#endif /* CPU_PROVIDES_IDLE_THREAD_BODY */
911
912/*
913 *  _CPU_Context_switch
914 *
915 *  This routine switches from the run context to the heir context.
916 */
917
918void _CPU_Context_switch(
919  Context_Control  *run,
920  Context_Control  *heir
921);
922
923/*
924 *  _CPU_Context_restore
925 *
 *  This routine is generally used only to restart self in an
927 *  efficient manner.
928 */
929
930void _CPU_Context_restore(
931  Context_Control *new_context
932);
933
934/*
935 *  _CPU_Context_save_fp
936 *
937 *  This routine saves the floating point context passed to it.
938 */
939
940void _CPU_Context_save_fp(
941  void **fp_context_ptr
942);
943
944/*
945 *  _CPU_Context_restore_fp
946 *
947 *  This routine restores the floating point context passed to it.
948 */
949
950void _CPU_Context_restore_fp(
951  void **fp_context_ptr
952);
953
954/*
955 *  CPU_swap_u32
956 *
957 *  The following routine swaps the endian format of an unsigned int.
958 *  It must be static because it is referenced indirectly.
959 *
960 *  This version will work on any processor, but if you come across a better
961 *  way for the SPARC PLEASE use it.  The most common way to swap a 32-bit
962 *  entity as shown below is not any more efficient on the SPARC.
963 *
964 *     swap least significant two bytes with 16-bit rotate
965 *     swap upper and lower 16-bits
966 *     swap most significant two bytes with 16-bit rotate
967 *
968 *  It is not obvious how the SPARC can do significantly better than the
969 *  generic code.  gcc 2.7.0 only generates about 12 instructions for the
970 *  following code at optimization level four (i.e. -O4).
971 */
972 
/*
 *  CPU_swap_u32
 *
 *  Change the endian format of a 32-bit value: each byte is masked
 *  out of the original value and merged into its mirrored position.
 *  See the discussion above for why the generic approach is used on
 *  the SPARC.
 */
static inline unsigned int CPU_swap_u32(
  unsigned int value
)
{
  unsigned int swapped;

  swapped  = (value & 0x000000ff) << 24;
  swapped |= (value & 0x0000ff00) << 8;
  swapped |= (value & 0x00ff0000) >> 8;
  swapped |= (value >> 24) & 0x000000ff;

  return( swapped );
}
987
988#endif ASM
989
990#ifdef __cplusplus
991}
992#endif
993
994#endif
Note: See TracBrowser for help on using the repository browser.