source: rtems/cpukit/score/cpu/sparc/rtems/score/cpu.h @ 93c2b41

Last change on this file since 93c2b41 was 8b56aa3, checked in by Ralf Corsepius <ralf.corsepius@…>, on 05/09/07 at 15:28:52

2007-05-09 Ralf Corsépius <ralf.corsepius@…>

  • rtems/score/cpu.h: Remove CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES.
1/**
2 * @file rtems/score/cpu.h
3 */
4
5/*
6 *  This include file contains information pertaining to the port of
7 *  the executive to the SPARC processor.
8 *
9 *  COPYRIGHT (c) 1989-2006.
10 *  On-Line Applications Research Corporation (OAR).
11 *
12 *  The license and distribution terms for this file may be
13 *  found in the file LICENSE in this distribution or at
14 *  http://www.rtems.com/license/LICENSE.
15 *
16 *  $Id$
17 */
18
19#ifndef _RTEMS_SCORE_CPU_H
20#define _RTEMS_SCORE_CPU_H
21
22#ifdef __cplusplus
23extern "C" {
24#endif
25
26#include <rtems/score/sparc.h>               /* pick up machine definitions */
27#ifndef ASM
28#include <rtems/score/types.h>
29#endif
30
31/* conditional compilation parameters */
32
33/*
34 *  Should the calls to _Thread_Enable_dispatch be inlined?
35 *
36 *  If TRUE, then they are inlined.
37 *  If FALSE, then a subroutine call is made.
38 */
39
40#define CPU_INLINE_ENABLE_DISPATCH       TRUE
41
42/*
43 *  Should the body of the search loops in _Thread_queue_Enqueue_priority
44 *  be unrolled one time?  If unrolled, each iteration of the loop examines
45 *  two "nodes" on the chain being searched.  Otherwise, only one node
46 *  is examined per iteration.
47 *
48 *  If TRUE, then the loops are unrolled.
49 *  If FALSE, then the loops are not unrolled.
50 *
51 *  This parameter could go either way on the SPARC.  The interrupt flash
52 *  code is relatively lengthy given the requirements for nops following
53 *  writes to the psr.  But if the clock speed were high enough, this would
54 *  not represent a great deal of time.
55 */
56
57#define CPU_UNROLL_ENQUEUE_PRIORITY      TRUE
58
59/*
60 *  Does the executive manage a dedicated interrupt stack in software?
61 *
62 *  If TRUE, then a stack is allocated in _ISR_Handler_initialization.
63 *  If FALSE, nothing is done.
64 *
65 *  The SPARC does not have a dedicated HW interrupt stack and one has
66 *  been implemented in SW.
67 */
68
69#define CPU_HAS_SOFTWARE_INTERRUPT_STACK   TRUE
70
71/*
72 *  Does this CPU have hardware support for a dedicated interrupt stack?
73 *
74 *  If TRUE, then it must be installed during initialization.
75 *  If FALSE, then no installation is performed.
76 *
77 *  The SPARC does not have a dedicated HW interrupt stack.
78 */
79
80#define CPU_HAS_HARDWARE_INTERRUPT_STACK  FALSE
81
82/*
83 *  Do we allocate a dedicated interrupt stack in the Interrupt Manager?
84 *
85 *  If TRUE, then the memory is allocated during initialization.
86 *  If FALSE, then the memory must be provided by the port or BSP.
87 */
88
89#define CPU_ALLOCATE_INTERRUPT_STACK      TRUE
90
91/*
92 *  Does RTEMS invoke the user's ISR with the vector number and
93 *  a pointer to the saved interrupt frame (1) or just the vector
94 *  number (0)?
95 */
96
97#define CPU_ISR_PASSES_FRAME_POINTER 0
98
99/*
100 *  Does the CPU have hardware floating point?
101 *
102 *  If TRUE, then the FLOATING_POINT task attribute is supported.
103 *  If FALSE, then the FLOATING_POINT task attribute is ignored.
104 */
105
106#if ( SPARC_HAS_FPU == 1 )
107#define CPU_HARDWARE_FP     TRUE
108#else
109#define CPU_HARDWARE_FP     FALSE
110#endif
111#define CPU_SOFTWARE_FP     FALSE
112
113/*
114 *  Are all tasks FLOATING_POINT tasks implicitly?
115 *
116 *  If TRUE, then the FLOATING_POINT task attribute is assumed.
117 *  If FALSE, then the FLOATING_POINT task attribute is followed.
118 */
119
120#define CPU_ALL_TASKS_ARE_FP     FALSE
121
122/*
123 *  Should the IDLE task have a floating point context?
124 *
125 *  If TRUE, then the IDLE task is created as a FLOATING_POINT task
126 *  and it has a floating point context which is switched in and out.
127 *  If FALSE, then the IDLE task does not have a floating point context.
128 */
129
130#define CPU_IDLE_TASK_IS_FP      FALSE
131
132/*
133 *  Should the saving of the floating point registers be deferred
134 *  until a context switch is made to a different floating point
135 *  task?
136 *
137 *  If TRUE, then the floating point context will not be stored until
138 *  necessary.  It will remain in the floating point registers and not
139 *  disturbed until another floating point task is switched to.
140 *
141 *  If FALSE, then the floating point context is saved when a floating
142 *  point task is switched out and restored when the next floating point
143 *  task is restored.  The state of the floating point registers between
144 *  those two operations is not specified.
145 */
146
147#define CPU_USE_DEFERRED_FP_SWITCH       TRUE
148
149/*
150 *  Does this port provide a CPU dependent IDLE task implementation?
151 *
152 *  If TRUE, then the routine _CPU_Thread_Idle_body
153 *  must be provided and is the default IDLE thread body instead of
154 *  _Thread_Idle_body.
155 *
156 *  If FALSE, then use the generic IDLE thread body if the BSP does
157 *  not provide one.
158 */
159
160#define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE
161
162/*
163 *  Does the stack grow up (toward higher addresses) or down
164 *  (toward lower addresses)?
165 *
166 *  If TRUE, then the stack grows upward.
167 *  If FALSE, then the stack grows toward smaller addresses.
168 *
169 *  The stack grows to lower addresses on the SPARC.
170 */
171
172#define CPU_STACK_GROWS_UP               FALSE
173
174/*
175 *  The following is the variable attribute used to force alignment
176 *  of critical data structures.  On some processors it may make
177 *  sense to have these aligned on tighter boundaries than
178 *  the minimum requirements of the compiler in order to have as
179 *  much of the critical data area as possible in a cache line.
180 *
181 *  The SPARC does not appear to have particularly strict alignment
182 *  requirements.  This value was chosen to take advantage of caches.
183 */
184
185#define CPU_STRUCTURE_ALIGNMENT          __attribute__ ((aligned (16)))
186
187/*
188 *  Define what is required to specify how the network to host conversion
189 *  routines are handled.
190 */
191
192#define CPU_BIG_ENDIAN                           TRUE
193#define CPU_LITTLE_ENDIAN                        FALSE
194
195/*
196 *  The following defines the number of bits actually used in the
197 *  interrupt field of the task mode.  How those bits map to the
198 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
199 *
200 *  The SPARC has 16 interrupt levels in the PIL field of the PSR.
201 */
202
203#define CPU_MODES_INTERRUPT_MASK   0x0000000F
204
205/*
206 *  This structure represents the organization of the minimum stack frame
207 *  for the SPARC.  More framing information is required in certain situations
208 *  such as when there are a large number of out parameters or when the callee
209 *  must save floating point registers.
210 */
211
212#ifndef ASM
213
214typedef struct {
215  uint32_t    l0;
216  uint32_t    l1;
217  uint32_t    l2;
218  uint32_t    l3;
219  uint32_t    l4;
220  uint32_t    l5;
221  uint32_t    l6;
222  uint32_t    l7;
223  uint32_t    i0;
224  uint32_t    i1;
225  uint32_t    i2;
226  uint32_t    i3;
227  uint32_t    i4;
228  uint32_t    i5;
229  uint32_t    i6_fp;
230  uint32_t    i7;
231  void       *structure_return_address;
232  /*
233   *  The following are for the callee to save the register arguments in
234   *  should this be necessary.
235   */
236  uint32_t    saved_arg0;
237  uint32_t    saved_arg1;
238  uint32_t    saved_arg2;
239  uint32_t    saved_arg3;
240  uint32_t    saved_arg4;
241  uint32_t    saved_arg5;
242  uint32_t    pad0;
243}  CPU_Minimum_stack_frame;
244
245#endif /* ASM */
246
247#define CPU_STACK_FRAME_L0_OFFSET             0x00
248#define CPU_STACK_FRAME_L1_OFFSET             0x04
249#define CPU_STACK_FRAME_L2_OFFSET             0x08
250#define CPU_STACK_FRAME_L3_OFFSET             0x0c
251#define CPU_STACK_FRAME_L4_OFFSET             0x10
252#define CPU_STACK_FRAME_L5_OFFSET             0x14
253#define CPU_STACK_FRAME_L6_OFFSET             0x18
254#define CPU_STACK_FRAME_L7_OFFSET             0x1c
255#define CPU_STACK_FRAME_I0_OFFSET             0x20
256#define CPU_STACK_FRAME_I1_OFFSET             0x24
257#define CPU_STACK_FRAME_I2_OFFSET             0x28
258#define CPU_STACK_FRAME_I3_OFFSET             0x2c
259#define CPU_STACK_FRAME_I4_OFFSET             0x30
260#define CPU_STACK_FRAME_I5_OFFSET             0x34
261#define CPU_STACK_FRAME_I6_FP_OFFSET          0x38
262#define CPU_STACK_FRAME_I7_OFFSET             0x3c
263#define CPU_STRUCTURE_RETURN_ADDRESS_OFFSET   0x40
264#define CPU_STACK_FRAME_SAVED_ARG0_OFFSET     0x44
265#define CPU_STACK_FRAME_SAVED_ARG1_OFFSET     0x48
266#define CPU_STACK_FRAME_SAVED_ARG2_OFFSET     0x4c
267#define CPU_STACK_FRAME_SAVED_ARG3_OFFSET     0x50
268#define CPU_STACK_FRAME_SAVED_ARG4_OFFSET     0x54
269#define CPU_STACK_FRAME_SAVED_ARG5_OFFSET     0x58
270#define CPU_STACK_FRAME_PAD0_OFFSET           0x5c
271
272#define CPU_MINIMUM_STACK_FRAME_SIZE          0x60
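
/*
 *  Editorial sketch (not part of the original header): on a 32-bit SPARC
 *  target every member of CPU_Minimum_stack_frame above is four bytes, so
 *  the offset constants and CPU_MINIMUM_STACK_FRAME_SIZE simply mirror the
 *  structure layout.  The disabled block below only illustrates this; the
 *  _editor_* name is hypothetical.
 */
#if 0
typedef char _editor_check_min_frame_size[
  ( sizeof( CPU_Minimum_stack_frame ) == CPU_MINIMUM_STACK_FRAME_SIZE )
    ? 1 : -1 ];
#endif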
273
274/*
275 * Contexts
276 *
277 *  Generally there are 2 types of context to save.
278 *     1. Interrupt registers to save
279 *     2. Task level registers to save
280 *
281 *  This means we have the following 3 context items:
282 *     1. task level context stuff::  Context_Control
283 *     2. floating point task stuff:: Context_Control_fp
284 *     3. special interrupt level context :: Context_Control_interrupt
285 *
286 *  On the SPARC, we are relatively conservative in that we save most
287 *  of the CPU state in the context area.  The ET (enable trap) bit and
288 *  the CWP (current window pointer) fields of the PSR are considered
289 *  system wide resources and are not maintained on a per-thread basis.
290 */
291
292#ifndef ASM
293
294typedef struct {
295    /*
296     *  Using a double g0_g1 will put everything in this structure on a
297     *  double word boundary which allows us to use double word loads
298     *  and stores safely in the context switch.
299     */
300    double     g0_g1;
301    uint32_t   g2;
302    uint32_t   g3;
303    uint32_t   g4;
304    uint32_t   g5;
305    uint32_t   g6;
306    uint32_t   g7;
307
308    uint32_t   l0;
309    uint32_t   l1;
310    uint32_t   l2;
311    uint32_t   l3;
312    uint32_t   l4;
313    uint32_t   l5;
314    uint32_t   l6;
315    uint32_t   l7;
316
317    uint32_t   i0;
318    uint32_t   i1;
319    uint32_t   i2;
320    uint32_t   i3;
321    uint32_t   i4;
322    uint32_t   i5;
323    uint32_t   i6_fp;
324    uint32_t   i7;
325
326    uint32_t   o0;
327    uint32_t   o1;
328    uint32_t   o2;
329    uint32_t   o3;
330    uint32_t   o4;
331    uint32_t   o5;
332    uint32_t   o6_sp;
333    uint32_t   o7;
334
335    uint32_t   psr;
336} Context_Control;
337
338#endif /* ASM */
339
340/*
341 *  Offsets of fields within Context_Control for assembly routines.
342 */
343
344#define G0_OFFSET    0x00
345#define G1_OFFSET    0x04
346#define G2_OFFSET    0x08
347#define G3_OFFSET    0x0C
348#define G4_OFFSET    0x10
349#define G5_OFFSET    0x14
350#define G6_OFFSET    0x18
351#define G7_OFFSET    0x1C
352
353#define L0_OFFSET    0x20
354#define L1_OFFSET    0x24
355#define L2_OFFSET    0x28
356#define L3_OFFSET    0x2C
357#define L4_OFFSET    0x30
358#define L5_OFFSET    0x34
359#define L6_OFFSET    0x38
360#define L7_OFFSET    0x3C
361
362#define I0_OFFSET    0x40
363#define I1_OFFSET    0x44
364#define I2_OFFSET    0x48
365#define I3_OFFSET    0x4C
366#define I4_OFFSET    0x50
367#define I5_OFFSET    0x54
368#define I6_FP_OFFSET 0x58
369#define I7_OFFSET    0x5C
370
371#define O0_OFFSET    0x60
372#define O1_OFFSET    0x64
373#define O2_OFFSET    0x68
374#define O3_OFFSET    0x6C
375#define O4_OFFSET    0x70
376#define O5_OFFSET    0x74
377#define O6_SP_OFFSET 0x78
378#define O7_OFFSET    0x7C
379
380#define PSR_OFFSET   0x80
381
382#define CONTEXT_CONTROL_SIZE 0x84
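
/*
 *  Editorial sketch (not part of the original header): the *_OFFSET
 *  constants above track the field layout of Context_Control, e.g.
 *  offsetof( Context_Control, o6_sp ) is O6_SP_OFFSET and
 *  offsetof( Context_Control, psr ) is PSR_OFFSET.  The disabled block
 *  below only illustrates this; the _editor_* names are hypothetical.
 */
#if 0
#include <stddef.h>
typedef char _editor_check_o6_sp[
  ( offsetof( Context_Control, o6_sp ) == O6_SP_OFFSET ) ? 1 : -1 ];
typedef char _editor_check_psr[
  ( offsetof( Context_Control, psr ) == PSR_OFFSET ) ? 1 : -1 ];
#endif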
383
384/*
385 *  The floating point context area.
386 */
387
388#ifndef ASM
389
390typedef struct {
391    double      f0_f1;
392    double      f2_f3;
393    double      f4_f5;
394    double      f6_f7;
395    double      f8_f9;
396    double      f10_f11;
397    double      f12_f13;
398    double      f14_f15;
399    double      f16_f17;
400    double      f18_f19;
401    double      f20_f21;
402    double      f22_f23;
403    double      f24_f25;
404    double      f26_f27;
405    double      f28_f29;
406    double      f30_f31;
407    uint32_t    fsr;
408} Context_Control_fp;
409
410#endif /* ASM */
411
412/*
413 *  Offsets of fields within Context_Control_fp for assembly routines.
414 */
415
416#define FO_F1_OFFSET     0x00
417#define F2_F3_OFFSET     0x08
418#define F4_F5_OFFSET     0x10
419#define F6_F7_OFFSET     0x18
420#define F8_F9_OFFSET     0x20
421#define F1O_F11_OFFSET   0x28
422#define F12_F13_OFFSET   0x30
423#define F14_F15_OFFSET   0x38
424#define F16_F17_OFFSET   0x40
425#define F18_F19_OFFSET   0x48
426#define F2O_F21_OFFSET   0x50
427#define F22_F23_OFFSET   0x58
428#define F24_F25_OFFSET   0x60
429#define F26_F27_OFFSET   0x68
430#define F28_F29_OFFSET   0x70
431#define F3O_F31_OFFSET   0x78
432#define FSR_OFFSET       0x80
433
434#define CONTEXT_CONTROL_FP_SIZE 0x84
435
436#ifndef ASM
437
438/*
439 *  Context saved on stack for an interrupt.
440 *
441 *  NOTE:  The PSR, PC, and NPC are only saved in this structure for the
442 *         benefit of the user's handler.
443 */
444
445typedef struct {
446  CPU_Minimum_stack_frame  Stack_frame;
447  uint32_t                 psr;
448  uint32_t                 pc;
449  uint32_t                 npc;
450  uint32_t                 g1;
451  uint32_t                 g2;
452  uint32_t                 g3;
453  uint32_t                 g4;
454  uint32_t                 g5;
455  uint32_t                 g6;
456  uint32_t                 g7;
457  uint32_t                 i0;
458  uint32_t                 i1;
459  uint32_t                 i2;
460  uint32_t                 i3;
461  uint32_t                 i4;
462  uint32_t                 i5;
463  uint32_t                 i6_fp;
464  uint32_t                 i7;
465  uint32_t                 y;
466  uint32_t                 tpc;
467} CPU_Interrupt_frame;
468
469#endif /* ASM */
470
471/*
472 *  Offsets of fields with CPU_Interrupt_frame for assembly routines.
473 */
474
475#define ISF_STACK_FRAME_OFFSET 0x00
476#define ISF_PSR_OFFSET         CPU_MINIMUM_STACK_FRAME_SIZE + 0x00
477#define ISF_PC_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x04
478#define ISF_NPC_OFFSET         CPU_MINIMUM_STACK_FRAME_SIZE + 0x08
479#define ISF_G1_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x0c
480#define ISF_G2_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x10
481#define ISF_G3_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x14
482#define ISF_G4_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x18
483#define ISF_G5_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x1c
484#define ISF_G6_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x20
485#define ISF_G7_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x24
486#define ISF_I0_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x28
487#define ISF_I1_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x2c
488#define ISF_I2_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x30
489#define ISF_I3_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x34
490#define ISF_I4_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x38
491#define ISF_I5_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x3c
492#define ISF_I6_FP_OFFSET       CPU_MINIMUM_STACK_FRAME_SIZE + 0x40
493#define ISF_I7_OFFSET          CPU_MINIMUM_STACK_FRAME_SIZE + 0x44
494#define ISF_Y_OFFSET           CPU_MINIMUM_STACK_FRAME_SIZE + 0x48
495#define ISF_TPC_OFFSET         CPU_MINIMUM_STACK_FRAME_SIZE + 0x4c
496
497#define CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE CPU_MINIMUM_STACK_FRAME_SIZE + 0x50
498#ifndef ASM
499
500/*
501 *  The following table contains the information required to configure
502 *  the processor specific parameters.
503 */
504
505typedef struct {
506  void       (*pretasking_hook)( void );
507  void       (*predriver_hook)( void );
508  void       (*postdriver_hook)( void );
509  void       (*idle_task)( void );
510  boolean      do_zero_of_workspace;
511  uint32_t     idle_task_stack_size;
512  uint32_t     interrupt_stack_size;
513  uint32_t     extra_mpci_receive_server_stack;
514  void *     (*stack_allocate_hook)( uint32_t   );
515  void       (*stack_free_hook)( void* );
516  /* end of fields required on all CPUs */
517
518}   rtems_cpu_table;
519
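/*
 *  Editorial sketch (not part of the original header): a hypothetical,
 *  purely illustrative CPU table.  Real systems normally obtain this table
 *  from the application configuration rather than writing it by hand.
 */
#if 0
rtems_cpu_table _editor_example_cpu_table = {
  0,           /* pretasking_hook */
  0,           /* predriver_hook */
  0,           /* postdriver_hook */
  0,           /* idle_task (none supplied) */
  TRUE,        /* do_zero_of_workspace */
  0,           /* idle_task_stack_size */
  4 * 1024,    /* interrupt_stack_size */
  0,           /* extra_mpci_receive_server_stack */
  0,           /* stack_allocate_hook */
  0            /* stack_free_hook */
};
#endif
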
520/*
521 *  Macros to access required entries in the CPU Table are in
522 *  the file rtems/system.h.
523 */
524
525/*
526 *  Macros to access SPARC specific additions to the CPU Table
527 */
528
529/* There are no CPU specific additions to the CPU Table for this port. */
530
531/*
532 *  This variable contains the initial context for the FP unit.
533 *  It is filled in by _CPU_Initialize and copied into the task's FP
534 *  context area during _CPU_Context_Initialize.
535 */
536
537SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context CPU_STRUCTURE_ALIGNMENT;
538
539/*
540 *  This stack is allocated by the Interrupt Manager and the switch
541 *  is performed in _ISR_Handler.  These variables contain pointers
542 *  to the lowest and highest addresses in the chunk of memory allocated
543 *  for the interrupt stack.  Since it is unknown whether the stack
544 *  grows up or down (in general), this gives the CPU dependent
545 *  code the option of picking the version it wants to use.  Thus
546 *  both must be present if either is.
547 *
548 *  The SPARC supports a software based interrupt stack and these
549 *  are required.
550 */
551
552SCORE_EXTERN void *_CPU_Interrupt_stack_low;
553SCORE_EXTERN void *_CPU_Interrupt_stack_high;
554
555/*
556 *  The following type defines an entry in the SPARC's trap table.
557 *
558 *  NOTE: The instructions chosen are RTEMS dependent although one is
559 *        obligated to use two of the four instructions to perform a
560 *        long jump.  The other instructions load one register with the
561 *        trap type (a.k.a. vector) and another with the psr.
562 */
563 
564typedef struct {
565  uint32_t     mov_psr_l0;                     /* mov   %psr, %l0           */
566  uint32_t     sethi_of_handler_to_l4;         /* sethi %hi(_handler), %l4  */
567  uint32_t     jmp_to_low_of_handler_plus_l4;  /* jmp   %l4 + %lo(_handler) */
568  uint32_t     mov_vector_l3;                  /* mov   _vector, %l3        */
569} CPU_Trap_table_entry;
570 
571/*
572 *  This is the set of opcodes for the instructions loaded into a trap
573 *  table entry.  The routine which installs a handler is responsible
574 *  for filling in the fields for the _handler address and the _vector
575 *  trap type.
576 *
577 *  The constants following this structure are masks for the fields which
578 *  must be filled in when the handler is installed.
579 */
580 
581extern const CPU_Trap_table_entry _CPU_Trap_slot_template;
582
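/*
 *  Editorial sketch (not part of the original header): a raw handler is
 *  installed by copying the template and then patching the immediates of
 *  the instructions.  The sethi carries the upper 22 bits of the handler
 *  address (%hi), the jmp carries the low 10 bits (%lo), and the vector is
 *  patched into the mov.  The function name is hypothetical and the masks
 *  are simplified; see the port's cpu.c for the actual installation code.
 */
#if 0
static void _editor_example_patch_slot(
  CPU_Trap_table_entry *slot,
  uint32_t              handler,
  uint32_t              vector
)
{
  *slot = _CPU_Trap_slot_template;
  slot->sethi_of_handler_to_l4        |= ( handler >> 10 );    /* %hi(handler) */
  slot->jmp_to_low_of_handler_plus_l4 |= ( handler & 0x3ff );  /* %lo(handler) */
  slot->mov_vector_l3                 |= ( vector & 0xff );    /* trap type */
}
#endif
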
583/*
584 *  The size of the floating point context area. 
585 */
586
587#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
588
589#endif
590
591/*
592 *  Amount of extra stack (above minimum stack size) required by
593 *  MPCI receive server thread.  Remember that in a multiprocessor
594 *  system this thread must exist and be able to process all directives.
595 */
596
597#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 1024
598
599/*
600 *  This defines the number of entries in the ISR_Vector_table managed
601 *  by the executive.
602 *
603 *  On the SPARC, there are really only 256 vectors.  However, the executive
604 *  has no easy, fast, reliable way to determine which traps are synchronous
605 *  and which are asynchronous.  By default, synchronous traps return to the
606 *  instruction which caused the interrupt.  So if you install a software
607 *  trap handler as an executive interrupt handler (which is desirable since
608 *  RTEMS takes care of window and register issues), then the executive needs
609 *  to know that the return address is to the trap rather than the instruction
610 *  following the trap.
611 *
612 *  So vectors 0 through 255 are treated as regular asynchronous traps which
613 *  provide the "correct" return address.  Vectors 256 through 511 are assumed
614 *  by the executive to be synchronous and to require that the return address
615 *  be fudged.
616 *
617 *  If you use this mechanism to install a trap handler which must reexecute
618 *  the instruction which caused the trap, then it should be installed as
619 *  an asynchronous trap.  This will avoid the executive changing the return
620 *  address.
621 */
622
623#define CPU_INTERRUPT_NUMBER_OF_VECTORS     256
624#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER 511
625
626#define SPARC_SYNCHRONOUS_TRAP_BIT_MASK     0x100
627#define SPARC_ASYNCHRONOUS_TRAP( _trap )    (_trap)
628#define SPARC_SYNCHRONOUS_TRAP( _trap )     ((_trap) + 256 )
629
630#define SPARC_REAL_TRAP_NUMBER( _trap )     ((_trap) % 256)
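
/*
 *  Editorial sketch (not part of the original header): the macros above are
 *  simple arithmetic on the vector number.  For a hypothetical trap 0x03:
 *
 *     SPARC_ASYNCHRONOUS_TRAP( 0x03 )  == 0x003
 *     SPARC_SYNCHRONOUS_TRAP( 0x03 )   == 0x103
 *     SPARC_REAL_TRAP_NUMBER( 0x103 )  == 0x03
 *
 *  Installing with the synchronous form simply sets bit 8
 *  (SPARC_SYNCHRONOUS_TRAP_BIT_MASK); the hardware trap number is recovered
 *  with SPARC_REAL_TRAP_NUMBER().
 */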
631
632/*
633 *  This is defined if the port has a special way to report the ISR nesting
634 *  level.  Most ports maintain the variable _ISR_Nest_level.
635 */
636
637#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
638
639/*
640 *  Should be large enough to run all tests.  This ensures
641 *  that a "reasonably" small application should not have any problems.
642 *
643 *  This appears to be a fairly generous number for the SPARC since
644 *  it represents a call depth of about 40 routines based on the minimum
645 *  stack frame.
646 */
647
648#define CPU_STACK_MINIMUM_SIZE  (1024*4)
649
650/*
651 *  CPU's worst alignment requirement for data types on a byte boundary.  This
652 *  alignment does not take into account the requirements for the stack.
653 *
654 *  On the SPARC, this is required for double word loads and stores.
655 */
656
657#define CPU_ALIGNMENT      8
658
659/*
660 *  This number corresponds to the byte alignment requirement for the
661 *  heap handler.  This alignment requirement may be stricter than that
662 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
663 *  common for the heap to follow the same alignment requirement as
664 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
665 *  then this should be set to CPU_ALIGNMENT.
666 *
667 *  NOTE:  This does not have to be a power of 2.  It does have to
668 *         be greater than or equal to CPU_ALIGNMENT.
669 */
670
671#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT
672
673/*
674 *  This number corresponds to the byte alignment requirement for memory
675 *  buffers allocated by the partition manager.  This alignment requirement
676 *  may be stricter than that for the data types alignment specified by
677 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
678 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
679 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
680 *
681 *  NOTE:  This does not have to be a power of 2.  It does have to
682 *         be greater than or equal to CPU_ALIGNMENT.
683 */
684
685#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT
686
687/*
688 *  This number corresponds to the byte alignment requirement for the
689 *  stack.  This alignment requirement may be stricter than that for the
690 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
691 *  is strict enough for the stack, then this should be set to 0.
692 *
693 *  NOTE:  This must be either 0 or a power of 2 greater than CPU_ALIGNMENT.
694 *
695 *  The alignment restrictions for the SPARC are not that strict but this
696 *  should ensure that the stack is always sufficiently aligned that the
697 *  window overflow, underflow, and flush routines can use double word loads
698 *  and stores.
699 */
700
701#define CPU_STACK_ALIGNMENT        16
702
703#ifndef ASM
704
705extern unsigned int sparc_disable_interrupts();
706extern void sparc_enable_interrupts();
707
708/*
709 *  ISR handler macros
710 */
711
712/*
713 *  Support routine to initialize the RTEMS vector table after it is allocated.
714 */
715
716#define _CPU_Initialize_vectors()
717
718/*
719 *  Disable all interrupts for a critical section.  The previous
720 *  level is returned in _level.
721 */
722
723#define _CPU_ISR_Disable( _level ) \
724  (_level) = sparc_disable_interrupts()
725 
726/*
727 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
728 *  This indicates the end of a critical section.  The parameter
729 *  _level is not modified.
730 */
731
732#define _CPU_ISR_Enable( _level ) \
733  sparc_enable_interrupts( _level )
734/*
735 *  This temporarily restores interrupts to _level before immediately
736 *  disabling them again.  This is used to divide long critical
737 *  sections into two or more parts.  The parameter _level is not
738 *  modified.
739 */
740
741#define _CPU_ISR_Flash( _level ) \
742  sparc_flash_interrupts( _level )
743 
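/*
 *  Editorial sketch (not part of the original header): typical use of the
 *  macros above to protect a critical section.  The function and variable
 *  names are hypothetical.
 */
#if 0
static void _editor_example_critical_section( void )
{
  uint32_t level;

  _CPU_ISR_Disable( level );      /* disable interrupts, remember the level */
  /* ... first part of the critical section ... */
  _CPU_ISR_Flash( level );        /* briefly let pending interrupts in */
  /* ... second part of the critical section ... */
  _CPU_ISR_Enable( level );       /* restore the previous interrupt level */
}
#endif
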
744/*
745 *  Map interrupt level in task mode onto the hardware that the CPU
746 *  actually provides.  Currently, interrupt levels which do not
747 *  map onto the CPU in a straightforward fashion are undefined.
748 */
749
750#define _CPU_ISR_Set_level( _newlevel ) \
751   sparc_enable_interrupts( _newlevel << 8)
752 
753uint32_t   _CPU_ISR_Get_level( void );
754 
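/*
 *  Editorial sketch (not part of the original header): the task-mode level
 *  occupies the low four bits (CPU_MODES_INTERRUPT_MASK) and is shifted by
 *  eight so that it lands in the PIL field of the PSR before being handed
 *  to sparc_enable_interrupts().  _CPU_ISR_Get_level() performs the inverse
 *  mapping, so after
 *
 *     _CPU_ISR_Set_level( 5 );
 *
 *  a call to _CPU_ISR_Get_level() is expected to return 5.
 */
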
755/* end of ISR handler macros */
756
757/* Context handler macros */
758
759/*
760 *  Initialize the context to a state suitable for starting a
761 *  task after a context restore operation.  Generally, this
762 *  involves:
763 *
764 *     - setting a starting address
765 *     - preparing the stack
766 *     - preparing the stack and frame pointers
767 *     - setting the proper interrupt level in the context
768 *     - initializing the floating point context
769 *
770 *  NOTE:  Implemented as a subroutine for the SPARC port.
771 */
772
773void _CPU_Context_Initialize(
774  Context_Control  *the_context,
775  uint32_t         *stack_base,
776  uint32_t          size,
777  uint32_t          new_level,
778  void             *entry_point,
779  boolean           is_fp
780);
781
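/*
 *  Editorial sketch (not part of the original header): a hypothetical call
 *  showing the argument order.  All names and values are illustrative.
 */
#if 0
extern void _editor_example_entry( void );

static Context_Control _editor_example_context;
static uint32_t        _editor_example_stack[ CPU_STACK_MINIMUM_SIZE / 4 ];

static void _editor_example_initialize( void )
{
  _CPU_Context_Initialize(
    &_editor_example_context,
    _editor_example_stack,
    sizeof( _editor_example_stack ),
    0,                                /* interrupt level for the task   */
    (void *) _editor_example_entry,   /* entry point                    */
    FALSE                             /* not a FLOATING_POINT task      */
  );
}
#endif
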
782/*
783 *  This routine is responsible for somehow restarting the currently
784 *  executing task. 
785 *
786 *  On the SPARC, this is relatively painless but requires a small
787 *  amount of wrapper code before using the regular restore code of
788 *  the context switch.
789 */
790
791#define _CPU_Context_Restart_self( _the_context ) \
792   _CPU_Context_restore( (_the_context) );
793
794/*
795 *  The FP context area for the SPARC is a simple structure and nothing
796 *  special is required to find the "starting load point".
797 */
798
799#define _CPU_Context_Fp_start( _base, _offset ) \
800   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
801
802/*
803 *  This routine initializes the FP context area passed to it.
804 *
805 *  The SPARC allows us to use the simple initialization model
806 *  in which an "initial" FP context was saved into _CPU_Null_fp_context
807 *  at CPU initialization and it is simply copied into the destination
808 *  context.
809 */
810
811#define _CPU_Context_Initialize_fp( _destination ) \
812  do { \
813   *(*(_destination)) = _CPU_Null_fp_context; \
814  } while (0)
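
/*
 *  Editorial sketch (not part of the original header): the macro expects a
 *  pointer to the thread's FP context pointer, hence the double dereference
 *  above.  The names below are hypothetical.
 */
#if 0
static Context_Control_fp  _editor_example_fp_area;
static Context_Control_fp *_editor_example_fp_context = &_editor_example_fp_area;

static void _editor_example_initialize_fp( void )
{
  _CPU_Context_Initialize_fp( &_editor_example_fp_context );
}
#endif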
815
816/* end of Context handler macros */
817
818/* Fatal Error manager macros */
819
820/*
821 *  This routine copies _error into a known place -- typically a stack
822 *  location or a register, optionally disables interrupts, and
823 *  halts/stops the CPU.
824 */
825
826#define _CPU_Fatal_halt( _error ) \
827  do { \
828    uint32_t   level; \
829    \
830    level = sparc_disable_interrupts(); \
831    asm volatile ( "mov  %0, %%g1 " : "=r" (level) : "0" (level) ); \
832    while (1); /* loop forever */ \
833  } while (0)
834
835/* end of Fatal Error manager macros */
836
837/* Bitfield handler macros */
838
839/*
840 *  The SPARC port uses the generic C algorithm for bitfield scan if the
841 *  CPU model does not have a scan instruction.
842 */
843
844#if ( SPARC_HAS_BITSCAN == 0 )
845#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
846#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
847#else
848#error "scan instruction not currently supported by RTEMS!!"
849#endif
850
851/* end of Bitfield handler macros */
852
853/* Priority handler macros */
854
855/*
856 *  The SPARC port also uses the generic C support for the priority
857 *  search if the CPU model does not have a scan instruction.
858 */
859
860#if ( SPARC_HAS_BITSCAN == 1 )
861#error "scan instruction not currently supported by RTEMS!!"
862#endif
863
864/* end of Priority handler macros */
865
866/* functions */
867
868/*
869 *  _CPU_Initialize
870 *
871 *  This routine performs CPU dependent initialization.
872 */
873
874void _CPU_Initialize(
875  rtems_cpu_table  *cpu_table,
876  void            (*thread_dispatch)
877);
878
879/*
880 *  _CPU_ISR_install_raw_handler
881 *
882 *  This routine installs new_handler to be directly called from the trap
883 *  table.
884 */
885 
886void _CPU_ISR_install_raw_handler(
887  uint32_t    vector,
888  proc_ptr    new_handler,
889  proc_ptr   *old_handler
890);
891
892/*
893 *  _CPU_ISR_install_vector
894 *
895 *  This routine installs an interrupt vector.
896 */
897
898void _CPU_ISR_install_vector(
899  uint32_t    vector,
900  proc_ptr    new_handler,
901  proc_ptr   *old_handler
902);
903
904#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)
905 
906/*
907 *  _CPU_Thread_Idle_body
908 *
909 *  Some SPARC implementations have low power, sleep, or idle modes.  This
910 *  tries to take advantage of those modes.
911 */
912 
913void _CPU_Thread_Idle_body( void );
914 
915#endif /* CPU_PROVIDES_IDLE_THREAD_BODY */
916
917/*
918 *  _CPU_Context_switch
919 *
920 *  This routine switches from the run context to the heir context.
921 */
922
923void _CPU_Context_switch(
924  Context_Control  *run,
925  Context_Control  *heir
926);
927
928/*
929 *  _CPU_Context_restore
930 *
931 *  This routine is generally used only to restart self in an
932 *  efficient manner.
933 */
934
935void _CPU_Context_restore(
936  Context_Control *new_context
937);
938
939/*
940 *  _CPU_Context_save_fp
941 *
942 *  This routine saves the floating point context passed to it.
943 */
944
945void _CPU_Context_save_fp(
946  Context_Control_fp **fp_context_ptr
947);
948
949/*
950 *  _CPU_Context_restore_fp
951 *
952 *  This routine restores the floating point context passed to it.
953 */
954
955void _CPU_Context_restore_fp(
956  Context_Control_fp **fp_context_ptr
957);
958
959/*
960 *  CPU_swap_u32
961 *
962 *  The following routine swaps the endian format of an unsigned int.
963 *  It must be static because it is referenced indirectly.
964 *
965 *  This version will work on any processor, but if you come across a better
966 *  way for the SPARC PLEASE use it.  The most common way to swap a 32-bit
967 *  entity as shown below is not any more efficient on the SPARC.
968 *
969 *     swap least significant two bytes with 16-bit rotate
970 *     swap upper and lower 16-bits
971 *     swap most significant two bytes with 16-bit rotate
972 *
973 *  It is not obvious how the SPARC can do significantly better than the
974 *  generic code.  gcc 2.7.0 only generates about 12 instructions for the
975 *  following code at optimization level four (i.e. -O4).
976 */
977 
978static inline uint32_t CPU_swap_u32(
979  uint32_t value
980)
981{
982  uint32_t   byte1, byte2, byte3, byte4, swapped;
983 
984  byte4 = (value >> 24) & 0xff;
985  byte3 = (value >> 16) & 0xff;
986  byte2 = (value >> 8)  & 0xff;
987  byte1 =  value        & 0xff;
988 
989  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
990  return( swapped );
991}
992
993#define CPU_swap_u16( value ) \
994  (((value&0xff) << 8) | ((value >> 8)&0xff))
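
/*
 *  Editorial sketch (not part of the original header): worked examples of
 *  the two swap helpers.
 *
 *     CPU_swap_u32( 0x12345678 ) == 0x78563412
 *     CPU_swap_u16( 0x1234 )     == 0x3412
 */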
995
996#endif /* ASM */
997
998#ifdef __cplusplus
999}
1000#endif
1001
1002#endif