source: rtems/cpukit/score/cpu/sparc64/rtems/score/cpu.h @ d2202ac

4.115
Last change on this file since d2202ac was f1738ed, checked in by Sebastian Huber <sebastian.huber@…>, on 11/04/12 at 20:04:39

score: PR1607: Add and use CPU_SIZEOF_POINTER

Add and use new CPU port define CPU_SIZEOF_POINTER. It must be an
integer literal that can be used by the assembler. This value will be
used to calculate offsets of structure members. These offsets will be
used in assembler code.

The size of a pointer is part of the application binary interface (ABI)
and thus independent of the actual programming language. The compiler
will provide defines to determine the current ABI. We use these defines
to select the appropriate CPU_SIZEOF_POINTER value.

Static assertions in the new file "cpukit/score/src/percpuasm.c" will
ensure that the value of CPU_SIZEOF_POINTER is consistent with the
current compiler settings. Also the offset values used by assembler
code are verified.

  • Property mode set to 100644
File size: 31.1 KB
Line 
1/**
2 * @file rtems/score/cpu.h
3 */
4
5/*
6 *  This include file contains information pertaining to the port of
7 *  the executive to the SPARC64 processor.
8 *
9 *  COPYRIGHT (c) 1989-2006.
10 *  On-Line Applications Research Corporation (OAR).
11 *
12 *  This file is based on the SPARC cpu.h file. Modifications are made
13 *  to support the SPARC64 processor.
14 *    COPYRIGHT (c) 2010. Gedare Bloom.
15 *
16 *  The license and distribution terms for this file may be
17 *  found in the file LICENSE in this distribution or at
18 *  http://www.rtems.com/license/LICENSE.
19 */
20
21#ifndef _RTEMS_SCORE_CPU_H
22#define _RTEMS_SCORE_CPU_H
23
24#ifdef __cplusplus
25extern "C" {
26#endif
27
28#include <rtems/score/types.h>
29#include <rtems/score/sparc64.h>
30
31/* conditional compilation parameters */
32
33/*
34 *  Should the calls to _Thread_Enable_dispatch be inlined?
35 *
36 *  If TRUE, then they are inlined.
37 *  If FALSE, then a subroutine call is made.
38 */
39
40#define CPU_INLINE_ENABLE_DISPATCH       TRUE
41
42/*
43 *  Should the body of the search loops in _Thread_queue_Enqueue_priority
44 *  be unrolled one time?  In unrolled each iteration of the loop examines
45 *  two "nodes" on the chain being searched.  Otherwise, only one node
46 *  is examined per iteration.
47 *
48 *  If TRUE, then the loops are unrolled.
49 *  If FALSE, then the loops are not unrolled.
50 *
51 *  This parameter could go either way on the SPARC.  The interrupt flash
52 *  code is relatively lengthy given the requirements for nops following
53 *  writes to the psr.  But if the clock speed were high enough, this would
54 *  not represent a great deal of time.
55 */
56
57#define CPU_UNROLL_ENQUEUE_PRIORITY      TRUE
58
59/*
60 *  Does the executive manage a dedicated interrupt stack in software?
61 *
62 *  If TRUE, then a stack is allocated in _ISR_Handler_initialization.
63 *  If FALSE, nothing is done.
64 *
65 *  The SPARC does not have a dedicated HW interrupt stack and one has
66 *  been implemented in SW.
67 */
68
69#define CPU_HAS_SOFTWARE_INTERRUPT_STACK   TRUE
70
71/*
72 *  Does the CPU follow the simple vectored interrupt model?
73 *
74 *  If TRUE, then RTEMS allocates the vector table it internally manages.
75 *  If FALSE, then the BSP is assumed to allocate and manage the vector
76 *  table
77 *
78 *  SPARC Specific Information:
79 *
80 *  XXX document implementation including references if appropriate
81 */
82#define CPU_SIMPLE_VECTORED_INTERRUPTS TRUE
83
84/*
85 *  Does this CPU have hardware support for a dedicated interrupt stack?
86 *
87 *  If TRUE, then it must be installed during initialization.
88 *  If FALSE, then no installation is performed.
89 *
90 *  The SPARC does not have a dedicated HW interrupt stack.
91 */
92
93#define CPU_HAS_HARDWARE_INTERRUPT_STACK  FALSE
94
95/*
96 *  Do we allocate a dedicated interrupt stack in the Interrupt Manager?
97 *
98 *  If TRUE, then the memory is allocated during initialization.
99 *  If FALSE, then the memory is not allocated during initialization.
100 */
101
102#define CPU_ALLOCATE_INTERRUPT_STACK      TRUE
103
104/*
105 *  Does the RTEMS invoke the user's ISR with the vector number and
106 *  a pointer to the saved interrupt frame (1) or just the vector
107 *  number (0)?
108 */
109
110#define CPU_ISR_PASSES_FRAME_POINTER 0
111
112/*
113 *  Does the CPU have hardware floating point?
114 *
115 *  If TRUE, then the FLOATING_POINT task attribute is supported.
116 *  If FALSE, then the FLOATING_POINT task attribute is ignored.
117 */
118
119#if ( SPARC_HAS_FPU == 1 )
120#define CPU_HARDWARE_FP     TRUE
121#else
122#define CPU_HARDWARE_FP     FALSE
123#endif
124#define CPU_SOFTWARE_FP     FALSE
125
126/*
127 *  Are all tasks FLOATING_POINT tasks implicitly?
128 *
129 *  If TRUE, then the FLOATING_POINT task attribute is assumed.
130 *  If FALSE, then the FLOATING_POINT task attribute is followed.
131 */
132
133#define CPU_ALL_TASKS_ARE_FP     FALSE
134
135/*
136 *  Should the IDLE task have a floating point context?
137 *
138 *  If TRUE, then the IDLE task is created as a FLOATING_POINT task
139 *  and it has a floating point context which is switched in and out.
140 *  If FALSE, then the IDLE task does not have a floating point context.
141 */
142
143#define CPU_IDLE_TASK_IS_FP      FALSE
144
145/*
146 *  Should the saving of the floating point registers be deferred
147 *  until a context switch is made to another different floating point
148 *  task?
149 *
150 *  If TRUE, then the floating point context will not be stored until
151 *  necessary.  It will remain in the floating point registers and not
152 *  disturbed until another floating point task is switched to.
153 *
154 *  If FALSE, then the floating point context is saved when a floating
155 *  point task is switched out and restored when the next floating point
156 *  task is restored.  The state of the floating point registers between
157 *  those two operations is not specified.
158 */
159
160#define CPU_USE_DEFERRED_FP_SWITCH       TRUE
161
162/*
163 *  Does this port provide a CPU dependent IDLE task implementation?
164 *
165 *  If TRUE, then the routine _CPU_Thread_Idle_body
166 *  must be provided and is the default IDLE thread body instead of
167 *  the generic _Thread_Idle_body.
168 *
169 *  If FALSE, then use the generic IDLE thread body if the BSP does
170 *  not provide one.
171 */
172
173#define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE
174
175/*
176 *  Does the stack grow up (toward higher addresses) or down
177 *  (toward lower addresses)?
178 *
179 *  If TRUE, then the stack grows upward.
180 *  If FALSE, then the stack grows toward smaller addresses.
181 *
182 *  The stack grows to lower addresses on the SPARC.
183 */
184
185#define CPU_STACK_GROWS_UP               FALSE
186
187/*
188 *  The following is the variable attribute used to force alignment
189 *  of critical data structures.  On some processors it may make
190 *  sense to have these aligned on tighter boundaries than
191 *  the minimum requirements of the compiler in order to have as
192 *  much of the critical data area as possible in a cache line.
193 *
194 *  The SPARC does not appear to have particularly strict alignment
195 *  requirements.  This value (16) was chosen to take advantages of caches.
196 *
197 *  SPARC 64 requirements on floating point alignment is at least 8,
198 *  and is 16 if quad-word fp instructions are available (e.g. LDQF).
199 */
200
201#define CPU_STRUCTURE_ALIGNMENT          __attribute__ ((aligned (16)))
202
203#define CPU_TIMESTAMP_USE_INT64_INLINE TRUE
204
205/*
206 *  Define what is required to specify how the network to host conversion
207 *  routines are handled.
208 */
209
210#define CPU_BIG_ENDIAN                           TRUE
211#define CPU_LITTLE_ENDIAN                        FALSE
212
213/*
214 *  The following defines the number of bits actually used in the
215 *  interrupt field of the task mode.  How those bits map to the
216 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
217 *
218 *  The SPARC v9 has 16 interrupt levels in the PIL field of the PSR.
219 */
220
221#define CPU_MODES_INTERRUPT_MASK   0x0000000F
222
223/*
224 *  This structure represents the organization of the minimum stack frame
225 *  for the SPARC.  More framing information is required in certain situations
226 *  such as when there are a large number of out parameters or when the callee
227 *  must save floating point registers.
228 */
229
230#ifndef ASM
231
/*
 *  Minimum stack frame for a SPARC64 procedure call.
 *
 *  NOTE: The field layout must stay in sync with the
 *  CPU_STACK_FRAME_*_OFFSET constants and CPU_MINIMUM_STACK_FRAME_SIZE
 *  defined below; assembly code addresses these fields by byte offset.
 */
typedef struct {
  /* local registers %l0-%l7 of the register window */
  uint64_t    l0;
  uint64_t    l1;
  uint64_t    l2;
  uint64_t    l3;
  uint64_t    l4;
  uint64_t    l5;
  uint64_t    l6;
  uint64_t    l7;
  /* input registers %i0-%i7; %i6 doubles as the frame pointer */
  uint64_t    i0;
  uint64_t    i1;
  uint64_t    i2;
  uint64_t    i3;
  uint64_t    i4;
  uint64_t    i5;
  uint64_t    i6_fp;
  uint64_t    i7;
  /* slot for the address of a structure returned by value */
  void       *structure_return_address;
  /*
   *  The following are for the callee to save the register arguments in
   *  should this be necessary.
   */
  uint64_t    saved_arg0;
  uint64_t    saved_arg1;
  uint64_t    saved_arg2;
  uint64_t    saved_arg3;
  uint64_t    saved_arg4;
  uint64_t    saved_arg5;
  /* pad so the total size (0xC0) stays a multiple of 16 bytes */
  uint64_t    pad0;
}  CPU_Minimum_stack_frame;
262
263#endif /* !ASM */
264
/*
 *  Byte offsets of the CPU_Minimum_stack_frame fields above, for use by
 *  assembly code.  They must stay in sync with the structure layout.
 */
#define CPU_STACK_FRAME_L0_OFFSET             0x00
#define CPU_STACK_FRAME_L1_OFFSET             0x08
#define CPU_STACK_FRAME_L2_OFFSET             0x10
#define CPU_STACK_FRAME_L3_OFFSET             0x18
#define CPU_STACK_FRAME_L4_OFFSET             0x20
#define CPU_STACK_FRAME_L5_OFFSET             0x28
#define CPU_STACK_FRAME_L6_OFFSET             0x30
#define CPU_STACK_FRAME_L7_OFFSET             0x38
#define CPU_STACK_FRAME_I0_OFFSET             0x40
#define CPU_STACK_FRAME_I1_OFFSET             0x48
#define CPU_STACK_FRAME_I2_OFFSET             0x50
#define CPU_STACK_FRAME_I3_OFFSET             0x58
#define CPU_STACK_FRAME_I4_OFFSET             0x60
#define CPU_STACK_FRAME_I5_OFFSET             0x68
#define CPU_STACK_FRAME_I6_FP_OFFSET          0x70
#define CPU_STACK_FRAME_I7_OFFSET             0x78
#define CPU_STRUCTURE_RETURN_ADDRESS_OFFSET   0x80
#define CPU_STACK_FRAME_SAVED_ARG0_OFFSET     0x88
#define CPU_STACK_FRAME_SAVED_ARG1_OFFSET     0x90
#define CPU_STACK_FRAME_SAVED_ARG2_OFFSET     0x98
#define CPU_STACK_FRAME_SAVED_ARG3_OFFSET     0xA0
#define CPU_STACK_FRAME_SAVED_ARG4_OFFSET     0xA8
#define CPU_STACK_FRAME_SAVED_ARG5_OFFSET     0xB0
#define CPU_STACK_FRAME_PAD0_OFFSET           0xB8

/* Total size of CPU_Minimum_stack_frame (0xC0 = 192 bytes). */
#define CPU_MINIMUM_STACK_FRAME_SIZE          0xC0
291
292/*
293 * Contexts
294 *
295 *  Generally there are 2 types of context to save.
296 *     1. Interrupt registers to save
297 *     2. Task level registers to save
298 *
299 *  This means we have the following 3 context items:
300 *     1. task level context stuff::  Context_Control
301 *     2. floating point task stuff:: Context_Control_fp
302 *     3. special interrupt level context :: Context_Control_interrupt
303 *
304 *  On the SPARC, we are relatively conservative in that we save most
305 *  of the CPU state in the context area.  The ET (enable trap) bit and
306 *  the CWP (current window pointer) fields of the PSR are considered
307 *  system wide resources and are not maintained on a per-thread basis.
308 */
309
310#ifndef ASM
311
/*
 *  SPARC64 integer (task level) register context.
 *
 *  NOTE: The field layout must stay in sync with the *_OFFSET constants
 *  and CONTEXT_CONTROL_SIZE defined below; the context switch assembly
 *  code accesses these fields by byte offset.
 */
typedef struct {
    /* global registers %g1-%g7 */
    uint64_t   g1;
    uint64_t   g2;
    uint64_t   g3;
    uint64_t   g4;
    uint64_t   g5;
    uint64_t   g6;
    uint64_t   g7;

    /* local registers %l0-%l7 */
    uint64_t   l0;
    uint64_t   l1;
    uint64_t   l2;
    uint64_t   l3;
    uint64_t   l4;
    uint64_t   l5;
    uint64_t   l6;
    uint64_t   l7;

    /* input registers %i0-%i7; %i6 doubles as the frame pointer */
    uint64_t   i0;
    uint64_t   i1;
    uint64_t   i2;
    uint64_t   i3;
    uint64_t   i4;
    uint64_t   i5;
    uint64_t   i6_fp;
    uint64_t   i7;

    /* output registers %o0-%o7; %o6 doubles as the stack pointer */
    uint64_t   o0;
    uint64_t   o1;
    uint64_t   o2;
    uint64_t   o3;
    uint64_t   o4;
    uint64_t   o5;
    uint64_t   o6_sp;
    uint64_t   o7;

    /* per-thread copy of the ISR dispatch disable flag */
    uint32_t   isr_dispatch_disable;
    /* pad to keep the total size (0x100) a multiple of 8 bytes */
    uint32_t   pad;
} Context_Control;
351
/*
 *  Obtain the stack pointer (%o6/%sp) from a Context_Control.
 *
 *  The expansion is fully parenthesized so the macro can be used
 *  safely inside larger expressions.
 */
#define _CPU_Context_Get_SP( _context ) \
  ( (_context)->o6_sp )
354
355#endif /* ASM */
356
357/*
358 *  Offsets of fields within Context_Control for assembly routines.
359 */
360
/* global registers */
#define G1_OFFSET                         0x00
#define G2_OFFSET                         0x08
#define G3_OFFSET                         0x10
#define G4_OFFSET                         0x18
#define G5_OFFSET                         0x20
#define G6_OFFSET                         0x28
#define G7_OFFSET                         0x30

/* local registers */
#define L0_OFFSET                         0x38
#define L1_OFFSET                         0x40
#define L2_OFFSET                         0x48
#define L3_OFFSET                         0x50
#define L4_OFFSET                         0x58
#define L5_OFFSET                         0x60
#define L6_OFFSET                         0x68
#define L7_OFFSET                         0x70

/* input registers (%i6 is the frame pointer) */
#define I0_OFFSET                         0x78
#define I1_OFFSET                         0x80
#define I2_OFFSET                         0x88
#define I3_OFFSET                         0x90
#define I4_OFFSET                         0x98
#define I5_OFFSET                         0xA0
#define I6_FP_OFFSET                      0xA8
#define I7_OFFSET                         0xB0

/* output registers (%o6 is the stack pointer) */
#define O0_OFFSET                         0xB8
#define O1_OFFSET                         0xC0
#define O2_OFFSET                         0xC8
#define O3_OFFSET                         0xD0
#define O4_OFFSET                         0xD8
#define O5_OFFSET                         0xE0
#define O6_SP_OFFSET                      0xE8
#define O7_OFFSET                         0xF0

/* ISR dispatch disable flag and trailing pad */
#define ISR_DISPATCH_DISABLE_STACK_OFFSET 0xF8
#define ISR_PAD_OFFSET                    0xFC

/* total size of Context_Control */
#define CONTEXT_CONTROL_SIZE              0x100
400
401/*
402 *  The floating point context area.
403 */
404
405#ifndef ASM
406
/*
 *  SPARC V9 floating point context.
 *
 *  Fields f0-f30 each hold an even/odd register pair (see the trailing
 *  comments); fields f32-f62 hold the V9 upper floating point
 *  registers, and fsr holds the floating point state register.  The
 *  layout must stay in sync with the F*_OFFSET constants and
 *  CONTEXT_CONTROL_FP_SIZE defined below.
 */
typedef struct {
    double      f0;     /* f0-f1 */
    double      f2;     /* f2-f3 */
    double      f4;     /* f4-f5 */
    double      f6;     /* f6-f7 */
    double      f8;     /* f8-f9 */
    double      f10;    /* f10-f11 */
    double      f12;    /* f12-f13 */
    double      f14;    /* f14-f15 */
    double      f16;    /* f16-f17 */
    double      f18;    /* f18-f19 */
    double      f20;    /* f20-f21 */
    double      f22;    /* f22-f23 */
    double      f24;    /* f24-f25 */
    double      f26;    /* f26-f27 */
    double      f28;    /* f28-f29 */
    double      f30;    /* f30-f31 */
    double      f32;
    double      f34;
    double      f36;
    double      f38;
    double      f40;
    double      f42;
    double      f44;
    double      f46;
    double      f48;
    double      f50;
    double      f52;
    double      f54;
    double      f56;
    double      f58;
    double      f60;
    double      f62;
    uint64_t    fsr;    /* floating point state register */
} Context_Control_fp;
442
443#endif /* !ASM */
444
445/*
446 *  Offsets of fields within Context_Control_fp for assembly routines.
447 */
448
/*
 *  NOTE: Several of the historical names below spell a register
 *  number's zero with the letter 'O' (FO_OFFSET is f0, F1O_OFFSET is
 *  f10, ...).  Those names are retained for compatibility with
 *  existing assembly code; correctly spelled aliases are provided
 *  alongside them.
 */
#define FO_OFFSET    0x00
#define F0_OFFSET    FO_OFFSET
#define F2_OFFSET    0x08
#define F4_OFFSET    0x10
#define F6_OFFSET    0x18
#define F8_OFFSET    0x20
#define F1O_OFFSET   0x28
#define F10_OFFSET   F1O_OFFSET
#define F12_OFFSET   0x30
#define F14_OFFSET   0x38
#define F16_OFFSET   0x40
#define F18_OFFSET   0x48
#define F2O_OFFSET   0x50
#define F20_OFFSET   F2O_OFFSET
#define F22_OFFSET   0x58
#define F24_OFFSET   0x60
#define F26_OFFSET   0x68
#define F28_OFFSET   0x70
#define F3O_OFFSET   0x78
#define F30_OFFSET   F3O_OFFSET
#define F32_OFFSET   0x80
#define F34_OFFSET   0x88
#define F36_OFFSET   0x90
#define F38_OFFSET   0x98
#define F4O_OFFSET   0xA0
#define F40_OFFSET   F4O_OFFSET
#define F42_OFFSET   0xA8
#define F44_OFFSET   0xB0
#define F46_OFFSET   0xB8
#define F48_OFFSET   0xC0
#define F5O_OFFSET   0xC8
#define F50_OFFSET   F5O_OFFSET
#define F52_OFFSET   0xD0
#define F54_OFFSET   0xD8
#define F56_OFFSET   0xE0
#define F58_OFFSET   0xE8
#define F6O_OFFSET   0xF0
#define F60_OFFSET   F6O_OFFSET
#define F62_OFFSET   0xF8
#define FSR_OFFSET   0x100

/* Total size of Context_Control_fp (0x108 = 264 bytes). */
#define CONTEXT_CONTROL_FP_SIZE 0x108
484
485#ifndef ASM
486
487/*
488 *  Context saved on stack for an interrupt.
489 *
490 *  NOTE:  The tstate, tpc, and tnpc are saved in this structure
491 *         to allow resetting the TL while still being able to return
492 *         from a trap later.  The PIL is saved because
493 *         if this is an external interrupt, we will mask lower
494 *         priority interrupts until finishing. Even though the y register
495 *         is deprecated, gcc still uses it.
496 */
497
typedef struct {
  /* space for a minimum stack frame for the interrupted context */
  CPU_Minimum_stack_frame  Stack_frame;
  uint64_t                 tstate;   /* trap state register */
  uint64_t                 tpc;      /* trap program counter */
  uint64_t                 tnpc;     /* trap next program counter */
  uint64_t                 pil;      /* processor interrupt level at trap time */
  uint64_t                 y;        /* y register (deprecated, still used by gcc) */
  /* global registers %g1-%g7 */
  uint64_t                 g1;
  uint64_t                 g2;
  uint64_t                 g3;
  uint64_t                 g4;
  uint64_t                 g5;
  uint64_t                 g6;
  uint64_t                 g7;
  /* output registers %o0-%o7; %o6 is the stack pointer */
  uint64_t                 o0;
  uint64_t                 o1;
  uint64_t                 o2;
  uint64_t                 o3;
  uint64_t                 o4;
  uint64_t                 o5;
  uint64_t                 o6_sp;
  uint64_t                 o7;
  /* presumably the trap vector number -- confirm against the ISR entry code */
  uint64_t                 tvec;
} CPU_Interrupt_frame;
522
523#endif /* ASM */
524
525/*
526 *  Offsets of fields within CPU_Interrupt_frame for assembly routines.
527 */
528
#define ISF_STACK_FRAME_OFFSET 0x00
/*
 *  The expansions below are parenthesized so the offsets are safe
 *  inside larger expressions; the previous unparenthesized sums would
 *  bind incorrectly (e.g. when multiplied or subtracted).
 */
#define ISF_TSTATE_OFFSET      (CPU_MINIMUM_STACK_FRAME_SIZE + 0x00)
#define ISF_TPC_OFFSET         (CPU_MINIMUM_STACK_FRAME_SIZE + 0x08)
#define ISF_TNPC_OFFSET        (CPU_MINIMUM_STACK_FRAME_SIZE + 0x10)
#define ISF_PIL_OFFSET         (CPU_MINIMUM_STACK_FRAME_SIZE + 0x18)
#define ISF_Y_OFFSET           (CPU_MINIMUM_STACK_FRAME_SIZE + 0x20)
#define ISF_G1_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x28)
#define ISF_G2_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x30)
#define ISF_G3_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x38)
#define ISF_G4_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x40)
#define ISF_G5_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x48)
#define ISF_G6_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x50)
#define ISF_G7_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x58)
#define ISF_O0_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x60)
#define ISF_O1_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x68)
#define ISF_O2_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x70)
#define ISF_O3_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x78)
#define ISF_O4_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x80)
#define ISF_O5_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x88)
#define ISF_O6_SP_OFFSET       (CPU_MINIMUM_STACK_FRAME_SIZE + 0x90)
#define ISF_O7_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x98)
#define ISF_TVEC_OFFSET        (CPU_MINIMUM_STACK_FRAME_SIZE + 0xA0)

/* Total size of the interrupt frame, including the minimum stack frame. */
#define CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE (CPU_MINIMUM_STACK_FRAME_SIZE + 0xA8)
553#ifndef ASM
554/*
555 *  This variable contains the initial context for the FP unit.
556 *  It is filled in by _CPU_Initialize and copied into the task's FP
557 *  context area during _CPU_Context_Initialize.
558 */
559
560SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context CPU_STRUCTURE_ALIGNMENT;
561
562/*
563 *  This stack is allocated by the Interrupt Manager and the switch
564 *  is performed in _ISR_Handler.  These variables contain pointers
565 *  to the lowest and highest addresses in the chunk of memory allocated
566 *  for the interrupt stack.  Since it is unknown whether the stack
567 *  grows up or down (in general), this give the CPU dependent
568 *  code the option of picking the version it wants to use.  Thus
569 *  both must be present if either is.
570 *
571 *  The SPARC supports a software based interrupt stack and these
572 *  are required.
573 */
574/*
575SCORE_EXTERN void *_CPU_Interrupt_stack_low;
576SCORE_EXTERN void *_CPU_Interrupt_stack_high;
577*/
578/*
579 *  This flag is context switched with each thread.  It indicates
580 *  that THIS thread has an _ISR_Dispatch stack frame on its stack.
581 *  By using this flag, we can avoid nesting more interrupt dispatching
582 *  attempts on a previously interrupted thread's stack.
583 */
584
585SCORE_EXTERN volatile uint32_t _CPU_ISR_Dispatch_disable;
586
587/*
588 *  The following type defines an entry in the SPARC's trap table.
589 *
590 *  NOTE: The instructions chosen are RTEMS dependent although one is
591 *        obligated to use two of the four instructions to perform a
592 *        long jump.  The other instructions load one register with the
593 *        trap type (a.k.a. vector) and another with the psr.
594 */
595/* For SPARC V9, we must use 6 of these instructions to perform a long
596 * jump, because the _handler value is now 64-bits. We also need to store
597 * temporary values in the global register set at this trap level. Because
598 * the handler runs at TL > 0 with GL > 0, it should be OK to use g2 and g3
599 * to pass parameters to ISR_Handler.
600 *
601 * The instruction sequence is now more like:
602 *      rdpr %tstate, %g4
603 *      setx _handler, %g2, %g3
604 *      jmp %g3+0
605 *      mov _vector, %g2
606 */
/*
 *  One SPARC V9 trap table entry: eight 32-bit instruction slots which
 *  load the 64-bit handler address and the vector number, then jump
 *  (see the instruction sequence described above).
 */
typedef struct {
  uint32_t     rdpr_tstate_g4;                  /* rdpr  %tstate, %g4        */
  uint32_t     sethi_of_hh_handler_to_g2;       /* sethi %hh(_handler), %g2  */
  uint32_t     or_g2_hm_handler_to_g2;          /* or    %g2, %hm(_handler), %g2 */
  uint32_t     sllx_g2_by_32_to_g2;             /* sllx  %g2, 32, %g2        */
  uint32_t     sethi_of_handler_to_g3;          /* sethi %hi(_handler), %g3  */
  uint32_t     or_g3_g2_to_g3;                  /* or    %g3, %g2, %g3       */
  uint32_t     jmp_to_low_of_handler_plus_g3;   /* jmp   %g3 + %lo(_handler) */
  uint32_t     mov_vector_g2;                   /* mov   _vector, %g2        */
} CPU_Trap_table_entry;
617 
618/*
619 *  This is the set of opcodes for the instructions loaded into a trap
620 *  table entry.  The routine which installs a handler is responsible
621 *  for filling in the fields for the _handler address and the _vector
622 *  trap type.
623 *
624 *  The constants following this structure are masks for the fields which
625 *  must be filled in when the handler is installed.
626 */
627 
628extern const CPU_Trap_table_entry _CPU_Trap_slot_template;
629
630/*
631 *  The size of the floating point context area. 
632 */
633
634#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
635
636#endif
637
638/*
639 *  Amount of extra stack (above minimum stack size) required by
640 *  MPCI receive server thread.  Remember that in a multiprocessor
641 *  system this thread must exist and be able to process all directives.
642 */
643
644#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 1024
645
646/*
647 *  This defines the number of entries in the ISR_Vector_table managed
648 *  by the executive.
649 *
650 *  On the SPARC, there are really only 256 vectors.  However, the executive
651 *  has no easy, fast, reliable way to determine which traps are synchronous
652 *  and which are asynchronous.  By default, synchronous traps return to the
653 *  instruction which caused the interrupt.  So if you install a software
654 *  trap handler as an executive interrupt handler (which is desirable since
655 *  RTEMS takes care of window and register issues), then the executive needs
656 *  to know that the return address is to the trap rather than the instruction
657 *  following the trap.
658 *
659 *  So vectors 0 through 255 are treated as regular asynchronous traps which
660 *  provide the "correct" return address.  Vectors 256 through 512 are assumed
661 *  by the executive to be synchronous and to require that the return address
662 *  be fudged.
663 *
664 *  If you use this mechanism to install a trap handler which must reexecute
665 *  the instruction which caused the trap, then it should be installed as
666 *  an asynchronous trap.  This will avoid the executive changing the return
667 *  address.
668 */
669/* On SPARC v9, there are 512 vectors. The same philosophy applies to
670 * vector installation and use, we just provide a larger table.
671 */
672#define CPU_INTERRUPT_NUMBER_OF_VECTORS     512
673#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER 1023
674
675#define SPARC_SYNCHRONOUS_TRAP_BIT_MASK     0x200
676#define SPARC_ASYNCHRONOUS_TRAP( _trap )    (_trap)
677#define SPARC_SYNCHRONOUS_TRAP( _trap )     ((_trap) + 512 )
678
679#define SPARC_REAL_TRAP_NUMBER( _trap )     ((_trap) % 512)
680
681/*
682 *  This is defined if the port has a special way to report the ISR nesting
683 *  level.  Most ports maintain the variable _ISR_Nest_level.
684 */
685
686#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
687
688/*
689 *  Should be large enough to run all tests.  This ensures
690 *  that a "reasonable" small application should not have any problems.
691 *
692 *  This appears to be a fairly generous number for the SPARC since
693 *  represents a call depth of about 20 routines based on the minimum
694 *  stack frame.
695 */
696
697#define CPU_STACK_MINIMUM_SIZE  (1024*8)
698
699#define CPU_SIZEOF_POINTER 8
700
701/*
702 *  CPU's worst alignment requirement for data types on a byte boundary.  This
703 *  alignment does not take into account the requirements for the stack.
704 *
705 *  On the SPARC, this is required for double word loads and stores.
706 *
707 *  Note: quad-word loads/stores need alignment of 16, but currently supported
708 *  architectures do not provide HW implemented quad-word operations.
709 */
710
711#define CPU_ALIGNMENT      8
712
713/*
714 *  This number corresponds to the byte alignment requirement for the
715 *  heap handler.  This alignment requirement may be stricter than that
716 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
717 *  common for the heap to follow the same alignment requirement as
718 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
719 *  then this should be set to CPU_ALIGNMENT.
720 *
721 *  NOTE:  This does not have to be a power of 2.  It does have to
722 *         be greater than or equal to CPU_ALIGNMENT.
723 */
724
725#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT
726
727/*
728 *  This number corresponds to the byte alignment requirement for memory
729 *  buffers allocated by the partition manager.  This alignment requirement
730 *  may be stricter than that for the data types alignment specified by
731 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
732 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
733 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
734 *
735 *  NOTE:  This does not have to be a power of 2.  It does have to
736 *         be greater than or equal to CPU_ALIGNMENT.
737 */
738
739#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT
740
741/*
742 *  This number corresponds to the byte alignment requirement for the
743 *  stack.  This alignment requirement may be stricter than that for the
744 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
745 *  is strict enough for the stack, then this should be set to 0.
746 *
747 *  NOTE:  This must be a power of 2 either 0 or greater than CPU_ALIGNMENT.
748 *
749 *  The alignment restrictions for the SPARC are not that strict but this
750 *  should ensure that the stack is always sufficiently aligned so that the
751 *  window overflow, underflow, and flush routines can use double word loads
752 *  and stores.
753 */
754
755#define CPU_STACK_ALIGNMENT        16
756
757#ifndef ASM
758
759/*
760 *  ISR handler macros
761 */
762
763/*
764 *  Support routine to initialize the RTEMS vector table after it is allocated.
765 */
766
767#define _CPU_Initialize_vectors()
768
769/*
770 *  Disable all interrupts for a critical section.  The previous
771 *  level is returned in _level.
772 */
773
#define _CPU_ISR_Disable( _level ) \
  (_level) = sparc_disable_interrupts()
776
777/*
778 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
779 *  This indicates the end of a critical section.  The parameter
780 *  _level is not modified.
781 */
782
783#define _CPU_ISR_Enable( _level ) \
784  sparc_enable_interrupts( _level )
785
786/*
787 *  This temporarily restores the interrupt to _level before immediately
788 *  disabling them again.  This is used to divide long critical
789 *  sections into two or more parts.  The parameter _level is not
790 *  modified.
791 */
792
793#define _CPU_ISR_Flash( _level ) \
794   sparc_flash_interrupts( _level )
795
796/*
797 *  Map interrupt level in task mode onto the hardware that the CPU
798 *  actually provides.  Currently, interrupt levels which do not
799 *  map onto the CPU in a straight fashion are undefined. 
800 */
801
802#define _CPU_ISR_Set_level( _newlevel ) \
803   sparc_enable_interrupts( _newlevel)
804
805uint32_t   _CPU_ISR_Get_level( void );
806 
807/* end of ISR handler macros */
808
809/* Context handler macros */
810
811/*
812 *  Initialize the context to a state suitable for starting a
813 *  task after a context restore operation.  Generally, this
814 *  involves:
815 *
816 *     - setting a starting address
817 *     - preparing the stack
818 *     - preparing the stack and frame pointers
819 *     - setting the proper interrupt level in the context
820 *     - initializing the floating point context
821 *
822 *  NOTE:  Implemented as a subroutine for the SPARC port.
823 */
824
void _CPU_Context_Initialize(
  Context_Control  *the_context,  /* context to fill in */
  void         *stack_base,       /* base address of the task's stack area */
  uint32_t          size,         /* size of the stack area in bytes */
  uint32_t          new_level,    /* initial interrupt level for the task */
  void             *entry_point,  /* address where the task starts executing */
  bool              is_fp         /* TRUE if the task is a FLOATING_POINT task */
);
833
834/*
835 *  This macro is invoked from _Thread_Handler to do whatever CPU
836 *  specific magic is required that must be done in the context of
837 *  the thread when it starts.
838 *
839 *  On the SPARC, this is setting the frame pointer so GDB is happy.
840 *  Make GDB stop unwinding at _Thread_Handler, previous register window
841 *  Frame pointer is 0 and calling address must be a function with starting
842 *  with a SAVE instruction. If return address is leaf-function (no SAVE)
843 *  GDB will not look at prev reg window fp.
844 *
845 *  _Thread_Handler is known to start with SAVE.
846 */
847
/*
 *  Load the address of _Thread_Handler into %i7 (the return address
 *  register) so GDB stops unwinding at _Thread_Handler -- see the
 *  explanation above.
 */
#define _CPU_Context_Initialization_at_thread_begin() \
  do { \
    __asm__ volatile ("set _Thread_Handler,%%i7\n"::); \
  } while (0)
852
853/*
854 *  This routine is responsible for somehow restarting the currently
855 *  executing task. 
856 *
857 *  On the SPARC, this is is relatively painless but requires a small
858 *  amount of wrapper code before using the regular restore code in
859 *  of the context switch.
860 */
861
/*
 *  Restart the currently executing task by restoring its context.
 *
 *  NOTE(review): the expansion already ends with a semicolon, so a
 *  caller-supplied ';' produces an extra empty statement -- confirm the
 *  macro is never used in an unbraced if/else.
 */
#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );
864
865/*
866 *  The FP context area for the SPARC is a simple structure and nothing
867 *  special is required to find the "starting load point"
868 */
869
/*
 *  Starting load point of an FP context: simply _base advanced by
 *  _offset bytes (the FP area needs no special framing -- see above).
 */
#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
872
873/*
874 *  This routine initializes the FP context area passed to it to.
875 *
876 *  The SPARC allows us to use the simple initialization model
877 *  in which an "initial" FP context was saved into _CPU_Null_fp_context
878 *  at CPU initialization and it is simply copied into the destination
879 *  context.
880 */
881
/*
 *  Copy the pristine FP context captured at CPU initialization
 *  (_CPU_Null_fp_context) into the destination context.  The double
 *  dereference suggests _destination is a pointer to the thread's FP
 *  context pointer (Context_Control_fp **) -- the structure copy
 *  happens through it.
 */
#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
   *(*(_destination)) = _CPU_Null_fp_context; \
  } while (0)
886
887/* end of Context handler macros */
888
889/* Fatal Error manager macros */
890
891/*
892 *  This routine copies _error into a known place -- typically a stack
893 *  location or a register, optionally disables interrupts, and
894 *  halts/stops the CPU.
895 */
896
/*
 *  Disable interrupts, place a value in %g1 for post-mortem
 *  inspection, and spin forever.
 *
 *  NOTE(review): the _error argument is never used -- the comment
 *  above says _error should be copied to a known place, yet the asm
 *  moves the saved interrupt "level" into %g1.  Confirm whether
 *  "0" (level) should instead be "0" (_error).
 */
#define _CPU_Fatal_halt( _error ) \
  do { \
    uint32_t   level; \
    \
    level = sparc_disable_interrupts(); \
    __asm__ volatile ( "mov  %0, %%g1 " : "=r" (level) : "0" (level) ); \
    while (1); /* loop forever */ \
  } while (0)
905
906/* end of Fatal Error manager macros */
907
/* Bitfield handler macros */

/*
 *  The SPARC port uses the generic C algorithm for bitfield scan
 *  because no supported CPU model provides a hardware scan instruction
 *  (SPARC_HAS_BITSCAN is always 0 at present).
 */

#if ( SPARC_HAS_BITSCAN == 0 )
#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
#else
#error "scan instruction not currently supported by RTEMS!!"
#endif
921
922/* end of Bitfield handler macros */
923
/* Priority handler macros */

/*
 *  The SPARC port uses the generic C algorithm for priority bitfield
 *  scan; a CPU model claiming a hardware scan instruction is rejected
 *  at compile time.
 */

#if ( SPARC_HAS_BITSCAN == 1 )
#error "scan instruction not currently supported by RTEMS!!"
#endif
934
935/* end of Priority handler macros */
936
937/* functions */
938
939/*
940 *  _CPU_Initialize
941 *
942 *  This routine performs CPU dependent initialization.
943 */
944
945void _CPU_Initialize(void);
946
947/*
948 *  _CPU_ISR_install_raw_handler
949 *
950 *  This routine installs new_handler to be directly called from the trap
951 *  table.
952 */
953 
954void _CPU_ISR_install_raw_handler(
955  uint32_t    vector,
956  proc_ptr    new_handler,
957  proc_ptr   *old_handler
958);
959
960/*
961 *  _CPU_ISR_install_vector
962 *
963 *  This routine installs an interrupt vector.
964 */
965
966void _CPU_ISR_install_vector(
967  uint64_t    vector,
968  proc_ptr    new_handler,
969  proc_ptr   *old_handler
970);
971
#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)
 
/*
 *  _CPU_Thread_Idle_body
 *
 *  Some SPARC implementations have low power, sleep, or idle modes.  This
 *  routine tries to take advantage of those modes.  The "ignored"
 *  argument exists only to satisfy the generic idle-thread-body
 *  signature.
 */
 
void *_CPU_Thread_Idle_body( uintptr_t ignored );

#endif /* CPU_PROVIDES_IDLE_THREAD_BODY */
984
985/*
986 *  _CPU_Context_switch
987 *
988 *  This routine switches from the run context to the heir context.
989 */
990
991void _CPU_Context_switch(
992  Context_Control  *run,
993  Context_Control  *heir
994);
995
996/*
997 *  _CPU_Context_restore
998 *
999 *  This routine is generally used only to restart self in an
1000 *  efficient manner.
1001 */
1002
1003void _CPU_Context_restore(
1004  Context_Control *new_context
1005) RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;
1006
1007/*
1008 *  _CPU_Context_save_fp
1009 *
1010 *  This routine saves the floating point context passed to it.
1011 */
1012
1013void _CPU_Context_save_fp(
1014  Context_Control_fp **fp_context_ptr
1015);
1016
1017/*
1018 *  _CPU_Context_restore_fp
1019 *
1020 *  This routine restores the floating point context passed to it.
1021 */
1022
1023void _CPU_Context_restore_fp(
1024  Context_Control_fp **fp_context_ptr
1025);
1026
1027/*
1028 *  CPU_swap_u32
1029 *
1030 *  The following routine swaps the endian format of an unsigned int.
1031 *  It must be static because it is referenced indirectly.
1032 *
1033 *  This version will work on any processor, but if you come across a better
1034 *  way for the SPARC PLEASE use it.  The most common way to swap a 32-bit
1035 *  entity as shown below is not any more efficient on the SPARC.
1036 *
1037 *     swap least significant two bytes with 16-bit rotate
1038 *     swap upper and lower 16-bits
1039 *     swap most significant two bytes with 16-bit rotate
1040 *
1041 *  It is not obvious how the SPARC can do significantly better than the
1042 *  generic code.  gcc 2.7.0 only generates about 12 instructions for the
1043 *  following code at optimization level four (i.e. -O4).
1044 */
1045 
1046static inline uint32_t CPU_swap_u32(
1047  uint32_t value
1048)
1049{
1050  uint32_t   byte1, byte2, byte3, byte4, swapped;
1051 
1052  byte4 = (value >> 24) & 0xff;
1053  byte3 = (value >> 16) & 0xff;
1054  byte2 = (value >> 8)  & 0xff;
1055  byte1 =  value        & 0xff;
1056 
1057  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
1058  return( swapped );
1059}
1060
/*
 *  CPU_swap_u16
 *
 *  Swap the byte order of a 16-bit value.  The macro argument is fully
 *  parenthesized so expressions such as CPU_swap_u16(a ^ b) bind
 *  correctly (the original expansion applied & and >> to the raw
 *  argument text, mis-associating lower-precedence operators).
 *
 *  NOTE: (value) is still evaluated twice -- do not pass arguments
 *  with side effects.
 */
#define CPU_swap_u16( value ) \
  ((((value) & 0xff) << 8) | (((value) >> 8) & 0xff))
1063
1064#endif /* ASM */
1065
1066#ifdef __cplusplus
1067}
1068#endif
1069
1070#endif
Note: See TracBrowser for help on using the repository browser.