source: rtems/cpukit/score/cpu/sparc64/include/rtems/score/cpu.h @ 27bbc05

Last change on this file since 27bbc05 was 27bbc05, checked in by Sebastian Huber <sebastian.huber@…>, on 08/02/18 at 12:49:01

score: Remove CPU_PARTITION_ALIGNMENT

Use the CPU_SIZEOF_POINTER alignment instead. The internal alignment
requirement is defined by the use of Chain_Node (consisting of two
pointers) to manage the free chain of partitions.

It seems that previously the condition

CPU_PARTITION_ALIGNMENT >= sizeof(Chain_Node)

was true on all CPU ports. Now, we need an additional check.

Update #3482.

/**
 * @file
 *
 * @brief SPARC64 CPU Department Source
 *
 * This include file contains information pertaining to the port of
 * the executive to the SPARC64 processor.
 */

/*
 *  COPYRIGHT (c) 1989-2006. On-Line Applications Research Corporation (OAR).
 *
 *  This file is based on the SPARC cpu.h file.  Modifications are made
 *  to support the SPARC64 processor.
 *  COPYRIGHT (c) 2010. Gedare Bloom.
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

#ifdef __cplusplus
extern "C" {
#endif

#include <rtems/score/basedefs.h>
#include <rtems/score/sparc64.h>

/* conditional compilation parameters */

/*
 *  Does the CPU follow the simple vectored interrupt model?
 *
 *  If TRUE, then RTEMS allocates the vector table it internally manages.
 *  If FALSE, then the BSP is assumed to allocate and manage the vector
 *  table.
 *
 *  SPARC Specific Information:
 *
 *  XXX document implementation including references if appropriate
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS TRUE

/*
 *  Does RTEMS invoke the user's ISR with the vector number and
 *  a pointer to the saved interrupt frame (1) or just the vector
 *  number (0)?
 */

#define CPU_ISR_PASSES_FRAME_POINTER FALSE

/*
 *  Does the CPU have hardware floating point?
 *
 *  If TRUE, then the FLOATING_POINT task attribute is supported.
 *  If FALSE, then the FLOATING_POINT task attribute is ignored.
 */

#if ( SPARC_HAS_FPU == 1 )
#define CPU_HARDWARE_FP     TRUE
#else
#define CPU_HARDWARE_FP     FALSE
#endif
#define CPU_SOFTWARE_FP     FALSE

/*
 *  Are all tasks FLOATING_POINT tasks implicitly?
 *
 *  If TRUE, then the FLOATING_POINT task attribute is assumed.
 *  If FALSE, then the FLOATING_POINT task attribute is followed.
 */

#define CPU_ALL_TASKS_ARE_FP     FALSE

/*
 *  Should the IDLE task have a floating point context?
 *
 *  If TRUE, then the IDLE task is created as a FLOATING_POINT task
 *  and it has a floating point context which is switched in and out.
 *  If FALSE, then the IDLE task does not have a floating point context.
 */

#define CPU_IDLE_TASK_IS_FP      FALSE

/*
 *  Should the saving of the floating point registers be deferred
 *  until a context switch is made to a different floating point
 *  task?
 *
 *  If TRUE, then the floating point context will not be stored until
 *  necessary.  It will remain in the floating point registers and not
 *  disturbed until another floating point task is switched to.
 *
 *  If FALSE, then the floating point context is saved when a floating
 *  point task is switched out and restored when the next floating point
 *  task is restored.  The state of the floating point registers between
 *  those two operations is not specified.
 */

#define CPU_USE_DEFERRED_FP_SWITCH       TRUE

#define CPU_ENABLE_ROBUST_THREAD_DISPATCH FALSE

/*
 *  Does this port provide a CPU dependent IDLE task implementation?
 *
 *  If TRUE, then the routine _CPU_Thread_Idle_body
 *  must be provided and is the default IDLE thread body instead of
 *  the generic _Thread_Idle_body.
 *
 *  If FALSE, then use the generic IDLE thread body if the BSP does
 *  not provide one.
 */

#define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE

/*
 *  Does the stack grow up (toward higher addresses) or down
 *  (toward lower addresses)?
 *
 *  If TRUE, then the stack grows upward.
 *  If FALSE, then the stack grows toward smaller addresses.
 *
 *  The stack grows to lower addresses on the SPARC.
 */

#define CPU_STACK_GROWS_UP               FALSE

/* FIXME: Is this the right value? */
#define CPU_CACHE_LINE_BYTES 32

/*
 *  The following is the variable attribute used to force alignment
 *  of critical data structures.  On some processors it may make
 *  sense to have these aligned on tighter boundaries than
 *  the minimum requirements of the compiler in order to have as
 *  much of the critical data area as possible in a cache line.
 *
 *  The SPARC does not appear to have particularly strict alignment
 *  requirements.  This value (16) was chosen to take advantage of caches.
 *
 *  The SPARC64 requirement on floating point alignment is at least 8,
 *  and is 16 if quad-word fp instructions are available (e.g. LDQF).
 */

#define CPU_STRUCTURE_ALIGNMENT RTEMS_ALIGNED( 16 )

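/*
 *  As an illustration only (the variable name below is hypothetical, not
 *  part of this port), the attribute is appended to a declaration to place
 *  the object on a 16-byte boundary:
 *
 *    static uint8_t _A_critical_area[ 256 ] CPU_STRUCTURE_ALIGNMENT;
 */
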
/*
 *  The following defines the number of bits actually used in the
 *  interrupt field of the task mode.  How those bits map to the
 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
 *
 *  The SPARC v9 has 16 interrupt levels in the PIL.
 */

#define CPU_MODES_INTERRUPT_MASK   0x0000000F

#define CPU_MAXIMUM_PROCESSORS 32

/*
 *  This structure represents the organization of the minimum stack frame
 *  for the SPARC.  More framing information is required in certain situations
 *  such as when there are a large number of out parameters or when the callee
 *  must save floating point registers.
 */

#ifndef ASM

typedef struct {
  uint64_t    l0;
  uint64_t    l1;
  uint64_t    l2;
  uint64_t    l3;
  uint64_t    l4;
  uint64_t    l5;
  uint64_t    l6;
  uint64_t    l7;
  uint64_t    i0;
  uint64_t    i1;
  uint64_t    i2;
  uint64_t    i3;
  uint64_t    i4;
  uint64_t    i5;
  uint64_t    i6_fp;
  uint64_t    i7;
  void       *structure_return_address;
  /*
   *  The following are for the callee to save the register arguments in,
   *  should this be necessary.
   */
  uint64_t    saved_arg0;
  uint64_t    saved_arg1;
  uint64_t    saved_arg2;
  uint64_t    saved_arg3;
  uint64_t    saved_arg4;
  uint64_t    saved_arg5;
  uint64_t    pad0;
} SPARC64_Minimum_stack_frame;

#endif /* !ASM */

#define CPU_STACK_FRAME_L0_OFFSET             0x00
#define CPU_STACK_FRAME_L1_OFFSET             0x08
#define CPU_STACK_FRAME_L2_OFFSET             0x10
#define CPU_STACK_FRAME_L3_OFFSET             0x18
#define CPU_STACK_FRAME_L4_OFFSET             0x20
#define CPU_STACK_FRAME_L5_OFFSET             0x28
#define CPU_STACK_FRAME_L6_OFFSET             0x30
#define CPU_STACK_FRAME_L7_OFFSET             0x38
#define CPU_STACK_FRAME_I0_OFFSET             0x40
#define CPU_STACK_FRAME_I1_OFFSET             0x48
#define CPU_STACK_FRAME_I2_OFFSET             0x50
#define CPU_STACK_FRAME_I3_OFFSET             0x58
#define CPU_STACK_FRAME_I4_OFFSET             0x60
#define CPU_STACK_FRAME_I5_OFFSET             0x68
#define CPU_STACK_FRAME_I6_FP_OFFSET          0x70
#define CPU_STACK_FRAME_I7_OFFSET             0x78
#define CPU_STRUCTURE_RETURN_ADDRESS_OFFSET   0x80
#define CPU_STACK_FRAME_SAVED_ARG0_OFFSET     0x88
#define CPU_STACK_FRAME_SAVED_ARG1_OFFSET     0x90
#define CPU_STACK_FRAME_SAVED_ARG2_OFFSET     0x98
#define CPU_STACK_FRAME_SAVED_ARG3_OFFSET     0xA0
#define CPU_STACK_FRAME_SAVED_ARG4_OFFSET     0xA8
#define CPU_STACK_FRAME_SAVED_ARG5_OFFSET     0xB0
#define CPU_STACK_FRAME_PAD0_OFFSET           0xB8

#define SPARC64_MINIMUM_STACK_FRAME_SIZE          0xC0

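/*
 *  The offset constants above are meant to track the layout of
 *  SPARC64_Minimum_stack_frame.  A minimal compile-time sanity check,
 *  sketched here under the assumption that RTEMS_STATIC_ASSERT from
 *  <rtems/score/basedefs.h> and offsetof() are usable in a non-ASM
 *  translation unit, could look like:
 *
 *    RTEMS_STATIC_ASSERT(
 *      offsetof( SPARC64_Minimum_stack_frame, i6_fp )
 *        == CPU_STACK_FRAME_I6_FP_OFFSET,
 *      SPARC64_Minimum_stack_frame_i6_fp
 *    );
 *    RTEMS_STATIC_ASSERT(
 *      sizeof( SPARC64_Minimum_stack_frame )
 *        == SPARC64_MINIMUM_STACK_FRAME_SIZE,
 *      SPARC64_Minimum_stack_frame_size
 *    );
 */
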
/*
 * Contexts
 *
 *  Generally there are 2 types of context to save.
 *     1. Interrupt registers to save
 *     2. Task level registers to save
 *
 *  This means we have the following 3 context items:
 *     1. task level context stuff::  Context_Control
 *     2. floating point task stuff:: Context_Control_fp
 *     3. special interrupt level context :: Context_Control_interrupt
 *
 *  On the SPARC, we are relatively conservative in that we save most
 *  of the CPU state in the context area.  The ET (enable trap) bit and
 *  the CWP (current window pointer) fields of the PSR are considered
 *  system wide resources and are not maintained on a per-thread basis.
 */

#ifndef ASM

typedef struct {
    uint64_t   g1;
    uint64_t   g2;
    uint64_t   g3;
    uint64_t   g4;
    uint64_t   g5;
    uint64_t   g6;
    uint64_t   g7;

    uint64_t   l0;
    uint64_t   l1;
    uint64_t   l2;
    uint64_t   l3;
    uint64_t   l4;
    uint64_t   l5;
    uint64_t   l6;
    uint64_t   l7;

    uint64_t   i0;
    uint64_t   i1;
    uint64_t   i2;
    uint64_t   i3;
    uint64_t   i4;
    uint64_t   i5;
    uint64_t   i6_fp;
    uint64_t   i7;

    uint64_t   o0;
    uint64_t   o1;
    uint64_t   o2;
    uint64_t   o3;
    uint64_t   o4;
    uint64_t   o5;
    uint64_t   o6_sp;
    uint64_t   o7;

    uint32_t   isr_dispatch_disable;
    uint32_t   pad;
} Context_Control;

#define _CPU_Context_Get_SP( _context ) \
  (_context)->o6_sp

#endif /* ASM */

/*
 *  Offsets of fields within Context_Control for assembly routines.
 */

#define G1_OFFSET    0x00
#define G2_OFFSET    0x08
#define G3_OFFSET    0x10
#define G4_OFFSET    0x18
#define G5_OFFSET    0x20
#define G6_OFFSET    0x28
#define G7_OFFSET    0x30

#define L0_OFFSET    0x38
#define L1_OFFSET    0x40
#define L2_OFFSET    0x48
#define L3_OFFSET    0x50
#define L4_OFFSET    0x58
#define L5_OFFSET    0x60
#define L6_OFFSET    0x68
#define L7_OFFSET    0x70

#define I0_OFFSET    0x78
#define I1_OFFSET    0x80
#define I2_OFFSET    0x88
#define I3_OFFSET    0x90
#define I4_OFFSET    0x98
#define I5_OFFSET    0xA0
#define I6_FP_OFFSET 0xA8
#define I7_OFFSET    0xB0

#define O0_OFFSET    0xB8
#define O1_OFFSET    0xC0
#define O2_OFFSET    0xC8
#define O3_OFFSET    0xD0
#define O4_OFFSET    0xD8
#define O5_OFFSET    0xE0
#define O6_SP_OFFSET 0xE8
#define O7_OFFSET    0xF0

#define ISR_DISPATCH_DISABLE_STACK_OFFSET 0xF8
#define ISR_PAD_OFFSET 0xFC

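/*
 *  These constants mirror the field layout of Context_Control.  The same
 *  kind of compile-time check sketched for the minimum stack frame applies
 *  here as well, e.g. (assuming RTEMS_STATIC_ASSERT and offsetof() are
 *  available):
 *
 *    RTEMS_STATIC_ASSERT(
 *      offsetof( Context_Control, o6_sp ) == O6_SP_OFFSET,
 *      Context_Control_o6_sp
 *    );
 */
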
/*
 *  The floating point context area.
 */

#ifndef ASM

typedef struct {
    double      f0;     /* f0-f1 */
    double      f2;     /* f2-f3 */
    double      f4;     /* f4-f5 */
    double      f6;     /* f6-f7 */
    double      f8;     /* f8-f9 */
    double      f10;    /* f10-f11 */
    double      f12;    /* f12-f13 */
    double      f14;    /* f14-f15 */
    double      f16;    /* f16-f17 */
    double      f18;    /* f18-f19 */
    double      f20;    /* f20-f21 */
    double      f22;    /* f22-f23 */
    double      f24;    /* f24-f25 */
    double      f26;    /* f26-f27 */
    double      f28;    /* f28-f29 */
    double      f30;    /* f30-f31 */
    double      f32;
    double      f34;
    double      f36;
    double      f38;
    double      f40;
    double      f42;
    double      f44;
    double      f46;
    double      f48;
    double      f50;
    double      f52;
    double      f54;
    double      f56;
    double      f58;
    double      f60;
    double      f62;
    uint64_t    fsr;
} Context_Control_fp;

#endif /* !ASM */

/*
 *  Offsets of fields within Context_Control_fp for assembly routines.
 */

#define FO_OFFSET    0x00
#define F2_OFFSET    0x08
#define F4_OFFSET    0x10
#define F6_OFFSET    0x18
#define F8_OFFSET    0x20
#define F1O_OFFSET   0x28
#define F12_OFFSET   0x30
#define F14_OFFSET   0x38
#define F16_OFFSET   0x40
#define F18_OFFSET   0x48
#define F2O_OFFSET   0x50
#define F22_OFFSET   0x58
#define F24_OFFSET   0x60
#define F26_OFFSET   0x68
#define F28_OFFSET   0x70
#define F3O_OFFSET   0x78
#define F32_OFFSET   0x80
#define F34_OFFSET   0x88
#define F36_OFFSET   0x90
#define F38_OFFSET   0x98
#define F4O_OFFSET   0xA0
#define F42_OFFSET   0xA8
#define F44_OFFSET   0xB0
#define F46_OFFSET   0xB8
#define F48_OFFSET   0xC0
#define F5O_OFFSET   0xC8
#define F52_OFFSET   0xD0
#define F54_OFFSET   0xD8
#define F56_OFFSET   0xE0
#define F58_OFFSET   0xE8
#define F6O_OFFSET   0xF0
#define F62_OFFSET   0xF8
#define FSR_OFFSET   0x100

#define CONTEXT_CONTROL_FP_SIZE 0x108

425#ifndef ASM
426
427/*
428 *  Context saved on stack for an interrupt.
429 *
430 *  NOTE:  The tstate, tpc, and tnpc are saved in this structure
431 *         to allow resetting the TL while still being able to return
432 *         from a trap later.  The PIL is saved because
[1362b7a]433 *         if this is an external interrupt, we will mask lower
[c86da31c]434 *         priority interrupts until finishing. Even though the y register
435 *         is deprecated, gcc still uses it.
436 */
437
438typedef struct {
[d18560a]439  SPARC64_Minimum_stack_frame Stack_frame;
[c86da31c]440  uint64_t                 tstate;
441  uint64_t                 tpc;
442  uint64_t                 tnpc;
443  uint64_t                 pil;
444  uint64_t                 y;
445  uint64_t                 g1;
446  uint64_t                 g2;
447  uint64_t                 g3;
448  uint64_t                 g4;
449  uint64_t                 g5;
450  uint64_t                 g6;
451  uint64_t                 g7;
452  uint64_t                 o0;
453  uint64_t                 o1;
454  uint64_t                 o2;
455  uint64_t                 o3;
456  uint64_t                 o4;
457  uint64_t                 o5;
458  uint64_t                 o6_sp;
459  uint64_t                 o7;
[67baf60]460  uint64_t                 tvec;
[c86da31c]461} CPU_Interrupt_frame;
462
463#endif /* ASM */

/*
 *  Offsets of fields within CPU_Interrupt_frame for assembly routines.
 */

#define ISF_TSTATE_OFFSET      SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x00
#define ISF_TPC_OFFSET         SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x08
#define ISF_TNPC_OFFSET        SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x10
#define ISF_PIL_OFFSET         SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x18
#define ISF_Y_OFFSET           SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x20
#define ISF_G1_OFFSET          SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x28
#define ISF_G2_OFFSET          SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x30
#define ISF_G3_OFFSET          SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x38
#define ISF_G4_OFFSET          SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x40
#define ISF_G5_OFFSET          SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x48
#define ISF_G6_OFFSET          SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x50
#define ISF_G7_OFFSET          SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x58
#define ISF_O0_OFFSET          SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x60
#define ISF_O1_OFFSET          SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x68
#define ISF_O2_OFFSET          SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x70
#define ISF_O3_OFFSET          SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x78
#define ISF_O4_OFFSET          SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x80
#define ISF_O5_OFFSET          SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x88
#define ISF_O6_SP_OFFSET       SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x90
#define ISF_O7_OFFSET          SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x98
#define ISF_TVEC_OFFSET        SPARC64_MINIMUM_STACK_FRAME_SIZE + 0xA0

#define CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE SPARC64_MINIMUM_STACK_FRAME_SIZE + 0xA8
#ifndef ASM
/*
 *  This variable contains the initial context for the FP unit.
 *  It is filled in by _CPU_Initialize and copied into the task's FP
 *  context area during _CPU_Context_Initialize.
 */

extern Context_Control_fp _CPU_Null_fp_context;

/*
 *  This flag is context switched with each thread.  It indicates
 *  that THIS thread has an _ISR_Dispatch stack frame on its stack.
 *  By using this flag, we can avoid nesting more interrupt dispatching
 *  attempts on a previously interrupted thread's stack.
 */

extern volatile uint32_t _CPU_ISR_Dispatch_disable;

/*
 *  The following type defines an entry in the SPARC's trap table.
 *
 *  NOTE: The instructions chosen are RTEMS dependent although one is
 *        obligated to use two of the four instructions to perform a
 *        long jump.  The other instructions load one register with the
 *        trap type (a.k.a. vector) and another with the psr.
 */
/* For SPARC V9, we must use 6 of these instructions to perform a long
 * jump, because the _handler value is now 64 bits. We also need to store
 * temporary values in the global register set at this trap level. Because
 * the handler runs at TL > 0 with GL > 0, it should be OK to use g2 and g3
 * to pass parameters to ISR_Handler.
 *
 * The instruction sequence is now more like:
 *      rdpr %tstate, %g4
 *      setx _handler, %g2, %g3
 *      jmp %g3+0
 *      mov _vector, %g2
 */
typedef struct {
  uint32_t     rdpr_tstate_g4;                  /* rdpr  %tstate, %g4            */
  uint32_t     sethi_of_hh_handler_to_g2;       /* sethi %hh(_handler), %g2      */
  uint32_t     or_g2_hm_handler_to_g2;          /* or    %g2, %hm(_handler), %g2 */
  uint32_t     sllx_g2_by_32_to_g2;             /* sllx  %g2, 32, %g2            */
  uint32_t     sethi_of_handler_to_g3;          /* sethi %hi(_handler), %g3      */
  uint32_t     or_g3_g2_to_g3;                  /* or    %g3, %g2, %g3           */
  uint32_t     jmp_to_low_of_handler_plus_g3;   /* jmp   %g3 + %lo(_handler)     */
  uint32_t     mov_vector_g2;                   /* mov   _vector, %g2            */
} CPU_Trap_table_entry;

/*
 *  This is the set of opcodes for the instructions loaded into a trap
 *  table entry.  The routine which installs a handler is responsible
 *  for filling in the fields for the _handler address and the _vector
 *  trap type.
 *
 *  The constants following this structure are masks for the fields which
 *  must be filled in when the handler is installed.
 */

extern const CPU_Trap_table_entry _CPU_Trap_slot_template;

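/*
 *  An install routine is expected to copy _CPU_Trap_slot_template into the
 *  trap table slot and then patch the immediate fields of the copied
 *  instructions.  A rough sketch of the field extraction (the handler,
 *  slot, and vector variables are hypothetical, not part of this port's
 *  API):
 *
 *    uint64_t addr = (uint64_t) handler;
 *    uint32_t hh = (uint32_t)( addr >> 42 );              bits 63..42
 *    uint32_t hm = (uint32_t)( addr >> 32 ) & 0x3ff;      bits 41..32
 *    uint32_t hi = (uint32_t)( addr >> 10 ) & 0x3fffff;   bits 31..10
 *    uint32_t lo = (uint32_t)  addr         & 0x3ff;      bits  9..0
 *
 *    slot->sethi_of_hh_handler_to_g2     |= hh;       sethi imm22 field
 *    slot->or_g2_hm_handler_to_g2        |= hm;       or simm13 field
 *    slot->sethi_of_handler_to_g3        |= hi;       sethi imm22 field
 *    slot->jmp_to_low_of_handler_plus_g3 |= lo;       jmp simm13 field
 *    slot->mov_vector_g2                 |= vector;   trap type in simm13
 *
 *  followed by an instruction cache flush of the patched slot.
 */
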
/*
 *  The size of the floating point context area.
 */

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

#endif

/*
 *  Amount of extra stack (above minimum stack size) required by
 *  MPCI receive server thread.  Remember that in a multiprocessor
 *  system this thread must exist and be able to process all directives.
 */

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 1024

/*
 *  This defines the number of entries in the ISR_Vector_table managed
 *  by the executive.
 *
 *  On the SPARC, there are really only 256 vectors.  However, the executive
 *  has no easy, fast, reliable way to determine which traps are synchronous
 *  and which are asynchronous.  By default, synchronous traps return to the
 *  instruction which caused the interrupt.  So if you install a software
 *  trap handler as an executive interrupt handler (which is desirable since
 *  RTEMS takes care of window and register issues), then the executive needs
 *  to know that the return address is to the trap rather than the instruction
 *  following the trap.
 *
 *  So vectors 0 through 255 are treated as regular asynchronous traps which
 *  provide the "correct" return address.  Vectors 256 through 511 are assumed
 *  by the executive to be synchronous and to require that the return address
 *  be fudged.
 *
 *  If you use this mechanism to install a trap handler which must reexecute
 *  the instruction which caused the trap, then it should be installed as
 *  an asynchronous trap.  This will avoid the executive changing the return
 *  address.
 */
/* On SPARC v9, there are 512 vectors. The same philosophy applies to
 * vector installation and use, we just provide a larger table.
 */
#define CPU_INTERRUPT_NUMBER_OF_VECTORS     512
#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER 1023

#define SPARC_SYNCHRONOUS_TRAP_BIT_MASK     0x200
#define SPARC_ASYNCHRONOUS_TRAP( _trap )    (_trap)
#define SPARC_SYNCHRONOUS_TRAP( _trap )     ((_trap) + 512 )

#define SPARC_REAL_TRAP_NUMBER( _trap )     ((_trap) % 512)

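/*
 *  Example of the encoding (illustrative values): installing a handler for
 *  software trap 0x10 so that the return address is fudged uses vector
 *  SPARC_SYNCHRONOUS_TRAP( 0x10 ) == 0x210, and
 *  SPARC_REAL_TRAP_NUMBER( 0x210 ) recovers the hardware trap type 0x10.
 *  An interrupt source keeps its hardware number unchanged, e.g.
 *  SPARC_ASYNCHRONOUS_TRAP( 0x41 ) == 0x41.
 */
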
604/*
605 *  This is defined if the port has a special way to report the ISR nesting
606 *  level.  Most ports maintain the variable _ISR_Nest_level.
607 */
608
609#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
610
611/*
612 *  Should be large enough to run all tests.  This ensures
613 *  that a "reasonable" small application should not have any problems.
614 *
615 *  This appears to be a fairly generous number for the SPARC since
616 *  represents a call depth of about 20 routines based on the minimum
617 *  stack frame.
618 */
619
620#define CPU_STACK_MINIMUM_SIZE  (1024*8)
621
[f1738ed]622#define CPU_SIZEOF_POINTER 8

/*
 *  CPU's worst alignment requirement for data types on a byte boundary.  This
 *  alignment does not take into account the requirements for the stack.
 *
 *  On the SPARC, this is required for double word loads and stores.
 *
 *  Note: quad-word loads/stores need alignment of 16, but currently supported
 *  architectures do not provide HW implemented quad-word operations.
 */

#define CPU_ALIGNMENT      8

/*
 *  This number corresponds to the byte alignment requirement for the
 *  heap handler.  This alignment requirement may be stricter than that
 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
 *  common for the heap to follow the same alignment requirement as
 *  CPU_ALIGNMENT.  If CPU_ALIGNMENT is strict enough for the heap,
 *  then this should be set to CPU_ALIGNMENT.
 *
 *  NOTE:  This does not have to be a power of 2.  It does have to
 *         be greater than or equal to CPU_ALIGNMENT.
 */

#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT

/*
 *  This number corresponds to the byte alignment requirement for the
 *  stack.  This alignment requirement may be stricter than that for the
 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
 *  is strict enough for the stack, then this should be set to 0.
 *
 *  NOTE:  This must be a power of 2, either 0 or greater than CPU_ALIGNMENT.
 *
 *  The alignment restrictions for the SPARC are not that strict, but this
 *  should ensure that the stack is always sufficiently aligned so that the
 *  window overflow, underflow, and flush routines can use double word loads
 *  and stores.
 */

#define CPU_STACK_ALIGNMENT        16

#define CPU_INTERRUPT_STACK_ALIGNMENT CPU_CACHE_LINE_BYTES

#ifndef ASM

/*
 *  ISR handler macros
 */

/*
 *  Support routine to initialize the RTEMS vector table after it is allocated.
 */

#define _CPU_Initialize_vectors()

/*
 *  Disable all interrupts for a critical section.  The previous
 *  level is returned in _level.
 */

#define _CPU_ISR_Disable( _level ) \
  (_level) = sparc_disable_interrupts()

/*
 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
 *  This indicates the end of a critical section.  The parameter
 *  _level is not modified.
 */

#define _CPU_ISR_Enable( _level ) \
  sparc_enable_interrupts( _level )

/*
 *  This temporarily restores interrupts to _level before immediately
 *  disabling them again.  This is used to divide long critical
 *  sections into two or more parts.  The parameter _level is not
 *  modified.
 */

#define _CPU_ISR_Flash( _level ) \
   sparc_flash_interrupts( _level )

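/*
 *  Typical use of the three macros above for a long critical section
 *  (illustrative only):
 *
 *    uint32_t level;
 *
 *    _CPU_ISR_Disable( level );
 *      ... first half of the critical section ...
 *    _CPU_ISR_Flash( level );
 *      ... second half of the critical section ...
 *    _CPU_ISR_Enable( level );
 */
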
RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
{
  return ( level & SPARC_PSTATE_IE_MASK ) != 0;
}

/*
 *  Map interrupt level in task mode onto the hardware that the CPU
 *  actually provides.  Currently, interrupt levels which do not
 *  map onto the CPU in a straight fashion are undefined.
 */

#define _CPU_ISR_Set_level( _newlevel ) \
   sparc_enable_interrupts( _newlevel )

uint32_t   _CPU_ISR_Get_level( void );

/* end of ISR handler macros */

/* Context handler macros */

/*
 *  Initialize the context to a state suitable for starting a
 *  task after a context restore operation.  Generally, this
 *  involves:
 *
 *     - setting a starting address
 *     - preparing the stack
 *     - preparing the stack and frame pointers
 *     - setting the proper interrupt level in the context
 *     - initializing the floating point context
 *
 *  NOTE:  Implemented as a subroutine for the SPARC port.
 */

void _CPU_Context_Initialize(
  Context_Control  *the_context,
  void             *stack_base,
  uint32_t          size,
  uint32_t          new_level,
  void             *entry_point,
  bool              is_fp,
  void             *tls_area
);

/*
 *  This macro is invoked from _Thread_Handler to do whatever CPU
 *  specific magic is required that must be done in the context of
 *  the thread when it starts.
 *
 *  On the SPARC, this is setting the frame pointer so GDB is happy.
 *  It makes GDB stop unwinding at _Thread_Handler: the previous register
 *  window frame pointer is 0 and the return address must be in a function
 *  that starts with a SAVE instruction.  If the return address is in a
 *  leaf function (no SAVE), GDB will not look at the previous register
 *  window frame pointer.
 *
 *  _Thread_Handler is known to start with SAVE.
 */

#define _CPU_Context_Initialization_at_thread_begin() \
  do { \
    __asm__ volatile ("set _Thread_Handler,%%i7\n"::); \
  } while (0)

/*
 *  This routine is responsible for somehow restarting the currently
 *  executing task.
 *
 *  On the SPARC, this is relatively painless but requires a small
 *  amount of wrapper code before using the regular restore code of
 *  the context switch.
 */

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

/*
 *  This routine initializes the FP context area passed to it.
 *
 *  The SPARC allows us to use the simple initialization model
 *  in which an "initial" FP context was saved into _CPU_Null_fp_context
 *  at CPU initialization and it is simply copied into the destination
 *  context.
 */

#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
   *(*(_destination)) = _CPU_Null_fp_context; \
  } while (0)

/* end of Context handler macros */

/* Fatal Error manager macros */

/*
 *  This routine copies _error into a known place -- typically a stack
 *  location or a register, optionally disables interrupts, and
 *  halts/stops the CPU.
 */

#define _CPU_Fatal_halt( _source, _error ) \
  do { \
    uint32_t   level; \
    \
    level = sparc_disable_interrupts(); \
    __asm__ volatile ( "mov  %0, %%g1 " : "=r" (level) : "0" (level) ); \
    while (1); /* loop forever */ \
  } while (0)

/* end of Fatal Error manager macros */

/* Bitfield handler macros */

/*
 *  The SPARC port uses the generic C algorithm for bitfield scan if the
 *  CPU model does not have a scan instruction.
 */

#if ( SPARC_HAS_BITSCAN == 0 )
#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
#else
#error "scan instruction not currently supported by RTEMS!!"
#endif

/* end of Bitfield handler macros */

/* Priority handler macros */

/*
 *  The SPARC port uses the generic C algorithm for bitfield scan if the
 *  CPU model does not have a scan instruction.
 */

#if ( SPARC_HAS_BITSCAN == 1 )
#error "scan instruction not currently supported by RTEMS!!"
#endif

/* end of Priority handler macros */

/* functions */

/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 */

void _CPU_Initialize(void);

/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs new_handler to be directly called from the trap
 *  table.
 */

void _CPU_ISR_install_raw_handler(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.
 */

void _CPU_ISR_install_vector(
  uint64_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)

/*
 *  _CPU_Thread_Idle_body
 *
 *  Some SPARC implementations have low power, sleep, or idle modes.  This
 *  tries to take advantage of those modes.
 */

void *_CPU_Thread_Idle_body( uintptr_t ignored );

#endif /* CPU_PROVIDES_IDLE_THREAD_BODY */

/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.
 */

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_NO_RETURN;

/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 */

void _CPU_Context_save_fp(
  Context_Control_fp **fp_context_ptr
);

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 */

void _CPU_Context_restore_fp(
  Context_Control_fp **fp_context_ptr
);

/* FIXME */
typedef CPU_Interrupt_frame CPU_Exception_frame;

void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

/*
 *  CPU_swap_u32
 *
 *  The following routine swaps the endian format of an unsigned int.
 *  It must be static because it is referenced indirectly.
 *
 *  This version will work on any processor, but if you come across a better
 *  way for the SPARC PLEASE use it.  The most common way to swap a 32-bit
 *  entity as shown below is not any more efficient on the SPARC.
 *
 *     swap least significant two bytes with 16-bit rotate
 *     swap upper and lower 16-bits
 *     swap most significant two bytes with 16-bit rotate
 *
 *  It is not obvious how the SPARC can do significantly better than the
 *  generic code.  gcc 2.7.0 only generates about 12 instructions for the
 *  following code at optimization level four (i.e. -O4).
 */

static inline uint32_t CPU_swap_u32(
  uint32_t value
)
{
  uint32_t   byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value        & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return( swapped );
}

#define CPU_swap_u16( value ) \
  (((value&0xff) << 8) | ((value >> 8)&0xff))

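/*
 *  For example (illustrative values):
 *
 *    CPU_swap_u32( 0x12345678 ) == 0x78563412
 *    CPU_swap_u16( 0x1234 )     == 0x3412
 */
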
typedef uint32_t CPU_Counter_ticks;

uint32_t _CPU_Counter_frequency( void );

CPU_Counter_ticks _CPU_Counter_read( void );

static inline CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
)
{
  return second - first;
}

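/*
 *  Sketch of timing a short code sequence with the CPU counter (the
 *  do_something() call is a placeholder, not part of this port):
 *
 *    CPU_Counter_ticks t0 = _CPU_Counter_read();
 *    do_something();
 *    CPU_Counter_ticks elapsed =
 *      _CPU_Counter_difference( _CPU_Counter_read(), t0 );
 */
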
/** Type that can store a 32-bit integer or a pointer. */
typedef uintptr_t CPU_Uint32ptr;

#endif /* ASM */

#ifdef __cplusplus
}
#endif

#endif