source: rtems/cpukit/score/cpu/sparc64/rtems/score/cpu.h @ 27bfcd8

Last change on this file was 27bfcd8, checked in by Sebastian Huber <sebastian.huber@…> on 01/25/17 at 13:32:02

score: Delete _CPU_Context_Fp_start()

Since the FP area pointer is passed by reference in
_CPU_Context_Initialize_fp() the optional FP area adjustment via
_CPU_Context_Fp_start() is superfluous. It is also wrong with respect
to memory management, e.g. pointer passed to _Workspace_Free() may be
not the one returned by _Workspace_Allocate().

Close #1400.

/**
 * @file
 *
 * @brief SPARC64 CPU Department Source
 *
 * This include file contains information pertaining to the port of
 * the executive to the SPARC64 processor.
 */

/*
 *
 *
 *  COPYRIGHT (c) 1989-2006. On-Line Applications Research Corporation (OAR).
 *
 *  This file is based on the SPARC cpu.h file. Modifications are made
 *  to support the SPARC64 processor.
 *  COPYRIGHT (c) 2010. Gedare Bloom.
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

#ifdef __cplusplus
extern "C" {
#endif

#include <rtems/score/types.h>
#include <rtems/score/sparc64.h>

/* conditional compilation parameters */

/*
 *  Does the executive manage a dedicated interrupt stack in software?
 *
 *  If TRUE, then a stack is allocated in _ISR_Handler_initialization.
 *  If FALSE, nothing is done.
 *
 *  The SPARC does not have a dedicated HW interrupt stack and one has
 *  been implemented in SW.
 */

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK   TRUE

/*
 *  Does the CPU follow the simple vectored interrupt model?
 *
 *  If TRUE, then RTEMS allocates the vector table it internally manages.
 *  If FALSE, then the BSP is assumed to allocate and manage the vector
 *  table.
 *
 *  SPARC Specific Information:
 *
 *  XXX document implementation including references if appropriate
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS TRUE

/*
 *  Does this CPU have hardware support for a dedicated interrupt stack?
 *
 *  If TRUE, then it must be installed during initialization.
 *  If FALSE, then no installation is performed.
 *
 *  The SPARC does not have a dedicated HW interrupt stack.
 */

#define CPU_HAS_HARDWARE_INTERRUPT_STACK  FALSE

/*
 *  Do we allocate a dedicated interrupt stack in the Interrupt Manager?
 *
 *  If TRUE, then the memory is allocated during initialization.
 *  If FALSE, then the memory is not allocated by RTEMS.
 */

#define CPU_ALLOCATE_INTERRUPT_STACK      TRUE

/*
 *  Does RTEMS invoke the user's ISR with the vector number and
 *  a pointer to the saved interrupt frame (1) or just the vector
 *  number (0)?
 */

#define CPU_ISR_PASSES_FRAME_POINTER FALSE

/*
 *  Does the CPU have hardware floating point?
 *
 *  If TRUE, then the FLOATING_POINT task attribute is supported.
 *  If FALSE, then the FLOATING_POINT task attribute is ignored.
 */

#if ( SPARC_HAS_FPU == 1 )
#define CPU_HARDWARE_FP     TRUE
#else
#define CPU_HARDWARE_FP     FALSE
#endif
#define CPU_SOFTWARE_FP     FALSE

/*
 *  Are all tasks FLOATING_POINT tasks implicitly?
 *
 *  If TRUE, then the FLOATING_POINT task attribute is assumed.
 *  If FALSE, then the FLOATING_POINT task attribute is followed.
 */

#define CPU_ALL_TASKS_ARE_FP     FALSE

/*
 *  Should the IDLE task have a floating point context?
 *
 *  If TRUE, then the IDLE task is created as a FLOATING_POINT task
 *  and it has a floating point context which is switched in and out.
 *  If FALSE, then the IDLE task does not have a floating point context.
 */

#define CPU_IDLE_TASK_IS_FP      FALSE

/*
 *  Should the saving of the floating point registers be deferred
 *  until a context switch is made to a different floating point
 *  task?
 *
 *  If TRUE, then the floating point context will not be stored until
 *  necessary.  It will remain in the floating point registers and not
 *  disturbed until another floating point task is switched to.
 *
 *  If FALSE, then the floating point context is saved when a floating
 *  point task is switched out and restored when the next floating point
 *  task is restored.  The state of the floating point registers between
 *  those two operations is not specified.
 */

#define CPU_USE_DEFERRED_FP_SWITCH       TRUE

#define CPU_ENABLE_ROBUST_THREAD_DISPATCH FALSE

/*
 *  Does this port provide a CPU dependent IDLE task implementation?
 *
 *  If TRUE, then the routine _CPU_Thread_Idle_body
 *  must be provided and is used as the IDLE thread body instead of
 *  the generic IDLE thread body.
 *
 *  If FALSE, then use the generic IDLE thread body if the BSP does
 *  not provide one.
 */

#define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE

/*
 *  Does the stack grow up (toward higher addresses) or down
 *  (toward lower addresses)?
 *
 *  If TRUE, then the stack grows upward.
 *  If FALSE, then the stack grows toward smaller addresses.
 *
 *  The stack grows to lower addresses on the SPARC.
 */

#define CPU_STACK_GROWS_UP               FALSE

/* FIXME: Is this the right value? */
#define CPU_CACHE_LINE_BYTES 32

/*
 *  The following is the variable attribute used to force alignment
 *  of critical data structures.  On some processors it may make
 *  sense to have these aligned on tighter boundaries than
 *  the minimum requirements of the compiler in order to have as
 *  much of the critical data area as possible in a cache line.
 *
 *  The SPARC does not appear to have particularly strict alignment
 *  requirements.  This value (16) was chosen to take advantage of caches.
 *
 *  The SPARC64 requirement on floating point alignment is at least 8,
 *  and is 16 if quad-word fp instructions are available (e.g. LDQF).
 */

#define CPU_STRUCTURE_ALIGNMENT RTEMS_ALIGNED( 16 )

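/*
 * Illustrative sketch (not part of the original header): CPU_STRUCTURE_ALIGNMENT
 * is a variable attribute, so a hypothetical cache-sensitive object would be
 * declared roughly as
 *
 *   static Example_control Example_area CPU_STRUCTURE_ALIGNMENT;
 *
 * where Example_control and Example_area are placeholder names, not RTEMS
 * symbols.
 */
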
/*
 *  The following defines the number of bits actually used in the
 *  interrupt field of the task mode.  How those bits map to the
 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
 *
 *  The SPARC v9 has 16 interrupt levels in the PIL field of the PSR.
 */

#define CPU_MODES_INTERRUPT_MASK   0x0000000F

#define CPU_MAXIMUM_PROCESSORS 32

/*
 *  This structure represents the organization of the minimum stack frame
 *  for the SPARC.  More framing information is required in certain situations
 *  such as when there are a large number of out parameters or when the callee
 *  must save floating point registers.
 */

#ifndef ASM

typedef struct {
  uint64_t    l0;
  uint64_t    l1;
  uint64_t    l2;
  uint64_t    l3;
  uint64_t    l4;
  uint64_t    l5;
  uint64_t    l6;
  uint64_t    l7;
  uint64_t    i0;
  uint64_t    i1;
  uint64_t    i2;
  uint64_t    i3;
  uint64_t    i4;
  uint64_t    i5;
  uint64_t    i6_fp;
  uint64_t    i7;
  void       *structure_return_address;
  /*
   *  The following are for the callee to save the register arguments in
   *  should this be necessary.
   */
  uint64_t    saved_arg0;
  uint64_t    saved_arg1;
  uint64_t    saved_arg2;
  uint64_t    saved_arg3;
  uint64_t    saved_arg4;
  uint64_t    saved_arg5;
  uint64_t    pad0;
} SPARC64_Minimum_stack_frame;

#endif /* !ASM */

#define CPU_STACK_FRAME_L0_OFFSET             0x00
#define CPU_STACK_FRAME_L1_OFFSET             0x08
#define CPU_STACK_FRAME_L2_OFFSET             0x10
#define CPU_STACK_FRAME_L3_OFFSET             0x18
#define CPU_STACK_FRAME_L4_OFFSET             0x20
#define CPU_STACK_FRAME_L5_OFFSET             0x28
#define CPU_STACK_FRAME_L6_OFFSET             0x30
#define CPU_STACK_FRAME_L7_OFFSET             0x38
#define CPU_STACK_FRAME_I0_OFFSET             0x40
#define CPU_STACK_FRAME_I1_OFFSET             0x48
#define CPU_STACK_FRAME_I2_OFFSET             0x50
#define CPU_STACK_FRAME_I3_OFFSET             0x58
#define CPU_STACK_FRAME_I4_OFFSET             0x60
#define CPU_STACK_FRAME_I5_OFFSET             0x68
#define CPU_STACK_FRAME_I6_FP_OFFSET          0x70
#define CPU_STACK_FRAME_I7_OFFSET             0x78
#define CPU_STRUCTURE_RETURN_ADDRESS_OFFSET   0x80
#define CPU_STACK_FRAME_SAVED_ARG0_OFFSET     0x88
#define CPU_STACK_FRAME_SAVED_ARG1_OFFSET     0x90
#define CPU_STACK_FRAME_SAVED_ARG2_OFFSET     0x98
#define CPU_STACK_FRAME_SAVED_ARG3_OFFSET     0xA0
#define CPU_STACK_FRAME_SAVED_ARG4_OFFSET     0xA8
#define CPU_STACK_FRAME_SAVED_ARG5_OFFSET     0xB0
#define CPU_STACK_FRAME_PAD0_OFFSET           0xB8

#define SPARC64_MINIMUM_STACK_FRAME_SIZE          0xC0

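/*
 * Illustrative note (not part of the original header): the offsets above
 * simply mirror the layout of SPARC64_Minimum_stack_frame, e.g. i6_fp is the
 * 15th 64-bit slot, so CPU_STACK_FRAME_I6_FP_OFFSET is 14 * 8 = 0x70, and the
 * whole frame occupies 24 slots of 8 bytes = 0xC0 bytes
 * (SPARC64_MINIMUM_STACK_FRAME_SIZE).
 */
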
/*
 * Contexts
 *
 *  Generally there are 2 types of context to save.
 *     1. Interrupt registers to save
 *     2. Task level registers to save
 *
 *  This means we have the following 3 context items:
 *     1. task level context stuff::  Context_Control
 *     2. floating point task stuff:: Context_Control_fp
 *     3. special interrupt level context :: Context_Control_interrupt
 *
 *  On the SPARC, we are relatively conservative in that we save most
 *  of the CPU state in the context area.  The ET (enable trap) bit and
 *  the CWP (current window pointer) fields of the PSR are considered
 *  system wide resources and are not maintained on a per-thread basis.
 */

#ifndef ASM

typedef struct {
    uint64_t   g1;
    uint64_t   g2;
    uint64_t   g3;
    uint64_t   g4;
    uint64_t   g5;
    uint64_t   g6;
    uint64_t   g7;

    uint64_t   l0;
    uint64_t   l1;
    uint64_t   l2;
    uint64_t   l3;
    uint64_t   l4;
    uint64_t   l5;
    uint64_t   l6;
    uint64_t   l7;

    uint64_t   i0;
    uint64_t   i1;
    uint64_t   i2;
    uint64_t   i3;
    uint64_t   i4;
    uint64_t   i5;
    uint64_t   i6_fp;
    uint64_t   i7;

    uint64_t   o0;
    uint64_t   o1;
    uint64_t   o2;
    uint64_t   o3;
    uint64_t   o4;
    uint64_t   o5;
    uint64_t   o6_sp;
    uint64_t   o7;

    uint32_t   isr_dispatch_disable;
    uint32_t   pad;
} Context_Control;

#define _CPU_Context_Get_SP( _context ) \
  (_context)->o6_sp

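/*
 * Illustrative sketch (not part of the original header): _CPU_Context_Get_SP
 * is how generic code, such as a stack checker, would read the saved stack
 * pointer out of a task context; the helper below is hypothetical.
 */
static inline uint64_t _CPU_Context_Get_SP_example(
  const Context_Control *context
)
{
  /* Evaluates to context->o6_sp, the saved %o6/%sp of the task. */
  return _CPU_Context_Get_SP( context );
}
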
#endif /* ASM */

/*
 *  Offsets of fields within Context_Control for assembly routines.
 */

#define G1_OFFSET    0x00
#define G2_OFFSET    0x08
#define G3_OFFSET    0x10
#define G4_OFFSET    0x18
#define G5_OFFSET    0x20
#define G6_OFFSET    0x28
#define G7_OFFSET    0x30

#define L0_OFFSET    0x38
#define L1_OFFSET    0x40
#define L2_OFFSET    0x48
#define L3_OFFSET    0x50
#define L4_OFFSET    0x58
#define L5_OFFSET    0x60
#define L6_OFFSET    0x68
#define L7_OFFSET    0x70

#define I0_OFFSET    0x78
#define I1_OFFSET    0x80
#define I2_OFFSET    0x88
#define I3_OFFSET    0x90
#define I4_OFFSET    0x98
#define I5_OFFSET    0xA0
#define I6_FP_OFFSET    0xA8
#define I7_OFFSET 0xB0

#define O0_OFFSET    0xB8
#define O1_OFFSET    0xC0
#define O2_OFFSET    0xC8
#define O3_OFFSET    0xD0
#define O4_OFFSET    0xD8
#define O5_OFFSET    0xE0
#define O6_SP_OFFSET    0xE8
#define O7_OFFSET 0xF0

#define ISR_DISPATCH_DISABLE_STACK_OFFSET 0xF8
#define ISR_PAD_OFFSET 0xFC

/*
 *  The floating point context area.
 */

#ifndef ASM

typedef struct {
    double      f0;     /* f0-f1 */
    double      f2;     /* f2-f3 */
    double      f4;     /* f4-f5 */
    double      f6;     /* f6-f7 */
    double      f8;     /* f8-f9 */
    double      f10;    /* f10-f11 */
    double      f12;    /* f12-f13 */
    double      f14;    /* f14-f15 */
    double      f16;    /* f16-f17 */
    double      f18;    /* f18-f19 */
    double      f20;    /* f20-f21 */
    double      f22;    /* f22-f23 */
    double      f24;    /* f24-f25 */
    double      f26;    /* f26-f27 */
    double      f28;    /* f28-f29 */
    double      f30;    /* f30-f31 */
    double      f32;
    double      f34;
    double      f36;
    double      f38;
    double      f40;
    double      f42;
    double      f44;
    double      f46;
    double      f48;
    double      f50;
    double      f52;
    double      f54;
    double      f56;
    double      f58;
    double      f60;
    double      f62;
    uint64_t    fsr;
} Context_Control_fp;

#endif /* !ASM */

/*
 *  Offsets of fields within Context_Control_fp for assembly routines.
 */

#define FO_OFFSET    0x00
#define F2_OFFSET    0x08
#define F4_OFFSET    0x10
#define F6_OFFSET    0x18
#define F8_OFFSET    0x20
#define F1O_OFFSET   0x28
#define F12_OFFSET   0x30
#define F14_OFFSET   0x38
#define F16_OFFSET   0x40
#define F18_OFFSET   0x48
#define F2O_OFFSET   0x50
#define F22_OFFSET   0x58
#define F24_OFFSET   0x60
#define F26_OFFSET   0x68
#define F28_OFFSET   0x70
#define F3O_OFFSET   0x78
#define F32_OFFSET   0x80
#define F34_OFFSET   0x88
#define F36_OFFSET   0x90
#define F38_OFFSET   0x98
#define F4O_OFFSET   0xA0
#define F42_OFFSET   0xA8
#define F44_OFFSET   0xB0
#define F46_OFFSET   0xB8
#define F48_OFFSET   0xC0
#define F5O_OFFSET   0xC8
#define F52_OFFSET   0xD0
#define F54_OFFSET   0xD8
#define F56_OFFSET   0xE0
#define F58_OFFSET   0xE8
#define F6O_OFFSET   0xF0
#define F62_OFFSET   0xF8
#define FSR_OFFSET   0x100

#define CONTEXT_CONTROL_FP_SIZE 0x108
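
/*
 * Illustrative note (not part of the original header): Context_Control_fp
 * holds 32 double-wide registers (f0-f62, even-numbered) plus the 64-bit FSR,
 * i.e. 33 * 8 bytes = 0x108, which matches CONTEXT_CONTROL_FP_SIZE above.
 */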

#ifndef ASM

/*
 *  Context saved on stack for an interrupt.
 *
 *  NOTE:  The tstate, tpc, and tnpc are saved in this structure
 *         to allow resetting the TL while still being able to return
 *         from a trap later.  The PIL is saved because
 *         if this is an external interrupt, we will mask lower
 *         priority interrupts until finishing. Even though the y register
 *         is deprecated, gcc still uses it.
 */

typedef struct {
  SPARC64_Minimum_stack_frame Stack_frame;
  uint64_t                 tstate;
  uint64_t                 tpc;
  uint64_t                 tnpc;
  uint64_t                 pil;
  uint64_t                 y;
  uint64_t                 g1;
  uint64_t                 g2;
  uint64_t                 g3;
  uint64_t                 g4;
  uint64_t                 g5;
  uint64_t                 g6;
  uint64_t                 g7;
  uint64_t                 o0;
  uint64_t                 o1;
  uint64_t                 o2;
  uint64_t                 o3;
  uint64_t                 o4;
  uint64_t                 o5;
  uint64_t                 o6_sp;
  uint64_t                 o7;
  uint64_t                 tvec;
} CPU_Interrupt_frame;

#endif /* ASM */

/*
 *  Offsets of fields within CPU_Interrupt_frame for assembly routines.
 */

#define ISF_TSTATE_OFFSET      SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x00
#define ISF_TPC_OFFSET         SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x08
#define ISF_TNPC_OFFSET        SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x10
#define ISF_PIL_OFFSET         SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x18
#define ISF_Y_OFFSET           SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x20
#define ISF_G1_OFFSET          SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x28
#define ISF_G2_OFFSET          SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x30
#define ISF_G3_OFFSET          SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x38
#define ISF_G4_OFFSET          SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x40
#define ISF_G5_OFFSET          SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x48
#define ISF_G6_OFFSET          SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x50
#define ISF_G7_OFFSET          SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x58
#define ISF_O0_OFFSET          SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x60
#define ISF_O1_OFFSET          SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x68
#define ISF_O2_OFFSET          SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x70
#define ISF_O3_OFFSET          SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x78
#define ISF_O4_OFFSET          SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x80
#define ISF_O5_OFFSET          SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x88
#define ISF_O6_SP_OFFSET       SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x90
#define ISF_O7_OFFSET          SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x98
#define ISF_TVEC_OFFSET        SPARC64_MINIMUM_STACK_FRAME_SIZE + 0xA0

#define CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE SPARC64_MINIMUM_STACK_FRAME_SIZE + 0xA8
#ifndef ASM
/*
 *  This variable contains the initial context for the FP unit.
 *  It is filled in by _CPU_Initialize and copied into the task's FP
 *  context area during _CPU_Context_Initialize_fp.
 */

extern Context_Control_fp _CPU_Null_fp_context;

/*
 *  This flag is context switched with each thread.  It indicates
 *  that THIS thread has an _ISR_Dispatch stack frame on its stack.
 *  By using this flag, we can avoid nesting more interrupt dispatching
 *  attempts on a previously interrupted thread's stack.
 */

extern volatile uint32_t _CPU_ISR_Dispatch_disable;

/*
 *  The following type defines an entry in the SPARC's trap table.
 *
 *  NOTE: The instructions chosen are RTEMS dependent although one is
 *        obligated to use two of the four instructions to perform a
 *        long jump.  The other instructions load one register with the
 *        trap type (a.k.a. vector) and another with the psr.
 */
/* For SPARC V9, we must use 6 of these instructions to perform a long
 * jump, because the _handler value is now 64-bits. We also need to store
 * temporary values in the global register set at this trap level. Because
 * the handler runs at TL > 0 with GL > 0, it should be OK to use g2 and g3
 * to pass parameters to ISR_Handler.
 *
 * The instruction sequence is now more like:
 *      rdpr %tstate, %g4
 *      setx _handler, %g2, %g3
 *      jmp %g3+0
 *      mov _vector, %g2
 */
typedef struct {
  uint32_t     rdpr_tstate_g4;                  /* rdpr  %tstate, %g4        */
  uint32_t     sethi_of_hh_handler_to_g2;       /* sethi %hh(_handler), %g2  */
  uint32_t     or_g2_hm_handler_to_g2;          /* or    %g2, %hm(_handler), %g2 */
  uint32_t     sllx_g2_by_32_to_g2;             /* sllx   %g2, 32, %g2 */
  uint32_t     sethi_of_handler_to_g3;          /* sethi %hi(_handler), %g3  */
  uint32_t     or_g3_g2_to_g3;                  /* or     %g3, %g2, %g3 */
  uint32_t     jmp_to_low_of_handler_plus_g3;   /* jmp   %g3 + %lo(_handler) */
  uint32_t     mov_vector_g2;                   /* mov   _vector, %g2        */
} CPU_Trap_table_entry;

/*
 *  This is the set of opcodes for the instructions loaded into a trap
 *  table entry.  The routine which installs a handler is responsible
 *  for filling in the fields for the _handler address and the _vector
 *  trap type.
 *
 *  The constants following this structure are masks for the fields which
 *  must be filled in when the handler is installed.
 */

extern const CPU_Trap_table_entry _CPU_Trap_slot_template;

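/*
 * Illustrative sketch (not part of the original header): the setx-style
 * sequence above rebuilds the 64-bit _handler address from four immediates,
 * %hh (bits 63-42), %hm (bits 41-32), %hi (bits 31-10) and %lo (bits 9-0).
 * The hypothetical helper below shows the split a handler installer would
 * have to encode into the patched instructions.
 */
static inline void _CPU_Trap_slot_split_example(
  uint64_t  handler,
  uint32_t *hh,
  uint32_t *hm,
  uint32_t *hi,
  uint32_t *lo
)
{
  *hh = (uint32_t) ( handler >> 42 );                /* sethi %hh(_handler), %g2 */
  *hm = (uint32_t) ( ( handler >> 32 ) & 0x3ff );    /* or    %g2, %hm(_handler), %g2 */
  *hi = (uint32_t) ( ( handler >> 10 ) & 0x3fffff ); /* sethi %hi(_handler), %g3 */
  *lo = (uint32_t) ( handler & 0x3ff );              /* jmp   %g3 + %lo(_handler) */
}
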
/*
 *  The size of the floating point context area.
 */

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

#endif

/*
 *  Amount of extra stack (above minimum stack size) required by
 *  MPCI receive server thread.  Remember that in a multiprocessor
 *  system this thread must exist and be able to process all directives.
 */

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 1024

/*
 *  This defines the number of entries in the ISR_Vector_table managed
 *  by the executive.
 *
 *  On the SPARC, there are really only 256 vectors.  However, the executive
 *  has no easy, fast, reliable way to determine which traps are synchronous
 *  and which are asynchronous.  By default, synchronous traps return to the
 *  instruction which caused the interrupt.  So if you install a software
 *  trap handler as an executive interrupt handler (which is desirable since
 *  RTEMS takes care of window and register issues), then the executive needs
 *  to know that the return address is to the trap rather than the instruction
 *  following the trap.
 *
 *  So vectors 0 through 255 are treated as regular asynchronous traps which
 *  provide the "correct" return address.  Vectors 256 through 511 are assumed
 *  by the executive to be synchronous and to require that the return address
 *  be fudged.
 *
 *  If you use this mechanism to install a trap handler which must reexecute
 *  the instruction which caused the trap, then it should be installed as
 *  an asynchronous trap.  This will avoid the executive changing the return
 *  address.
 */
/* On SPARC v9, there are 512 vectors. The same philosophy applies to
 * vector installation and use; we just provide a larger table.
 */
#define CPU_INTERRUPT_NUMBER_OF_VECTORS     512
#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER 1023

#define SPARC_SYNCHRONOUS_TRAP_BIT_MASK     0x200
#define SPARC_ASYNCHRONOUS_TRAP( _trap )    (_trap)
#define SPARC_SYNCHRONOUS_TRAP( _trap )     ((_trap) + 512 )

#define SPARC_REAL_TRAP_NUMBER( _trap )     ((_trap) % 512)

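/*
 * Illustrative note (not part of the original header): installing real trap
 * type 0x09 with SPARC_SYNCHRONOUS_TRAP( 0x09 ) yields vector 521, and
 * SPARC_REAL_TRAP_NUMBER( 521 ) recovers 9; SPARC_ASYNCHRONOUS_TRAP( 0x09 )
 * leaves the vector at 9.
 */
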
/*
 *  This is defined if the port has a special way to report the ISR nesting
 *  level.  Most ports maintain the variable _ISR_Nest_level.
 */

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/*
 *  Should be large enough to run all tests.  This ensures
 *  that a "reasonable" small application should not have any problems.
 *
 *  This appears to be a fairly generous number for the SPARC since it
 *  represents a call depth of about 20 routines based on the minimum
 *  stack frame.
 */

#define CPU_STACK_MINIMUM_SIZE  (1024*8)

#define CPU_SIZEOF_POINTER 8

/*
 *  CPU's worst alignment requirement for data types on a byte boundary.  This
 *  alignment does not take into account the requirements for the stack.
 *
 *  On the SPARC, this is required for double word loads and stores.
 *
 *  Note: quad-word loads/stores need alignment of 16, but currently supported
 *  architectures do not provide HW implemented quad-word operations.
 */

#define CPU_ALIGNMENT      8

/*
 *  This number corresponds to the byte alignment requirement for the
 *  heap handler.  This alignment requirement may be stricter than that
 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
 *  common for the heap to follow the same alignment requirement as
 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
 *  then this should be set to CPU_ALIGNMENT.
 *
 *  NOTE:  This does not have to be a power of 2.  It does have to
 *         be greater than or equal to CPU_ALIGNMENT.
 */

#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT

/*
 *  This number corresponds to the byte alignment requirement for memory
 *  buffers allocated by the partition manager.  This alignment requirement
 *  may be stricter than that for the data types alignment specified by
 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
 *
 *  NOTE:  This does not have to be a power of 2.  It does have to
 *         be greater than or equal to CPU_ALIGNMENT.
 */

#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT

/*
 *  This number corresponds to the byte alignment requirement for the
 *  stack.  This alignment requirement may be stricter than that for the
 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
 *  is strict enough for the stack, then this should be set to 0.
 *
 *  NOTE:  This must be a power of 2, either 0 or greater than CPU_ALIGNMENT.
 *
 *  The alignment restrictions for the SPARC are not that strict but this
 *  should ensure that the stack is always sufficiently aligned so that the
 *  window overflow, underflow, and flush routines can use double word loads
 *  and stores.
 */

#define CPU_STACK_ALIGNMENT        16

#ifndef ASM

/*
 *  ISR handler macros
 */

/*
 *  Support routine to initialize the RTEMS vector table after it is allocated.
 */

#define _CPU_Initialize_vectors()

/*
 *  Disable all interrupts for a critical section.  The previous
 *  level is returned in _level.
 */

#define _CPU_ISR_Disable( _level ) \
  (_level) = sparc_disable_interrupts()

/*
 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
 *  This indicates the end of a critical section.  The parameter
 *  _level is not modified.
 */

#define _CPU_ISR_Enable( _level ) \
  sparc_enable_interrupts( _level )

/*
 *  This temporarily restores interrupts to _level before immediately
 *  disabling them again.  This is used to divide long critical
 *  sections into two or more parts.  The parameter _level is not
 *  modified.
 */

#define _CPU_ISR_Flash( _level ) \
   sparc_flash_interrupts( _level )

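/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * critical section built from the macros above.  _CPU_ISR_Flash briefly
 * re-enables interrupts in the middle of a long critical section.
 */
static inline void _CPU_ISR_critical_section_example( void )
{
  uint32_t level;

  _CPU_ISR_Disable( level );   /* level receives the previous interrupt state */
  /* ... first part of the critical section ... */
  _CPU_ISR_Flash( level );     /* allow pending interrupts to be serviced */
  /* ... second part of the critical section ... */
  _CPU_ISR_Enable( level );    /* restore the previous interrupt state */
}
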
RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
{
  return ( level & SPARC_PSTATE_IE_MASK ) != 0;
}

/*
 *  Map interrupt level in task mode onto the hardware that the CPU
 *  actually provides.  Currently, interrupt levels which do not
 *  map onto the CPU in a straight fashion are undefined.
 */

#define _CPU_ISR_Set_level( _newlevel ) \
   sparc_enable_interrupts( _newlevel )

uint32_t   _CPU_ISR_Get_level( void );

/* end of ISR handler macros */

/* Context handler macros */

/*
 *  Initialize the context to a state suitable for starting a
 *  task after a context restore operation.  Generally, this
 *  involves:
 *
 *     - setting a starting address
 *     - preparing the stack
 *     - preparing the stack and frame pointers
 *     - setting the proper interrupt level in the context
 *     - initializing the floating point context
 *
 *  NOTE:  Implemented as a subroutine for the SPARC port.
 */

void _CPU_Context_Initialize(
  Context_Control  *the_context,
  void         *stack_base,
  uint32_t          size,
  uint32_t          new_level,
  void             *entry_point,
  bool              is_fp,
  void             *tls_area
);

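/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * caller would set up a context for a new task roughly as follows.  The
 * names example_entry, stack_area and stack_size are placeholders, and the
 * interrupt level 0 (fully enabled) is only an assumption for the example.
 */
static inline void _CPU_Context_Initialize_example(
  Context_Control *context,
  void            *stack_area,
  uint32_t         stack_size,
  void            *example_entry
)
{
  _CPU_Context_Initialize(
    context,
    stack_area,     /* base of the task stack area */
    stack_size,     /* size of the stack area in bytes */
    0,              /* interrupt level for the new context */
    example_entry,  /* task entry point */
    false,          /* not a FLOATING_POINT task */
    NULL            /* no thread-local storage area */
  );
}
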
/*
 *  This macro is invoked from _Thread_Handler to do whatever CPU
 *  specific magic is required that must be done in the context of
 *  the thread when it starts.
 *
 *  On the SPARC, this is setting the frame pointer so GDB is happy.
 *  It makes GDB stop unwinding at _Thread_Handler: the previous register
 *  window frame pointer is 0 and the calling address must be in a function
 *  starting with a SAVE instruction.  If the return address is in a leaf
 *  function (no SAVE), GDB will not look at the previous register window's
 *  frame pointer.
 *
 *  _Thread_Handler is known to start with SAVE.
 */

#define _CPU_Context_Initialization_at_thread_begin() \
  do { \
    __asm__ volatile ("set _Thread_Handler,%%i7\n"::); \
  } while (0)

/*
 *  This routine is responsible for somehow restarting the currently
 *  executing task.
 *
 *  On the SPARC, this is relatively painless but requires a small
 *  amount of wrapper code before using the regular restore code of
 *  the context switch.
 */

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

/*
 *  This routine initializes the FP context area passed to it.
 *
 *  The SPARC allows us to use the simple initialization model
 *  in which an "initial" FP context was saved into _CPU_Null_fp_context
 *  at CPU initialization and it is simply copied into the destination
 *  context.
 */

#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
   *(*(_destination)) = _CPU_Null_fp_context; \
  } while (0)

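/*
 * Illustrative sketch (not part of the original header): the macro above
 * takes the address of a pointer to the FP area (Context_Control_fp **), so
 * a hypothetical caller would use it like this.
 */
static inline void _CPU_Context_Initialize_fp_example(
  Context_Control_fp **fp_area
)
{
  /* Copies _CPU_Null_fp_context into the area that *fp_area points to. */
  _CPU_Context_Initialize_fp( fp_area );
}
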
/* end of Context handler macros */

/* Fatal Error manager macros */

/*
 *  This routine copies _error into a known place -- typically a stack
 *  location or a register, optionally disables interrupts, and
 *  halts/stops the CPU.
 */

#define _CPU_Fatal_halt( _source, _error ) \
  do { \
    uint32_t   level; \
    \
    level = sparc_disable_interrupts(); \
    __asm__ volatile ( "mov  %0, %%g1 " : "=r" (level) : "0" (level) ); \
    while (1); /* loop forever */ \
  } while (0)

/* end of Fatal Error manager macros */

/* Bitfield handler macros */

/*
 *  The SPARC port uses the generic C algorithm for bitfield scan if the
 *  CPU model does not have a scan instruction.
 */

#if ( SPARC_HAS_BITSCAN == 0 )
#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
#else
#error "scan instruction not currently supported by RTEMS!!"
#endif

/* end of Bitfield handler macros */

/* Priority handler macros */

/*
 *  The SPARC port uses the generic C algorithm for bitfield scan if the
 *  CPU model does not have a scan instruction.
 */

#if ( SPARC_HAS_BITSCAN == 1 )
#error "scan instruction not currently supported by RTEMS!!"
#endif

/* end of Priority handler macros */

/* functions */

/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 */

void _CPU_Initialize(void);

/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs new_handler to be directly called from the trap
 *  table.
 */

void _CPU_ISR_install_raw_handler(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.
 */

void _CPU_ISR_install_vector(
  uint64_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)

/*
 *  _CPU_Thread_Idle_body
 *
 *  Some SPARC implementations have low power, sleep, or idle modes.  This
 *  tries to take advantage of those modes.
 */

void *_CPU_Thread_Idle_body( uintptr_t ignored );

#endif /* CPU_PROVIDES_IDLE_THREAD_BODY */

/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.
 */

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_NO_RETURN;

/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 */

void _CPU_Context_save_fp(
  Context_Control_fp **fp_context_ptr
);

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 */

void _CPU_Context_restore_fp(
  Context_Control_fp **fp_context_ptr
);

static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
{
  /* TODO */
}

static inline void _CPU_Context_validate( uintptr_t pattern )
{
  while (1) {
    /* TODO */
  }
}

/* FIXME */
typedef CPU_Interrupt_frame CPU_Exception_frame;

void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

/*
 *  CPU_swap_u32
 *
 *  The following routine swaps the endian format of an unsigned int.
 *  It must be static because it is referenced indirectly.
 *
 *  This version will work on any processor, but if you come across a better
 *  way for the SPARC PLEASE use it.  The most common way to swap a 32-bit
 *  entity as shown below is not any more efficient on the SPARC.
 *
 *     swap least significant two bytes with 16-bit rotate
 *     swap upper and lower 16-bits
 *     swap most significant two bytes with 16-bit rotate
 *
 *  It is not obvious how the SPARC can do significantly better than the
 *  generic code.  gcc 2.7.0 only generates about 12 instructions for the
 *  following code at optimization level four (i.e. -O4).
 */

static inline uint32_t CPU_swap_u32(
  uint32_t value
)
{
  uint32_t   byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value        & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return( swapped );
}

#define CPU_swap_u16( value ) \
  (((value&0xff) << 8) | ((value >> 8)&0xff))

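/*
 * Illustrative sketch (not part of the original header): worked examples of
 * the two byte-swap helpers; the function below is hypothetical and only
 * demonstrates the expected results.
 */
static inline bool _CPU_swap_examples_hold( void )
{
  /* 0x12345678 byte-reversed is 0x78563412; 0x1234 byte-swapped is 0x3412. */
  return CPU_swap_u32( 0x12345678 ) == 0x78563412
    && CPU_swap_u16( 0x1234 ) == 0x3412;
}
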
typedef uint32_t CPU_Counter_ticks;

CPU_Counter_ticks _CPU_Counter_read( void );

static inline CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
)
{
  return second - first;
}

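/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * measurement of elapsed counter ticks around a unit of work.
 */
static inline CPU_Counter_ticks _CPU_Counter_measure_example(
  void ( *work )( void )
)
{
  CPU_Counter_ticks first = _CPU_Counter_read();

  ( *work )();

  /* The subtraction wraps correctly for an up-counting 32-bit counter. */
  return _CPU_Counter_difference( _CPU_Counter_read(), first );
}
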
#endif /* ASM */

#ifdef __cplusplus
}
#endif

#endif