source: rtems/cpukit/score/cpu/sparc64/rtems/score/cpu.h @ 24bf11e

4.115
Last change on this file since 24bf11e was 24bf11e, checked in by Sebastian Huber <sebastian.huber@…>, on 02/12/14 at 09:31:38

score: Add CPU counter support

Add a CPU counter interface to allow access to a free-running counter.
It is useful to measure short time intervals. This can be used for
example to enable profiling of critical low-level functions.

Add two busy wait functions rtems_counter_delay_ticks() and
rtems_counter_delay_nanoseconds() implemented via the CPU counter.

  • Property mode set to 100644
File size: 31.7 KB
Line 
1/**
2 * @file
3 *
4 * @brief SPARC64 CPU Department Source
5 *
6 * This include file contains information pertaining to the port of
7 * the executive to the SPARC64 processor.
8 */
9
10/*
11 *
12 *
13 *  COPYRIGHT (c) 1989-2006.
14 *  On-Line Applications Research Corporation (OAR).
15 *
16 *  This file is based on the SPARC cpu.h file. Modifications are made
17 *  to support the SPARC64 processor.
18 *    COPYRIGHT (c) 2010. Gedare Bloom.
19 *
20 *  The license and distribution terms for this file may be
21 *  found in the file LICENSE in this distribution or at
22 *  http://www.rtems.com/license/LICENSE.
23 */
24
25#ifndef _RTEMS_SCORE_CPU_H
26#define _RTEMS_SCORE_CPU_H
27
28#ifdef __cplusplus
29extern "C" {
30#endif
31
32#include <rtems/score/types.h>
33#include <rtems/score/sparc64.h>
34
/* conditional compilation parameters */

/*
 *  Should the calls to _Thread_Enable_dispatch be inlined?
 *
 *  If TRUE, then they are inlined.
 *  If FALSE, then a subroutine call is made.
 */

#define CPU_INLINE_ENABLE_DISPATCH       TRUE

/*
 *  Should the body of the search loops in _Thread_queue_Enqueue_priority
 *  be unrolled one time?  If unrolled, each iteration of the loop examines
 *  two "nodes" on the chain being searched.  Otherwise, only one node
 *  is examined per iteration.
 *
 *  If TRUE, then the loops are unrolled.
 *  If FALSE, then the loops are not unrolled.
 *
 *  This parameter could go either way on the SPARC.  The interrupt flash
 *  code is relatively lengthy given the requirements for nops following
 *  writes to the psr.  But if the clock speed were high enough, this would
 *  not represent a great deal of time.
 */

#define CPU_UNROLL_ENQUEUE_PRIORITY      TRUE

/*
 *  Does the executive manage a dedicated interrupt stack in software?
 *
 *  If TRUE, then a stack is allocated in _ISR_Handler_initialization.
 *  If FALSE, nothing is done.
 *
 *  The SPARC does not have a dedicated HW interrupt stack and one has
 *  been implemented in SW.
 */

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK   TRUE

/*
 *  Does the CPU follow the simple vectored interrupt model?
 *
 *  If TRUE, then RTEMS allocates the vector table it internally manages.
 *  If FALSE, then the BSP is assumed to allocate and manage the vector
 *  table.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS TRUE

/*
 *  Does this CPU have hardware support for a dedicated interrupt stack?
 *
 *  If TRUE, then it must be installed during initialization.
 *  If FALSE, then no installation is performed.
 *
 *  The SPARC does not have a dedicated HW interrupt stack.
 */

#define CPU_HAS_HARDWARE_INTERRUPT_STACK  FALSE

/*
 *  Do we allocate a dedicated interrupt stack in the Interrupt Manager?
 *
 *  If TRUE, then the memory is allocated during initialization.
 *  If FALSE, then the Interrupt Manager does not allocate it and the
 *  memory must be provided elsewhere (e.g. by the BSP).
 */

#define CPU_ALLOCATE_INTERRUPT_STACK      TRUE

/*
 *  Does the RTEMS invoke the user's ISR with the vector number and
 *  a pointer to the saved interrupt frame (1) or just the vector
 *  number (0)?
 */

#define CPU_ISR_PASSES_FRAME_POINTER 0

/*
 *  Does the CPU have hardware floating point?
 *
 *  If TRUE, then the FLOATING_POINT task attribute is supported.
 *  If FALSE, then the FLOATING_POINT task attribute is ignored.
 */

#if ( SPARC_HAS_FPU == 1 )
#define CPU_HARDWARE_FP     TRUE
#else
#define CPU_HARDWARE_FP     FALSE
#endif
#define CPU_SOFTWARE_FP     FALSE

/*
 *  Are all tasks FLOATING_POINT tasks implicitly?
 *
 *  If TRUE, then the FLOATING_POINT task attribute is assumed.
 *  If FALSE, then the FLOATING_POINT task attribute is followed.
 */

#define CPU_ALL_TASKS_ARE_FP     FALSE

/*
 *  Should the IDLE task have a floating point context?
 *
 *  If TRUE, then the IDLE task is created as a FLOATING_POINT task
 *  and it has a floating point context which is switched in and out.
 *  If FALSE, then the IDLE task does not have a floating point context.
 */

#define CPU_IDLE_TASK_IS_FP      FALSE

/*
 *  Should the saving of the floating point registers be deferred
 *  until a context switch is made to another different floating point
 *  task?
 *
 *  If TRUE, then the floating point context will not be stored until
 *  necessary.  It will remain in the floating point registers and not
 *  disturbed until another floating point task is switched to.
 *
 *  If FALSE, then the floating point context is saved when a floating
 *  point task is switched out and restored when the next floating point
 *  task is restored.  The state of the floating point registers between
 *  those two operations is not specified.
 */

#define CPU_USE_DEFERRED_FP_SWITCH       TRUE

/*
 *  Does this port provide a CPU dependent IDLE task implementation?
 *
 *  If TRUE, then the routine _CPU_Thread_Idle_body
 *  must be provided and is the default IDLE thread body instead of
 *  the generic _Thread_Idle_body.
 *
 *  If FALSE, then use the generic IDLE thread body if the BSP does
 *  not provide one.
 */

#define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE

/*
 *  Does the stack grow up (toward higher addresses) or down
 *  (toward lower addresses)?
 *
 *  If TRUE, then the stack grows upward.
 *  If FALSE, then the stack grows toward smaller addresses.
 *
 *  The stack grows to lower addresses on the SPARC.
 */

#define CPU_STACK_GROWS_UP               FALSE

/*
 *  The following is the variable attribute used to force alignment
 *  of critical data structures.  On some processors it may make
 *  sense to have these aligned on tighter boundaries than
 *  the minimum requirements of the compiler in order to have as
 *  much of the critical data area as possible in a cache line.
 *
 *  The SPARC does not appear to have particularly strict alignment
 *  requirements.  This value (16) was chosen to take advantages of caches.
 *
 *  SPARC 64 requirements on floating point alignment is at least 8,
 *  and is 16 if quad-word fp instructions are available (e.g. LDQF).
 */

#define CPU_STRUCTURE_ALIGNMENT          __attribute__ ((aligned (16)))

#define CPU_TIMESTAMP_USE_INT64_INLINE TRUE

/*
 *  Define what is required to specify how the network to host conversion
 *  routines are handled.
 */

#define CPU_BIG_ENDIAN                           TRUE
#define CPU_LITTLE_ENDIAN                        FALSE

/*
 *  The following defines the number of bits actually used in the
 *  interrupt field of the task mode.  How those bits map to the
 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
 *
 *  The SPARC v9 has 16 interrupt levels in the PIL field of the PSR.
 */

#define CPU_MODES_INTERRUPT_MASK   0x0000000F

/* Size of the CPU specific per-CPU state (none for this port). */
#define CPU_PER_CPU_CONTROL_SIZE 0
228
/*
 *  This structure represents the organization of the minimum stack frame
 *  for the SPARC.  More framing information is required in certain situations
 *  such as when there are a large number of out parameters or when the callee
 *  must save floating point registers.
 */

#ifndef ASM

/*
 *  CPU specific per-CPU state.  The SPARC64 port keeps none, hence the
 *  empty structure (matching CPU_PER_CPU_CONTROL_SIZE of 0).
 */
typedef struct {
  /* There is no CPU specific per-CPU state */
} CPU_Per_CPU_control;

/*
 *  Minimum stack frame layout: the register window save area (local and
 *  in registers), the hidden structure return address slot, slots for
 *  the callee to spill its six register arguments, and one pad word so
 *  the total size (0xC0) stays a multiple of CPU_STACK_ALIGNMENT (16).
 */
typedef struct {
  uint64_t    l0;                      /* local registers %l0-%l7 */
  uint64_t    l1;
  uint64_t    l2;
  uint64_t    l3;
  uint64_t    l4;
  uint64_t    l5;
  uint64_t    l6;
  uint64_t    l7;
  uint64_t    i0;                      /* in registers %i0-%i7 */
  uint64_t    i1;
  uint64_t    i2;
  uint64_t    i3;
  uint64_t    i4;
  uint64_t    i5;
  uint64_t    i6_fp;                   /* %i6 doubles as the frame pointer */
  uint64_t    i7;                      /* return address register */
  void       *structure_return_address;
  /*
   *  The following are for the callee to save the register arguments in
   *  should this be necessary.
   */
  uint64_t    saved_arg0;
  uint64_t    saved_arg1;
  uint64_t    saved_arg2;
  uint64_t    saved_arg3;
  uint64_t    saved_arg4;
  uint64_t    saved_arg5;
  uint64_t    pad0;                    /* keeps frame size 16-byte aligned */
}  CPU_Minimum_stack_frame;

#endif /* !ASM */
274
/*
 *  Byte offsets of the CPU_Minimum_stack_frame fields for assembly
 *  routines.  These must be kept in sync with the structure above
 *  (each register slot is 8 bytes).
 */
#define CPU_STACK_FRAME_L0_OFFSET             0x00
#define CPU_STACK_FRAME_L1_OFFSET             0x08
#define CPU_STACK_FRAME_L2_OFFSET             0x10
#define CPU_STACK_FRAME_L3_OFFSET             0x18
#define CPU_STACK_FRAME_L4_OFFSET             0x20
#define CPU_STACK_FRAME_L5_OFFSET             0x28
#define CPU_STACK_FRAME_L6_OFFSET             0x30
#define CPU_STACK_FRAME_L7_OFFSET             0x38
#define CPU_STACK_FRAME_I0_OFFSET             0x40
#define CPU_STACK_FRAME_I1_OFFSET             0x48
#define CPU_STACK_FRAME_I2_OFFSET             0x50
#define CPU_STACK_FRAME_I3_OFFSET             0x58
#define CPU_STACK_FRAME_I4_OFFSET             0x60
#define CPU_STACK_FRAME_I5_OFFSET             0x68
#define CPU_STACK_FRAME_I6_FP_OFFSET          0x70
#define CPU_STACK_FRAME_I7_OFFSET             0x78
#define CPU_STRUCTURE_RETURN_ADDRESS_OFFSET   0x80
#define CPU_STACK_FRAME_SAVED_ARG0_OFFSET     0x88
#define CPU_STACK_FRAME_SAVED_ARG1_OFFSET     0x90
#define CPU_STACK_FRAME_SAVED_ARG2_OFFSET     0x98
#define CPU_STACK_FRAME_SAVED_ARG3_OFFSET     0xA0
#define CPU_STACK_FRAME_SAVED_ARG4_OFFSET     0xA8
#define CPU_STACK_FRAME_SAVED_ARG5_OFFSET     0xB0
#define CPU_STACK_FRAME_PAD0_OFFSET           0xB8

/* Total frame size; must equal sizeof(CPU_Minimum_stack_frame). */
#define CPU_MINIMUM_STACK_FRAME_SIZE          0xC0
301
/*
 * Contexts
 *
 *  Generally there are 2 types of context to save.
 *     1. Interrupt registers to save
 *     2. Task level registers to save
 *
 *  This means we have the following 3 context items:
 *     1. task level context stuff::  Context_Control
 *     2. floating point task stuff:: Context_Control_fp
 *     3. special interrupt level context :: Context_Control_interrupt
 *
 *  On the SPARC, we are relatively conservative in that we save most
 *  of the CPU state in the context area.
 *
 *  NOTE(review): the remark below about the ET (enable trap) bit and the
 *  CWP field of the PSR is inherited from the 32-bit SPARC port; SPARC V9
 *  organizes these differently — confirm against the V9 manual.
 *  The ET and CWP fields are considered system wide resources and are not
 *  maintained on a per-thread basis.
 */

#ifndef ASM

/*
 *  Task-level register context saved/restored by _CPU_Context_switch.
 *  Holds the global (%g1-%g7), local, in, and out register sets plus the
 *  per-thread ISR dispatch disable flag.
 */
typedef struct {
    uint64_t   g1;              /* global registers (%g0 is hardwired to 0) */
    uint64_t   g2;
    uint64_t   g3;
    uint64_t   g4;
    uint64_t   g5;
    uint64_t   g6;
    uint64_t   g7;

    uint64_t   l0;              /* local registers */
    uint64_t   l1;
    uint64_t   l2;
    uint64_t   l3;
    uint64_t   l4;
    uint64_t   l5;
    uint64_t   l6;
    uint64_t   l7;

    uint64_t   i0;              /* in registers */
    uint64_t   i1;
    uint64_t   i2;
    uint64_t   i3;
    uint64_t   i4;
    uint64_t   i5;
    uint64_t   i6_fp;           /* %i6 is the frame pointer */
    uint64_t   i7;

    uint64_t   o0;              /* out registers */
    uint64_t   o1;
    uint64_t   o2;
    uint64_t   o3;
    uint64_t   o4;
    uint64_t   o5;
    uint64_t   o6_sp;           /* %o6 is the stack pointer */
    uint64_t   o7;

    uint32_t   isr_dispatch_disable;  /* per-thread ISR dispatch disable flag */
    uint32_t   pad;                   /* keeps the structure 8-byte aligned */
} Context_Control;

/* Extract the saved stack pointer from a task context. */
#define _CPU_Context_Get_SP( _context ) \
  (_context)->o6_sp

#endif /* ASM */
366
/*
 *  Offsets of fields within Context_Control for assembly routines.
 *  These must be kept in sync with the structure above (8 bytes per
 *  register slot, 4 bytes for the two trailing uint32_t fields).
 */

#define G1_OFFSET    0x00
#define G2_OFFSET    0x08
#define G3_OFFSET    0x10
#define G4_OFFSET    0x18
#define G5_OFFSET    0x20
#define G6_OFFSET    0x28
#define G7_OFFSET    0x30

#define L0_OFFSET    0x38
#define L1_OFFSET    0x40
#define L2_OFFSET    0x48
#define L3_OFFSET    0x50
#define L4_OFFSET    0x58
#define L5_OFFSET    0x60
#define L6_OFFSET    0x68
#define L7_OFFSET    0x70

#define I0_OFFSET    0x78
#define I1_OFFSET    0x80
#define I2_OFFSET    0x88
#define I3_OFFSET    0x90
#define I4_OFFSET    0x98
#define I5_OFFSET    0xA0
#define I6_FP_OFFSET    0xA8
#define I7_OFFSET 0xB0

#define O0_OFFSET    0xB8
#define O1_OFFSET    0xC0
#define O2_OFFSET    0xC8
#define O3_OFFSET    0xD0
#define O4_OFFSET    0xD8
#define O5_OFFSET    0xE0
#define O6_SP_OFFSET    0xE8
#define O7_OFFSET 0xF0

#define ISR_DISPATCH_DISABLE_STACK_OFFSET 0xF8
#define ISR_PAD_OFFSET 0xFC

/* Total size; must equal sizeof(Context_Control). */
#define CONTEXT_CONTROL_SIZE 0x100
410
/*
 *  The floating point context area.
 */

#ifndef ASM

/*
 *  Floating point register context.  The first sixteen entries are
 *  double-register pairs (%f0-%f31 viewed as doubles); the remainder
 *  are the upper even-numbered double registers (%f32-%f62) followed
 *  by the floating point state register.
 */
typedef struct {
    double      f0;     /* f0-f1 */
    double      f2;     /* f2-f3 */
    double      f4;     /* f4-f5 */
    double      f6;     /* f6-f7 */
    double      f8;     /* f8-f9 */
    double      f10;    /* f10-f11 */
    double      f12;    /* f12-f13 */
    double      f14;    /* f14-f15 */
    double      f16;    /* f16-f17 */
    double      f18;    /* f18-f19 */
    double      f20;    /* f20-f21 */
    double      f22;    /* f22-f23 */
    double      f24;    /* f24-f25 */
    double      f26;    /* f26-f27 */
    double      f28;    /* f28-f29 */
    double      f30;    /* f30-f31 */
    double      f32;
    double      f34;
    double      f36;
    double      f38;
    double      f40;
    double      f42;
    double      f44;
    double      f46;
    double      f48;
    double      f50;
    double      f52;
    double      f54;
    double      f56;
    double      f58;
    double      f60;
    double      f62;
    uint64_t    fsr;    /* floating point state register */
} Context_Control_fp;

#endif /* !ASM */
454
/*
 *  Offsets of fields within Context_Control_fp for assembly routines.
 *
 *  NOTE(review): several of these macro names use the letter 'O' where
 *  the digit '0' would be expected (FO_OFFSET, F1O_OFFSET, F2O_OFFSET,
 *  F3O_OFFSET, F4O_OFFSET, F5O_OFFSET, F6O_OFFSET).  They are kept
 *  as-is because assembly sources may reference these exact names.
 */

#define FO_OFFSET    0x00
#define F2_OFFSET    0x08
#define F4_OFFSET    0x10
#define F6_OFFSET    0x18
#define F8_OFFSET    0x20
#define F1O_OFFSET   0x28
#define F12_OFFSET   0x30
#define F14_OFFSET   0x38
#define F16_OFFSET   0x40
#define F18_OFFSET   0x48
#define F2O_OFFSET   0x50
#define F22_OFFSET   0x58
#define F24_OFFSET   0x60
#define F26_OFFSET   0x68
#define F28_OFFSET   0x70
#define F3O_OFFSET   0x78
#define F32_OFFSET   0x80
#define F34_OFFSET   0x88
#define F36_OFFSET   0x90
#define F38_OFFSET   0x98
#define F4O_OFFSET   0xA0
#define F42_OFFSET   0xA8
#define F44_OFFSET   0xB0
#define F46_OFFSET   0xB8
#define F48_OFFSET   0xC0
#define F5O_OFFSET   0xC8
#define F52_OFFSET   0xD0
#define F54_OFFSET   0xD8
#define F56_OFFSET   0xE0
#define F58_OFFSET   0xE8
#define F6O_OFFSET   0xF0
#define F62_OFFSET   0xF8
#define FSR_OFFSET   0x100

/* Total size; must equal sizeof(Context_Control_fp). */
#define CONTEXT_CONTROL_FP_SIZE 0x108
494
#ifndef ASM

/*
 *  Context saved on stack for an interrupt.
 *
 *  NOTE:  The tstate, tpc, and tnpc are saved in this structure
 *         to allow resetting the TL while still being able to return
 *         from a trap later.  The PIL is saved because
 *         if this is an external interrupt, we will mask lower
 *         priority interrupts until finishing. Even though the y register
 *         is deprecated, gcc still uses it.
 */

typedef struct {
  CPU_Minimum_stack_frame  Stack_frame;  /* embedded minimum stack frame */
  uint64_t                 tstate;       /* trap state register */
  uint64_t                 tpc;          /* trap program counter */
  uint64_t                 tnpc;         /* trap next program counter */
  uint64_t                 pil;          /* processor interrupt level */
  uint64_t                 y;            /* deprecated but still used by gcc */
  uint64_t                 g1;           /* volatile global registers */
  uint64_t                 g2;
  uint64_t                 g3;
  uint64_t                 g4;
  uint64_t                 g5;
  uint64_t                 g6;
  uint64_t                 g7;
  uint64_t                 o0;           /* volatile out registers */
  uint64_t                 o1;
  uint64_t                 o2;
  uint64_t                 o3;
  uint64_t                 o4;
  uint64_t                 o5;
  uint64_t                 o6_sp;        /* stack pointer */
  uint64_t                 o7;
  uint64_t                 tvec;         /* trap vector */
} CPU_Interrupt_frame;

#endif /* ASM */
534
/*
 *  Offsets of fields within CPU_Interrupt_frame for assembly routines.
 *
 *  The register save area begins immediately after the embedded minimum
 *  stack frame, so each offset is CPU_MINIMUM_STACK_FRAME_SIZE plus the
 *  field's displacement.  The expansions are fully parenthesized so the
 *  macros are safe to use inside larger expressions (e.g. subtraction
 *  or multiplication) without precedence surprises.
 */

#define ISF_STACK_FRAME_OFFSET 0x00
#define ISF_TSTATE_OFFSET      (CPU_MINIMUM_STACK_FRAME_SIZE + 0x00)
#define ISF_TPC_OFFSET         (CPU_MINIMUM_STACK_FRAME_SIZE + 0x08)
#define ISF_TNPC_OFFSET        (CPU_MINIMUM_STACK_FRAME_SIZE + 0x10)
#define ISF_PIL_OFFSET         (CPU_MINIMUM_STACK_FRAME_SIZE + 0x18)
#define ISF_Y_OFFSET           (CPU_MINIMUM_STACK_FRAME_SIZE + 0x20)
#define ISF_G1_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x28)
#define ISF_G2_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x30)
#define ISF_G3_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x38)
#define ISF_G4_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x40)
#define ISF_G5_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x48)
#define ISF_G6_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x50)
#define ISF_G7_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x58)
#define ISF_O0_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x60)
#define ISF_O1_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x68)
#define ISF_O2_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x70)
#define ISF_O3_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x78)
#define ISF_O4_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x80)
#define ISF_O5_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x88)
#define ISF_O6_SP_OFFSET       (CPU_MINIMUM_STACK_FRAME_SIZE + 0x90)
#define ISF_O7_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x98)
#define ISF_TVEC_OFFSET        (CPU_MINIMUM_STACK_FRAME_SIZE + 0xA0)

/* Total size; must equal sizeof(CPU_Interrupt_frame). */
#define CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE (CPU_MINIMUM_STACK_FRAME_SIZE + 0xA8)
#ifndef ASM
/*
 *  This variable contains the initial context for the FP unit.  It is
 *  filled in by _CPU_Initialize and copied into the task's FP
 *  context area during _CPU_Context_Initialize.
 */

SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context CPU_STRUCTURE_ALIGNMENT;

/*
 *  This stack is allocated by the Interrupt Manager and the switch
 *  is performed in _ISR_Handler.  These variables contain pointers
 *  to the lowest and highest addresses in the chunk of memory allocated
 *  for the interrupt stack.  Since it is unknown whether the stack
 *  grows up or down (in general), this give the CPU dependent
 *  code the option of picking the version it wants to use.  Thus
 *  both must be present if either is.
 *
 *  The SPARC supports a software based interrupt stack and these
 *  are required.
 *
 *  NOTE(review): the declarations below are commented out in this port;
 *  confirm where the interrupt stack pointers are declared instead.
 */
/*
SCORE_EXTERN void *_CPU_Interrupt_stack_low;
SCORE_EXTERN void *_CPU_Interrupt_stack_high;
*/
/*
 *  This flag is context switched with each thread.  It indicates
 *  that THIS thread has an _ISR_Dispatch stack frame on its stack.
 *  By using this flag, we can avoid nesting more interrupt dispatching
 *  attempts on a previously interrupted thread's stack.
 */

SCORE_EXTERN volatile uint32_t _CPU_ISR_Dispatch_disable;

/*
 *  The following type defines an entry in the SPARC's trap table.
 *
 *  NOTE: The instructions chosen are RTEMS dependent although one is
 *        obligated to use two of the four instructions to perform a
 *        long jump.  The other instructions load one register with the
 *        trap type (a.k.a. vector) and another with the psr.
 */
/* For SPARC V9, we must use 6 of these instructions to perform a long
 * jump, because the _handler value is now 64-bits. We also need to store
 * temporary values in the global register set at this trap level. Because
 * the handler runs at TL > 0 with GL > 0, it should be OK to use g2 and g3
 * to pass parameters to ISR_Handler.
 *
 * The instruction sequence is now more like:
 *      rdpr %tstate, %g4
 *      setx _handler, %g2, %g3
 *      jmp %g3+0
 *      mov _vector, %g2
 */
typedef struct {
  uint32_t     rdpr_tstate_g4;                  /* rdpr  %tstate, %g4        */
  uint32_t     sethi_of_hh_handler_to_g2;       /* sethi %hh(_handler), %g2  */
  uint32_t     or_g2_hm_handler_to_g2;          /* or %g2, %hm(_handler), %g2 */
  uint32_t     sllx_g2_by_32_to_g2;             /* sllx   %g2, 32, %g2 */
  uint32_t     sethi_of_handler_to_g3;          /* sethi %hi(_handler), %g3  */
  uint32_t     or_g3_g2_to_g3;                  /* or     %g3, %g2, %g3 */
  uint32_t     jmp_to_low_of_handler_plus_g3;   /* jmp   %g3 + %lo(_handler) */
  uint32_t     mov_vector_g2;                   /* mov   _vector, %g2        */
} CPU_Trap_table_entry;

/*
 *  This is the set of opcodes for the instructions loaded into a trap
 *  table entry.  The routine which installs a handler is responsible
 *  for filling in the fields for the _handler address and the _vector
 *  trap type.
 *
 *  The constants following this structure are masks for the fields which
 *  must be filled in when the handler is installed.
 */

extern const CPU_Trap_table_entry _CPU_Trap_slot_template;

/*
 *  The size of the floating point context area.
 */

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

#endif
647
/*
 *  Amount of extra stack (above minimum stack size) required by
 *  MPCI receive server thread.  Remember that in a multiprocessor
 *  system this thread must exist and be able to process all directives.
 */

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 1024

/*
 *  This defines the number of entries in the ISR_Vector_table managed
 *  by the executive.
 *
 *  On the SPARC, there are really only 256 vectors.  However, the executive
 *  has no easy, fast, reliable way to determine which traps are synchronous
 *  and which are asynchronous.  By default, synchronous traps return to the
 *  instruction which caused the interrupt.  So if you install a software
 *  trap handler as an executive interrupt handler (which is desirable since
 *  RTEMS takes care of window and register issues), then the executive needs
 *  to know that the return address is to the trap rather than the instruction
 *  following the trap.
 *
 *  So vectors 0 through 255 are treated as regular asynchronous traps which
 *  provide the "correct" return address.  Vectors 256 through 511 are assumed
 *  by the executive to be synchronous and to require that the return address
 *  be fudged.
 *
 *  If you use this mechanism to install a trap handler which must reexecute
 *  the instruction which caused the trap, then it should be installed as
 *  an asynchronous trap.  This will avoid the executive changing the return
 *  address.
 */
/* On SPARC v9, there are 512 vectors. The same philosophy applies to
 * vector installation and use, we just provide a larger table.
 */
#define CPU_INTERRUPT_NUMBER_OF_VECTORS     512
#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER 1023

/* Bit set in a trap number to mark it as synchronous (512 == 0x200). */
#define SPARC_SYNCHRONOUS_TRAP_BIT_MASK     0x200
#define SPARC_ASYNCHRONOUS_TRAP( _trap )    (_trap)
#define SPARC_SYNCHRONOUS_TRAP( _trap )     ((_trap) + 512 )

/* Strip the synchronous marker to recover the hardware trap number. */
#define SPARC_REAL_TRAP_NUMBER( _trap )     ((_trap) % 512)

/*
 *  This is defined if the port has a special way to report the ISR nesting
 *  level.  Most ports maintain the variable _ISR_Nest_level.
 */

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/*
 *  Should be large enough to run all tests.  This ensures
 *  that a "reasonable" small application should not have any problems.
 *
 *  This appears to be a fairly generous number for the SPARC since it
 *  represents a call depth of about 20 routines based on the minimum
 *  stack frame.
 */

#define CPU_STACK_MINIMUM_SIZE  (1024*8)

/* Pointers are 64 bits wide on SPARC V9. */
#define CPU_SIZEOF_POINTER 8

/*
 *  CPU's worst alignment requirement for data types on a byte boundary.  This
 *  alignment does not take into account the requirements for the stack.
 *
 *  On the SPARC, this is required for double word loads and stores.
 *
 *  Note: quad-word loads/stores need alignment of 16, but currently supported
 *  architectures do not provide HW implemented quad-word operations.
 */

#define CPU_ALIGNMENT      8

/*
 *  This number corresponds to the byte alignment requirement for the
 *  heap handler.  This alignment requirement may be stricter than that
 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
 *  common for the heap to follow the same alignment requirement as
 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
 *  then this should be set to CPU_ALIGNMENT.
 *
 *  NOTE:  This does not have to be a power of 2.  It does have to
 *         be greater or equal to than CPU_ALIGNMENT.
 */

#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT

/*
 *  This number corresponds to the byte alignment requirement for memory
 *  buffers allocated by the partition manager.  This alignment requirement
 *  may be stricter than that for the data types alignment specified by
 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
 *
 *  NOTE:  This does not have to be a power of 2.  It does have to
 *         be greater or equal to than CPU_ALIGNMENT.
 */

#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT

/*
 *  This number corresponds to the byte alignment requirement for the
 *  stack.  This alignment requirement may be stricter than that for the
 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
 *  is strict enough for the stack, then this should be set to 0.
 *
 *  NOTE:  This must be a power of 2 either 0 or greater than CPU_ALIGNMENT.
 *
 *  The alignment restrictions for the SPARC are not that strict but this
 *  should ensure that the stack is always sufficiently aligned so that the
 *  window overflow, underflow, and flush routines can use double word loads
 *  and stores.
 */

#define CPU_STACK_ALIGNMENT        16
766
#ifndef ASM

/*
 *  ISR handler macros
 */

/*
 *  Support routine to initialize the RTEMS vector table after it is allocated.
 *
 *  This port requires no extra work, so the macro expands to nothing.
 */

#define _CPU_Initialize_vectors()

/*
 *  Disable all interrupts for a critical section.  The previous
 *  level is returned in _level.
 */

#define _CPU_ISR_Disable( _level ) \
  (_level) = sparc_disable_interrupts()

/*
 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
 *  This indicates the end of a critical section.  The parameter
 *  _level is not modified.
 */

#define _CPU_ISR_Enable( _level ) \
  sparc_enable_interrupts( _level )

/*
 *  This temporarily restores the interrupt to _level before immediately
 *  disabling them again.  This is used to divide long critical
 *  sections into two or more parts.  The parameter _level is not
 *  modified.
 */

#define _CPU_ISR_Flash( _level ) \
   sparc_flash_interrupts( _level )

/*
 *  Map interrupt level in task mode onto the hardware that the CPU
 *  actually provides.  Currently, interrupt levels which do not
 *  map onto the CPU in a straight fashion are undefined.
 */

#define _CPU_ISR_Set_level( _newlevel ) \
   sparc_enable_interrupts( _newlevel)

/* Return the current interrupt level. */
uint32_t   _CPU_ISR_Get_level( void );

/* end of ISR handler macros */
818
/* Context handler macros */

/*
 *  Initialize the context to a state suitable for starting a
 *  task after a context restore operation.  Generally, this
 *  involves:
 *
 *     - setting a starting address
 *     - preparing the stack
 *     - preparing the stack and frame pointers
 *     - setting the proper interrupt level in the context
 *     - initializing the floating point context
 *
 *  NOTE:  Implemented as a subroutine for the SPARC port.
 */

void _CPU_Context_Initialize(
  Context_Control  *the_context,
  void         *stack_base,
  uint32_t          size,
  uint32_t          new_level,
  void             *entry_point,
  bool              is_fp,
  void             *tls_area
);

/*
 *  This macro is invoked from _Thread_Handler to do whatever CPU
 *  specific magic is required that must be done in the context of
 *  the thread when it starts.
 *
 *  On the SPARC, this is setting the frame pointer so GDB is happy.
 *  Make GDB stop unwinding at _Thread_Handler, previous register window
 *  Frame pointer is 0 and calling address must be a function starting
 *  with a SAVE instruction. If return address is a leaf-function (no SAVE)
 *  GDB will not look at prev reg window fp.
 *
 *  _Thread_Handler is known to start with SAVE.
 */

#define _CPU_Context_Initialization_at_thread_begin() \
  do { \
    __asm__ volatile ("set _Thread_Handler,%%i7\n"::); \
  } while (0)
863
/*
 *  This routine is responsible for somehow restarting the currently
 *  executing task.
 *
 *  On the SPARC, this is relatively painless but requires a small
 *  amount of wrapper code before using the regular restore code in
 *  of the context switch.
 *
 *  NOTE(review): the trailing semicolon inside the macro body makes a
 *  following ";" at the call site a harmless empty statement, but can
 *  break an unbraced if/else — confirm all call sites use it as a
 *  statement.
 */

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

/*
 *  The FP context area for the SPARC is a simple structure and nothing
 *  special is required to find the "starting load point"
 */

#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

/*
 *  This routine initializes the FP context area passed to it.
 *
 *  The SPARC allows us to use the simple initialization model
 *  in which an "initial" FP context was saved into _CPU_Null_fp_context
 *  at CPU initialization and it is simply copied into the destination
 *  context.
 *
 *  _destination is a pointer to the FP context pointer, hence the
 *  double dereference below.
 */

#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
   *(*(_destination)) = _CPU_Null_fp_context; \
  } while (0)

/* end of Context handler macros */
899
900/* Fatal Error manager macros */
901
902/*
903 *  This routine copies _error into a known place -- typically a stack
904 *  location or a register, optionally disables interrupts, and
905 *  halts/stops the CPU.
906 */
907
908#define _CPU_Fatal_halt( _error ) \
909  do { \
910    uint32_t   level; \
911    \
912    level = sparc_disable_interrupts(); \
913    __asm__ volatile ( "mov  %0, %%g1 " : "=r" (level) : "0" (level) ); \
914    while (1); /* loop forever */ \
915  } while (0)
916
917/* end of Fatal Error manager macros */
918
919/* Bitfield handler macros */
920
921/*
922 *  The SPARC port uses the generic C algorithm for bitfield scan if the
923 *  CPU model does not have a scan instruction.
924 */
925
926#if ( SPARC_HAS_BITSCAN == 0 )
927#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
928#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
929#else
930#error "scan instruction not currently supported by RTEMS!!"
931#endif
932
933/* end of Bitfield handler macros */
934
/* Priority handler macros */

/*
 *  The SPARC port uses the generic C algorithm for the priority bit map
 *  lookup if the CPU model does not have a scan instruction.
 */
941
942#if ( SPARC_HAS_BITSCAN == 1 )
943#error "scan instruction not currently supported by RTEMS!!"
944#endif
945
946/* end of Priority handler macros */
947
948/* functions */
949
950/*
951 *  _CPU_Initialize
952 *
953 *  This routine performs CPU dependent initialization.
954 */
955
956void _CPU_Initialize(void);
957
/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs new_handler to be directly called from the trap
 *  table.
 *
 *  vector      - trap table index to hook
 *  new_handler - raw handler to install
 *  old_handler - receives the previously installed handler
 *
 *  NOTE(review): vector is uint32_t here but uint64_t in
 *  _CPU_ISR_install_vector below -- confirm which width is intended.
 */

void _CPU_ISR_install_raw_handler(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);
970
/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.
 *
 *  vector      - interrupt vector number
 *  new_handler - RTEMS ISR to dispatch for this vector
 *  old_handler - receives the previously installed handler
 */

void _CPU_ISR_install_vector(
  uint64_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);
982
983#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)
984
985/*
986 *  _CPU_Thread_Idle_body
987 *
988 *  Some SPARC implementations have low power, sleep, or idle modes.  This
989 *  tries to take advantage of those models.
990 */
991
992void *_CPU_Thread_Idle_body( uintptr_t ignored );
993
994#endif /* CPU_PROVIDES_IDLE_THREAD_BODY */
995
/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 *
 *  run  - context of the currently executing thread; its state is
 *         saved here
 *  heir - context of the thread to execute next; its state is
 *         restored from here
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);
1006
/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It restores new_context and never returns to
 *  the caller (see the no-return attribute on the declaration).
 */

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;
1017
/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 *
 *  fp_context_ptr - pointer to the thread's FP context area pointer;
 *                   the FP state is saved into the area it designates
 */

void _CPU_Context_save_fp(
  Context_Control_fp **fp_context_ptr
);
1027
/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 *
 *  fp_context_ptr - pointer to the thread's FP context area pointer;
 *                   the FP state is reloaded from the area it designates
 */

void _CPU_Context_restore_fp(
  Context_Control_fp **fp_context_ptr
);
1037
/*
 *  _CPU_Context_volatile_clobber
 *
 *  Per the port interface this should overwrite the volatile register
 *  set with the given pattern (used by context-validation diagnostics).
 *  Not yet implemented for this port; currently a no-op.
 */
static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
{
  (void) pattern; /* silence unused-parameter warnings until implemented */
  /* TODO: clobber the volatile registers with the pattern */
}
1042
/*
 *  _CPU_Context_validate
 *
 *  Per the port interface this should fill registers with the given
 *  pattern and loop, checking they remain intact (context-validation
 *  diagnostics).  Not yet implemented for this port; it simply spins.
 *  This function never returns.
 */
static inline void _CPU_Context_validate( uintptr_t pattern )
{
  (void) pattern; /* silence unused-parameter warnings until implemented */

  while (1) {
    /* TODO: fill and re-check registers using the pattern */
  }
}
1049
/* FIXME: no dedicated exception frame layout has been defined for this
 * port yet; the interrupt frame is reused as a stand-in. */
typedef CPU_Interrupt_frame CPU_Exception_frame;

/* Prints the given exception frame -- presumably for fatal error
 * reporting; confirm against the caller in the fatal error handler. */
void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
1054
/*
 *  CPU_swap_u32
 *
 *  Swaps the byte order of a 32-bit value (endian conversion).  It must
 *  be static because it is referenced indirectly.
 *
 *  The SPARC has no byte-swap instruction, so this portable
 *  mask-and-shift formulation is about as good as the generic code gets;
 *  the compiler produces comparable output for any equivalent spelling.
 */

static inline uint32_t CPU_swap_u32(
  uint32_t value
)
{
  uint32_t swapped;

  swapped  = (value & 0x000000ffU) << 24;   /* byte 0 -> byte 3 */
  swapped |= (value & 0x0000ff00U) << 8;    /* byte 1 -> byte 2 */
  swapped |= (value & 0x00ff0000U) >> 8;    /* byte 2 -> byte 1 */
  swapped |=  value                >> 24;   /* byte 3 -> byte 0 */

  return swapped;
}
1088
/* Swaps the byte order of a 16-bit value.  Every use of the macro
 * argument is parenthesized: the previous version expanded "value"
 * bare, so an expression argument such as (a | b) bound incorrectly
 * against & and >>. */
#define CPU_swap_u16( value ) \
  (((((value)) & 0xff) << 8) | (((value) >> 8) & 0xff))
1091
/* Type of a value of the free-running CPU counter (32-bit ticks); used
 * to measure short time intervals, e.g. for profiling and busy waits. */
typedef uint32_t CPU_Counter_ticks;

/* Returns the current value of the free-running CPU counter. */
CPU_Counter_ticks _CPU_Counter_read( void );
1095
1096static inline CPU_Counter_ticks _CPU_Counter_difference(
1097  CPU_Counter_ticks second,
1098  CPU_Counter_ticks first
1099)
1100{
1101  return second - first;
1102}
1103
1104#endif /* ASM */
1105
1106#ifdef __cplusplus
1107}
1108#endif
1109
1110#endif
Note: See TracBrowser for help on using the repository browser.