source: rtems/cpukit/score/cpu/sparc/rtems/score/cpu.h @ 06dcaf0

4.115
Last change on this file since 06dcaf0 was 06dcaf0, checked in by Joel Sherrill <joel.sherrill@…>, on 03/16/11 at 20:05:06

2011-03-16 Jennifer Averett <jennifer.averett@…>

PR 1729/cpukit

  • configure.ac, sapi/include/confdefs.h, sapi/src/exinit.c, score/Makefile.am, score/preinstall.am, score/cpu/i386/rtems/score/cpu.h, score/cpu/sparc/cpu_asm.S, score/cpu/sparc/rtems/score/cpu.h, score/include/rtems/score/basedefs.h, score/include/rtems/score/context.h, score/include/rtems/score/percpu.h, score/src/percpu.c, score/src/thread.c, score/src/threadcreateidle.c: Add next step in SMP support. This adds an allocated array of the Per_CPU structures to support multiple cpus vs a single instance of the structure which is still used if SMP support is disabled. Configuration support is also added to explicitly enable or disable SMP. But SMP can only be enabled for the CPUs which will support it initially -- SPARC and i386. With the stub BSP support, a BSP can be run as a single core SMP system from an RTEMS data structure standpoint.
  • aclocal/check-smp.m4, aclocal/enable-smp.m4, score/include/rtems/bspsmp.h, score/include/rtems/score/smplock.h, score/src/smp.c, score/src/smplock.c: New files.
  • Property mode set to 100644
File size: 28.9 KB
Line 
1/**
2 * @file rtems/score/cpu.h
3 */
4
5/*
6 *  This include file contains information pertaining to the port of
7 *  the executive to the SPARC processor.
8 *
9 *  COPYRIGHT (c) 1989-2011.
10 *  On-Line Applications Research Corporation (OAR).
11 *
12 *  The license and distribution terms for this file may be
13 *  found in the file LICENSE in this distribution or at
14 *  http://www.rtems.com/license/LICENSE.
15 *
16 *  $Id$
17 */
18
19#ifndef _RTEMS_SCORE_CPU_H
20#define _RTEMS_SCORE_CPU_H
21
22#ifdef __cplusplus
23extern "C" {
24#endif
25
26#include <rtems/score/types.h>
27#include <rtems/score/sparc.h>
28
29/* conditional compilation parameters */
30
31/*
32 *  Should the calls to _Thread_Enable_dispatch be inlined?
33 *
34 *  If TRUE, then they are inlined.
35 *  If FALSE, then a subroutine call is made.
36 */
37
38#define CPU_INLINE_ENABLE_DISPATCH       TRUE
39
40/*
41 *  Should the body of the search loops in _Thread_queue_Enqueue_priority
42 *  be unrolled one time?  In unrolled each iteration of the loop examines
43 *  two "nodes" on the chain being searched.  Otherwise, only one node
44 *  is examined per iteration.
45 *
46 *  If TRUE, then the loops are unrolled.
47 *  If FALSE, then the loops are not unrolled.
48 *
49 *  This parameter could go either way on the SPARC.  The interrupt flash
50 *  code is relatively lengthy given the requirements for nops following
51 *  writes to the psr.  But if the clock speed were high enough, this would
52 *  not represent a great deal of time.
53 */
54
55#define CPU_UNROLL_ENQUEUE_PRIORITY      TRUE
56
57/*
58 *  Does the executive manage a dedicated interrupt stack in software?
59 *
60 *  If TRUE, then a stack is allocated in _ISR_Handler_initialization.
61 *  If FALSE, nothing is done.
62 *
63 *  The SPARC does not have a dedicated HW interrupt stack and one has
64 *  been implemented in SW.
65 */
66
67#define CPU_HAS_SOFTWARE_INTERRUPT_STACK   TRUE
68
69/*
70 *  Does the CPU follow the simple vectored interrupt model?
71 *
72 *  If TRUE, then RTEMS allocates the vector table it internally manages.
73 *  If FALSE, then the BSP is assumed to allocate and manage the vector
74 *  table
75 *
76 *  SPARC Specific Information:
77 *
78 *  XXX document implementation including references if appropriate
79 */
80#define CPU_SIMPLE_VECTORED_INTERRUPTS TRUE
81
82/*
83 *  Does this CPU have hardware support for a dedicated interrupt stack?
84 *
85 *  If TRUE, then it must be installed during initialization.
86 *  If FALSE, then no installation is performed.
87 *
88 *  The SPARC does not have a dedicated HW interrupt stack.
89 */
90
91#define CPU_HAS_HARDWARE_INTERRUPT_STACK  FALSE
92
93/*
94 *  Do we allocate a dedicated interrupt stack in the Interrupt Manager?
95 *
96 *  If TRUE, then the memory is allocated during initialization.
97 *  If FALSE, then the memory is not allocated by RTEMS and must be supplied by the application or BSP.
98 */
99
100#define CPU_ALLOCATE_INTERRUPT_STACK      TRUE
101
102/*
103 *  Does the RTEMS invoke the user's ISR with the vector number and
104 *  a pointer to the saved interrupt frame (1) or just the vector
105 *  number (0)?
106 */
107
108#define CPU_ISR_PASSES_FRAME_POINTER 0
109
110/*
111 *  Does the CPU have hardware floating point?
112 *
113 *  If TRUE, then the FLOATING_POINT task attribute is supported.
114 *  If FALSE, then the FLOATING_POINT task attribute is ignored.
115 */
116
117#if ( SPARC_HAS_FPU == 1 )
118#define CPU_HARDWARE_FP     TRUE
119#else
120#define CPU_HARDWARE_FP     FALSE
121#endif
122#define CPU_SOFTWARE_FP     FALSE
123
124/*
125 *  Are all tasks FLOATING_POINT tasks implicitly?
126 *
127 *  If TRUE, then the FLOATING_POINT task attribute is assumed.
128 *  If FALSE, then the FLOATING_POINT task attribute is followed.
129 */
130
131#define CPU_ALL_TASKS_ARE_FP     FALSE
132
133/*
134 *  Should the IDLE task have a floating point context?
135 *
136 *  If TRUE, then the IDLE task is created as a FLOATING_POINT task
137 *  and it has a floating point context which is switched in and out.
138 *  If FALSE, then the IDLE task does not have a floating point context.
139 */
140
141#define CPU_IDLE_TASK_IS_FP      FALSE
142
143/*
144 *  Should the saving of the floating point registers be deferred
145 *  until a context switch is made to another different floating point
146 *  task?
147 *
148 *  If TRUE, then the floating point context will not be stored until
149 *  necessary.  It will remain in the floating point registers and not
150 *  disturbed until another floating point task is switched to.
151 *
152 *  If FALSE, then the floating point context is saved when a floating
153 *  point task is switched out and restored when the next floating point
154 *  task is restored.  The state of the floating point registers between
155 *  those two operations is not specified.
156 */
157
158#define CPU_USE_DEFERRED_FP_SWITCH       TRUE
159
160/*
161 *  Does this port provide a CPU dependent IDLE task implementation?
162 *
163 *  If TRUE, then the routine _CPU_Thread_Idle_body
164 *  must be provided and is the default IDLE thread body instead of
165 *  _Thread_Idle_body.
166 *
167 *  If FALSE, then use the generic IDLE thread body if the BSP does
168 *  not provide one.
169 */
170
171#define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE
172
173/*
174 *  Does the stack grow up (toward higher addresses) or down
175 *  (toward lower addresses)?
176 *
177 *  If TRUE, then the stack grows upward.
178 *  If FALSE, then the stack grows toward smaller addresses.
179 *
180 *  The stack grows to lower addresses on the SPARC.
181 */
182
183#define CPU_STACK_GROWS_UP               FALSE
184
185/*
186 *  The following is the variable attribute used to force alignment
187 *  of critical data structures.  On some processors it may make
188 *  sense to have these aligned on tighter boundaries than
189 *  the minimum requirements of the compiler in order to have as
190 *  much of the critical data area as possible in a cache line.
191 *
192 *  The SPARC does not appear to have particularly strict alignment
193 *  requirements.  This value was chosen to take advantages of caches.
194 */
195
196#define CPU_STRUCTURE_ALIGNMENT          __attribute__ ((aligned (16)))
197
198/*
199 *  Define what is required to specify how the network to host conversion
200 *  routines are handled.
201 */
202
203#define CPU_BIG_ENDIAN                           TRUE
204#define CPU_LITTLE_ENDIAN                        FALSE
205
206/*
207 *  The following defines the number of bits actually used in the
208 *  interrupt field of the task mode.  How those bits map to the
209 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
210 *
211 *  The SPARC has 16 interrupt levels in the PIL field of the PSR.
212 */
213
214#define CPU_MODES_INTERRUPT_MASK   0x0000000F
215
216/*
217 *  This structure represents the organization of the minimum stack frame
218 *  for the SPARC.  More framing information is required in certain situations
219 *  such as when there are a large number of out parameters or when the callee
220 *  must save floating point registers.
221 */
222
223#ifndef ASM
224
typedef struct {
  /* local registers %l0-%l7 */
  uint32_t    l0;
  uint32_t    l1;
  uint32_t    l2;
  uint32_t    l3;
  uint32_t    l4;
  uint32_t    l5;
  uint32_t    l6;
  uint32_t    l7;
  /* input registers %i0-%i7; %i6 doubles as the frame pointer */
  uint32_t    i0;
  uint32_t    i1;
  uint32_t    i2;
  uint32_t    i3;
  uint32_t    i4;
  uint32_t    i5;
  uint32_t    i6_fp;
  uint32_t    i7;
  /* slot for the callee to find a struct-return destination */
  void       *structure_return_address;
  /*
   *  The following are for the callee to save the register arguments in
   *  should this be necessary.
   */
  uint32_t    saved_arg0;
  uint32_t    saved_arg1;
  uint32_t    saved_arg2;
  uint32_t    saved_arg3;
  uint32_t    saved_arg4;
  uint32_t    saved_arg5;
  /* pad word keeping the frame size (0x60) double word aligned;
     layout must stay in sync with the CPU_STACK_FRAME_*_OFFSET defines */
  uint32_t    pad0;
}  CPU_Minimum_stack_frame;
255
256#endif /* ASM */
257
258#define CPU_STACK_FRAME_L0_OFFSET             0x00
259#define CPU_STACK_FRAME_L1_OFFSET             0x04
260#define CPU_STACK_FRAME_L2_OFFSET             0x08
261#define CPU_STACK_FRAME_L3_OFFSET             0x0c
262#define CPU_STACK_FRAME_L4_OFFSET             0x10
263#define CPU_STACK_FRAME_L5_OFFSET             0x14
264#define CPU_STACK_FRAME_L6_OFFSET             0x18
265#define CPU_STACK_FRAME_L7_OFFSET             0x1c
266#define CPU_STACK_FRAME_I0_OFFSET             0x20
267#define CPU_STACK_FRAME_I1_OFFSET             0x24
268#define CPU_STACK_FRAME_I2_OFFSET             0x28
269#define CPU_STACK_FRAME_I3_OFFSET             0x2c
270#define CPU_STACK_FRAME_I4_OFFSET             0x30
271#define CPU_STACK_FRAME_I5_OFFSET             0x34
272#define CPU_STACK_FRAME_I6_FP_OFFSET          0x38
273#define CPU_STACK_FRAME_I7_OFFSET             0x3c
274#define CPU_STRUCTURE_RETURN_ADDRESS_OFFSET   0x40
275#define CPU_STACK_FRAME_SAVED_ARG0_OFFSET     0x44
276#define CPU_STACK_FRAME_SAVED_ARG1_OFFSET     0x48
277#define CPU_STACK_FRAME_SAVED_ARG2_OFFSET     0x4c
278#define CPU_STACK_FRAME_SAVED_ARG3_OFFSET     0x50
279#define CPU_STACK_FRAME_SAVED_ARG4_OFFSET     0x54
280#define CPU_STACK_FRAME_SAVED_ARG5_OFFSET     0x58
281#define CPU_STACK_FRAME_PAD0_OFFSET           0x5c
282
283#define CPU_MINIMUM_STACK_FRAME_SIZE          0x60
284
285/*
286 * Contexts
287 *
288 *  Generally there are 2 types of context to save.
289 *     1. Interrupt registers to save
290 *     2. Task level registers to save
291 *
292 *  This means we have the following 3 context items:
293 *     1. task level context stuff::  Context_Control
294 *     2. floating point task stuff:: Context_Control_fp
295 *     3. special interrupt level context :: Context_Control_interrupt
296 *
297 *  On the SPARC, we are relatively conservative in that we save most
298 *  of the CPU state in the context area.  The ET (enable trap) bit and
299 *  the CWP (current window pointer) fields of the PSR are considered
300 *  system wide resources and are not maintained on a per-thread basis.
301 */
302
303#ifndef ASM
304
typedef struct {
    /*
     *  Using a double g0_g1 will put everything in this structure on a
     *  double word boundary which allows us to use double word loads
     *  and stores safely in the context switch.
     */
    double     g0_g1;   /* global registers %g0 and %g1 packed as one double */
    uint32_t   g2;      /* global registers %g2-%g7 */
    uint32_t   g3;
    uint32_t   g4;
    uint32_t   g5;
    uint32_t   g6;
    uint32_t   g7;

    uint32_t   l0;      /* local registers %l0-%l7 */
    uint32_t   l1;
    uint32_t   l2;
    uint32_t   l3;
    uint32_t   l4;
    uint32_t   l5;
    uint32_t   l6;
    uint32_t   l7;

    uint32_t   i0;      /* input registers %i0-%i7; %i6 is the frame pointer */
    uint32_t   i1;
    uint32_t   i2;
    uint32_t   i3;
    uint32_t   i4;
    uint32_t   i5;
    uint32_t   i6_fp;
    uint32_t   i7;

    uint32_t   o0;      /* output registers %o0-%o7; %o6 is the stack pointer */
    uint32_t   o1;
    uint32_t   o2;
    uint32_t   o3;
    uint32_t   o4;
    uint32_t   o5;
    uint32_t   o6_sp;
    uint32_t   o7;

    uint32_t   psr;     /* processor state register */
    /* per-thread copy of the ISR dispatch disable flag (see
       _CPU_ISR_Dispatch_disable below); context switched with the thread.
       Layout must stay in sync with the *_OFFSET defines that follow. */
    uint32_t   isr_dispatch_disable;
} Context_Control;
349
350#define _CPU_Context_Get_SP( _context ) \
351  (_context)->o6_sp
352
353#endif /* ASM */
354
355/*
356 *  Offsets of fields with Context_Control for assembly routines.
357 */
358
359#define G0_OFFSET    0x00
360#define G1_OFFSET    0x04
361#define G2_OFFSET    0x08
362#define G3_OFFSET    0x0C
363#define G4_OFFSET    0x10
364#define G5_OFFSET    0x14
365#define G6_OFFSET    0x18
366#define G7_OFFSET    0x1C
367
368#define L0_OFFSET    0x20
369#define L1_OFFSET    0x24
370#define L2_OFFSET    0x28
371#define L3_OFFSET    0x2C
372#define L4_OFFSET    0x30
373#define L5_OFFSET    0x34
374#define L6_OFFSET    0x38
375#define L7_OFFSET    0x3C
376
377#define I0_OFFSET    0x40
378#define I1_OFFSET    0x44
379#define I2_OFFSET    0x48
380#define I3_OFFSET    0x4C
381#define I4_OFFSET    0x50
382#define I5_OFFSET    0x54
383#define I6_FP_OFFSET 0x58
384#define I7_OFFSET    0x5C
385
386#define O0_OFFSET    0x60
387#define O1_OFFSET    0x64
388#define O2_OFFSET    0x68
389#define O3_OFFSET    0x6C
390#define O4_OFFSET    0x70
391#define O5_OFFSET    0x74
392#define O6_SP_OFFSET 0x78
393#define O7_OFFSET    0x7C
394
395#define PSR_OFFSET   0x80
396#define ISR_DISPATCH_DISABLE_STACK_OFFSET 0x84
397
398#define CONTEXT_CONTROL_SIZE 0x88
399
400/*
401 *  The floating point context area.
402 */
403
404#ifndef ASM
405
typedef struct {
    /* FP registers %f0-%f31 stored as adjacent pairs so double word
       loads/stores can be used, as with g0_g1 in Context_Control */
    double      f0_f1;
    double      f2_f3;
    double      f4_f5;
    double      f6_f7;
    double      f8_f9;
    double      f10_f11;
    double      f12_f13;
    double      f14_f15;
    double      f16_f17;
    double      f18_f19;
    double      f20_f21;
    double      f22_f23;
    double      f24_f25;
    double      f26_f27;
    double      f28_f29;
    double      f30_f31;
    uint32_t    fsr;     /* floating point state register */
} Context_Control_fp;
425
426#endif /* ASM */
427
428/*
429 *  Offsets of fields with Context_Control_fp for assembly routines.
430 */
431
432#define FO_F1_OFFSET     0x00
433#define F2_F3_OFFSET     0x08
434#define F4_F5_OFFSET     0x10
435#define F6_F7_OFFSET     0x18
436#define F8_F9_OFFSET     0x20
437#define F1O_F11_OFFSET   0x28
438#define F12_F13_OFFSET   0x30
439#define F14_F15_OFFSET   0x38
440#define F16_F17_OFFSET   0x40
441#define F18_F19_OFFSET   0x48
442#define F2O_F21_OFFSET   0x50
443#define F22_F23_OFFSET   0x58
444#define F24_F25_OFFSET   0x60
445#define F26_F27_OFFSET   0x68
446#define F28_F29_OFFSET   0x70
447#define F3O_F31_OFFSET   0x78
448#define FSR_OFFSET       0x80
449
450#define CONTEXT_CONTROL_FP_SIZE 0x84
451
452#ifndef ASM
453
454/*
455 *  Context saved on stack for an interrupt.
456 *
457 *  NOTE:  The PSR, PC, and NPC are only saved in this structure for the
458 *         benefit of the user's handler.
459 */
460
typedef struct {
  CPU_Minimum_stack_frame  Stack_frame;  /* standard minimum stack frame */
  /* psr, pc, and npc are saved here only for the benefit of the
     user's handler (see NOTE above) */
  uint32_t                 psr;
  uint32_t                 pc;
  uint32_t                 npc;
  uint32_t                 g1;           /* global registers %g1-%g7 */
  uint32_t                 g2;
  uint32_t                 g3;
  uint32_t                 g4;
  uint32_t                 g5;
  uint32_t                 g6;
  uint32_t                 g7;
  uint32_t                 i0;           /* input registers %i0-%i7 */
  uint32_t                 i1;
  uint32_t                 i2;
  uint32_t                 i3;
  uint32_t                 i4;
  uint32_t                 i5;
  uint32_t                 i6_fp;        /* %i6 is the frame pointer */
  uint32_t                 i7;
  uint32_t                 y;            /* %y register */
  /* NOTE(review): presumably the trap-time PC (see ISF_TPC_OFFSET) --
     confirm against cpu_asm.S */
  uint32_t                 tpc;
} CPU_Interrupt_frame;
484
485#endif /* ASM */
486
487/*
488 *  Offsets of fields with CPU_Interrupt_frame for assembly routines.
489 */
490
#define ISF_STACK_FRAME_OFFSET 0x00

/*
 *  Offsets of the fields following the embedded minimum stack frame.
 *  Each expansion is parenthesized so the macros remain correct inside
 *  larger expressions (e.g. scaling or comparison) in both the C and
 *  assembly sources that use them.
 */
#define ISF_PSR_OFFSET         (CPU_MINIMUM_STACK_FRAME_SIZE + 0x00)
#define ISF_PC_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x04)
#define ISF_NPC_OFFSET         (CPU_MINIMUM_STACK_FRAME_SIZE + 0x08)
#define ISF_G1_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x0c)
#define ISF_G2_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x10)
#define ISF_G3_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x14)
#define ISF_G4_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x18)
#define ISF_G5_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x1c)
#define ISF_G6_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x20)
#define ISF_G7_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x24)
#define ISF_I0_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x28)
#define ISF_I1_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x2c)
#define ISF_I2_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x30)
#define ISF_I3_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x34)
#define ISF_I4_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x38)
#define ISF_I5_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x3c)
#define ISF_I6_FP_OFFSET       (CPU_MINIMUM_STACK_FRAME_SIZE + 0x40)
#define ISF_I7_OFFSET          (CPU_MINIMUM_STACK_FRAME_SIZE + 0x44)
#define ISF_Y_OFFSET           (CPU_MINIMUM_STACK_FRAME_SIZE + 0x48)
#define ISF_TPC_OFFSET         (CPU_MINIMUM_STACK_FRAME_SIZE + 0x4c)

/* total size of a CPU_Interrupt_frame */
#define CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE (CPU_MINIMUM_STACK_FRAME_SIZE + 0x50)
514#ifndef ASM
515/*
516 *  This variable contains the initial context for the FP unit.
517 *  It is filled in by _CPU_Initialize and copied into the task's FP
518 *  context area during _CPU_Context_Initialize.
519 */
520
521SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context CPU_STRUCTURE_ALIGNMENT;
522
523/*
524 *  This flag is context switched with each thread.  It indicates
525 *  that THIS thread has an _ISR_Dispatch stack frame on its stack.
526 *  By using this flag, we can avoid nesting more interrupt dispatching
527 *  attempts on a previously interrupted thread's stack.
528 */
529
530SCORE_EXTERN volatile uint32_t _CPU_ISR_Dispatch_disable;
531
532/*
533 *  The following type defines an entry in the SPARC's trap table.
534 *
535 *  NOTE: The instructions chosen are RTEMS dependent although one is
536 *        obligated to use two of the four instructions to perform a
537 *        long jump.  The other instructions load one register with the
538 *        trap type (a.k.a. vector) and another with the psr.
539 */
540
/* One trap table slot: four 32-bit instructions (16 bytes). */
typedef struct {
  uint32_t     mov_psr_l0;                     /* mov   %psr, %l0           */
  uint32_t     sethi_of_handler_to_l4;         /* sethi %hi(_handler), %l4  */
  uint32_t     jmp_to_low_of_handler_plus_l4;  /* jmp   %l4 + %lo(_handler) */
  uint32_t     mov_vector_l3;                  /* mov   _vector, %l3        */
} CPU_Trap_table_entry;
547
548/*
549 *  This is the set of opcodes for the instructions loaded into a trap
550 *  table entry.  The routine which installs a handler is responsible
551 *  for filling in the fields for the _handler address and the _vector
552 *  trap type.
553 *
554 *  The constants following this structure are masks for the fields which
555 *  must be filled in when the handler is installed.
556 */
557
558extern const CPU_Trap_table_entry _CPU_Trap_slot_template;
559
560/*
561 *  The size of the floating point context area.
562 */
563
564#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
565
566#endif
567
568/*
569 *  Amount of extra stack (above minimum stack size) required by
570 *  MPCI receive server thread.  Remember that in a multiprocessor
571 *  system this thread must exist and be able to process all directives.
572 */
573
574#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 1024
575
576/*
577 *  This defines the number of entries in the ISR_Vector_table managed
578 *  by the executive.
579 *
580 *  On the SPARC, there are really only 256 vectors.  However, the executive
581 *  has no easy, fast, reliable way to determine which traps are synchronous
582 *  and which are asynchronous.  By default, synchronous traps return to the
583 *  instruction which caused the interrupt.  So if you install a software
584 *  trap handler as an executive interrupt handler (which is desirable since
585 *  RTEMS takes care of window and register issues), then the executive needs
586 *  to know that the return address is to the trap rather than the instruction
587 *  following the trap.
588 *
589 *  So vectors 0 through 255 are treated as regular asynchronous traps which
590 *  provide the "correct" return address.  Vectors 256 through 512 are assumed
591 *  by the executive to be synchronous and to require that the return address
592 *  be fudged.
593 *
594 *  If you use this mechanism to install a trap handler which must reexecute
595 *  the instruction which caused the trap, then it should be installed as
596 *  an asynchronous trap.  This will avoid the executive changing the return
597 *  address.
598 */
599
600#define CPU_INTERRUPT_NUMBER_OF_VECTORS     256
601#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER 511
602
603#define SPARC_SYNCHRONOUS_TRAP_BIT_MASK     0x100
604#define SPARC_ASYNCHRONOUS_TRAP( _trap )    (_trap)
605#define SPARC_SYNCHRONOUS_TRAP( _trap )     ((_trap) + 256 )
606
607#define SPARC_REAL_TRAP_NUMBER( _trap )     ((_trap) % 256)
608
609/*
610 *  This is defined if the port has a special way to report the ISR nesting
611 *  level.  Most ports maintain the variable _ISR_Nest_level.
612 */
613
614#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
615
616/*
617 *  Should be large enough to run all tests.  This ensures
618 *  that a "reasonable" small application should not have any problems.
619 *
620 *  This appears to be a fairly generous number for the SPARC since
621 *  represents a call depth of about 20 routines based on the minimum
622 *  stack frame.
623 */
624
625#define CPU_STACK_MINIMUM_SIZE  (1024*4)
626
627/*
628 *  CPU's worst alignment requirement for data types on a byte boundary.  This
629 *  alignment does not take into account the requirements for the stack.
630 *
631 *  On the SPARC, this is required for double word loads and stores.
632 */
633
634#define CPU_ALIGNMENT      8
635
636/*
637 *  This number corresponds to the byte alignment requirement for the
638 *  heap handler.  This alignment requirement may be stricter than that
639 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
640 *  common for the heap to follow the same alignment requirement as
641 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
642 *  then this should be set to CPU_ALIGNMENT.
643 *
644 *  NOTE:  This does not have to be a power of 2.  It does have to
645 *         be greater or equal to than CPU_ALIGNMENT.
646 */
647
648#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT
649
650/*
651 *  This number corresponds to the byte alignment requirement for memory
652 *  buffers allocated by the partition manager.  This alignment requirement
653 *  may be stricter than that for the data types alignment specified by
654 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
655 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
656 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
657 *
658 *  NOTE:  This does not have to be a power of 2.  It does have to
659 *         be greater or equal to than CPU_ALIGNMENT.
660 */
661
662#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT
663
664/*
665 *  This number corresponds to the byte alignment requirement for the
666 *  stack.  This alignment requirement may be stricter than that for the
667 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
668 *  is strict enough for the stack, then this should be set to 0.
669 *
670 *  NOTE:  This must be a power of 2 either 0 or greater than CPU_ALIGNMENT.
671 *
672 *  The alignment restrictions for the SPARC are not that strict but this
673 *  should ensure that the stack is always sufficiently aligned that the
674 *  window overflow, underflow, and flush routines can use double word loads
675 *  and stores.
676 */
677
678#define CPU_STACK_ALIGNMENT        16
679
680#ifndef ASM
681
682/*
683 *  ISR handler macros
684 */
685
686/*
687 *  Support routine to initialize the RTEMS vector table after it is allocated.
688 */
689
690#define _CPU_Initialize_vectors()
691
692/*
693 *  Disable all interrupts for a critical section.  The previous
694 *  level is returned in _level.
695 */
696
697#define _CPU_ISR_Disable( _level ) \
698  (_level) = sparc_disable_interrupts()
699
700/*
701 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
702 *  This indicates the end of a critical section.  The parameter
703 *  _level is not modified.
704 */
705
706#define _CPU_ISR_Enable( _level ) \
707  sparc_enable_interrupts( _level )
708
709/*
710 *  This temporarily restores the interrupt to _level before immediately
711 *  disabling them again.  This is used to divide long critical
712 *  sections into two or more parts.  The parameter _level is not
713 *  modified.
714 */
715
716#define _CPU_ISR_Flash( _level ) \
717  sparc_flash_interrupts( _level )
718
719/*
720 *  Map interrupt level in task mode onto the hardware that the CPU
721 *  actually provides.  Currently, interrupt levels which do not
722 *  map onto the CPU in a straight fashion are undefined.
723 */
724
725#define _CPU_ISR_Set_level( _newlevel ) \
726   sparc_enable_interrupts( _newlevel << 8)
727
728uint32_t   _CPU_ISR_Get_level( void );
729
730/* end of ISR handler macros */
731
732/* Context handler macros */
733
734/*
735 *  Initialize the context to a state suitable for starting a
736 *  task after a context restore operation.  Generally, this
737 *  involves:
738 *
739 *     - setting a starting address
740 *     - preparing the stack
741 *     - preparing the stack and frame pointers
742 *     - setting the proper interrupt level in the context
743 *     - initializing the floating point context
744 *
745 *  NOTE:  Implemented as a subroutine for the SPARC port.
746 */
747
748void _CPU_Context_Initialize(
749  Context_Control  *the_context,
750  uint32_t         *stack_base,
751  uint32_t          size,
752  uint32_t          new_level,
753  void             *entry_point,
754  bool              is_fp
755);
756
757/*
758 *  This macro is invoked from _Thread_Handler to do whatever CPU
759 *  specific magic is required that must be done in the context of
760 *  the thread when it starts.
761 *
762 *  On the SPARC, this is setting the frame pointer so GDB is happy.
763 *  Make GDB stop unwinding at _Thread_Handler, previous register window
764 *  Frame pointer is 0 and calling address must be a function with starting
765 *  with a SAVE instruction. If return address is leaf-function (no SAVE)
766 *  GDB will not look at prev reg window fp.
767 *
768 *  _Thread_Handler is known to start with SAVE.
769 */
770
771#define _CPU_Context_Initialization_at_thread_begin() \
772  do { \
773    __asm__ volatile ("set _Thread_Handler,%%i7\n"::); \
774  } while (0)
775
776/*
777 *  This routine is responsible for somehow restarting the currently
778 *  executing task.
779 *
780 *  On the SPARC, this is relatively painless but requires a small
781 *  amount of wrapper code before using the regular restore code in
782 *  of the context switch.
783 */
784
785#define _CPU_Context_Restart_self( _the_context ) \
786   _CPU_Context_restore( (_the_context) );
787
788/*
789 *  The FP context area for the SPARC is a simple structure and nothing
790 *  special is required to find the "starting load point"
791 */
792
793#define _CPU_Context_Fp_start( _base, _offset ) \
794   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
795
796/*
797 *  This routine initializes the FP context area passed to it to.
798 *
799 *  The SPARC allows us to use the simple initialization model
800 *  in which an "initial" FP context was saved into _CPU_Null_fp_context
801 *  at CPU initialization and it is simply copied into the destination
802 *  context.
803 */
804
/*
 *  _destination is a pointer to a pointer to the FP context area, hence
 *  the double dereference: the pointed-to area is overwritten with the
 *  pristine copy saved into _CPU_Null_fp_context by _CPU_Initialize.
 */
#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
   *(*(_destination)) = _CPU_Null_fp_context; \
  } while (0)
809
810/* end of Context handler macros */
811
812/* Fatal Error manager macros */
813
814/*
815 *  This routine copies _error into a known place -- typically a stack
816 *  location or a register, optionally disables interrupts, and
817 *  halts/stops the CPU.
818 */
819
/*
 *  Disable interrupts, copy _error into %g1 (a known place for
 *  post-mortem inspection), and spin forever.
 *
 *  NOTE(review): the original bound "level" rather than "_error" as the
 *  asm input, so the error code was never captured even though the
 *  description above says it is copied to a known place.
 */
#define _CPU_Fatal_halt( _error ) \
  do { \
    uint32_t   level; \
    \
    level = sparc_disable_interrupts(); \
    __asm__ volatile ( "mov  %0, %%g1 " : "=r" (level) : "0" (_error) ); \
    while (1); /* loop forever */ \
  } while (0)
828
829/* end of Fatal Error manager macros */
830
831/* Bitfield handler macros */
832
833/*
834 *  The SPARC port uses the generic C algorithm for bitfield scan if the
835 *  CPU model does not have a scan instruction.
836 */
837
838#if ( SPARC_HAS_BITSCAN == 0 )
839#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
840#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
841#else
842#error "scan instruction not currently supported by RTEMS!!"
843#endif
844
845/* end of Bitfield handler macros */
846
847/* Priority handler handler macros */
848
849/*
850 *  The SPARC port uses the generic C algorithm for bitfield scan if the
851 *  CPU model does not have a scan instruction.
852 */
853
854#if ( SPARC_HAS_BITSCAN == 1 )
855#error "scan instruction not currently supported by RTEMS!!"
856#endif
857
858/* end of Priority handler macros */
859
860/* functions */
861
862/*
863 *  _CPU_Initialize
864 *
865 *  This routine performs CPU dependent initialization.
866 */
867
868void _CPU_Initialize(void);
869
870/*
871 *  _CPU_ISR_install_raw_handler
872 *
873 *  This routine installs new_handler to be directly called from the trap
874 *  table.
875 */
876
877void _CPU_ISR_install_raw_handler(
878  uint32_t    vector,
879  proc_ptr    new_handler,
880  proc_ptr   *old_handler
881);
882
883/*
884 *  _CPU_ISR_install_vector
885 *
886 *  This routine installs an interrupt vector.
887 */
888
889void _CPU_ISR_install_vector(
890  uint32_t    vector,
891  proc_ptr    new_handler,
892  proc_ptr   *old_handler
893);
894
895#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)
896
897/*
898 *  _CPU_Thread_Idle_body
899 *
900 *  Some SPARC implementations have low power, sleep, or idle modes.  This
901 *  tries to take advantage of those models.
902 */
903
904void *_CPU_Thread_Idle_body( uintptr_t ignored );
905
906#endif /* CPU_PROVIDES_IDLE_THREAD_BODY */
907
908/*
909 *  _CPU_Context_switch
910 *
911 *  This routine switches from the run context to the heir context.
912 */
913
914void _CPU_Context_switch(
915  Context_Control  *run,
916  Context_Control  *heir
917);
918
919/*
920 *  _CPU_Context_restore
921 *
922 *  This routine is generally used only to restart self in an
923 *  efficient manner.
924 */
925
926void _CPU_Context_restore(
927  Context_Control *new_context
928) RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;
929
#if defined(RTEMS_SMP)
  /*
   *  _CPU_Context_switch_to_first_task_smp
   *
   *  This routine is only used to switch to the first task on a
   *  secondary core in an SMP configuration.  We do not need to
   *  flush all the windows and, in fact, this can be dangerous
   *  as they may or may not be initialized properly.
   */
  void _CPU_Context_switch_to_first_task_smp(
    Context_Control *new_context
  );

  /* address space 1 is uncacheable */
  /*
   *  Atomically exchange _value with the word at _address using the
   *  SPARC "swapa" (swap alternate space) instruction; the previous
   *  memory contents end up in _previous.  The "i" (1) operand selects
   *  ASI 1, the uncacheable address space noted above.
   */
  #define SMP_CPU_SWAP( _address, _value, _previous ) \
    do { \
      register unsigned int _val = _value; \
      asm volatile( \
        "swapa [%2] %3, %0" : \
        "=r" (_val) : \
        "0" (_val), \
        "r" (_address), \
        "i" (1) \
      ); \
      _previous = _val; \
    } while (0)

#endif
958
959/*
960 *  _CPU_Context_save_fp
961 *
962 *  This routine saves the floating point context passed to it.
963 */
964
965void _CPU_Context_save_fp(
966  Context_Control_fp **fp_context_ptr
967);
968
969/*
970 *  _CPU_Context_restore_fp
971 *
972 *  This routine restores the floating point context passed to it.
973 */
974
975void _CPU_Context_restore_fp(
976  Context_Control_fp **fp_context_ptr
977);
978
979
980/*
981 *  CPU_swap_u32
982 *
983 *  The following routine swaps the endian format of an unsigned int.
984 *  It must be static because it is referenced indirectly.
985 *
986 *  This version will work on any processor, but if you come across a better
987 *  way for the SPARC PLEASE use it.  The most common way to swap a 32-bit
988 *  entity as shown below is not any more efficient on the SPARC.
989 *
990 *     swap least significant two bytes with 16-bit rotate
991 *     swap upper and lower 16-bits
992 *     swap most significant two bytes with 16-bit rotate
993 *
994 *  It is not obvious how the SPARC can do significantly better than the
995 *  generic code.  gcc 2.7.0 only generates about 12 instructions for the
996 *  following code at optimization level four (i.e. -O4).
997 */
998
/*
 *  CPU_swap_u32
 *
 *  Return value with its four bytes reversed (endian swap).  Kept
 *  static inline because it is referenced indirectly.  This is the
 *  generic mask-and-shift form; per the note above, the SPARC has no
 *  obviously better instruction sequence for a 32-bit swap.
 */
static inline uint32_t CPU_swap_u32(
  uint32_t value
)
{
  uint32_t swapped;

  swapped  = (value & 0x000000ffU) << 24;
  swapped |= (value & 0x0000ff00U) <<  8;
  swapped |= (value & 0x00ff0000U) >>  8;
  swapped |= (value & 0xff000000U) >> 24;

  return swapped;
}
1013
/*
 *  Swap the two bytes of a 16-bit value.  The argument is fully
 *  parenthesized so expression arguments (e.g. CPU_swap_u16(a | b))
 *  expand correctly; the original left "value" bare, which mis-parsed
 *  for operators of lower precedence than << and &.
 */
#define CPU_swap_u16( value ) \
  ((((value) & 0xff) << 8) | (((value) >> 8) & 0xff))
1016
1017#endif /* ASM */
1018
1019#ifdef __cplusplus
1020}
1021#endif
1022
1023#endif
Note: See TracBrowser for help on using the repository browser.