source: rtems/cpukit/score/cpu/powerpc/include/rtems/score/cpu.h @ d8de6b9

Last change on this file since d8de6b9 was 2afb22b, checked in by Chris Johns <chrisj@…>, on 12/23/17 at 07:18:56

Remove make preinstall

A speciality of the RTEMS build system was the make preinstall step. It
copied header files from arbitrary locations into the build tree. The
header files were included via the -Bsome/build/tree/path GCC command
line option.

This has at least seven problems:

  • The make preinstall step itself needs time and disk space.
  • Errors in header files show up in the build tree copy. This makes it hard for editors to open the right file to fix the error.
  • There is no clear relationship between source and build tree header files. This makes an audit of the build process difficult.
  • The visibility of all header files in the build tree makes it difficult to enforce API barriers. For example it is discouraged to use BSP-specifics in the cpukit.
  • An introduction of a new build system is difficult.
  • Include paths specified by the -B option are system headers. This may suppress warnings.
  • The parallel build had sporadic failures on some hosts.

This patch removes the make preinstall step. All installed header
files are moved to dedicated include directories in the source tree.
Let @RTEMS_CPU@ be the target architecture, e.g. arm, powerpc, sparc,
etc. Let @RTEMS_BSP_FAMILIY@ be a BSP family base directory, e.g.
erc32, imx, qoriq, etc.

The new cpukit include directories are:

  • cpukit/include
  • cpukit/score/cpu/@RTEMS_CPU@/include
  • cpukit/libnetworking

The new BSP include directories are:

  • bsps/include
  • bsps/@RTEMS_CPU@/include
  • bsps/@RTEMS_CPU@/@RTEMS_BSP_FAMILIY@/include

There are build tree include directories for generated files.

The include directory order favours the most general header file, e.g.
it is not possible to override general header files via the include path
order.

The "bootstrap -p" option was removed. The new "bootstrap -H" option
should be used to regenerate the "headers.am" files.

Update #3254.

/**
 * @file
 *
 * @brief PowerPC CPU Department Source
 */

/*
 *  COPYRIGHT (c) 1989-2012.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  COPYRIGHT (c) 1995 i-cubed ltd.
 *
 *  To anyone who acknowledges that this file is provided "AS IS"
 *  without any express or implied warranty:
 *      permission to use, copy, modify, and distribute this file
 *      for any purpose is hereby granted without fee, provided that
 *      the above copyright notice and this notice appears in all
 *      copies, and that the name of i-cubed limited not be used in
 *      advertising or publicity pertaining to distribution of the
 *      software without specific, written prior permission.
 *      i-cubed limited makes no representations about the suitability
 *      of this software for any purpose.
 *
 *  Copyright (c) 2001 Andy Dachs <a.dachs@sstl.co.uk>.
 *
 *  Copyright (c) 2001 Surrey Satellite Technology Limited (SSTL).
 *
 *  Copyright (c) 2010, 2017 embedded brains GmbH.
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

#include <rtems/score/types.h>
#include <rtems/score/powerpc.h>
#include <rtems/powerpc/registers.h>

#ifndef ASM
  #include <string.h> /* for memset() */
#endif

#ifdef __cplusplus
extern "C" {
#endif

/* conditional compilation parameters */

/*
 *  Does this port provide a CPU dependent IDLE task implementation?
 *
 *  If TRUE, then the routine _CPU_Thread_Idle_body
 *  must be provided and is the default IDLE thread body instead of
 *  the generic _Thread_Idle_body.
 *
 *  If FALSE, then use the generic IDLE thread body if the BSP does
 *  not provide one.
 *
 *  This is intended to allow for supporting processors which have
 *  a low power or idle mode.  When the IDLE thread is executed, then
 *  the CPU can be powered down.
 *
 *  The order of precedence for selecting the IDLE thread body is:
 *
 *    1.  BSP provided
 *    2.  CPU dependent (if provided)
 *    3.  generic (if no BSP and no CPU dependent)
 */

#define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE

/*
 *  Does the stack grow up (toward higher addresses) or down
 *  (toward lower addresses)?
 *
 *  If TRUE, then the stack grows upward.
 *  If FALSE, then the stack grows toward smaller addresses.
 */

#define CPU_STACK_GROWS_UP               FALSE

#define CPU_CACHE_LINE_BYTES PPC_STRUCTURE_ALIGNMENT

#define CPU_STRUCTURE_ALIGNMENT RTEMS_ALIGNED( CPU_CACHE_LINE_BYTES )

/*
 *  Does the CPU have hardware floating point?
 *
 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
 *
 *  If there is an FP coprocessor such as the i387 or mc68881, then
 *  the answer is TRUE.
 *
 *  The macro name "PPC_HAS_FPU" should be made CPU specific.
 *  It indicates whether or not this CPU model has FP support.  For
 *  example, it would be possible to have an i386_nofp CPU model
 *  which sets this to FALSE to indicate that you have an i386 without
 *  an i387 and wish to leave floating point support out of RTEMS.
 */

#if ( PPC_HAS_FPU == 1 )
#define CPU_HARDWARE_FP     TRUE
#define CPU_SOFTWARE_FP     FALSE
#else
#define CPU_HARDWARE_FP     FALSE
#define CPU_SOFTWARE_FP     FALSE
#endif

/*
 *  Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
 *
 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
 *
 *  If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
 *
 *  PowerPC Note: It appears that GCC can implicitly generate FPU
 *  and AltiVec instructions when you least expect them.  So make
 *  all tasks floating point.
 */

#define CPU_ALL_TASKS_ARE_FP CPU_HARDWARE_FP

/*
 *  Should the IDLE task have a floating point context?
 *
 *  If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
 *  and it has a floating point context which is switched in and out.
 *  If FALSE, then the IDLE task does not have a floating point context.
 *
 *  Setting this to TRUE negatively impacts the time required to preempt
 *  the IDLE task from an interrupt because the floating point context
 *  must be saved as part of the preemption.
 */

#define CPU_IDLE_TASK_IS_FP      FALSE

#define CPU_MAXIMUM_PROCESSORS 32

/*
 *  Processor defined structures required for cpukit/score.
 */

/*
 * Contexts
 *
 *  Generally there are 2 types of context to save.
 *     1. Interrupt registers to save
 *     2. Task level registers to save
 *
 *  This means we have the following 3 context items:
 *     1. task level context stuff::  Context_Control
 *     2. floating point task stuff:: Context_Control_fp
 *     3. special interrupt level context :: Context_Control_interrupt
 *
 *  On some processors, it is cost-effective to save only the callee
 *  preserved registers during a task context switch.  This means
 *  that the ISR code needs to save those registers which do not
 *  persist across function calls.  It is not mandatory to make this
 *  distinction between the caller/callee saved registers for the
 *  purpose of minimizing context saved during task switch and on interrupts.
 *  If the cost of saving extra registers is minimal, simplicity is the
 *  choice.  Save the same context on interrupt entry as for tasks in
 *  this case.
 *
 *  Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
 *  care should be used in designing the context area.
 *
 *  On some CPUs with hardware floating point support, the Context_Control_fp
 *  structure will not be used or it simply consists of an array of a
 *  fixed number of bytes.   This is done when the floating point context
 *  is dumped by a "FP save context" type instruction and the format
 *  is not really defined by the CPU.  In this case, there is no need
 *  to figure out the exact format -- only the size.  Of course, although
 *  this is enough information for RTEMS, it is probably not enough for
 *  a debugger such as gdb.  But that is another problem.
 */

#ifndef __SPE__
  #define PPC_GPR_TYPE uintptr_t
  #if defined(__powerpc64__)
    #define PPC_GPR_SIZE 8
    #define PPC_GPR_LOAD ld
    #define PPC_GPR_STORE std
  #else
    #define PPC_GPR_SIZE 4
    #define PPC_GPR_LOAD lwz
    #define PPC_GPR_STORE stw
  #endif
#else
  #define PPC_GPR_TYPE uint64_t
  #define PPC_GPR_SIZE 8
  #define PPC_GPR_LOAD evldd
  #define PPC_GPR_STORE evstdd
#endif

#if defined(__powerpc64__)
  #define PPC_REG_SIZE 8
  #define PPC_REG_LOAD ld
  #define PPC_REG_STORE std
  #define PPC_REG_STORE_UPDATE stdu
  #define PPC_REG_CMP cmpd
#else
  #define PPC_REG_SIZE 4
  #define PPC_REG_LOAD lwz
  #define PPC_REG_STORE stw
  #define PPC_REG_STORE_UPDATE stwu
  #define PPC_REG_CMP cmpw
#endif

#ifndef ASM

/*
 * Non-volatile context according to E500ABIUG, EABI and 32-bit TLS (according
 * to "Power Architecture 32-bit Application Binary Interface Supplement 1.0 -
 * Linux and Embedded")
 */
typedef struct {
  uint32_t msr;
  uint32_t cr;
  uintptr_t gpr1;
  uintptr_t lr;
  PPC_GPR_TYPE gpr14;
  PPC_GPR_TYPE gpr15;
  PPC_GPR_TYPE gpr16;
  PPC_GPR_TYPE gpr17;
  PPC_GPR_TYPE gpr18;
  PPC_GPR_TYPE gpr19;
  PPC_GPR_TYPE gpr20;
  PPC_GPR_TYPE gpr21;
  PPC_GPR_TYPE gpr22;
  PPC_GPR_TYPE gpr23;
  PPC_GPR_TYPE gpr24;
  PPC_GPR_TYPE gpr25;
  PPC_GPR_TYPE gpr26;
  PPC_GPR_TYPE gpr27;
  PPC_GPR_TYPE gpr28;
  PPC_GPR_TYPE gpr29;
  PPC_GPR_TYPE gpr30;
  PPC_GPR_TYPE gpr31;
  uint32_t isr_dispatch_disable;
  uint32_t reserved_for_alignment;
  #if defined(PPC_MULTILIB_ALTIVEC)
    uint8_t v20[16];
    uint8_t v21[16];
    uint8_t v22[16];
    uint8_t v23[16];
    uint8_t v24[16];
    uint8_t v25[16];
    uint8_t v26[16];
    uint8_t v27[16];
    uint8_t v28[16];
    uint8_t v29[16];
    uint8_t v30[16];
    uint8_t v31[16];
    uint32_t vrsave;
  #elif defined(__ALTIVEC__)
    /*
     * 12 non-volatile vector registers, cache-aligned area for vscr/vrsave
     * and padding to ensure cache-alignment.  Unfortunately, we can't verify
     * the cache line size here in the cpukit but altivec support code will
     * produce an error if this is ever different from 32 bytes.
     *
     * Note: it is the BSP/CPU-support's responsibility to save/restore
     *       volatile vregs across interrupts and exceptions.
     */
    uint8_t altivec[16*12 + 32 + PPC_DEFAULT_CACHE_LINE_SIZE];
  #endif
  #if defined(PPC_MULTILIB_FPU)
    double f14;
    double f15;
    double f16;
    double f17;
    double f18;
    double f19;
    double f20;
    double f21;
    double f22;
    double f23;
    double f24;
    double f25;
    double f26;
    double f27;
    double f28;
    double f29;
    double f30;
    double f31;
  #endif
  /*
   * The following items are at the structure end, so that we can use dcbz for
   * the previous items to optimize the context switch.  We must not set the
   * following items to zero via the dcbz.
   */
  uintptr_t tp;
  #if defined(RTEMS_SMP)
    volatile uint32_t is_executing;
  #endif
} ppc_context;

typedef struct {
  uint8_t context [
    PPC_DEFAULT_CACHE_LINE_SIZE
      + sizeof(ppc_context)
      + (sizeof(ppc_context) % PPC_DEFAULT_CACHE_LINE_SIZE == 0
        ? 0
          : PPC_DEFAULT_CACHE_LINE_SIZE
            - sizeof(ppc_context) % PPC_DEFAULT_CACHE_LINE_SIZE)
  ];
} Context_Control;

static inline ppc_context *ppc_get_context( const Context_Control *context )
{
  uintptr_t clsz = PPC_DEFAULT_CACHE_LINE_SIZE;
  uintptr_t mask = clsz - 1;
  uintptr_t addr = (uintptr_t) context;

  return (ppc_context *) ((addr & ~mask) + clsz);
}
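
/*
 * A consistency-check sketch (added here for illustration; the assertion
 * name is an assumption, not part of the original header): the byte array
 * in Context_Control reserves a full cache line of slack and pads to a
 * cache line multiple, so the rounded-up pointer returned by
 * ppc_get_context() plus sizeof(ppc_context) always stays inside the array.
 */
RTEMS_STATIC_ASSERT(
  sizeof( Context_Control ) % PPC_DEFAULT_CACHE_LINE_SIZE == 0,
  ppc_context_size_is_cache_line_multiple
);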

#define _CPU_Context_Get_SP( _context ) \
  ppc_get_context(_context)->gpr1

#ifdef RTEMS_SMP
  static inline bool _CPU_Context_Get_is_executing(
    const Context_Control *context
  )
  {
    return ppc_get_context(context)->is_executing;
  }

  static inline void _CPU_Context_Set_is_executing(
    Context_Control *context,
    bool is_executing
  )
  {
    ppc_get_context(context)->is_executing = is_executing;
  }
#endif
#endif /* ASM */

#define PPC_CONTEXT_OFFSET_MSR (PPC_DEFAULT_CACHE_LINE_SIZE)
#define PPC_CONTEXT_OFFSET_CR (PPC_DEFAULT_CACHE_LINE_SIZE + 4)
#define PPC_CONTEXT_OFFSET_GPR1 (PPC_DEFAULT_CACHE_LINE_SIZE + 8)
#define PPC_CONTEXT_OFFSET_LR (PPC_DEFAULT_CACHE_LINE_SIZE + PPC_REG_SIZE + 8)

#define PPC_CONTEXT_GPR_OFFSET( gpr ) \
  (((gpr) - 14) * PPC_GPR_SIZE + \
    PPC_DEFAULT_CACHE_LINE_SIZE + 8 + 2 * PPC_REG_SIZE)

#define PPC_CONTEXT_OFFSET_GPR14 PPC_CONTEXT_GPR_OFFSET( 14 )
#define PPC_CONTEXT_OFFSET_GPR15 PPC_CONTEXT_GPR_OFFSET( 15 )
#define PPC_CONTEXT_OFFSET_GPR16 PPC_CONTEXT_GPR_OFFSET( 16 )
#define PPC_CONTEXT_OFFSET_GPR17 PPC_CONTEXT_GPR_OFFSET( 17 )
#define PPC_CONTEXT_OFFSET_GPR18 PPC_CONTEXT_GPR_OFFSET( 18 )
#define PPC_CONTEXT_OFFSET_GPR19 PPC_CONTEXT_GPR_OFFSET( 19 )
#define PPC_CONTEXT_OFFSET_GPR20 PPC_CONTEXT_GPR_OFFSET( 20 )
#define PPC_CONTEXT_OFFSET_GPR21 PPC_CONTEXT_GPR_OFFSET( 21 )
#define PPC_CONTEXT_OFFSET_GPR22 PPC_CONTEXT_GPR_OFFSET( 22 )
#define PPC_CONTEXT_OFFSET_GPR23 PPC_CONTEXT_GPR_OFFSET( 23 )
#define PPC_CONTEXT_OFFSET_GPR24 PPC_CONTEXT_GPR_OFFSET( 24 )
#define PPC_CONTEXT_OFFSET_GPR25 PPC_CONTEXT_GPR_OFFSET( 25 )
#define PPC_CONTEXT_OFFSET_GPR26 PPC_CONTEXT_GPR_OFFSET( 26 )
#define PPC_CONTEXT_OFFSET_GPR27 PPC_CONTEXT_GPR_OFFSET( 27 )
#define PPC_CONTEXT_OFFSET_GPR28 PPC_CONTEXT_GPR_OFFSET( 28 )
#define PPC_CONTEXT_OFFSET_GPR29 PPC_CONTEXT_GPR_OFFSET( 29 )
#define PPC_CONTEXT_OFFSET_GPR30 PPC_CONTEXT_GPR_OFFSET( 30 )
#define PPC_CONTEXT_OFFSET_GPR31 PPC_CONTEXT_GPR_OFFSET( 31 )
#define PPC_CONTEXT_OFFSET_ISR_DISPATCH_DISABLE PPC_CONTEXT_GPR_OFFSET( 32 )

#ifdef PPC_MULTILIB_ALTIVEC
  #define PPC_CONTEXT_OFFSET_V( v ) \
    ( ( ( v ) - 20 ) * 16 + PPC_CONTEXT_OFFSET_ISR_DISPATCH_DISABLE + 8)
  #define PPC_CONTEXT_OFFSET_V20 PPC_CONTEXT_OFFSET_V( 20 )
  #define PPC_CONTEXT_OFFSET_V21 PPC_CONTEXT_OFFSET_V( 21 )
  #define PPC_CONTEXT_OFFSET_V22 PPC_CONTEXT_OFFSET_V( 22 )
  #define PPC_CONTEXT_OFFSET_V23 PPC_CONTEXT_OFFSET_V( 23 )
  #define PPC_CONTEXT_OFFSET_V24 PPC_CONTEXT_OFFSET_V( 24 )
  #define PPC_CONTEXT_OFFSET_V25 PPC_CONTEXT_OFFSET_V( 25 )
  #define PPC_CONTEXT_OFFSET_V26 PPC_CONTEXT_OFFSET_V( 26 )
  #define PPC_CONTEXT_OFFSET_V27 PPC_CONTEXT_OFFSET_V( 27 )
  #define PPC_CONTEXT_OFFSET_V28 PPC_CONTEXT_OFFSET_V( 28 )
  #define PPC_CONTEXT_OFFSET_V29 PPC_CONTEXT_OFFSET_V( 29 )
  #define PPC_CONTEXT_OFFSET_V30 PPC_CONTEXT_OFFSET_V( 30 )
  #define PPC_CONTEXT_OFFSET_V31 PPC_CONTEXT_OFFSET_V( 31 )
  #define PPC_CONTEXT_OFFSET_VRSAVE PPC_CONTEXT_OFFSET_V( 32 )
  #define PPC_CONTEXT_OFFSET_F( f ) \
    ( ( ( f ) - 14 ) * 8 + PPC_CONTEXT_OFFSET_VRSAVE + 8 )
#else
  #define PPC_CONTEXT_OFFSET_F( f ) \
    ( ( ( f ) - 14 ) * 8 + PPC_CONTEXT_OFFSET_ISR_DISPATCH_DISABLE + 8 )
#endif

#ifdef PPC_MULTILIB_FPU
  #define PPC_CONTEXT_OFFSET_F14 PPC_CONTEXT_OFFSET_F( 14 )
  #define PPC_CONTEXT_OFFSET_F15 PPC_CONTEXT_OFFSET_F( 15 )
  #define PPC_CONTEXT_OFFSET_F16 PPC_CONTEXT_OFFSET_F( 16 )
  #define PPC_CONTEXT_OFFSET_F17 PPC_CONTEXT_OFFSET_F( 17 )
  #define PPC_CONTEXT_OFFSET_F18 PPC_CONTEXT_OFFSET_F( 18 )
  #define PPC_CONTEXT_OFFSET_F19 PPC_CONTEXT_OFFSET_F( 19 )
  #define PPC_CONTEXT_OFFSET_F20 PPC_CONTEXT_OFFSET_F( 20 )
  #define PPC_CONTEXT_OFFSET_F21 PPC_CONTEXT_OFFSET_F( 21 )
  #define PPC_CONTEXT_OFFSET_F22 PPC_CONTEXT_OFFSET_F( 22 )
  #define PPC_CONTEXT_OFFSET_F23 PPC_CONTEXT_OFFSET_F( 23 )
  #define PPC_CONTEXT_OFFSET_F24 PPC_CONTEXT_OFFSET_F( 24 )
  #define PPC_CONTEXT_OFFSET_F25 PPC_CONTEXT_OFFSET_F( 25 )
  #define PPC_CONTEXT_OFFSET_F26 PPC_CONTEXT_OFFSET_F( 26 )
  #define PPC_CONTEXT_OFFSET_F27 PPC_CONTEXT_OFFSET_F( 27 )
  #define PPC_CONTEXT_OFFSET_F28 PPC_CONTEXT_OFFSET_F( 28 )
  #define PPC_CONTEXT_OFFSET_F29 PPC_CONTEXT_OFFSET_F( 29 )
  #define PPC_CONTEXT_OFFSET_F30 PPC_CONTEXT_OFFSET_F( 30 )
  #define PPC_CONTEXT_OFFSET_F31 PPC_CONTEXT_OFFSET_F( 31 )
#endif

#if defined(PPC_MULTILIB_FPU)
  #define PPC_CONTEXT_VOLATILE_SIZE PPC_CONTEXT_OFFSET_F( 32 )
#elif defined(PPC_MULTILIB_ALTIVEC)
  #define PPC_CONTEXT_VOLATILE_SIZE (PPC_CONTEXT_OFFSET_VRSAVE + 4)
#elif defined(__ALTIVEC__)
  #define PPC_CONTEXT_VOLATILE_SIZE \
    (PPC_CONTEXT_GPR_OFFSET( 32 ) + 8 \
      + 16 * 12 + 32 + PPC_DEFAULT_CACHE_LINE_SIZE)
#else
  #define PPC_CONTEXT_VOLATILE_SIZE (PPC_CONTEXT_GPR_OFFSET( 32 ) + 8)
#endif

#define PPC_CONTEXT_OFFSET_TP PPC_CONTEXT_VOLATILE_SIZE

#ifdef RTEMS_SMP
  #define PPC_CONTEXT_OFFSET_IS_EXECUTING \
    (PPC_CONTEXT_OFFSET_TP + PPC_REG_SIZE)
#endif
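
#ifndef ASM
  /*
   * Illustrative cross-check (a sketch added here; the real checks live in
   * the CPU support code, and the include and assertion name are
   * assumptions): each assembler-visible offset above equals the C offset
   * of the corresponding ppc_context member plus one cache line, matching
   * the rounding done by ppc_get_context().  For example, for GPR14:
   */
  #include <stddef.h> /* for offsetof() */

  RTEMS_STATIC_ASSERT(
    PPC_CONTEXT_OFFSET_GPR14
      == offsetof( ppc_context, gpr14 ) + PPC_DEFAULT_CACHE_LINE_SIZE,
    ppc_context_offset_gpr14_example
  );
#endif /* ASM */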

#ifndef ASM
typedef struct {
#if (PPC_HAS_FPU == 1)
    /* The ABIs (PowerOpen/SVR4/EABI) only require saving f14-f31 over
     * procedure calls.  However, this would mean that the interrupt
     * frame would have to hold f0-f13 and the fpscr.  And since the
     * majority of tasks will not have an FP context, we save the whole
     * context here.
     */
#if (PPC_HAS_DOUBLE == 1)
    double      f[32];
    uint64_t    fpscr;
#else
    float       f[32];
    uint32_t    fpscr;
#endif
#endif /* (PPC_HAS_FPU == 1) */
} Context_Control_fp;

#endif /* ASM */

/*
 *  Does the CPU follow the simple vectored interrupt model?
 *
 *  If TRUE, then RTEMS allocates and internally manages the vector table.
 *  If FALSE, then the BSP is assumed to allocate and manage the vector
 *  table.
 *
 *  PowerPC Specific Information:
 *
 *  The PowerPC and x86 were the first to use the PIC interrupt model.
 *  They do not use the simple vectored interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

/*
 *  Does RTEMS manage a dedicated interrupt stack in software?
 *
 *  If TRUE, then a stack is allocated in _ISR_Handler_initialization.
 *  If FALSE, nothing is done.
 *
 *  If the CPU supports a dedicated interrupt stack in hardware,
 *  then it is generally the responsibility of the BSP to allocate it
 *  and set it up.
 *
 *  If the CPU does not support a dedicated interrupt stack, then
 *  the porter has two options: (1) execute interrupts on the
 *  stack of the interrupted task, and (2) have RTEMS manage a dedicated
 *  interrupt stack.
 *
 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
 *
 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
 *  possible that both are FALSE for a particular CPU, although it
 *  is unclear what that would imply about the interrupt processing
 *  procedure on that CPU.
 */

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE

/*
 *  Does this CPU have hardware support for a dedicated interrupt stack?
 *
 *  If TRUE, then it must be installed during initialization.
 *  If FALSE, then no installation is performed.
 *
 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
 *
 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
 *  possible that both are FALSE for a particular CPU, although it
 *  is unclear what that would imply about the interrupt processing
 *  procedure on that CPU.
 */

#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE

/*
 *  Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
 *
 *  If TRUE, then the memory is allocated during initialization.
 *  If FALSE, then no interrupt stack is allocated by RTEMS.
 *
 *  This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE.
 */

#define CPU_ALLOCATE_INTERRUPT_STACK TRUE

/*
 *  Does RTEMS invoke the user's ISR with the vector number and
 *  a pointer to the saved interrupt frame (1) or just the vector
 *  number (0)?
 */

#define CPU_ISR_PASSES_FRAME_POINTER FALSE

/*
 *  Should the saving of the floating point registers be deferred
 *  until a context switch is made to another different floating point
 *  task?
 *
 *  If TRUE, then the floating point context will not be stored until
 *  necessary.  It will remain in the floating point registers and not
 *  be disturbed until another floating point task is switched to.
 *
 *  If FALSE, then the floating point context is saved when a floating
 *  point task is switched out and restored when the next floating point
 *  task is restored.  The state of the floating point registers between
 *  those two operations is not specified.
 *
 *  If the floating point context does NOT have to be saved as part of
 *  interrupt dispatching, then it should be safe to set this to TRUE.
 *
 *  Setting this flag to TRUE results in using a different algorithm
 *  for deciding when to save and restore the floating point context.
 *  The deferred FP switch algorithm minimizes the number of times
 *  the FP context is saved and restored.  The FP context is not saved
 *  until a context switch is made to another, different FP task.
 *  Thus in a system with only one FP task, the FP context will never
 *  be saved or restored.
 *
 *  Note, however, that compilers may use floating point registers/
 *  instructions for optimization or they may save/restore FP registers
 *  on the stack.  You must not use deferred switching in these cases,
 *  and on the PowerPC attempting to do so will raise a "FP unavailable"
 *  exception.
 */
/*
 *  ACB Note:  This could make debugging tricky..
 */

/* conservative setting (FALSE); probably doesn't affect performance too much */
#define CPU_USE_DEFERRED_FP_SWITCH       FALSE

#define CPU_ENABLE_ROBUST_THREAD_DISPATCH FALSE

/*
 *  Processor defined structures required for cpukit/score.
 */

#ifndef ASM

/*
 *  This variable is optional.  It is used on CPUs on which it is difficult
 *  to generate an "uninitialized" FP context.  It is filled in by
 *  _CPU_Initialize and copied into the task's FP context area during
 *  _CPU_Context_Initialize.
 */

/* EXTERN Context_Control_fp  _CPU_Null_fp_context; */

#endif /* ndef ASM */

/*
 *  This defines the number of levels and the mask used to pick those
 *  bits out of a thread mode.
 */

#define CPU_MODES_INTERRUPT_LEVEL  0x00000001 /* interrupt level in mode */
#define CPU_MODES_INTERRUPT_MASK   0x00000001 /* interrupt level in mode */

/*
 *  The size of the floating point context area.  On some CPUs this
 *  will not be a "sizeof" because the format of the floating point
 *  area is not defined -- only the size is.  This is usually on
 *  CPUs with a "floating point save context" instruction.
 */

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

/*
 * (Optional) # of bytes for libmisc/stackchk to check
 * If not specified, then it defaults to something reasonable
 * for most architectures.
 */

#define CPU_STACK_CHECK_PATTERN_INITIALIZER \
  { 0xFEEDF00D, 0x0BAD0D06, 0xDEADF00D, 0x600D0D06, \
    0xFEEDF00D, 0x0BAD0D06, 0xDEADF00D, 0x600D0D06, \
    0xFEEDF00D, 0x0BAD0D06, 0xDEADF00D, 0x600D0D06, \
    0xFEEDF00D, 0x0BAD0D06, 0xDEADF00D, 0x600D0D06, \
    0xFEEDF00D, 0x0BAD0D06, 0xDEADF00D, 0x600D0D06, \
    0xFEEDF00D, 0x0BAD0D06, 0xDEADF00D, 0x600D0D06, \
    0xFEEDF00D, 0x0BAD0D06, 0xDEADF00D, 0x600D0D06, \
    0xFEEDF00D, 0x0BAD0D06, 0xDEADF00D, 0x600D0D06 }

/*
 *  Amount of extra stack (above minimum stack size) required by
 *  MPCI receive server thread.  Remember that in a multiprocessor
 *  system this thread must exist and be able to process all directives.
 */

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

/*
 *  This is defined if the port has a special way to report the ISR nesting
 *  level.  Most ports maintain the variable _ISR_Nest_level. Note that
 *  this is not an option - RTEMS/score _relies_ on _ISR_Nest_level
 *  being maintained (e.g. watchdog queues).
 */

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/*
 *  ISR handler macros
 */

/*
 *  Disable all interrupts for an RTEMS critical section.  The previous
 *  level is returned in _isr_cookie.
 */

#ifndef ASM

RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
{
  return ( level & MSR_EE ) != 0;
}

static inline uint32_t _CPU_ISR_Get_level( void )
{
  register unsigned int msr;
  _CPU_MSR_GET(msr);
  if (msr & MSR_EE) return 0;
  else  return 1;
}

static inline void _CPU_ISR_Set_level( uint32_t level )
{
  register unsigned int msr;
  _CPU_MSR_GET(msr);
  if (!(level & CPU_MODES_INTERRUPT_MASK)) {
    msr |= ppc_interrupt_get_disable_mask();
  }
  else {
    msr &= ~ppc_interrupt_get_disable_mask();
  }
  _CPU_MSR_SET(msr);
}
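
/*
 * A minimal usage sketch (the helper is hypothetical, added for
 * illustration only): raise the interrupt level around a short critical
 * section and restore the previous level afterwards.
 */
static inline void ppc_isr_level_sketch( void )
{
  uint32_t previous = _CPU_ISR_Get_level();

  _CPU_ISR_Set_level( 1 ); /* mask external exceptions via the MSR */
  /* ... critical section ... */
  _CPU_ISR_Set_level( previous );
}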

#endif /* ASM */

#define _CPU_Fatal_halt( _source, _error ) \
  do { \
    ppc_interrupt_disable(); \
    __asm__ volatile ( \
      "mr 3, %0\n" \
      "mr 4, %1\n" \
      "1:\n" \
      "b 1b\n" \
      : \
      : "r" (_source), "r" (_error) \
      : "memory" \
    ); \
  } while ( 0 )

/*
 *  Should be large enough to run all RTEMS tests.  This ensures
 *  that a "reasonable" small application should not have any problems.
 */

#define CPU_STACK_MINIMUM_SIZE          (1024*8)

#if defined(__powerpc64__)
#define CPU_SIZEOF_POINTER 8
#else
#define CPU_SIZEOF_POINTER 4
#endif

/*
 *  CPU's worst alignment requirement for data types on a byte boundary.  This
 *  alignment does not take into account the requirements for the stack.
 */

#define CPU_ALIGNMENT              (PPC_ALIGNMENT)

/*
 *  This number corresponds to the byte alignment requirement for the
 *  heap handler.  This alignment requirement may be stricter than that
 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
 *  common for the heap to follow the same alignment requirement as
 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
 *  then this should be set to CPU_ALIGNMENT.
 *
 *  NOTE:  This does not have to be a power of 2.  It does have to
 *         be greater than or equal to CPU_ALIGNMENT.
 */

#define CPU_HEAP_ALIGNMENT         (PPC_ALIGNMENT)

/*
 *  This number corresponds to the byte alignment requirement for memory
 *  buffers allocated by the partition manager.  This alignment requirement
 *  may be stricter than that for the data types alignment specified by
 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
 *
 *  NOTE:  This does not have to be a power of 2.  It does have to
 *         be greater than or equal to CPU_ALIGNMENT.
 */

#define CPU_PARTITION_ALIGNMENT    (PPC_ALIGNMENT)

/*
 *  This number corresponds to the byte alignment requirement for the
 *  stack.  This alignment requirement may be stricter than that for the
 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
 *  is strict enough for the stack, then this should be set to 0.
 *
 *  NOTE:  This must be a power of 2, either 0 or greater than CPU_ALIGNMENT.
 */

#define CPU_STACK_ALIGNMENT        (PPC_STACK_ALIGNMENT)

#ifndef ASM
/*  The following routine swaps the endian format of an unsigned int.
 *  It must be static because it is referenced indirectly.
 *
 *  This version will work on any processor, but if there is a better
 *  way for your CPU PLEASE use it.  The most common way to do this is to:
 *
 *     swap least significant two bytes with 16-bit rotate
 *     swap upper and lower 16-bits
 *     swap most significant two bytes with 16-bit rotate
 *
 *  Some CPUs have special instructions which swap a 32-bit quantity in
 *  a single instruction (e.g. i486).  It is probably best to avoid
 *  an "endian swapping control bit" in the CPU.  One good reason is
 *  that interrupts would probably have to be disabled to ensure that
 *  an interrupt does not try to access the same "chunk" with the wrong
 *  endian.  Another good reason is that on some CPUs, the endian bit
 *  changes the endianness for ALL fetches -- both code and data -- so
 *  the code will be fetched incorrectly.
 */

static inline uint32_t CPU_swap_u32(
  uint32_t value
)
{
  uint32_t   swapped;

  __asm__ volatile("rlwimi %0,%1,8,24,31;"
               "rlwimi %0,%1,24,16,23;"
               "rlwimi %0,%1,8,8,15;"
               "rlwimi %0,%1,24,0,7;" :
               "=&r" ((swapped)) : "r" ((value)));

  return( swapped );
}

#define CPU_swap_u16( value ) \
  (((value&0xff) << 8) | ((value >> 8)&0xff))

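/*
 * A minimal usage sketch (the helper is hypothetical, added for
 * illustration only): CPU_swap_u32() byte-reverses a 32-bit value, e.g.
 * 0x12345678 becomes 0x78563412, which converts between little-endian
 * device data and the big-endian host representation.
 */
static inline uint32_t ppc_le32_to_host_sketch( uint32_t le_value )
{
  return CPU_swap_u32( le_value );
}
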
typedef uint32_t CPU_Counter_ticks;

static inline CPU_Counter_ticks _CPU_Counter_read( void )
{
  CPU_Counter_ticks value;

#if defined(__PPC_CPU_E6500__)
  /* Use Alternate Time Base */
  __asm__ volatile( "mfspr %0, 526" : "=r" (value) );
#else
  /* Use the Time Base (SPR 268) */
  __asm__ volatile( "mfspr %0, 268" : "=r" (value) );
#endif

  return value;
}

static inline CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
)
{
  return second - first;
}
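
/*
 * A usage sketch (the helper is hypothetical, added for illustration
 * only): measure the duration of a code sequence in counter ticks.  The
 * unsigned subtraction in _CPU_Counter_difference() is wraparound-safe.
 */
static inline CPU_Counter_ticks ppc_measure_ticks_sketch(
  void ( *code_under_test )( void )
)
{
  CPU_Counter_ticks first = _CPU_Counter_read();

  ( *code_under_test )();

  return _CPU_Counter_difference( _CPU_Counter_read(), first );
}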

#endif /* ASM */


#ifndef ASM
/* Context handler macros */

/*
 *  Initialize the context to a state suitable for starting a
 *  task after a context restore operation.  Generally, this
 *  involves:
 *
 *     - setting a starting address
 *     - preparing the stack
 *     - preparing the stack and frame pointers
 *     - setting the proper interrupt level in the context
 *     - initializing the floating point context
 *
 *  This routine generally does not set any unnecessary register
 *  in the context.  The state of the "general data" registers is
 *  undefined at task start time.
 */

void _CPU_Context_Initialize(
  Context_Control  *the_context,
  void             *stack_base,
  size_t            size,
  uint32_t          new_level,
  void             *entry_point,
  bool              is_fp,
  void             *tls_area
);

/*
 *  This routine is responsible for somehow restarting the currently
 *  executing task.  If you are lucky, then all that is necessary
 *  is restoring the context.  Otherwise, there will need to be
 *  a special assembly routine which does something special in this
 *  case.  Context_Restore should work most of the time.  It will
 *  not work if restarting self conflicts with the stack frame
 *  assumptions of restoring a context.
 */

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

/*
 *  This routine initializes the FP context area passed to it.
 *  There are a few standard ways in which to initialize the
 *  floating point context.  One standard way is to copy an "initial"
 *  FP context, saved into _CPU_Null_fp_context by _CPU_Initialize, to
 *  the destination context passed to it.
 *
 *  Other models include (1) not doing anything, and (2) putting
 *  a "null FP status word" in the correct place in the FP context.
 *  This port simply zeroes the destination area.
 */

#define _CPU_Context_Initialize_fp( _destination ) \
  memset( *(_destination), 0, sizeof( **(_destination) ) )
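
/*
 * A minimal usage sketch (the helper is hypothetical, added for
 * illustration only): note that the macro takes a pointer to the pointer
 * to the FP area and zeroes the area it designates.
 */
static inline void ppc_init_fp_area_sketch( Context_Control_fp **fp_area )
{
  _CPU_Context_Initialize_fp( fp_area );
}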

/* end of Context handler macros */
#endif /* ASM */

#ifndef ASM
/* Bitfield handler macros */

#define CPU_USE_GENERIC_BITFIELD_CODE FALSE

/*
 *  This routine sets _output to the bit number of the first bit
 *  set in _value.  _value is of CPU dependent type Priority_bit_map_Word.
 *  This type may be either 16 or 32 bits wide although only the 16
 *  least significant bits will be used.
 *
 *  There are a number of variations in using a "find first bit" type
 *  instruction.
 *
 *    (1) What happens when run on a value of zero?
 *    (2) Bits may be numbered from MSB to LSB or vice-versa.
 *    (3) The numbering may be zero or one based.
 *    (4) The "find first bit" instruction may search from MSB or LSB.
 *
 *  RTEMS guarantees that (1) will never happen so it is not a concern.
 *  (2), (3), and (4) are handled by the macros _CPU_Priority_mask() and
 *  _CPU_Priority_Bits_index().  These three form a set of routines
 *  which must logically operate together.  Bits in the _value are
 *  set and cleared based on masks built by _CPU_Priority_mask().
 *  The basic major and minor values calculated by _Priority_Major()
 *  and _Priority_Minor() are "massaged" by _CPU_Priority_Bits_index()
 *  to properly range between the values returned by the "find first bit"
 *  instruction.  This makes it possible for _Priority_Get_highest() to
 *  calculate the major and directly index into the minor table.
 *  This mapping is necessary to ensure that 0 (a high priority major/minor)
 *  is the first bit found.
 *
 *  This entire "find first bit" and mapping process depends heavily
 *  on the manner in which a priority is broken into a major and minor
 *  components with the major being the 4 MSB of a priority and minor
 *  the 4 LSB.  Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
 *  priority.  And (15 << 4) + 14 corresponds to priority 254 -- the next
 *  to the lowest priority.
 *
 *  If your CPU does not have a "find first bit" instruction, then
 *  there are ways to make do without it.  Here are a handful of ways
 *  to implement this in software (a sketch of this approach follows
 *  the macro below):
 *
 *    - a series of 16 bit test instructions
 *    - a "binary search using if's"
 *    - _number = 0
 *      if _value > 0x00ff
 *        _value >>= 8
 *        _number = 8;
 *
 *      if _value > 0x000f
 *        _value >>= 4
 *        _number += 4
 *
 *      _number += bit_set_table[ _value ]
 *
 *    where bit_set_table[ 16 ] has values which indicate the first
 *      bit set
 */

#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
  { \
    __asm__ volatile ("cntlzw %0, %1" : "=r" ((_output)), "=r" ((_value)) : \
                  "1" ((_value))); \
    (_output) = (_output) - 16; \
  }
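
/*
 * For reference, a software fallback along the lines of the binary search
 * described above (a hypothetical sketch, not used by this port; the
 * PowerPC cntlzw instruction makes it unnecessary).  It returns the same
 * MSB-first bit number as the macro: 0 for 0x8000 down to 15 for 0x0001.
 */
static inline unsigned int ppc_find_first_bit_sw_sketch( unsigned int value )
{
  /* leading zero count of a non-zero nibble (index 0 is unused) */
  static const unsigned char nibble_clz[ 16 ] = {
    4, 3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0
  };
  unsigned int number = 0;

  /* assumes value != 0 and value <= 0xffff, as RTEMS guarantees */
  if ( value <= 0x00ffU ) {
    value <<= 8;
    number += 8;
  }

  if ( value <= 0x0fffU ) {
    value <<= 4;
    number += 4;
  }

  return number + nibble_clz[ value >> 12 ];
}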

/* end of Bitfield handler macros */

/*
 *  This routine builds the mask which corresponds to the bit fields
 *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
 *  for that routine.
 */

#define _CPU_Priority_Mask( _bit_number ) \
  ( 0x8000u >> (_bit_number) )

/*
 *  This routine translates the bit numbers returned by
 *  _CPU_Bitfield_Find_first_bit() into something suitable for use as
 *  a major or minor component of a priority.  See the discussion
 *  for that routine.
 */

#define _CPU_Priority_bits_index( _priority ) \
  (_priority)

/* end of Priority handler macros */
#endif /* ASM */

/* functions */

#ifndef ASM

/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 */

void _CPU_Initialize(void);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.
 */

void _CPU_ISR_install_vector(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 */

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_NO_RETURN;

/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 */

void _CPU_Context_save_fp(
  Context_Control_fp **fp_context_ptr
);

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 */

void _CPU_Context_restore_fp(
  Context_Control_fp **fp_context_ptr
);

void _CPU_Context_volatile_clobber( uintptr_t pattern );

void _CPU_Context_validate( uintptr_t pattern );

#ifdef RTEMS_SMP
  uint32_t _CPU_SMP_Initialize( void );

  bool _CPU_SMP_Start_processor( uint32_t cpu_index );

  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

  void _CPU_SMP_Prepare_start_multitasking( void );

  static inline uint32_t _CPU_SMP_Get_current_processor( void )
  {
    uint32_t pir;

    /* Use Book E Processor ID Register (PIR) */
    __asm__ volatile (
      "mfspr %[pir], 286"
      : [pir] "=&r" (pir)
    );

    return pir;
  }

  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

  static inline void _CPU_SMP_Processor_event_broadcast( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }

  static inline void _CPU_SMP_Processor_event_receive( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }
#endif

typedef struct {
  uintptr_t EXC_SRR0;
  uintptr_t EXC_SRR1;
  uint32_t _EXC_number;
  uint32_t RESERVED_FOR_ALIGNMENT_0;
  uint32_t EXC_CR;
  uint32_t EXC_XER;
  uintptr_t EXC_CTR;
  uintptr_t EXC_LR;
  uintptr_t RESERVED_FOR_ALIGNMENT_1;
  #ifdef __SPE__
    uint32_t EXC_SPEFSCR;
    uint64_t EXC_ACC;
  #endif
  PPC_GPR_TYPE GPR0;
  PPC_GPR_TYPE GPR1;
  PPC_GPR_TYPE GPR2;
  PPC_GPR_TYPE GPR3;
  PPC_GPR_TYPE GPR4;
  PPC_GPR_TYPE GPR5;
  PPC_GPR_TYPE GPR6;
  PPC_GPR_TYPE GPR7;
  PPC_GPR_TYPE GPR8;
  PPC_GPR_TYPE GPR9;
  PPC_GPR_TYPE GPR10;
  PPC_GPR_TYPE GPR11;
  PPC_GPR_TYPE GPR12;
  PPC_GPR_TYPE GPR13;
  PPC_GPR_TYPE GPR14;
  PPC_GPR_TYPE GPR15;
  PPC_GPR_TYPE GPR16;
  PPC_GPR_TYPE GPR17;
  PPC_GPR_TYPE GPR18;
  PPC_GPR_TYPE GPR19;
  PPC_GPR_TYPE GPR20;
  PPC_GPR_TYPE GPR21;
  PPC_GPR_TYPE GPR22;
  PPC_GPR_TYPE GPR23;
  PPC_GPR_TYPE GPR24;
  PPC_GPR_TYPE GPR25;
  PPC_GPR_TYPE GPR26;
  PPC_GPR_TYPE GPR27;
  PPC_GPR_TYPE GPR28;
  PPC_GPR_TYPE GPR29;
  PPC_GPR_TYPE GPR30;
  PPC_GPR_TYPE GPR31;
  uintptr_t RESERVED_FOR_ALIGNMENT_2;
  #ifdef PPC_MULTILIB_ALTIVEC
    uint32_t VRSAVE;
    uint32_t RESERVED_FOR_ALIGNMENT_3[3];

    /* This field must take stvewx/lvewx requirements into account */
    uint32_t RESERVED_FOR_ALIGNMENT_4[3];
    uint32_t VSCR;

    uint8_t V0[16];
    uint8_t V1[16];
    uint8_t V2[16];
    uint8_t V3[16];
    uint8_t V4[16];
    uint8_t V5[16];
    uint8_t V6[16];
    uint8_t V7[16];
    uint8_t V8[16];
    uint8_t V9[16];
    uint8_t V10[16];
    uint8_t V11[16];
    uint8_t V12[16];
    uint8_t V13[16];
    uint8_t V14[16];
    uint8_t V15[16];
    uint8_t V16[16];
    uint8_t V17[16];
    uint8_t V18[16];
    uint8_t V19[16];
    uint8_t V20[16];
    uint8_t V21[16];
    uint8_t V22[16];
    uint8_t V23[16];
    uint8_t V24[16];
    uint8_t V25[16];
    uint8_t V26[16];
    uint8_t V27[16];
    uint8_t V28[16];
    uint8_t V29[16];
    uint8_t V30[16];
    uint8_t V31[16];
  #endif
  #ifdef PPC_MULTILIB_FPU
    double F0;
    double F1;
    double F2;
    double F3;
    double F4;
    double F5;
    double F6;
    double F7;
    double F8;
    double F9;
    double F10;
    double F11;
    double F12;
    double F13;
    double F14;
    double F15;
    double F16;
    double F17;
    double F18;
    double F19;
    double F20;
    double F21;
    double F22;
    double F23;
    double F24;
    double F25;
    double F26;
    double F27;
    double F28;
    double F29;
    double F30;
    double F31;
    uint64_t FPSCR;
    uint64_t RESERVED_FOR_ALIGNMENT_5;
  #endif
} CPU_Exception_frame;

void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

/*
 * _CPU_Initialize_altivec()
 *
 * Global altivec-related initialization.
 */
void
_CPU_Initialize_altivec(void);

/*
 * _CPU_Context_switch_altivec
 *
 * This routine switches the altivec contexts passed to it.
 */

void
_CPU_Context_switch_altivec(
  ppc_context *from,
  ppc_context *to
);

/*
 * _CPU_Context_restore_altivec
 *
 * This routine restores the altivec context passed to it.
 */

void
_CPU_Context_restore_altivec(
  ppc_context *ctxt
);

/*
 * _CPU_Context_initialize_altivec
 *
 * This routine initializes the altivec context passed to it.
 */

void
_CPU_Context_initialize_altivec(
  ppc_context *ctxt
);

void _CPU_Fatal_error(
  uint32_t   _error
);

#endif /* ASM */

#ifdef __cplusplus
}
#endif

#endif /* _RTEMS_SCORE_CPU_H */