source: rtems/cpukit/score/cpu/powerpc/include/rtems/score/cpu.h @ 4adaed73

Last change on this file since 4adaed73 was 4adaed73, checked in by Sebastian Huber <sebastian.huber@…>, on 07/27/21 at 09:08:54

score: Remove processor event broadcast/receive

Remove _CPU_SMP_Processor_event_broadcast() and
_CPU_SMP_Processor_event_receive(). These functions are hard to use since they
are subject to the lost wake up problem.

1/**
2 * @file
3 *
4 * @addtogroup RTEMSScoreCPUPowerPC
5 *
6 * @brief PowerPC CPU Department Source
7 */
8
9/*
10 *  COPYRIGHT (c) 1989-2012.
11 *  On-Line Applications Research Corporation (OAR).
12 *
13 *  COPYRIGHT (c) 1995 i-cubed ltd.
14 *
15 *  To anyone who acknowledges that this file is provided "AS IS"
16 *  without any express or implied warranty:
17 *      permission to use, copy, modify, and distribute this file
18 *      for any purpose is hereby granted without fee, provided that
19 *      the above copyright notice and this notice appears in all
20 *      copies, and that the name of i-cubed limited not be used in
21 *      advertising or publicity pertaining to distribution of the
22 *      software without specific, written prior permission.
23 *      i-cubed limited makes no representations about the suitability
24 *      of this software for any purpose.
25 *
26 *  Copyright (c) 2001 Andy Dachs <a.dachs@sstl.co.uk>.
27 *
28 *  Copyright (c) 2001 Surrey Satellite Technology Limited (SSTL).
29 *
30 *  Copyright (c) 2010, 2017 embedded brains GmbH.
31 *
32 *  The license and distribution terms for this file may be
33 *  found in the file LICENSE in this distribution or at
34 *  http://www.rtems.org/license/LICENSE.
35 */
36
37#ifndef _RTEMS_SCORE_CPU_H
38#define _RTEMS_SCORE_CPU_H
39
40#include <rtems/score/basedefs.h>
41#if defined(RTEMS_PARAVIRT)
42#include <rtems/score/paravirt.h>
43#endif
44#include <rtems/score/powerpc.h>
45#include <rtems/powerpc/registers.h>
46
47#ifndef ASM
48  #include <string.h> /* for memset() */
49#endif
50
51#ifdef __cplusplus
52extern "C" {
53#endif
54
55/* conditional compilation parameters */
56
57/*
58 *  Does the stack grow up (toward higher addresses) or down
59 *  (toward lower addresses)?
60 *
61 *  If TRUE, then the stack grows upward.
62 *  If FALSE, then the stack grows toward smaller addresses.
63 */
64
65#define CPU_STACK_GROWS_UP               FALSE
66
67#define CPU_CACHE_LINE_BYTES PPC_STRUCTURE_ALIGNMENT
68
69#define CPU_STRUCTURE_ALIGNMENT RTEMS_ALIGNED( CPU_CACHE_LINE_BYTES )
70
71/*
72 *  Does the CPU have hardware floating point?
73 *
74 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
75 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
76 *
77 *  If there is a FP coprocessor such as the i387 or mc68881, then
78 *  the answer is TRUE.
79 *
80 *  The macro name "PPC_HAS_FPU" should be made CPU specific.
81 *  It indicates whether or not this CPU model has FP support.  For
82 *  example, it would be possible to have an i386_nofp CPU model
83 *  which set this to false to indicate that you have an i386 without
84 *  an i387 and wish to leave floating point support out of RTEMS.
85 */
86
87#if ( PPC_HAS_FPU == 1 )
88#define CPU_HARDWARE_FP     TRUE
89#define CPU_SOFTWARE_FP     FALSE
90#else
91#define CPU_HARDWARE_FP     FALSE
92#define CPU_SOFTWARE_FP     FALSE
93#endif
94
95/*
96 *  Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
97 *
98 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
99 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
100 *
101 *  If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
102 *
103 *  PowerPC Note: It appears that GCC can implicitly generate FPU
104 *  and Altivec instructions when you least expect them.  So make
105 *  all tasks floating point.
106 */
107
108#define CPU_ALL_TASKS_ARE_FP CPU_HARDWARE_FP
109
110/*
111 *  Should the IDLE task have a floating point context?
112 *
113 *  If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
114 *  and it has a floating point context which is switched in and out.
115 *  If FALSE, then the IDLE task does not have a floating point context.
116 *
117 *  Setting this to TRUE negatively impacts the time required to preempt
118 *  the IDLE task from an interrupt because the floating point context
119 *  must be saved as part of the preemption.
120 */
121
122#define CPU_IDLE_TASK_IS_FP      FALSE
123
124#define CPU_MAXIMUM_PROCESSORS 32
125
126/*
127 *  Processor defined structures required for cpukit/score.
128 */
129
130/*
131 * Contexts
132 *
133 *  Generally there are 2 types of context to save.
134 *     1. Interrupt registers to save
135 *     2. Task level registers to save
136 *
137 *  This means we have the following 3 context items:
138 *     1. task level context stuff::  Context_Control
139 *     2. floating point task stuff:: Context_Control_fp
140 *     3. special interrupt level context :: Context_Control_interrupt
141 *
142 *  On some processors, it is cost-effective to save only the callee
143 *  preserved registers during a task context switch.  This means
144 *  that the ISR code needs to save those registers which do not
145 *  persist across function calls.  It is not mandatory to make this
146 *  distinction between the caller/callee saved registers for the
147 *  purpose of minimizing the context saved during task switch and on interrupts.
148 *  If the cost of saving extra registers is minimal, simplicity is the
149 *  choice.  Save the same context on interrupt entry as for tasks in
150 *  this case.
151 *
152 *  Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
153 *  care should be used in designing the context area.
154 *
155 *  On some CPUs with hardware floating point support, the Context_Control_fp
156 *  structure will not be used or it simply consists of an array of a
157 *  fixed number of bytes.   This is done when the floating point context
158 *  is dumped by a "FP save context" type instruction and the format
159 *  is not really defined by the CPU.  In this case, there is no need
160 *  to figure out the exact format -- only the size.  Of course, although
161 *  this is enough information for RTEMS, it is probably not enough for
162 *  a debugger such as gdb.  But that is another problem.
163 */
164
165#ifndef __SPE__
166  #define PPC_GPR_TYPE uintptr_t
167  #if defined(__powerpc64__)
168    #define PPC_GPR_SIZE 8
169    #define PPC_GPR_LOAD ld
170    #define PPC_GPR_STORE std
171  #else
172    #define PPC_GPR_SIZE 4
173    #define PPC_GPR_LOAD lwz
174    #define PPC_GPR_STORE stw
175  #endif
176#else
177  #define PPC_GPR_TYPE uint64_t
178  #define PPC_GPR_SIZE 8
179  #define PPC_GPR_LOAD evldd
180  #define PPC_GPR_STORE evstdd
181#endif
182
183#if defined(__powerpc64__)
184  #define PPC_REG_SIZE 8
185  #define PPC_REG_LOAD ld
186  #define PPC_REG_STORE std
187  #define PPC_REG_STORE_UPDATE stdu
188  #define PPC_REG_CMP cmpd
189#else
190  #define PPC_REG_SIZE 4
191  #define PPC_REG_LOAD lwz
192  #define PPC_REG_STORE stw
193  #define PPC_REG_STORE_UPDATE stwu
194  #define PPC_REG_CMP cmpw
195#endif
196
197#ifndef ASM
198
199/*
200 * Non-volatile context according to E500ABIUG, EABI and 32-bit TLS (according
201 * to "Power Architecture 32-bit Application Binary Interface Supplement 1.0 -
202 * Linux and Embedded")
203 */
204typedef struct {
205  uint32_t msr;
206  uint32_t cr;
207  uintptr_t gpr1;
208  uintptr_t lr;
209  PPC_GPR_TYPE gpr14;
210  PPC_GPR_TYPE gpr15;
211  PPC_GPR_TYPE gpr16;
212  PPC_GPR_TYPE gpr17;
213  PPC_GPR_TYPE gpr18;
214  PPC_GPR_TYPE gpr19;
215  PPC_GPR_TYPE gpr20;
216  PPC_GPR_TYPE gpr21;
217  PPC_GPR_TYPE gpr22;
218  PPC_GPR_TYPE gpr23;
219  PPC_GPR_TYPE gpr24;
220  PPC_GPR_TYPE gpr25;
221  PPC_GPR_TYPE gpr26;
222  PPC_GPR_TYPE gpr27;
223  PPC_GPR_TYPE gpr28;
224  PPC_GPR_TYPE gpr29;
225  PPC_GPR_TYPE gpr30;
226  PPC_GPR_TYPE gpr31;
227  uint32_t isr_dispatch_disable;
228  uint32_t reserved_for_alignment;
229  #if defined(PPC_MULTILIB_ALTIVEC)
230    uint8_t v20[16];
231    uint8_t v21[16];
232    uint8_t v22[16];
233    uint8_t v23[16];
234    uint8_t v24[16];
235    uint8_t v25[16];
236    uint8_t v26[16];
237    uint8_t v27[16];
238    uint8_t v28[16];
239    uint8_t v29[16];
240    uint8_t v30[16];
241    uint8_t v31[16];
242    uint32_t vrsave;
243  #elif defined(__ALTIVEC__)
244    /*
245     * 12 non-volatile vector registers, cache-aligned area for vscr/vrsave
246     * and padding to ensure cache-alignment.  Unfortunately, we can't verify
247     * the cache line size here in the cpukit but altivec support code will
248     * produce an error if this is ever different from 32 bytes.
249     *
250     * Note: it is the BSP/CPU-support's responsibility to save/restore
251     *       volatile vregs across interrupts and exceptions.
252     */
253    uint8_t altivec[16*12 + 32 + PPC_DEFAULT_CACHE_LINE_SIZE];
254  #endif
255  #if defined(PPC_MULTILIB_FPU)
256    double f14;
257    double f15;
258    double f16;
259    double f17;
260    double f18;
261    double f19;
262    double f20;
263    double f21;
264    double f22;
265    double f23;
266    double f24;
267    double f25;
268    double f26;
269    double f27;
270    double f28;
271    double f29;
272    double f30;
273    double f31;
274  #endif
275  /*
276   * The following items are at the structure end, so that we can use dcbz for
277   * the previous items to optimize the context switch.  We must not set the
278   * following items to zero via the dcbz.
279   */
280  uintptr_t tp;
281  #if defined(RTEMS_SMP)
282    volatile uint32_t is_executing;
283  #endif
284} ppc_context;
285
286typedef struct {
287  uint8_t context [
288    PPC_DEFAULT_CACHE_LINE_SIZE
289      + sizeof(ppc_context)
290      + (sizeof(ppc_context) % PPC_DEFAULT_CACHE_LINE_SIZE == 0
291        ? 0
292          : PPC_DEFAULT_CACHE_LINE_SIZE
293            - sizeof(ppc_context) % PPC_DEFAULT_CACHE_LINE_SIZE)
294  ];
295} Context_Control;
296
297static inline ppc_context *ppc_get_context( const Context_Control *context )
298{
299  uintptr_t clsz = PPC_DEFAULT_CACHE_LINE_SIZE;
300  uintptr_t mask = clsz - 1;
301  uintptr_t addr = (uintptr_t) context;
302
303  return (ppc_context *) ((addr & ~mask) + clsz);
304}
305
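/*
 * Illustration (not part of the original header): the array in
 * Context_Control reserves one spare cache line plus padding so that a
 * cache-line-aligned ppc_context always fits inside it.  The hypothetical
 * helper below isolates the round-up step performed by ppc_get_context():
 * it advances to the next multiple of the line size, even when the
 * address is already aligned.
 */
static inline uintptr_t ppc_align_to_next_cache_line(
  uintptr_t addr,
  uintptr_t clsz
)
{
  /* e.g. with clsz == 32: 0x1004 -> 0x1020 and 0x1000 -> 0x1020 */
  return ( addr & ~( clsz - 1 ) ) + clsz;
}
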
306#define _CPU_Context_Get_SP( _context ) \
307  ppc_get_context(_context)->gpr1
308
309#ifdef RTEMS_SMP
310  static inline bool _CPU_Context_Get_is_executing(
311    const Context_Control *context
312  )
313  {
314    return ppc_get_context(context)->is_executing;
315  }
316
317  static inline void _CPU_Context_Set_is_executing(
318    Context_Control *context,
319    bool is_executing
320  )
321  {
322    ppc_get_context(context)->is_executing = is_executing;
323  }
324#endif
325#endif /* ASM */
326
327#define PPC_CONTEXT_OFFSET_MSR (PPC_DEFAULT_CACHE_LINE_SIZE)
328#define PPC_CONTEXT_OFFSET_CR (PPC_DEFAULT_CACHE_LINE_SIZE + 4)
329#define PPC_CONTEXT_OFFSET_GPR1 (PPC_DEFAULT_CACHE_LINE_SIZE + 8)
330#define PPC_CONTEXT_OFFSET_LR (PPC_DEFAULT_CACHE_LINE_SIZE + PPC_REG_SIZE + 8)
331
332#define PPC_CONTEXT_GPR_OFFSET( gpr ) \
333  (((gpr) - 14) * PPC_GPR_SIZE + \
334    PPC_DEFAULT_CACHE_LINE_SIZE + 8 + 2 * PPC_REG_SIZE)
335
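/*
 * Worked example (an editorial illustration, assuming
 * PPC_DEFAULT_CACHE_LINE_SIZE == 32 and a 32-bit build where
 * PPC_REG_SIZE == PPC_GPR_SIZE == 4):
 *
 *   PPC_CONTEXT_OFFSET_MSR       == 32  (msr  at ppc_context offset  0)
 *   PPC_CONTEXT_OFFSET_CR        == 36  (cr   at ppc_context offset  4)
 *   PPC_CONTEXT_OFFSET_GPR1     == 40  (gpr1 at ppc_context offset  8)
 *   PPC_CONTEXT_OFFSET_LR        == 44  (lr   at ppc_context offset 12)
 *   PPC_CONTEXT_GPR_OFFSET( 14 ) == (14 - 14) * 4 + 32 + 8 + 2 * 4 == 48
 *   PPC_CONTEXT_GPR_OFFSET( 15 ) == 52, and so on through gpr31
 *
 * These values match the ppc_context layout when the structure starts
 * exactly one cache line past the Context_Control base, which is what
 * ppc_get_context() arranges for a cache-line-aligned Context_Control.
 */
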
336#define PPC_CONTEXT_OFFSET_GPR14 PPC_CONTEXT_GPR_OFFSET( 14 )
337#define PPC_CONTEXT_OFFSET_GPR15 PPC_CONTEXT_GPR_OFFSET( 15 )
338#define PPC_CONTEXT_OFFSET_GPR16 PPC_CONTEXT_GPR_OFFSET( 16 )
339#define PPC_CONTEXT_OFFSET_GPR17 PPC_CONTEXT_GPR_OFFSET( 17 )
340#define PPC_CONTEXT_OFFSET_GPR18 PPC_CONTEXT_GPR_OFFSET( 18 )
341#define PPC_CONTEXT_OFFSET_GPR19 PPC_CONTEXT_GPR_OFFSET( 19 )
342#define PPC_CONTEXT_OFFSET_GPR20 PPC_CONTEXT_GPR_OFFSET( 20 )
343#define PPC_CONTEXT_OFFSET_GPR21 PPC_CONTEXT_GPR_OFFSET( 21 )
344#define PPC_CONTEXT_OFFSET_GPR22 PPC_CONTEXT_GPR_OFFSET( 22 )
345#define PPC_CONTEXT_OFFSET_GPR23 PPC_CONTEXT_GPR_OFFSET( 23 )
346#define PPC_CONTEXT_OFFSET_GPR24 PPC_CONTEXT_GPR_OFFSET( 24 )
347#define PPC_CONTEXT_OFFSET_GPR25 PPC_CONTEXT_GPR_OFFSET( 25 )
348#define PPC_CONTEXT_OFFSET_GPR26 PPC_CONTEXT_GPR_OFFSET( 26 )
349#define PPC_CONTEXT_OFFSET_GPR27 PPC_CONTEXT_GPR_OFFSET( 27 )
350#define PPC_CONTEXT_OFFSET_GPR28 PPC_CONTEXT_GPR_OFFSET( 28 )
351#define PPC_CONTEXT_OFFSET_GPR29 PPC_CONTEXT_GPR_OFFSET( 29 )
352#define PPC_CONTEXT_OFFSET_GPR30 PPC_CONTEXT_GPR_OFFSET( 30 )
353#define PPC_CONTEXT_OFFSET_GPR31 PPC_CONTEXT_GPR_OFFSET( 31 )
354#define PPC_CONTEXT_OFFSET_ISR_DISPATCH_DISABLE PPC_CONTEXT_GPR_OFFSET( 32 )
355
356#ifdef PPC_MULTILIB_ALTIVEC
357  #define PPC_CONTEXT_OFFSET_V( v ) \
358    ( ( ( v ) - 20 ) * 16 + PPC_CONTEXT_OFFSET_ISR_DISPATCH_DISABLE + 8)
359  #define PPC_CONTEXT_OFFSET_V20 PPC_CONTEXT_OFFSET_V( 20 )
360  #define PPC_CONTEXT_OFFSET_V21 PPC_CONTEXT_OFFSET_V( 21 )
361  #define PPC_CONTEXT_OFFSET_V22 PPC_CONTEXT_OFFSET_V( 22 )
362  #define PPC_CONTEXT_OFFSET_V23 PPC_CONTEXT_OFFSET_V( 23 )
363  #define PPC_CONTEXT_OFFSET_V24 PPC_CONTEXT_OFFSET_V( 24 )
364  #define PPC_CONTEXT_OFFSET_V25 PPC_CONTEXT_OFFSET_V( 25 )
365  #define PPC_CONTEXT_OFFSET_V26 PPC_CONTEXT_OFFSET_V( 26 )
366  #define PPC_CONTEXT_OFFSET_V27 PPC_CONTEXT_OFFSET_V( 27 )
367  #define PPC_CONTEXT_OFFSET_V28 PPC_CONTEXT_OFFSET_V( 28 )
368  #define PPC_CONTEXT_OFFSET_V29 PPC_CONTEXT_OFFSET_V( 29 )
369  #define PPC_CONTEXT_OFFSET_V30 PPC_CONTEXT_OFFSET_V( 30 )
370  #define PPC_CONTEXT_OFFSET_V31 PPC_CONTEXT_OFFSET_V( 31 )
371  #define PPC_CONTEXT_OFFSET_VRSAVE PPC_CONTEXT_OFFSET_V( 32 )
372  #define PPC_CONTEXT_OFFSET_F( f ) \
373    ( ( ( f ) - 14 ) * 8 + PPC_CONTEXT_OFFSET_VRSAVE + 8 )
374#else
375  #define PPC_CONTEXT_OFFSET_F( f ) \
376    ( ( ( f ) - 14 ) * 8 + PPC_CONTEXT_OFFSET_ISR_DISPATCH_DISABLE + 8 )
377#endif
378
379#ifdef PPC_MULTILIB_FPU
380  #define PPC_CONTEXT_OFFSET_F14 PPC_CONTEXT_OFFSET_F( 14 )
381  #define PPC_CONTEXT_OFFSET_F15 PPC_CONTEXT_OFFSET_F( 15 )
382  #define PPC_CONTEXT_OFFSET_F16 PPC_CONTEXT_OFFSET_F( 16 )
383  #define PPC_CONTEXT_OFFSET_F17 PPC_CONTEXT_OFFSET_F( 17 )
384  #define PPC_CONTEXT_OFFSET_F18 PPC_CONTEXT_OFFSET_F( 18 )
385  #define PPC_CONTEXT_OFFSET_F19 PPC_CONTEXT_OFFSET_F( 19 )
386  #define PPC_CONTEXT_OFFSET_F20 PPC_CONTEXT_OFFSET_F( 20 )
387  #define PPC_CONTEXT_OFFSET_F21 PPC_CONTEXT_OFFSET_F( 21 )
388  #define PPC_CONTEXT_OFFSET_F22 PPC_CONTEXT_OFFSET_F( 22 )
389  #define PPC_CONTEXT_OFFSET_F23 PPC_CONTEXT_OFFSET_F( 23 )
390  #define PPC_CONTEXT_OFFSET_F24 PPC_CONTEXT_OFFSET_F( 24 )
391  #define PPC_CONTEXT_OFFSET_F25 PPC_CONTEXT_OFFSET_F( 25 )
392  #define PPC_CONTEXT_OFFSET_F26 PPC_CONTEXT_OFFSET_F( 26 )
393  #define PPC_CONTEXT_OFFSET_F27 PPC_CONTEXT_OFFSET_F( 27 )
394  #define PPC_CONTEXT_OFFSET_F28 PPC_CONTEXT_OFFSET_F( 28 )
395  #define PPC_CONTEXT_OFFSET_F29 PPC_CONTEXT_OFFSET_F( 29 )
396  #define PPC_CONTEXT_OFFSET_F30 PPC_CONTEXT_OFFSET_F( 30 )
397  #define PPC_CONTEXT_OFFSET_F31 PPC_CONTEXT_OFFSET_F( 31 )
398#endif
399
400#if defined(PPC_MULTILIB_FPU)
401  #define PPC_CONTEXT_VOLATILE_SIZE PPC_CONTEXT_OFFSET_F( 32 )
402#elif defined(PPC_MULTILIB_ALTIVEC)
403  #define PPC_CONTEXT_VOLATILE_SIZE (PPC_CONTEXT_OFFSET_VRSAVE + 4)
404#elif defined(__ALTIVEC__)
405  #define PPC_CONTEXT_VOLATILE_SIZE \
406    (PPC_CONTEXT_GPR_OFFSET( 32 ) + 8 \
407      + 16 * 12 + 32 + PPC_DEFAULT_CACHE_LINE_SIZE)
408#else
409  #define PPC_CONTEXT_VOLATILE_SIZE (PPC_CONTEXT_GPR_OFFSET( 32 ) + 8)
410#endif
411
412#define PPC_CONTEXT_OFFSET_TP PPC_CONTEXT_VOLATILE_SIZE
413
414#ifdef RTEMS_SMP
415  #define PPC_CONTEXT_OFFSET_IS_EXECUTING \
416    (PPC_CONTEXT_OFFSET_TP + PPC_REG_SIZE)
417#endif
418
419#ifndef ASM
420typedef struct {
421#if (PPC_HAS_FPU == 1)
422    /* The ABIs (PowerOpen/SVR4/EABI) only require saving f14-f31 over
423     * procedure calls.  However, this would mean that the interrupt
424     * frame had to hold f0-f13, and the fpscr.  And as the majority
425     * of tasks will not have an FP context, we will save the whole
426     * context here.
427     */
428#if (PPC_HAS_DOUBLE == 1)
429    double      f[32];
430    uint64_t    fpscr;
431#else
432    float       f[32];
433    uint32_t    fpscr;
434#endif
435#endif /* (PPC_HAS_FPU == 1) */
436} Context_Control_fp;
437
438#endif /* ASM */
439
440/*
441 *  Does the CPU follow the simple vectored interrupt model?
442 *
443 *  If TRUE, then RTEMS allocates and internally manages the vector table.
444 *  If FALSE, then the BSP is assumed to allocate and manage the vector
445 *  table.
446 *
447 *  PowerPC Specific Information:
448 *
449 *  The PowerPC and x86 were the first to use the PIC interrupt model.
450 *  They do not use the simple vectored interrupt model.
451 */
452#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE
453
454/*
455 *  Does RTEMS invoke the user's ISR with the vector number and
456 *  a pointer to the saved interrupt frame (1) or just the vector
457 *  number (0)?
458 */
459
460#define CPU_ISR_PASSES_FRAME_POINTER FALSE
461
462/*
463 *  Should the saving of the floating point registers be deferred
464 *  until a context switch is made to another different floating point
465 *  task?
466 *
467 *  If TRUE, then the floating point context will not be stored until
468 *  necessary.  It will remain in the floating point registers and not
469 *  be disturbed until another floating point task is switched to.
470 *
471 *  If FALSE, then the floating point context is saved when a floating
472 *  point task is switched out and restored when the next floating point
473 *  task is restored.  The state of the floating point registers between
474 *  those two operations is not specified.
475 *
476 *  If the floating point context does NOT have to be saved as part of
477 *  interrupt dispatching, then it should be safe to set this to TRUE.
478 *
479 *  Setting this flag to TRUE results in using a different algorithm
480 *  for deciding when to save and restore the floating point context.
481 *  The deferred FP switch algorithm minimizes the number of times
482 *  the FP context is saved and restored.  The FP context is not saved
483 *  until a context switch is made to another, different FP task.
484 *  Thus in a system with only one FP task, the FP context will never
485 *  be saved or restored.
486 *
487 *  Note, however that compilers may use floating point registers/
488 *  instructions for optimization or they may save/restore FP registers
489 *  on the stack. You must not use deferred switching in these cases
490 *  and on the PowerPC attempting to do so will raise a "FP unavailable"
491 *  exception.
492 */
493/*
494 *  ACB Note:  This could make debugging tricky.
495 */
496
497/* conservative setting (FALSE); probably doesn't affect performance too much */
498#define CPU_USE_DEFERRED_FP_SWITCH       FALSE
499
500#define CPU_ENABLE_ROBUST_THREAD_DISPATCH FALSE
501
502/*
503 *  Processor defined structures required for cpukit/score.
504 */
505
506#ifndef ASM
507
508/*
509 *  This variable is optional.  It is used on CPUs on which it is difficult
510 *  to generate an "uninitialized" FP context.  It is filled in by
511 *  _CPU_Initialize and copied into the task's FP context area during
512 *  _CPU_Context_Initialize.
513 */
514
515/* EXTERN Context_Control_fp  _CPU_Null_fp_context; */
516
517#endif /* ndef ASM */
518
519/*
520 *  This defines the number of levels and the mask used to pick those
521 *  bits out of a thread mode.
522 */
523
524#define CPU_MODES_INTERRUPT_MASK   0x00000001 /* interrupt level in mode */
525
526/*
527 *  The size of the floating point context area.  On some CPUs this
528 *  will not be a "sizeof" because the format of the floating point
529 *  area is not defined -- only the size is.  This is usually on
530 *  CPUs with a "floating point save context" instruction.
531 */
532
533#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
534
535/*
536 * (Optional) # of bytes for libmisc/stackchk to check
537 * If not specified, then it defaults to something reasonable
538 * for most architectures.
539 */
540
541#define CPU_STACK_CHECK_PATTERN_INITIALIZER \
542  { 0xFEEDF00D, 0x0BAD0D06, 0xDEADF00D, 0x600D0D06, \
543    0xFEEDF00D, 0x0BAD0D06, 0xDEADF00D, 0x600D0D06, \
544    0xFEEDF00D, 0x0BAD0D06, 0xDEADF00D, 0x600D0D06, \
545    0xFEEDF00D, 0x0BAD0D06, 0xDEADF00D, 0x600D0D06, \
546    0xFEEDF00D, 0x0BAD0D06, 0xDEADF00D, 0x600D0D06, \
547    0xFEEDF00D, 0x0BAD0D06, 0xDEADF00D, 0x600D0D06, \
548    0xFEEDF00D, 0x0BAD0D06, 0xDEADF00D, 0x600D0D06, \
549    0xFEEDF00D, 0x0BAD0D06, 0xDEADF00D, 0x600D0D06 }
550
551/*
552 *  Amount of extra stack (above minimum stack size) required by
553 *  MPCI receive server thread.  Remember that in a multiprocessor
554 *  system this thread must exist and be able to process all directives.
555 */
556
557#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
558
559/*
560 *  This is defined if the port has a special way to report the ISR nesting
561 *  level.  Most ports maintain the variable _ISR_Nest_level. Note that
562 *  this is not an option - RTEMS/score _relies_ on _ISR_Nest_level
563 *  being maintained (e.g. watchdog queues).
564 */
565
566#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
567
568/*
569 *  ISR handler macros
570 */
571
572/*
573 *  Disable all interrupts for an RTEMS critical section.  The previous
574 *  level is returned in _isr_cookie.
575 */
576
577#ifndef ASM
578
579RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
580{
581  return ( level & MSR_EE ) != 0;
582}
583
584#if !defined(PPC_DISABLE_INLINE_ISR_DISABLE_ENABLE)
585
586static inline uint32_t   _CPU_ISR_Get_level( void )
587{
588  uint32_t msr;
589  _CPU_MSR_GET(msr);
590  if (msr & MSR_EE) return 0;
591  else  return 1;
592}
593
594static inline void _CPU_ISR_Set_level( uint32_t   level )
595{
596  uint32_t msr;
597  _CPU_MSR_GET(msr);
598  if (!(level & CPU_MODES_INTERRUPT_MASK)) {
599    msr |= ppc_interrupt_get_disable_mask();
600  }
601  else {
602    msr &= ~ppc_interrupt_get_disable_mask();
603  }
604  _CPU_MSR_SET(msr);
605}
606#else
607/* disable, enable, etc. are in registers.h */
608uint32_t ppc_get_interrupt_level( void );
609void ppc_set_interrupt_level( uint32_t level );
610#define _CPU_ISR_Get_level( _new_level ) ppc_get_interrupt_level()
611#define _CPU_ISR_Set_level( _new_level ) ppc_set_interrupt_level(_new_level)
612#endif
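/*
 * Hypothetical usage sketch (not part of the original header): sample the
 * MSR and test the external exception enable bit via _CPU_ISR_Is_enabled().
 */
static inline bool ppc_external_exceptions_are_enabled( void )
{
  uint32_t msr;

  _CPU_MSR_GET( msr );
  return _CPU_ISR_Is_enabled( msr );
}
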
613
614#endif /* ASM */
615
616#define _CPU_Fatal_halt( _source, _error ) \
617  do { \
618    ppc_interrupt_disable(); \
619    __asm__ volatile ( \
620      "mr 3, %0\n" \
621      "mr 4, %1\n" \
622      "1:\n" \
623      "b 1b\n" \
624      : \
625      : "r" (_source), "r" (_error) \
626      : "memory" \
627    ); \
628  } while ( 0 )
629
630/*
631 *  Should be large enough to run all RTEMS tests.  This ensures
632 *  that a "reasonable" small application should not have any problems.
633 */
634
635#define CPU_STACK_MINIMUM_SIZE          (1024*8)
636
637#if defined(__powerpc64__)
638#define CPU_SIZEOF_POINTER 8
639#else
640#define CPU_SIZEOF_POINTER 4
641#endif
642
643/*
644 *  CPU's worst alignment requirement for data types on a byte boundary.  This
645 *  alignment does not take into account the requirements for the stack.
646 */
647
648#define CPU_ALIGNMENT              (PPC_ALIGNMENT)
649
650/*
651 *  This number corresponds to the byte alignment requirement for the
652 *  heap handler.  This alignment requirement may be stricter than that
653 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
654 *  common for the heap to follow the same alignment requirement as
655 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
656 *  then this should be set to CPU_ALIGNMENT.
657 *
658 *  NOTE:  This does not have to be a power of 2.  It does have to
659 *         be greater than or equal to CPU_ALIGNMENT.
660 */
661
662#define CPU_HEAP_ALIGNMENT         (PPC_ALIGNMENT)
663
664#define CPU_STACK_ALIGNMENT        (PPC_STACK_ALIGNMENT)
665
666#define CPU_INTERRUPT_STACK_ALIGNMENT CPU_CACHE_LINE_BYTES
667
668#ifndef ASM
669/*  The following routine swaps the endian format of an unsigned int.
670 *  It must be static because it is referenced indirectly.
671 *
672 *  This version will work on any processor, but if there is a better
673 *  way for your CPU PLEASE use it.  The most common way to do this is to:
674 *
675 *     swap least significant two bytes with 16-bit rotate
676 *     swap upper and lower 16-bits
677 *     swap most significant two bytes with 16-bit rotate
678 *
679 *  Some CPUs have special instructions which swap a 32-bit quantity in
680 *  a single instruction (e.g. i486).  It is probably best to avoid
681 *  an "endian swapping control bit" in the CPU.  One good reason is
682 *  that interrupts would probably have to be disabled to ensure that
683 *  an interrupt does not try to access the same "chunk" with the wrong
684 *  endian.  Another good reason is that on some CPUs, the endian bit
685 *  changes the endianness of ALL fetches -- both code and data -- so the code
686 *  will be fetched incorrectly.
687 */
688
689static inline uint32_t CPU_swap_u32(
690  uint32_t value
691)
692{
693  uint32_t   swapped;
694
695  __asm__ volatile("rlwimi %0,%1,8,24,31;"
696               "rlwimi %0,%1,24,16,23;"
697               "rlwimi %0,%1,8,8,15;"
698               "rlwimi %0,%1,24,0,7;" :
699               "=&r" ((swapped)) : "r" ((value)));
700
701  return( swapped );
702}
703
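/*
 * For comparison, a portable C equivalent of the rlwimi sequence above
 * (an editorial sketch with a hypothetical name, not part of the original
 * header): build the byte-reversed word with plain shifts and masks.
 */
static inline uint32_t CPU_swap_u32_portable( uint32_t value )
{
  return ( ( value & 0x000000ffU ) << 24 )
    | ( ( value & 0x0000ff00U ) <<  8 )
    | ( ( value & 0x00ff0000U ) >>  8 )
    | ( ( value & 0xff000000U ) >> 24 );
}
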
704#define CPU_swap_u16( value ) \
705  (((value&0xff) << 8) | ((value >> 8)&0xff))
706
707typedef uint32_t CPU_Counter_ticks;
708
709uint32_t _CPU_Counter_frequency( void );
710
711static inline CPU_Counter_ticks _CPU_Counter_read( void )
712{
713  CPU_Counter_ticks value;
714
715#if defined(__PPC_CPU_E6500__)
716  /* Use Alternate Time Base */
717  __asm__ volatile( "mfspr %0, 526" : "=r" (value) );
718#elif defined(mpc860)
719  __asm__ volatile( "mftb %0" : "=r" (value) );
720#else
721  __asm__ volatile( "mfspr %0, 268" : "=r" (value) );
722#endif
723
724  return value;
725}
726
727static inline CPU_Counter_ticks _CPU_Counter_difference(
728  CPU_Counter_ticks second,
729  CPU_Counter_ticks first
730)
731{
732  return second - first;
733}
734
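/*
 * Hypothetical usage sketch (not part of the original header): bracket a
 * code section with two counter reads and form the elapsed ticks with
 * _CPU_Counter_difference().  Ticks can be converted to time using
 * _CPU_Counter_frequency().
 */
static inline CPU_Counter_ticks ppc_measure_ticks( void ( *fn )( void ) )
{
  CPU_Counter_ticks begin;
  CPU_Counter_ticks end;

  begin = _CPU_Counter_read();
  ( *fn )();
  end = _CPU_Counter_read();

  return _CPU_Counter_difference( end, begin );
}
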
735#endif /* ASM */
736
737
738#ifndef ASM
739/* Context handler macros */
740
741/*
742 *  Initialize the context to a state suitable for starting a
743 *  task after a context restore operation.  Generally, this
744 *  involves:
745 *
746 *     - setting a starting address
747 *     - preparing the stack
748 *     - preparing the stack and frame pointers
749 *     - setting the proper interrupt level in the context
750 *     - initializing the floating point context
751 *
752 *  This routine generally does not set any unnecessary register
753 *  in the context.  The state of the "general data" registers is
754 *  undefined at task start time.
755 */
756
757void _CPU_Context_Initialize(
758  Context_Control  *the_context,
759  void             *stack_base,
760  size_t            size,
761  uint32_t          new_level,
762  void             *entry_point,
763  bool              is_fp,
764  void             *tls_area
765);
766
767/*
768 *  This routine is responsible for somehow restarting the currently
769 *  executing task.  If you are lucky, then all that is necessary
770 *  is restoring the context.  Otherwise, there will need to be
771 *  a special assembly routine which does something special in this
772 *  case.  Context_Restore should work most of the time.  It will
773 *  not work if restarting self conflicts with the stack frame
774 *  assumptions of restoring a context.
775 */
776
777#define _CPU_Context_Restart_self( _the_context ) \
778   _CPU_Context_restore( (_the_context) );
779
780/*
781 *  This routine initializes the FP context area passed to it.
782 *  There are a few standard ways in which to initialize the
783 *  floating point context.  The code included for this macro assumes
784 *  that this is a CPU in which an "initial" FP context was saved into
785 *  _CPU_Null_fp_context and it simply copies it to the destination
786 *  context passed to it.
787 *
788 *  Other models include (1) not doing anything, and (2) putting
789 *  a "null FP status word" in the correct place in the FP context.
790 */
791
792#define _CPU_Context_Initialize_fp( _destination ) \
793  memset( *(_destination), 0, sizeof( **(_destination) ) )
794
795/* end of Context handler macros */
796#endif /* ASM */
797
798#ifndef ASM
799
800#define CPU_USE_LIBC_INIT_FINI_ARRAY FALSE
801
802/* Bitfield handler macros */
803
804#define CPU_USE_GENERIC_BITFIELD_CODE FALSE
805
806/*
807 *  This routine sets _output to the bit number of the first bit
808 *  set in _value.  _value is of CPU dependent type Priority_bit_map_Word.
809 *  This type may be either 16 or 32 bits wide although only the 16
810 *  least significant bits will be used.
811 *
812 *  There are a number of variations in using a "find first bit" type
813 *  instruction.
814 *
815 *    (1) What happens when run on a value of zero?
816 *    (2) Bits may be numbered from MSB to LSB or vice-versa.
817 *    (3) The numbering may be zero or one based.
818 *    (4) The "find first bit" instruction may search from MSB or LSB.
819 *
820 *  RTEMS guarantees that (1) will never happen so it is not a concern.
821 *  (2), (3), and (4) are handled by the macros _CPU_Priority_Mask() and
822 *  _CPU_Priority_bits_index().  These three form a set of routines
823 *  which must logically operate together.  Bits in the _value are
824 *  set and cleared based on masks built by _CPU_Priority_Mask().
825 *  The basic major and minor values calculated by _Priority_Major()
826 *  and _Priority_Minor() are "massaged" by _CPU_Priority_bits_index()
827 *  to properly range between the values returned by the "find first bit"
828 *  instruction.  This makes it possible for _Priority_Get_highest() to
829 *  calculate the major and directly index into the minor table.
830 *  This mapping is necessary to ensure that 0 (a high priority major/minor)
831 *  is the first bit found.
832 *
833 *  This entire "find first bit" and mapping process depends heavily
834 *  on the manner in which a priority is broken into a major and minor
835 *  components with the major being the 4 MSB of a priority and minor
836 *  the 4 LSB.  Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
837 *  priority.  And (15 << 4) + 14 corresponds to priority 254 -- the next
838 *  to the lowest priority.
839 *
840 *  If your CPU does not have a "find first bit" instruction, then
841 *  there are ways to make do without it.  Here are a handful of ways
842 *  to implement this in software:
843 *
844 *    - a series of 16 bit test instructions
845 *    - a "binary search using if's"
846 *    - _number = 0
847 *      if _value > 0x00ff
848 *        _value >>= 8
849 *        _number = 8;
850 *
851 *      if _value > 0x000f
852 *        _value >>= 4
853 *        _number += 4
854 *
855 *      _number += bit_set_table[ _value ]
856 *
857 *    where bit_set_table[ 16 ] has values which indicate the first
858 *      bit set
859 */
860
861#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
862  { \
863    __asm__ volatile ("cntlzw %0, %1" : "=r" ((_output)), "=r" ((_value)) : \
864                  "1" ((_value))); \
865    (_output) = (_output) - 16; \
866  }
867
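/*
 * Hypothetical software fallback (an editorial sketch, not part of the
 * original header) along the lines of the table-based scheme described
 * above.  It returns the same MSB-relative bit number as the cntlzw-based
 * macro: 0 for 0x8000 through 15 for 0x0001.  A zero input never occurs
 * (RTEMS guarantees this), so it is not handled.
 */
static inline unsigned int ppc_find_first_bit_sw( unsigned int value )
{
  /* Index of the highest set bit within a 4-bit value */
  static const unsigned char highest_bit_of_nibble[ 16 ] = {
    0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
  };
  unsigned int number = 0;

  if ( value > 0xff ) {
    value >>= 8;
    number = 8;
  }

  if ( value > 0xf ) {
    value >>= 4;
    number += 4;
  }

  number += highest_bit_of_nibble[ value ];

  /* Convert the LSB-relative bit number to the MSB-relative numbering
   * used by _CPU_Bitfield_Find_first_bit()
   */
  return 15 - number;
}
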
868/* end of Bitfield handler macros */
869
870/*
871 *  This routine builds the mask which corresponds to the bit fields
872 *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
873 *  for that routine.
874 */
875
876#define _CPU_Priority_Mask( _bit_number ) \
877  ( 0x8000u >> (_bit_number) )
878
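/*
 * Worked example (editorial): _CPU_Priority_Mask( 0 ) == 0x8000, which is
 * exactly the bit that _CPU_Bitfield_Find_first_bit() reports as bit 0,
 * and _CPU_Priority_Mask( 15 ) == 0x0001.
 */
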
879/*
880 *  This routine translates the bit numbers returned by
881 *  _CPU_Bitfield_Find_first_bit() into something suitable for use as
882 *  a major or minor component of a priority.  See the discussion
883 *  for that routine.
884 */
885
886#define _CPU_Priority_bits_index( _priority ) \
887  (_priority)
888
889/* end of Priority handler macros */
890#endif /* ASM */
891
892/* functions */
893
894#ifndef ASM
895
896/*
897 *  _CPU_Initialize
898 *
899 *  This routine performs CPU dependent initialization.
900 */
901
902void _CPU_Initialize(void);
903
904void *_CPU_Thread_Idle_body( uintptr_t ignored );
905
906/*
907 *  _CPU_Context_switch
908 *
909 *  This routine switches from the run context to the heir context.
910 */
911
912void _CPU_Context_switch(
913  Context_Control  *run,
914  Context_Control  *heir
915);
916
917RTEMS_NO_RETURN void _CPU_Context_switch_no_return(
918  Context_Control *executing,
919  Context_Control *heir
920);
921
922/*
923 *  _CPU_Context_restore
924 *
925 *  This routine is generally used only to restart self in an
926 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
927 *
928 *  NOTE: May be unnecessary to reload some registers.
929 */
930
931RTEMS_NO_RETURN void _CPU_Context_restore( Context_Control *new_context );
932
933/*
934 *  _CPU_Context_save_fp
935 *
936 *  This routine saves the floating point context passed to it.
937 */
938
939void _CPU_Context_save_fp(
940  Context_Control_fp **fp_context_ptr
941);
942
943/*
944 *  _CPU_Context_restore_fp
945 *
946 *  This routine restores the floating point context passed to it.
947 */
948
949void _CPU_Context_restore_fp(
950  Context_Control_fp **fp_context_ptr
951);
952
953#ifdef RTEMS_SMP
954  uint32_t _CPU_SMP_Initialize( void );
955
956  bool _CPU_SMP_Start_processor( uint32_t cpu_index );
957
958  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );
959
960  void _CPU_SMP_Prepare_start_multitasking( void );
961
962  static inline uint32_t _CPU_SMP_Get_current_processor( void )
963  {
964    uint32_t pir;
965
966    /* Use Book E Processor ID Register (PIR) */
967    __asm__ volatile (
968      "mfspr %[pir], 286"
969      : [pir] "=&r" (pir)
970    );
971
972    return pir;
973  }
974
975  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );
976#endif
977
978typedef struct {
979  uintptr_t EXC_SRR0;
980  uintptr_t EXC_SRR1;
981  uint32_t _EXC_number;
982  uint32_t RESERVED_FOR_ALIGNMENT_0;
983  uint32_t EXC_CR;
984  uint32_t EXC_XER;
985  uintptr_t EXC_CTR;
986  uintptr_t EXC_LR;
987  uintptr_t RESERVED_FOR_ALIGNMENT_1;
988  #ifdef __SPE__
989    uint32_t EXC_SPEFSCR;
990    uint64_t EXC_ACC;
991  #endif
992  PPC_GPR_TYPE GPR0;
993  PPC_GPR_TYPE GPR1;
994  PPC_GPR_TYPE GPR2;
995  PPC_GPR_TYPE GPR3;
996  PPC_GPR_TYPE GPR4;
997  PPC_GPR_TYPE GPR5;
998  PPC_GPR_TYPE GPR6;
999  PPC_GPR_TYPE GPR7;
1000  PPC_GPR_TYPE GPR8;
1001  PPC_GPR_TYPE GPR9;
1002  PPC_GPR_TYPE GPR10;
1003  PPC_GPR_TYPE GPR11;
1004  PPC_GPR_TYPE GPR12;
1005  PPC_GPR_TYPE GPR13;
1006  PPC_GPR_TYPE GPR14;
1007  PPC_GPR_TYPE GPR15;
1008  PPC_GPR_TYPE GPR16;
1009  PPC_GPR_TYPE GPR17;
1010  PPC_GPR_TYPE GPR18;
1011  PPC_GPR_TYPE GPR19;
1012  PPC_GPR_TYPE GPR20;
1013  PPC_GPR_TYPE GPR21;
1014  PPC_GPR_TYPE GPR22;
1015  PPC_GPR_TYPE GPR23;
1016  PPC_GPR_TYPE GPR24;
1017  PPC_GPR_TYPE GPR25;
1018  PPC_GPR_TYPE GPR26;
1019  PPC_GPR_TYPE GPR27;
1020  PPC_GPR_TYPE GPR28;
1021  PPC_GPR_TYPE GPR29;
1022  PPC_GPR_TYPE GPR30;
1023  PPC_GPR_TYPE GPR31;
1024  uintptr_t RESERVED_FOR_ALIGNMENT_2;
1025  #ifdef PPC_MULTILIB_ALTIVEC
1026    uint32_t VRSAVE;
1027    uint32_t RESERVED_FOR_ALIGNMENT_3[3];
1028
1029    /* This field must take stvewx/lvewx requirements into account */
1030    uint32_t RESERVED_FOR_ALIGNMENT_4[3];
1031    uint32_t VSCR;
1032
1033    uint8_t V0[16];
1034    uint8_t V1[16];
1035    uint8_t V2[16];
1036    uint8_t V3[16];
1037    uint8_t V4[16];
1038    uint8_t V5[16];
1039    uint8_t V6[16];
1040    uint8_t V7[16];
1041    uint8_t V8[16];
1042    uint8_t V9[16];
1043    uint8_t V10[16];
1044    uint8_t V11[16];
1045    uint8_t V12[16];
1046    uint8_t V13[16];
1047    uint8_t V14[16];
1048    uint8_t V15[16];
1049    uint8_t V16[16];
1050    uint8_t V17[16];
1051    uint8_t V18[16];
1052    uint8_t V19[16];
1053    uint8_t V20[16];
1054    uint8_t V21[16];
1055    uint8_t V22[16];
1056    uint8_t V23[16];
1057    uint8_t V24[16];
1058    uint8_t V25[16];
1059    uint8_t V26[16];
1060    uint8_t V27[16];
1061    uint8_t V28[16];
1062    uint8_t V29[16];
1063    uint8_t V30[16];
1064    uint8_t V31[16];
1065  #endif
1066  #ifdef PPC_MULTILIB_FPU
1067    double F0;
1068    double F1;
1069    double F2;
1070    double F3;
1071    double F4;
1072    double F5;
1073    double F6;
1074    double F7;
1075    double F8;
1076    double F9;
1077    double F10;
1078    double F11;
1079    double F12;
1080    double F13;
1081    double F14;
1082    double F15;
1083    double F16;
1084    double F17;
1085    double F18;
1086    double F19;
1087    double F20;
1088    double F21;
1089    double F22;
1090    double F23;
1091    double F24;
1092    double F25;
1093    double F26;
1094    double F27;
1095    double F28;
1096    double F29;
1097    double F30;
1098    double F31;
1099    uint64_t FPSCR;
1100    uint64_t RESERVED_FOR_ALIGNMENT_5;
1101  #endif
1102} CPU_Exception_frame;
1103
1104void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
1105
1106/*
1107 * _CPU_Initialize_altivec()
1108 *
1109 * Global altivec-related initialization.
1110 */
1111void
1112_CPU_Initialize_altivec(void);
1113
1114/*
1115 * _CPU_Context_switch_altivec
1116 *
1117 * This routine switches the altivec contexts passed to it.
1118 */
1119
1120void
1121_CPU_Context_switch_altivec(
1122  ppc_context *from,
1123  ppc_context *to
1124);
1125
1126/*
1127 * _CPU_Context_restore_altivec
1128 *
1129 * This routine restores the altivec context passed to it.
1130 */
1131
1132void
1133_CPU_Context_restore_altivec(
1134  ppc_context *ctxt
1135);
1136
1137/*
1138 * _CPU_Context_initialize_altivec
1139 *
1140 * This routine initializes the altivec context passed to it.
1141 */
1142
1143void
1144_CPU_Context_initialize_altivec(
1145  ppc_context *ctxt
1146);
1147
1148void _CPU_Fatal_error(
1149  uint32_t   _error
1150);
1151
1152/** Type that can store a 32-bit integer or a pointer. */
1153typedef uintptr_t CPU_Uint32ptr;
1154
1155#endif /* ASM */
1156
1157#ifdef __cplusplus
1158}
1159#endif
1160
1161#endif /* _RTEMS_SCORE_CPU_H */