source: rtems/cpukit/score/cpu/powerpc/rtems/score/cpu.h @ 6e0000c

Last change on this file since 6e0000c was 6e0000c, checked in by Sebastian Huber <sebastian.huber@…>, on 09/11/14 at 07:07:36

powerpc: Delete PPC_BSP_HAS_FIXED_PR288
/**
 * @file
 *
 * @brief PowerPC CPU Department Source
 */

/*
 *  COPYRIGHT (c) 1989-2012.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  COPYRIGHT (c) 1995 i-cubed ltd.
 *
 *  To anyone who acknowledges that this file is provided "AS IS"
 *  without any express or implied warranty:
 *      permission to use, copy, modify, and distribute this file
 *      for any purpose is hereby granted without fee, provided that
 *      the above copyright notice and this notice appears in all
 *      copies, and that the name of i-cubed limited not be used in
 *      advertising or publicity pertaining to distribution of the
 *      software without specific, written prior permission.
 *      i-cubed limited makes no representations about the suitability
 *      of this software for any purpose.
 *
 *  Copyright (c) 2001 Andy Dachs <a.dachs@sstl.co.uk>.
 *
 *  Copyright (c) 2001 Surrey Satellite Technology Limited (SSTL).
 *
 *  Copyright (c) 2010-2013 embedded brains GmbH.
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

#include <rtems/score/types.h>
#include <rtems/score/powerpc.h>
#include <rtems/powerpc/registers.h>

#ifndef ASM
  #include <string.h> /* for memset() */
#endif

#ifdef __cplusplus
extern "C" {
#endif

/* conditional compilation parameters */

/*
 *  Should the calls to _Thread_Enable_dispatch be inlined?
 *
 *  If TRUE, then they are inlined.
 *  If FALSE, then a subroutine call is made.
 *
 *  Basically this is an example of the classic trade-off of size
 *  versus speed.  Inlining the call (TRUE) typically increases the
 *  size of RTEMS while speeding up the enabling of dispatching.
 *  [NOTE: In general, the _Thread_Dispatch_disable_level will
 *  only be 0 or 1 unless you are in an interrupt handler and that
 *  interrupt handler invokes the executive.]  When not inlined,
 *  something calls _Thread_Enable_dispatch which in turn calls
 *  _Thread_Dispatch.  If the enable dispatch is inlined, then
 *  one subroutine call is avoided entirely.
 */

#define CPU_INLINE_ENABLE_DISPATCH       FALSE

/*
 *  Should the body of the search loops in _Thread_queue_Enqueue_priority
 *  be unrolled one time?  If unrolled, each iteration of the loop examines
 *  two "nodes" on the chain being searched.  Otherwise, only one node
 *  is examined per iteration.
 *
 *  If TRUE, then the loops are unrolled.
 *  If FALSE, then the loops are not unrolled.
 *
 *  The primary factor in making this decision is the cost of disabling
 *  and enabling interrupts (_ISR_Flash) versus the cost of the rest of the
 *  body of the loop.  On some CPUs, the flash is more expensive than
 *  one iteration of the loop body.  In this case, it might be desirable
 *  to unroll the loop.  It is important to note that on some CPUs, this
 *  code is the longest interrupt disable period in RTEMS.  So it is
 *  necessary to strike a balance when setting this parameter.
 */

#define CPU_UNROLL_ENQUEUE_PRIORITY      FALSE

/*
 *  Does this port provide a CPU dependent IDLE task implementation?
 *
 *  If TRUE, then the routine _CPU_Thread_Idle_body
 *  must be provided and is used as the IDLE thread body
 *  instead of the generic one.
 *
 *  If FALSE, then use the generic IDLE thread body if the BSP does
 *  not provide one.
 *
 *  This is intended to allow for supporting processors which have
 *  a low power or idle mode.  When the IDLE thread is executed, then
 *  the CPU can be powered down.
 *
 *  The order of precedence for selecting the IDLE thread body is:
 *
 *    1.  BSP provided
 *    2.  CPU dependent (if provided)
 *    3.  generic (if no BSP and no CPU dependent)
 */

#define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE

/*
 *  Does the stack grow up (toward higher addresses) or down
 *  (toward lower addresses)?
 *
 *  If TRUE, then the stack grows upward.
 *  If FALSE, then the stack grows toward smaller addresses.
 */

#define CPU_STACK_GROWS_UP               FALSE

/*
 *  The following is the variable attribute used to force alignment
 *  of critical RTEMS structures.  On some processors it may make
 *  sense to have these aligned on tighter boundaries than
 *  the minimum requirements of the compiler in order to have as
 *  much of the critical data area as possible in a cache line.
 *
 *  The placement of this macro in the declaration of the variables
 *  is based on the syntactic requirements of the GNU C
 *  "__attribute__" extension.  For example with GNU C, use
 *  the following to force a structure to a 32 byte boundary.
 *
 *      __attribute__ ((aligned (32)))
 *
 *  NOTE:  Currently only the Priority Bit Map table uses this feature.
 *         To benefit from using this, the data must be heavily
 *         used so it will stay in the cache and used frequently enough
 *         in the executive to justify turning this on.
 */

#define CPU_STRUCTURE_ALIGNMENT \
  __attribute__ ((aligned (PPC_STRUCTURE_ALIGNMENT)))
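
/* A minimal usage sketch (illustrative only; _My_bit_map is a
 * hypothetical variable, not part of this header): force a heavily
 * used table onto a PPC_STRUCTURE_ALIGNMENT boundary so it shares as
 * few cache lines as possible with unrelated data.
 *
 *     static Priority_bit_map_Word
 *       _My_bit_map[ 16 ] CPU_STRUCTURE_ALIGNMENT;
 */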

#define CPU_TIMESTAMP_USE_INT64_INLINE TRUE

/*
 *  Define what is required to specify how the network to host conversion
 *  routines are handled.
 */

#if defined(__BIG_ENDIAN__) || defined(_BIG_ENDIAN)
#define CPU_BIG_ENDIAN                           TRUE
#define CPU_LITTLE_ENDIAN                        FALSE
#else
#define CPU_BIG_ENDIAN                           FALSE
#define CPU_LITTLE_ENDIAN                        TRUE
#endif

/*
 *  Does the CPU have hardware floating point?
 *
 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
 *
 *  If there is a FP coprocessor such as the i387 or mc68881, then
 *  the answer is TRUE.
 *
 *  The macro name "PPC_HAS_FPU" should be made CPU specific.
 *  It indicates whether or not this CPU model has FP support.  For
 *  example, it would be possible to have an i386_nofp CPU model
 *  which sets this to FALSE to indicate that you have an i386 without
 *  an i387 and wish to leave floating point support out of RTEMS.
 */

#if ( PPC_HAS_FPU == 1 )
#define CPU_HARDWARE_FP     TRUE
#define CPU_SOFTWARE_FP     FALSE
#else
#define CPU_HARDWARE_FP     FALSE
#define CPU_SOFTWARE_FP     FALSE
#endif

/*
 *  Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
 *
 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
 *
 *  If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
 *
 *  PowerPC Note: It appears that GCC can implicitly generate FPU
 *  and AltiVec instructions when you least expect them.  So make
 *  all tasks floating point.
 */

#define CPU_ALL_TASKS_ARE_FP CPU_HARDWARE_FP

/*
 *  Should the IDLE task have a floating point context?
 *
 *  If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
 *  and it has a floating point context which is switched in and out.
 *  If FALSE, then the IDLE task does not have a floating point context.
 *
 *  Setting this to TRUE negatively impacts the time required to preempt
 *  the IDLE task from an interrupt because the floating point context
 *  must be saved as part of the preemption.
 */

#define CPU_IDLE_TASK_IS_FP      FALSE

#define CPU_PER_CPU_CONTROL_SIZE 0

/*
 *  Processor defined structures required for cpukit/score.
 */

/*
 * Contexts
 *
 *  Generally there are 2 types of context to save.
 *     1. Interrupt registers to save
 *     2. Task level registers to save
 *
 *  This means we have the following 3 context items:
 *     1. task level context stuff::  Context_Control
 *     2. floating point task stuff:: Context_Control_fp
 *     3. special interrupt level context :: Context_Control_interrupt
 *
 *  On some processors, it is cost-effective to save only the callee
 *  preserved registers during a task context switch.  This means
 *  that the ISR code needs to save those registers which do not
 *  persist across function calls.  It is not mandatory to make this
 *  distinction between the caller/callee saved registers for the
 *  purpose of minimizing context saved during task switch and on interrupts.
 *  If the cost of saving extra registers is minimal, simplicity is the
 *  choice.  Save the same context on interrupt entry as for tasks in
 *  this case.
 *
 *  Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
 *  care should be used in designing the context area.
 *
 *  On some CPUs with hardware floating point support, the Context_Control_fp
 *  structure will not be used or it simply consists of an array of a
 *  fixed number of bytes.   This is done when the floating point context
 *  is dumped by a "FP save context" type instruction and the format
 *  is not really defined by the CPU.  In this case, there is no need
 *  to figure out the exact format -- only the size.  Of course, although
 *  this is enough information for RTEMS, it is probably not enough for
 *  a debugger such as gdb.  But that is another problem.
 */

#ifndef __SPE__
  #define PPC_GPR_TYPE uint32_t
  #define PPC_GPR_SIZE 4
  #define PPC_GPR_LOAD lwz
  #define PPC_GPR_STORE stw
#else
  #define PPC_GPR_TYPE uint64_t
  #define PPC_GPR_SIZE 8
  #define PPC_GPR_LOAD evldd
  #define PPC_GPR_STORE evstdd
#endif

#define PPC_DEFAULT_CACHE_LINE_SIZE 32

#ifndef ASM

typedef struct {
  /* There is no CPU specific per-CPU state */
} CPU_Per_CPU_control;

/*
 * Non-volatile context according to E500ABIUG, EABI and 32-bit TLS (according
 * to "Power Architecture 32-bit Application Binary Interface Supplement 1.0 -
 * Linux and Embedded")
 */
typedef struct {
  uint32_t gpr1;
  uint32_t msr;
  uint32_t lr;
  uint32_t cr;
  PPC_GPR_TYPE gpr14;
  PPC_GPR_TYPE gpr15;
  PPC_GPR_TYPE gpr16;
  PPC_GPR_TYPE gpr17;
  PPC_GPR_TYPE gpr18;
  PPC_GPR_TYPE gpr19;
  PPC_GPR_TYPE gpr20;
  PPC_GPR_TYPE gpr21;
  PPC_GPR_TYPE gpr22;
  PPC_GPR_TYPE gpr23;
  PPC_GPR_TYPE gpr24;
  PPC_GPR_TYPE gpr25;
  PPC_GPR_TYPE gpr26;
  PPC_GPR_TYPE gpr27;
  PPC_GPR_TYPE gpr28;
  PPC_GPR_TYPE gpr29;
  PPC_GPR_TYPE gpr30;
  PPC_GPR_TYPE gpr31;
  uint32_t gpr2;
  #ifdef RTEMS_SMP
    volatile uint32_t is_executing;
  #endif
  #ifdef __ALTIVEC__
    /*
     * 12 non-volatile vector registers, cache-aligned area for vscr/vrsave
     * and padding to ensure cache-alignment.  Unfortunately, we can't verify
     * the cache line size here in the cpukit but altivec support code will
     * produce an error if this is ever different from 32 bytes.
     *
     * Note: it is the BSP/CPU-support's responsibility to save/restore
     *       volatile vregs across interrupts and exceptions.
     */
    uint8_t altivec[16*12 + 32 + PPC_DEFAULT_CACHE_LINE_SIZE];
  #endif
} ppc_context;

typedef struct {
  uint8_t context [
    PPC_DEFAULT_CACHE_LINE_SIZE
      + sizeof(ppc_context)
      + (sizeof(ppc_context) % PPC_DEFAULT_CACHE_LINE_SIZE == 0
        ? 0
          : PPC_DEFAULT_CACHE_LINE_SIZE
            - sizeof(ppc_context) % PPC_DEFAULT_CACHE_LINE_SIZE)
  ];
} Context_Control;

static inline ppc_context *ppc_get_context( const Context_Control *context )
{
  uintptr_t clsz = PPC_DEFAULT_CACHE_LINE_SIZE;
  uintptr_t mask = clsz - 1;
  uintptr_t addr = (uintptr_t) context;

  return (ppc_context *) ((addr & ~mask) + clsz);
}
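
/* Worked example (a sketch, assuming the default 32 byte cache line):
 * for a Context_Control allocated at address 0x1004, ppc_get_context()
 * computes (0x1004 & ~0x1f) + 32 == 0x1020, the first cache line
 * boundary inside the padded buffer.  The ppc_context is therefore
 * always cache aligned, and the padding in Context_Control guarantees
 * that it fits before the buffer ends.
 */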

#define _CPU_Context_Get_SP( _context ) \
  ppc_get_context(_context)->gpr1

#ifdef RTEMS_SMP
  static inline bool _CPU_Context_Get_is_executing(
    const Context_Control *context
  )
  {
    return ppc_get_context(context)->is_executing;
  }

  static inline void _CPU_Context_Set_is_executing(
    Context_Control *context,
    bool is_executing
  )
  {
    ppc_get_context(context)->is_executing = is_executing;
  }
#endif
#endif /* ASM */

#define PPC_CONTEXT_OFFSET_GPR1 32
#define PPC_CONTEXT_OFFSET_MSR 36
#define PPC_CONTEXT_OFFSET_LR 40
#define PPC_CONTEXT_OFFSET_CR 44

#define PPC_CONTEXT_GPR_OFFSET( gpr ) \
  (((gpr) - 14) * PPC_GPR_SIZE + 48)

#define PPC_CONTEXT_OFFSET_GPR14 PPC_CONTEXT_GPR_OFFSET( 14 )
#define PPC_CONTEXT_OFFSET_GPR15 PPC_CONTEXT_GPR_OFFSET( 15 )
#define PPC_CONTEXT_OFFSET_GPR16 PPC_CONTEXT_GPR_OFFSET( 16 )
#define PPC_CONTEXT_OFFSET_GPR17 PPC_CONTEXT_GPR_OFFSET( 17 )
#define PPC_CONTEXT_OFFSET_GPR18 PPC_CONTEXT_GPR_OFFSET( 18 )
#define PPC_CONTEXT_OFFSET_GPR19 PPC_CONTEXT_GPR_OFFSET( 19 )
#define PPC_CONTEXT_OFFSET_GPR20 PPC_CONTEXT_GPR_OFFSET( 20 )
#define PPC_CONTEXT_OFFSET_GPR21 PPC_CONTEXT_GPR_OFFSET( 21 )
#define PPC_CONTEXT_OFFSET_GPR22 PPC_CONTEXT_GPR_OFFSET( 22 )
#define PPC_CONTEXT_OFFSET_GPR23 PPC_CONTEXT_GPR_OFFSET( 23 )
#define PPC_CONTEXT_OFFSET_GPR24 PPC_CONTEXT_GPR_OFFSET( 24 )
#define PPC_CONTEXT_OFFSET_GPR25 PPC_CONTEXT_GPR_OFFSET( 25 )
#define PPC_CONTEXT_OFFSET_GPR26 PPC_CONTEXT_GPR_OFFSET( 26 )
#define PPC_CONTEXT_OFFSET_GPR27 PPC_CONTEXT_GPR_OFFSET( 27 )
#define PPC_CONTEXT_OFFSET_GPR28 PPC_CONTEXT_GPR_OFFSET( 28 )
#define PPC_CONTEXT_OFFSET_GPR29 PPC_CONTEXT_GPR_OFFSET( 29 )
#define PPC_CONTEXT_OFFSET_GPR30 PPC_CONTEXT_GPR_OFFSET( 30 )
#define PPC_CONTEXT_OFFSET_GPR31 PPC_CONTEXT_GPR_OFFSET( 31 )
#define PPC_CONTEXT_OFFSET_GPR2 PPC_CONTEXT_GPR_OFFSET( 32 )

#ifdef RTEMS_SMP
  #define PPC_CONTEXT_OFFSET_IS_EXECUTING (PPC_CONTEXT_GPR_OFFSET( 32 ) + 4)
#endif
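
/* Illustrative consistency check (a sketch, not part of the original
 * header): the ppc_context starts one cache line into Context_Control,
 * so a C11 translation unit could verify the fixed offsets above with:
 *
 *     #include <stddef.h>
 *     _Static_assert(
 *       offsetof( ppc_context, msr ) + PPC_DEFAULT_CACHE_LINE_SIZE
 *         == PPC_CONTEXT_OFFSET_MSR,
 *       "PPC_CONTEXT_OFFSET_MSR"
 *     );
 */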

#ifndef ASM
typedef struct {
    /* The ABIs (PowerOpen/SVR4/EABI) only require saving f14-f31 over
     * procedure calls.  However, this would mean that the interrupt
     * frame had to hold f0-f13, and the fpscr.  And as the majority
     * of tasks will not have an FP context, we will save the whole
     * context here.
     */
#if (PPC_HAS_DOUBLE == 1)
    double      f[32];
    uint64_t    fpscr;
#else
    float       f[32];
    uint32_t    fpscr;
#endif
} Context_Control_fp;

typedef struct CPU_Interrupt_frame {
    uint32_t   stacklink;       /* Ensure this is a real frame (also reg1 save) */
    uint32_t   calleeLr;        /* link register used by callees: SVR4/EABI */

    /* This is what is left out of the primary contexts */
    uint32_t   gpr0;
    uint32_t   gpr2;            /* play safe */
    uint32_t   gpr3;
    uint32_t   gpr4;
    uint32_t   gpr5;
    uint32_t   gpr6;
    uint32_t   gpr7;
    uint32_t   gpr8;
    uint32_t   gpr9;
    uint32_t   gpr10;
    uint32_t   gpr11;
    uint32_t   gpr12;
    uint32_t   gpr13;   /* Play safe */
    uint32_t   gpr28;   /* For internal use by the IRQ handler */
    uint32_t   gpr29;   /* For internal use by the IRQ handler */
    uint32_t   gpr30;   /* For internal use by the IRQ handler */
    uint32_t   gpr31;   /* For internal use by the IRQ handler */
    uint32_t   cr;      /* Bits of this are volatile, so no-one may save */
    uint32_t   ctr;
    uint32_t   xer;
    uint32_t   lr;
    uint32_t   pc;
    uint32_t   msr;
    uint32_t   pad[3];
} CPU_Interrupt_frame;

#endif /* ASM */

/*
 *  Does the CPU follow the simple vectored interrupt model?
 *
 *  If TRUE, then RTEMS allocates the vector table it internally manages.
 *  If FALSE, then the BSP is assumed to allocate and manage the vector
 *  table.
 *
 *  PowerPC Specific Information:
 *
 *  The PowerPC and x86 were the first to use the PIC interrupt model.
 *  They do not use the simple vectored interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

/*
 *  Does RTEMS manage a dedicated interrupt stack in software?
 *
 *  If TRUE, then a stack is allocated in _ISR_Handler_initialization.
 *  If FALSE, nothing is done.
 *
 *  If the CPU supports a dedicated interrupt stack in hardware,
 *  then it is generally the responsibility of the BSP to allocate it
 *  and set it up.
 *
 *  If the CPU does not support a dedicated interrupt stack, then
 *  the porter has two options: (1) execute interrupts on the
 *  stack of the interrupted task, and (2) have RTEMS manage a dedicated
 *  interrupt stack.
 *
 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
 *
 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
 *  possible that both are FALSE for a particular CPU, although it
 *  is unclear what that would imply about the interrupt processing
 *  procedure on that CPU.
 */

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE

/*
 *  Does this CPU have hardware support for a dedicated interrupt stack?
 *
 *  If TRUE, then it must be installed during initialization.
 *  If FALSE, then no installation is performed.
 *
 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
 *
 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
 *  possible that both are FALSE for a particular CPU, although it
 *  is unclear what that would imply about the interrupt processing
 *  procedure on that CPU.
 */

#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE

/*
 *  Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
 *
 *  If TRUE, then the memory is allocated during initialization.
 *  If FALSE, then the memory is not allocated by RTEMS and must be
 *  provided by the port or the BSP.
 *
 *  This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE.
 */

#define CPU_ALLOCATE_INTERRUPT_STACK TRUE

/*
 *  Does RTEMS invoke the user's ISR with the vector number and
 *  a pointer to the saved interrupt frame (1) or just the vector
 *  number (0)?
 */

#define CPU_ISR_PASSES_FRAME_POINTER 0

/*
 *  Should the saving of the floating point registers be deferred
 *  until a context switch is made to another different floating point
 *  task?
 *
 *  If TRUE, then the floating point context will not be stored until
 *  necessary.  It will remain in the floating point registers and not
 *  disturbed until another floating point task is switched to.
 *
 *  If FALSE, then the floating point context is saved when a floating
 *  point task is switched out and restored when the next floating point
 *  task is restored.  The state of the floating point registers between
 *  those two operations is not specified.
 *
 *  If the floating point context does NOT have to be saved as part of
 *  interrupt dispatching, then it should be safe to set this to TRUE.
 *
 *  Setting this flag to TRUE results in using a different algorithm
 *  for deciding when to save and restore the floating point context.
 *  The deferred FP switch algorithm minimizes the number of times
 *  the FP context is saved and restored.  The FP context is not saved
 *  until a context switch is made to another, different FP task.
 *  Thus in a system with only one FP task, the FP context will never
 *  be saved or restored.
 *
 *  Note, however, that compilers may use floating point registers/
 *  instructions for optimization or they may save/restore FP registers
 *  on the stack.  You must not use deferred switching in these cases
 *  and on the PowerPC attempting to do so will raise a "FP unavailable"
 *  exception.
 */
/*
 *  ACB Note:  This could make debugging tricky..
 */

/* conservative setting (FALSE); probably doesn't affect performance too much */
#define CPU_USE_DEFERRED_FP_SWITCH       FALSE

/*
 *  Processor defined structures required for cpukit/score.
 */

#ifndef ASM

/*
 *  This variable is optional.  It is used on CPUs on which it is difficult
 *  to generate an "uninitialized" FP context.  It is filled in by
 *  _CPU_Initialize and copied into the task's FP context area during
 *  _CPU_Context_Initialize.
 */

/* EXTERN Context_Control_fp  _CPU_Null_fp_context; */

#endif /* ndef ASM */

/*
 *  This defines the number of levels and the mask used to pick those
 *  bits out of a thread mode.
 */

#define CPU_MODES_INTERRUPT_LEVEL  0x00000001 /* interrupt level in mode */
#define CPU_MODES_INTERRUPT_MASK   0x00000001 /* interrupt level in mode */

/*
 *  Nothing prevents the porter from declaring more CPU specific variables.
 */

#ifndef ASM

SCORE_EXTERN struct {
  uint32_t      *Disable_level;
  void          *Stack;
  volatile bool *Switch_necessary;
  bool          *Signal;

} _CPU_IRQ_info CPU_STRUCTURE_ALIGNMENT;

#endif /* ndef ASM */


/*
 *  The size of the floating point context area.  On some CPUs this
 *  will not be a "sizeof" because the format of the floating point
 *  area is not defined -- only the size is.  This is usually on
 *  CPUs with a "floating point save context" instruction.
 */

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

/*
 * (Optional) # of bytes for libmisc/stackchk to check
 * If not specified, then it defaults to something reasonable
 * for most architectures.
 */

#define CPU_STACK_CHECK_SIZE    (128)

/*
 *  Amount of extra stack (above minimum stack size) required by
 *  MPCI receive server thread.  Remember that in a multiprocessor
 *  system this thread must exist and be able to process all directives.
 */

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

/*
 *  This is defined if the port has a special way to report the ISR nesting
 *  level.  Most ports maintain the variable _ISR_Nest_level.  Note that
 *  this is not an option - RTEMS/score _relies_ on _ISR_Nest_level
 *  being maintained (e.g. watchdog queues).
 */

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/*
 *  ISR handler macros
 */

/*
 *  Disable all interrupts for an RTEMS critical section.  The previous
 *  level is returned in _isr_cookie.
 */

#ifndef ASM

static inline uint32_t   _CPU_ISR_Get_level( void )
{
  register unsigned int msr;
  _CPU_MSR_GET(msr);
  if (msr & MSR_EE) return 0;
  else  return 1;
}

static inline void _CPU_ISR_Set_level( uint32_t   level )
{
  register unsigned int msr;
  _CPU_MSR_GET(msr);
  if (!(level & CPU_MODES_INTERRUPT_MASK)) {
    msr |= ppc_interrupt_get_disable_mask();
  }
  else {
    msr &= ~ppc_interrupt_get_disable_mask();
  }
  _CPU_MSR_SET(msr);
}
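
/* Usage sketch (illustrative only; _CPU_ISR_Level_example is a
 * hypothetical helper, not part of this header): raise the interrupt
 * level around a short critical region and restore the previous level
 * afterwards.
 */
static inline void _CPU_ISR_Level_example( void )
{
  uint32_t previous = _CPU_ISR_Get_level();

  _CPU_ISR_Set_level( 1 );        /* nonzero level masks external exceptions */
  /* ... critical region ... */
  _CPU_ISR_Set_level( previous ); /* restore the saved level */
}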

void BSP_panic(char *);

/* Fatal Error manager macros */

/*
 *  This routine copies _error into a known place -- typically a stack
 *  location or a register, optionally disables interrupts, and
 *  halts/stops the CPU.
 */

void _BSP_Fatal_error(unsigned int);

#endif /* ASM */

#define _CPU_Fatal_halt( _source, _error ) \
  _BSP_Fatal_error(_error)

/* end of Fatal Error manager macros */

/*
 *  Should be large enough to run all RTEMS tests.  This ensures
 *  that a "reasonable" small application should not have any problems.
 */

#define CPU_STACK_MINIMUM_SIZE          (1024*8)

#define CPU_SIZEOF_POINTER 4

/*
 *  CPU's worst alignment requirement for data types on a byte boundary.  This
 *  alignment does not take into account the requirements for the stack.
 */

#define CPU_ALIGNMENT              (PPC_ALIGNMENT)

/*
 *  This number corresponds to the byte alignment requirement for the
 *  heap handler.  This alignment requirement may be stricter than that
 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
 *  common for the heap to follow the same alignment requirement as
 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
 *  then this should be set to CPU_ALIGNMENT.
 *
 *  NOTE:  This does not have to be a power of 2.  It does have to
 *         be greater than or equal to CPU_ALIGNMENT.
 */

#define CPU_HEAP_ALIGNMENT         (PPC_ALIGNMENT)

/*
 *  This number corresponds to the byte alignment requirement for memory
 *  buffers allocated by the partition manager.  This alignment requirement
 *  may be stricter than that for the data types alignment specified by
 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
 *
 *  NOTE:  This does not have to be a power of 2.  It does have to
 *         be greater than or equal to CPU_ALIGNMENT.
 */

#define CPU_PARTITION_ALIGNMENT    (PPC_ALIGNMENT)

/*
 *  This number corresponds to the byte alignment requirement for the
 *  stack.  This alignment requirement may be stricter than that for the
 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
 *  is strict enough for the stack, then this should be set to 0.
 *
 *  NOTE:  This must be a power of 2, either 0 or greater than CPU_ALIGNMENT.
 */

#define CPU_STACK_ALIGNMENT        (PPC_STACK_ALIGNMENT)

#ifndef ASM
/*  The following routine swaps the endian format of an unsigned int.
 *  It must be static because it is referenced indirectly.
 *
 *  This version will work on any processor, but if there is a better
 *  way for your CPU PLEASE use it.  The most common way to do this is to:
 *
 *     swap least significant two bytes with 16-bit rotate
 *     swap upper and lower 16-bits
 *     swap most significant two bytes with 16-bit rotate
 *
 *  Some CPUs have special instructions which swap a 32-bit quantity in
 *  a single instruction (e.g. i486).  It is probably best to avoid
 *  an "endian swapping control bit" in the CPU.  One good reason is
 *  that interrupts would probably have to be disabled to ensure that
 *  an interrupt does not try to access the same "chunk" with the wrong
 *  endian.  Another good reason is that on some CPUs, the endian bit
 *  changes the endianness for ALL fetches -- both code and data -- so
 *  the code will be fetched incorrectly.
 */

static inline uint32_t CPU_swap_u32(
  uint32_t value
)
{
  uint32_t   swapped;

  __asm__ volatile("rlwimi %0,%1,8,24,31;"
               "rlwimi %0,%1,24,16,23;"
               "rlwimi %0,%1,8,8,15;"
               "rlwimi %0,%1,24,0,7;" :
               "=&r" ((swapped)) : "r" ((value)));

  return( swapped );
}
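
/* For comparison, a portable shift-and-mask sketch with the same
 * result (illustrative only; _CPU_swap_u32_portable is hypothetical,
 * and the rlwimi version above is what this port actually uses):
 */
static inline uint32_t _CPU_swap_u32_portable( uint32_t value )
{
  return ((value << 24) & 0xff000000)   /* LSB to MSB position */
    | ((value <<  8) & 0x00ff0000)      /* byte 1 to byte 2 position */
    | ((value >>  8) & 0x0000ff00)      /* byte 2 to byte 1 position */
    | ((value >> 24) & 0x000000ff);     /* MSB to LSB position */
}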

#define CPU_swap_u16( value ) \
  (((value&0xff) << 8) | ((value >> 8)&0xff))
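
/* For example, CPU_swap_u16( 0x1234 ) evaluates to 0x3412. */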

typedef uint32_t CPU_Counter_ticks;

static inline CPU_Counter_ticks _CPU_Counter_read( void )
{
  CPU_Counter_ticks value;

#ifdef ppc8540
  /* Book E has no mftb */
  __asm__ volatile( "mfspr %0, 268" : "=r" (value) );
#else
  __asm__ volatile( "mftb %0" : "=r" (value) );
#endif

  return value;
}

static inline CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
)
{
  return second - first;
}
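
/* Usage sketch (illustrative only; _CPU_Counter_example is a
 * hypothetical helper): measure a short code sequence in counter
 * ticks.  The modular subtraction in _CPU_Counter_difference() also
 * yields the correct result if the counter wraps around between the
 * two reads.
 */
static inline CPU_Counter_ticks _CPU_Counter_example( void )
{
  CPU_Counter_ticks start = _CPU_Counter_read();

  /* ... code under measurement ... */

  return _CPU_Counter_difference( _CPU_Counter_read(), start );
}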

#endif /* ASM */


#ifndef ASM
/* Context handler macros */

/*
 *  Initialize the context to a state suitable for starting a
 *  task after a context restore operation.  Generally, this
 *  involves:
 *
 *     - setting a starting address
 *     - preparing the stack
 *     - preparing the stack and frame pointers
 *     - setting the proper interrupt level in the context
 *     - initializing the floating point context
 *
 *  This routine generally does not set any unnecessary register
 *  in the context.  The state of the "general data" registers is
 *  undefined at task start time.
 */

void _CPU_Context_Initialize(
  Context_Control  *the_context,
  uint32_t         *stack_base,
  uint32_t          size,
  uint32_t          new_level,
  void             *entry_point,
  bool              is_fp,
  void             *tls_area
);

/*
 *  This routine is responsible for somehow restarting the currently
 *  executing task.  If you are lucky, then all that is necessary
 *  is restoring the context.  Otherwise, there will need to be
 *  a special assembly routine which does something special in this
 *  case.  Context_Restore should work most of the time.  It will
 *  not work if restarting self conflicts with the stack frame
 *  assumptions of restoring a context.
 */

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

/*
 *  The purpose of this macro is to allow the initial pointer into
 *  a floating point context area (used to save the floating point
 *  context) to be at an arbitrary place in the floating point
 *  context area.
 *
 *  This is necessary because some FP units are designed to have
 *  their context saved as a stack which grows into lower addresses.
 *  Other FP units can be saved by simply moving registers into offsets
 *  from the base of the context area.  Finally some FP units provide
 *  a "dump context" instruction which could fill in from high to low
 *  or low to high based on the whim of the CPU designers.
 */

#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

/*
 *  This routine initializes the FP context area passed to it.
 *  There are a few standard ways in which to initialize the
 *  floating point context.  The code included for this macro assumes
 *  that this is a CPU in which an "initial" FP context was saved into
 *  _CPU_Null_fp_context and it simply copies it to the destination
 *  context passed to it.
 *
 *  Other models include (1) not doing anything, and (2) putting
 *  a "null FP status word" in the correct place in the FP context.
 */

#define _CPU_Context_Initialize_fp( _destination ) \
  memset( *(_destination), 0, sizeof( **(_destination) ) )

/* end of Context handler macros */
#endif /* ASM */

#ifndef ASM
/* Bitfield handler macros */

/*
 *  This routine sets _output to the bit number of the first bit
 *  set in _value.  _value is of CPU dependent type Priority_bit_map_Word.
 *  This type may be either 16 or 32 bits wide although only the 16
 *  least significant bits will be used.
 *
 *  There are a number of variables in using a "find first bit" type
 *  instruction.
 *
 *    (1) What happens when run on a value of zero?
 *    (2) Bits may be numbered from MSB to LSB or vice-versa.
 *    (3) The numbering may be zero or one based.
 *    (4) The "find first bit" instruction may search from MSB or LSB.
 *
 *  RTEMS guarantees that (1) will never happen so it is not a concern.
 *  (2), (3), (4) are handled by the macros _CPU_Priority_Mask() and
 *  _CPU_Priority_bits_index().  These three form a set of routines
 *  which must logically operate together.  Bits in the _value are
 *  set and cleared based on masks built by _CPU_Priority_Mask().
 *  The basic major and minor values calculated by _Priority_Major()
 *  and _Priority_Minor() are "massaged" by _CPU_Priority_bits_index()
 *  to properly range between the values returned by the "find first bit"
 *  instruction.  This makes it possible for _Priority_Get_highest() to
 *  calculate the major and directly index into the minor table.
 *  This mapping is necessary to ensure that 0 (a high priority major/minor)
 *  is the first bit found.
 *
 *  This entire "find first bit" and mapping process depends heavily
 *  on the manner in which a priority is broken into a major and minor
 *  components with the major being the 4 MSB of a priority and minor
 *  the 4 LSB.  Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
 *  priority.  And (15 << 4) + 14 corresponds to priority 254 -- the next
 *  to the lowest priority.
 *
 *  If your CPU does not have a "find first bit" instruction, then
 *  there are ways to make do without it.  Here are a handful of ways
 *  to implement this in software:
 *
 *    - a series of 16 bit test instructions
 *    - a "binary search using if's"
 *    - _number = 0
 *      if _value > 0x00ff
 *        _value >>= 8
 *        _number = 8;
 *
 *      if _value > 0x000f
 *        _value >>= 4
 *        _number += 4
 *
 *      _number += bit_set_table[ _value ]
 *
 *    where bit_set_table[ 16 ] has values which indicate the first
 *      bit set
 */

#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
  { \
    __asm__ volatile ("cntlzw %0, %1" : "=r" ((_output)), "=r" ((_value)) : \
                  "1" ((_value))); \
  }
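
/* A portable sketch of the software fallback outlined in the comment
 * above (illustrative only; _CPU_Find_first_bit_sketch is a
 * hypothetical helper).  Note the numbering difference: cntlzw counts
 * from the MSB, while this sketch returns the 0-based index of the
 * most significant set bit counted from the LSB.
 */
static inline unsigned int _CPU_Find_first_bit_sketch( uint32_t value )
{
  /* bit_set_table[ v ] is the 0-based index of the highest bit set in
   * v for 1 <= v <= 15; value == 0 cannot occur (RTEMS guarantees it).
   */
  static const unsigned char bit_set_table[ 16 ] =
    { 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3 };
  unsigned int number = 0;

  if ( value > 0xffff ) { value >>= 16; number += 16; }
  if ( value > 0xff )   { value >>= 8;  number += 8; }
  if ( value > 0xf )    { value >>= 4;  number += 4; }

  return number + bit_set_table[ value ];
}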

/* end of Bitfield handler macros */

/*
 *  This routine builds the mask which corresponds to the bit fields
 *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
 *  for that routine.
 */

#define _CPU_Priority_Mask( _bit_number ) \
  ( 0x80000000 >> (_bit_number) )

/*
 *  This routine translates the bit numbers returned by
 *  _CPU_Bitfield_Find_first_bit() into something suitable for use as
 *  a major or minor component of a priority.  See the discussion
 *  for that routine.
 */

#define _CPU_Priority_bits_index( _priority ) \
  (_priority)

/* end of Priority handler macros */
#endif /* ASM */

/* functions */

#ifndef ASM

/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 */

void _CPU_Initialize(void);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.
 */

void _CPU_ISR_install_vector(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 */

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;

/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 */

void _CPU_Context_save_fp(
  Context_Control_fp **fp_context_ptr
);

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 */

void _CPU_Context_restore_fp(
  Context_Control_fp **fp_context_ptr
);

void _CPU_Context_volatile_clobber( uintptr_t pattern );

void _CPU_Context_validate( uintptr_t pattern );

#ifdef RTEMS_SMP
  uint32_t _CPU_SMP_Initialize( void );

  bool _CPU_SMP_Start_processor( uint32_t cpu_index );

  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

  static inline uint32_t _CPU_SMP_Get_current_processor( void )
  {
    uint32_t pir;

    /* Use Book E Processor ID Register (PIR) */
    __asm__ volatile (
      "mfspr %[pir], 286"
      : [pir] "=&r" (pir)
    );

    return pir;
  }

  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

  static inline void _CPU_SMP_Processor_event_broadcast( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }

  static inline void _CPU_SMP_Processor_event_receive( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }
#endif

typedef struct {
  uint32_t EXC_SRR0;
  uint32_t EXC_SRR1;
  uint32_t _EXC_number;
  uint32_t EXC_CR;
  uint32_t EXC_CTR;
  uint32_t EXC_XER;
  uint32_t EXC_LR;
  #ifdef __SPE__
    uint32_t EXC_SPEFSCR;
    uint64_t EXC_ACC;
  #endif
  PPC_GPR_TYPE GPR0;
  PPC_GPR_TYPE GPR1;
  PPC_GPR_TYPE GPR2;
  PPC_GPR_TYPE GPR3;
  PPC_GPR_TYPE GPR4;
  PPC_GPR_TYPE GPR5;
  PPC_GPR_TYPE GPR6;
  PPC_GPR_TYPE GPR7;
  PPC_GPR_TYPE GPR8;
  PPC_GPR_TYPE GPR9;
  PPC_GPR_TYPE GPR10;
  PPC_GPR_TYPE GPR11;
  PPC_GPR_TYPE GPR12;
  PPC_GPR_TYPE GPR13;
  PPC_GPR_TYPE GPR14;
  PPC_GPR_TYPE GPR15;
  PPC_GPR_TYPE GPR16;
  PPC_GPR_TYPE GPR17;
  PPC_GPR_TYPE GPR18;
  PPC_GPR_TYPE GPR19;
  PPC_GPR_TYPE GPR20;
  PPC_GPR_TYPE GPR21;
  PPC_GPR_TYPE GPR22;
  PPC_GPR_TYPE GPR23;
  PPC_GPR_TYPE GPR24;
  PPC_GPR_TYPE GPR25;
  PPC_GPR_TYPE GPR26;
  PPC_GPR_TYPE GPR27;
  PPC_GPR_TYPE GPR28;
  PPC_GPR_TYPE GPR29;
  PPC_GPR_TYPE GPR30;
  PPC_GPR_TYPE GPR31;
} CPU_Exception_frame;

void _BSP_Exception_frame_print( const CPU_Exception_frame *frame );

static inline void _CPU_Exception_frame_print(
  const CPU_Exception_frame *frame
)
{
  _BSP_Exception_frame_print( frame );
}

/*
 * _CPU_Initialize_altivec()
 *
 * Global AltiVec-related initialization.
 */
void
_CPU_Initialize_altivec(void);

/*
 * _CPU_Context_switch_altivec
 *
 * This routine switches the AltiVec contexts passed to it.
 */

void
_CPU_Context_switch_altivec(
  ppc_context *from,
  ppc_context *to
);

/*
 * _CPU_Context_restore_altivec
 *
 * This routine restores the AltiVec context passed to it.
 */

void
_CPU_Context_restore_altivec(
  ppc_context *ctxt
);

/*
 * _CPU_Context_initialize_altivec
 *
 * This routine initializes the AltiVec context passed to it.
 */

void
_CPU_Context_initialize_altivec(
  ppc_context *ctxt
);

void _CPU_Fatal_error(
  uint32_t   _error
);

#endif /* ASM */

#ifdef __cplusplus
}
#endif

#endif /* _RTEMS_SCORE_CPU_H */