source: rtems/cpukit/score/cpu/powerpc/rtems/score/cpu.h @ 826fa6b1

Last change: 826fa6b1, checked in by Joel Sherrill <joel.sherrill@…> on 05/07/12 at 23:29:56

Score ISR - Minimize Capabilities When Not Simple Vectored

In particular CPU_INTERRUPT_NUMBER_OF_VECTORS and
CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER are only used on
Simple Vectored Architectures, so do not depend on
them being defined. This disables as much as possible
that is specific to the Simple Vectored Model and
not expected to be used on architectures which use
the Programmable Interrupt Controller model for
interrupt handler vectoring.

/**
 * @file rtems/score/cpu.h
 */

/*
 *  COPYRIGHT (c) 1989-2012.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  COPYRIGHT (c) 1995 i-cubed ltd.
 *
 *  To anyone who acknowledges that this file is provided "AS IS"
 *  without any express or implied warranty:
 *      permission to use, copy, modify, and distribute this file
 *      for any purpose is hereby granted without fee, provided that
 *      the above copyright notice and this notice appears in all
 *      copies, and that the name of i-cubed limited not be used in
 *      advertising or publicity pertaining to distribution of the
 *      software without specific, written prior permission.
 *      i-cubed limited makes no representations about the suitability
 *      of this software for any purpose.
 *
 *  Copyright (c) 2001 Andy Dachs <a.dachs@sstl.co.uk>.
 *
 *  Copyright (c) 2001 Surrey Satellite Technology Limited (SSTL).
 *
 *  Copyright (c) 2010-2011 embedded brains GmbH.
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 *
 * $Id$
 */

#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

#include <rtems/score/types.h>
#include <rtems/score/powerpc.h>
#include <rtems/powerpc/registers.h>

#ifndef ASM
  #include <string.h> /* for memset() */
#endif

#ifdef __cplusplus
extern "C" {
#endif
/* conditional compilation parameters */

/*
 *  Should the calls to _Thread_Enable_dispatch be inlined?
 *
 *  If TRUE, then they are inlined.
 *  If FALSE, then a subroutine call is made.
 *
 *  Basically this is an example of the classic trade-off of size
 *  versus speed.  Inlining the call (TRUE) typically increases the
 *  size of RTEMS while speeding up the enabling of dispatching.
 *  [NOTE: In general, the _Thread_Dispatch_disable_level will
 *  only be 0 or 1 unless you are in an interrupt handler and that
 *  interrupt handler invokes the executive.]  When not inlined,
 *  something calls _Thread_Enable_dispatch which in turn calls
 *  _Thread_Dispatch.  If the enable dispatch is inlined, then
 *  one subroutine call is avoided entirely.
 */

#define CPU_INLINE_ENABLE_DISPATCH       FALSE

/*
 *  Should the body of the search loops in _Thread_queue_Enqueue_priority
 *  be unrolled one time?  If unrolled, each iteration of the loop examines
 *  two "nodes" on the chain being searched.  Otherwise, only one node
 *  is examined per iteration.
 *
 *  If TRUE, then the loops are unrolled.
 *  If FALSE, then the loops are not unrolled.
 *
 *  The primary factor in making this decision is the cost of disabling
 *  and enabling interrupts (_ISR_Flash) versus the cost of the rest of the
 *  body of the loop.  On some CPUs, the flash is more expensive than
 *  one iteration of the loop body.  In this case, it might be desirable
 *  to unroll the loop.  It is important to note that on some CPUs, this
 *  code is the longest interrupt disable period in RTEMS.  So it is
 *  necessary to strike a balance when setting this parameter.
 */

#define CPU_UNROLL_ENQUEUE_PRIORITY      FALSE

/*
 *  Does this port provide a CPU dependent IDLE task implementation?
 *
 *  If TRUE, then the routine _CPU_Thread_Idle_body
 *  must be provided and is the default IDLE thread body instead of
 *  the generic _Thread_Idle_body.
 *
 *  If FALSE, then use the generic IDLE thread body if the BSP does
 *  not provide one.
 *
 *  This is intended to allow for supporting processors which have
 *  a low power or idle mode.  When the IDLE thread is executed, then
 *  the CPU can be powered down.
 *
 *  The order of precedence for selecting the IDLE thread body is:
 *
 *    1.  BSP provided
 *    2.  CPU dependent (if provided)
 *    3.  generic (if no BSP and no CPU dependent)
 */

#define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE

/*
 *  Does the stack grow up (toward higher addresses) or down
 *  (toward lower addresses)?
 *
 *  If TRUE, then the stack grows upward.
 *  If FALSE, then the stack grows toward smaller addresses.
 */

#define CPU_STACK_GROWS_UP               FALSE

/*
 *  The following is the variable attribute used to force alignment
 *  of critical RTEMS structures.  On some processors it may make
 *  sense to have these aligned on tighter boundaries than
 *  the minimum requirements of the compiler in order to have as
 *  much of the critical data area as possible in a cache line.
 *
 *  The placement of this macro in the declaration of the variables
 *  is based on the syntactic requirements of the GNU C
 *  "__attribute__" extension.  For example with GNU C, use
 *  the following to force a structure to a 32 byte boundary.
 *
 *      __attribute__ ((aligned (32)))
 *
 *  NOTE:  Currently only the Priority Bit Map table uses this feature.
 *         To benefit from using this, the data must be heavily
 *         used so it will stay in the cache and used frequently enough
 *         in the executive to justify turning this on.
 */

#define CPU_STRUCTURE_ALIGNMENT \
  __attribute__ ((aligned (PPC_STRUCTURE_ALIGNMENT)))
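
/*
 *  Editor's illustrative sketch -- not part of the original header.  This
 *  shows how a port might apply the attribute to a heavily used variable,
 *  per the NOTE above; the variable name _Example_Bit_map is hypothetical.
 *
 *      static volatile uint32_t _Example_Bit_map[ 16 ] CPU_STRUCTURE_ALIGNMENT;
 */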

#define CPU_TIMESTAMP_USE_INT64_INLINE TRUE

/*
 *  Define what is required to specify how the network to host conversion
 *  routines are handled.
 */

#if defined(__BIG_ENDIAN__) || defined(_BIG_ENDIAN)
#define CPU_BIG_ENDIAN                           TRUE
#define CPU_LITTLE_ENDIAN                        FALSE
#else
#define CPU_BIG_ENDIAN                           FALSE
#define CPU_LITTLE_ENDIAN                        TRUE
#endif

/*
 *  Does the CPU have hardware floating point?
 *
 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
 *
 *  If there is a FP coprocessor such as the i387 or mc68881, then
 *  the answer is TRUE.
 *
 *  The macro name "PPC_HAS_FPU" should be made CPU specific.
 *  It indicates whether or not this CPU model has FP support.  For
 *  example, it would be possible to have an i386_nofp CPU model
 *  which set this to false to indicate that you have an i386 without
 *  an i387 and wish to leave floating point support out of RTEMS.
 */

#if ( PPC_HAS_FPU == 1 )
#define CPU_HARDWARE_FP     TRUE
#define CPU_SOFTWARE_FP     FALSE
#else
#define CPU_HARDWARE_FP     FALSE
#define CPU_SOFTWARE_FP     FALSE
#endif

/*
 *  Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
 *
 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
 *
 *  If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
 *
 *  PowerPC Note: It appears that GCC can implicitly generate FPU
 *  and Altivec instructions when you least expect them.  So make
 *  all tasks floating point.
 */

#define CPU_ALL_TASKS_ARE_FP CPU_HARDWARE_FP

/*
 *  Should the IDLE task have a floating point context?
 *
 *  If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
 *  and it has a floating point context which is switched in and out.
 *  If FALSE, then the IDLE task does not have a floating point context.
 *
 *  Setting this to TRUE negatively impacts the time required to preempt
 *  the IDLE task from an interrupt because the floating point context
 *  must be saved as part of the preemption.
 */

#define CPU_IDLE_TASK_IS_FP      FALSE

/*
 *  Processor defined structures required for cpukit/score.
 */

/*
 * Contexts
 *
 *  Generally there are 2 types of context to save.
 *     1. Interrupt registers to save
 *     2. Task level registers to save
 *
 *  This means we have the following 3 context items:
 *     1. task level context stuff::  Context_Control
 *     2. floating point task stuff:: Context_Control_fp
 *     3. special interrupt level context :: Context_Control_interrupt
 *
 *  On some processors, it is cost-effective to save only the callee
 *  preserved registers during a task context switch.  This means
 *  that the ISR code needs to save those registers which do not
 *  persist across function calls.  It is not mandatory to make this
 *  distinction between the caller/callee saved registers for the
 *  purpose of minimizing context saved during task switch and on interrupts.
 *  If the cost of saving extra registers is minimal, simplicity is the
 *  choice.  Save the same context on interrupt entry as for tasks in
 *  this case.
 *
 *  Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
 *  care should be used in designing the context area.
 *
 *  On some CPUs with hardware floating point support, the Context_Control_fp
 *  structure will not be used or it simply consists of an array of a
 *  fixed number of bytes.   This is done when the floating point context
 *  is dumped by a "FP save context" type instruction and the format
 *  is not really defined by the CPU.  In this case, there is no need
 *  to figure out the exact format -- only the size.  Of course, although
 *  this is enough information for RTEMS, it is probably not enough for
 *  a debugger such as gdb.  But that is another problem.
 */

#ifndef ASM

typedef struct {
  #ifndef __SPE__
    uint32_t   gpr1;    /* Stack pointer for all */
    uint32_t   gpr2;    /* Reserved SVR4, section ptr EABI + */
    uint32_t   gpr13;   /* Section ptr SVR4/EABI */
    uint32_t   gpr14;   /* Non volatile for all */
    uint32_t   gpr15;   /* Non volatile for all */
    uint32_t   gpr16;   /* Non volatile for all */
    uint32_t   gpr17;   /* Non volatile for all */
    uint32_t   gpr18;   /* Non volatile for all */
    uint32_t   gpr19;   /* Non volatile for all */
    uint32_t   gpr20;   /* Non volatile for all */
    uint32_t   gpr21;   /* Non volatile for all */
    uint32_t   gpr22;   /* Non volatile for all */
    uint32_t   gpr23;   /* Non volatile for all */
    uint32_t   gpr24;   /* Non volatile for all */
    uint32_t   gpr25;   /* Non volatile for all */
    uint32_t   gpr26;   /* Non volatile for all */
    uint32_t   gpr27;   /* Non volatile for all */
    uint32_t   gpr28;   /* Non volatile for all */
    uint32_t   gpr29;   /* Non volatile for all */
    uint32_t   gpr30;   /* Non volatile for all */
    uint32_t   gpr31;   /* Non volatile for all */
    uint32_t   cr;      /* PART of the CR is non volatile for all */
    uint32_t   pc;      /* Program counter/Link register */
    uint32_t   msr;     /* Initial interrupt level */
    #ifdef __ALTIVEC__
      /*
       * 12 non-volatile vector registers, cache-aligned area for vscr/vrsave
       * and padding to ensure cache-alignment.  Unfortunately, we can't verify
       * the cache line size here in the cpukit but altivec support code will
       * produce an error if this is ever different from 32 bytes.
       *
       * Note: it is the BSP/CPU-support's responsibility to save/restore
       *       volatile vregs across interrupts and exceptions.
       */
      uint8_t altivec[16*12 + 32 + 32];
    #endif
  #else
    /* Non-volatile context according to E500ABIUG and EABI */
    uint32_t context [
      8 /* Cache line padding */
      + 1 /* Stack pointer */
      + 1 /* MSR */
      + 1 /* LR */
      + 1 /* CR */
      + 18 * 2 /* GPR 14 to GPR 31 */
    ];
  #endif
} Context_Control;
#endif /* ASM */

#ifndef __SPE__
  #define PPC_CONTEXT_SET_SP( _context, _sp ) \
    do { \
      (_context)->gpr1 = _sp; \
    } while (0)

  #define PPC_CONTEXT_GET_CR( _context ) \
    (_context)->cr

  #define PPC_CONTEXT_GET_MSR( _context ) \
    (_context)->msr

  #define PPC_CONTEXT_SET_MSR( _context, _msr ) \
    do { \
      (_context)->msr = _msr; \
    } while (0)

  #define PPC_CONTEXT_FIRST_SAVED_GPR 13

  #define PPC_CONTEXT_GET_FIRST_SAVED( _context ) \
    (_context)->gpr13

  #define PPC_CONTEXT_GET_PC( _context ) \
    (_context)->pc

  #define PPC_CONTEXT_SET_PC( _context, _pc ) \
    do { \
      (_context)->pc = _pc; \
    } while (0)

  #define _CPU_Context_Get_SP( _context ) \
    (_context)->gpr1
#else
  #define PPC_CONTEXT_CACHE_LINE_0 32
  #define PPC_CONTEXT_OFFSET_SP 32
  #define PPC_CONTEXT_OFFSET_MSR 36
  #define PPC_CONTEXT_OFFSET_LR 40
  #define PPC_CONTEXT_OFFSET_CR 44
  #define PPC_CONTEXT_OFFSET_GPR14 48
  #define PPC_CONTEXT_OFFSET_GPR15 56
  #define PPC_CONTEXT_CACHE_LINE_1 64
  #define PPC_CONTEXT_OFFSET_GPR16 64
  #define PPC_CONTEXT_OFFSET_GPR17 72
  #define PPC_CONTEXT_OFFSET_GPR18 80
  #define PPC_CONTEXT_OFFSET_GPR19 88
  #define PPC_CONTEXT_CACHE_LINE_2 96
  #define PPC_CONTEXT_OFFSET_GPR20 96
  #define PPC_CONTEXT_OFFSET_GPR21 104
  #define PPC_CONTEXT_OFFSET_GPR22 112
  #define PPC_CONTEXT_OFFSET_GPR23 120
  #define PPC_CONTEXT_CACHE_LINE_3 128
  #define PPC_CONTEXT_OFFSET_GPR24 128
  #define PPC_CONTEXT_OFFSET_GPR25 136
  #define PPC_CONTEXT_OFFSET_GPR26 144
  #define PPC_CONTEXT_OFFSET_GPR27 152
  #define PPC_CONTEXT_CACHE_LINE_4 160
  #define PPC_CONTEXT_OFFSET_GPR28 160
  #define PPC_CONTEXT_OFFSET_GPR29 168
  #define PPC_CONTEXT_OFFSET_GPR30 176
  #define PPC_CONTEXT_OFFSET_GPR31 184

  #define PPC_CONTEXT_AREA( _context ) \
    ((uint32_t *) (((uintptr_t) (_context)) & ~0x1fU))

  #define PPC_CONTEXT_FIELD( _context, _offset ) \
    PPC_CONTEXT_AREA( _context ) [(_offset) / 4]

  #define PPC_CONTEXT_SET_SP( _context, _sp ) \
    do { \
      PPC_CONTEXT_FIELD( _context, PPC_CONTEXT_OFFSET_SP ) = _sp; \
    } while (0)

  #define PPC_CONTEXT_GET_CR( _context ) \
    PPC_CONTEXT_FIELD( _context, PPC_CONTEXT_OFFSET_CR )

  #define PPC_CONTEXT_GET_MSR( _context ) \
    PPC_CONTEXT_FIELD( _context, PPC_CONTEXT_OFFSET_MSR )

  #define PPC_CONTEXT_SET_MSR( _context, _msr ) \
    do { \
      PPC_CONTEXT_FIELD( _context, PPC_CONTEXT_OFFSET_MSR ) = _msr; \
    } while (0)

  #define PPC_CONTEXT_FIRST_SAVED_GPR 14

  #define PPC_CONTEXT_GET_FIRST_SAVED( _context ) \
    PPC_CONTEXT_FIELD( _context, PPC_CONTEXT_OFFSET_GPR14 )

  #define PPC_CONTEXT_GET_PC( _context ) \
    PPC_CONTEXT_FIELD( _context, PPC_CONTEXT_OFFSET_LR )

  #define PPC_CONTEXT_SET_PC( _context, _pc ) \
    do { \
      PPC_CONTEXT_FIELD( _context, PPC_CONTEXT_OFFSET_LR ) = _pc; \
    } while (0)

  #define _CPU_Context_Get_SP( _context ) \
    PPC_CONTEXT_FIELD( _context, PPC_CONTEXT_OFFSET_SP )
#endif
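
#ifndef ASM
/*
 *  Editor's illustrative sketch -- not part of the original header.  The
 *  accessor macros above hide the layout difference between the plain and
 *  SPE contexts, so context setup code can be written once for both.  The
 *  function name is hypothetical; a real port computes the MSR value and
 *  the initial stack frame in a port-specific way.
 */
static inline void _Example_context_setup(
  Context_Control *context,
  uint32_t         stack_top,
  uint32_t         msr,
  uint32_t         entry_point
)
{
  PPC_CONTEXT_SET_SP( context, stack_top );
  PPC_CONTEXT_SET_MSR( context, msr );
  PPC_CONTEXT_SET_PC( context, entry_point );
}
#endif /* ASM */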

#ifndef ASM
typedef struct {
    /* The ABIs (PowerOpen/SVR4/EABI) only require saving f14-f31 over
     * procedure calls.  However, this would mean that the interrupt
     * frame had to hold f0-f13, and the fpscr.  And as the majority
     * of tasks will not have an FP context, we will save the whole
     * context here.
     */
#if (PPC_HAS_DOUBLE == 1)
    double      f[32];
    uint64_t    fpscr;
#else
    float       f[32];
    uint32_t    fpscr;
#endif
} Context_Control_fp;

typedef struct CPU_Interrupt_frame {
    uint32_t   stacklink;       /* Ensure this is a real frame (also reg1 save) */
    uint32_t   calleeLr;        /* link register used by callees: SVR4/EABI */

    /* This is what is left out of the primary contexts */
    uint32_t   gpr0;
    uint32_t   gpr2;            /* play safe */
    uint32_t   gpr3;
    uint32_t   gpr4;
    uint32_t   gpr5;
    uint32_t   gpr6;
    uint32_t   gpr7;
    uint32_t   gpr8;
    uint32_t   gpr9;
    uint32_t   gpr10;
    uint32_t   gpr11;
    uint32_t   gpr12;
    uint32_t   gpr13;   /* Play safe */
    uint32_t   gpr28;   /* For internal use by the IRQ handler */
    uint32_t   gpr29;   /* For internal use by the IRQ handler */
    uint32_t   gpr30;   /* For internal use by the IRQ handler */
    uint32_t   gpr31;   /* For internal use by the IRQ handler */
    uint32_t   cr;      /* Bits of this are volatile, so no-one may save */
    uint32_t   ctr;
    uint32_t   xer;
    uint32_t   lr;
    uint32_t   pc;
    uint32_t   msr;
    uint32_t   pad[3];
} CPU_Interrupt_frame;

#endif /* ASM */

/*
 *  Does RTEMS manage a dedicated interrupt stack in software?
 *
 *  If TRUE, then a stack is allocated in _ISR_Handler_initialization.
 *  If FALSE, nothing is done.
 *
 *  If the CPU supports a dedicated interrupt stack in hardware,
 *  then it is generally the responsibility of the BSP to allocate it
 *  and set it up.
 *
 *  If the CPU does not support a dedicated interrupt stack, then
 *  the porter has two options: (1) execute interrupts on the
 *  stack of the interrupted task, and (2) have RTEMS manage a dedicated
 *  interrupt stack.
 *
 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
 *
 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
 *  possible that both are FALSE for a particular CPU, although it
 *  is unclear what that would imply about the interrupt processing
 *  procedure on that CPU.
 */

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE

/*
 *  Does this CPU have hardware support for a dedicated interrupt stack?
 *
 *  If TRUE, then it must be installed during initialization.
 *  If FALSE, then no installation is performed.
 *
 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
 *
 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
 *  possible that both are FALSE for a particular CPU, although it
 *  is unclear what that would imply about the interrupt processing
 *  procedure on that CPU.
 */

#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE

/*
 *  Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
 *
 *  If TRUE, then the memory is allocated during initialization.
 *  If FALSE, then no memory is allocated during initialization.
 *
 *  This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE.
 */

#define CPU_ALLOCATE_INTERRUPT_STACK FALSE

/*
 *  Does RTEMS invoke the user's ISR with the vector number and
 *  a pointer to the saved interrupt frame (1) or just the vector
 *  number (0)?
 */

#define CPU_ISR_PASSES_FRAME_POINTER 0

/*
 *  Should the saving of the floating point registers be deferred
 *  until a context switch is made to another, different floating point
 *  task?
 *
 *  If TRUE, then the floating point context will not be stored until
 *  necessary.  It will remain in the floating point registers and not
 *  be disturbed until another floating point task is switched to.
 *
 *  If FALSE, then the floating point context is saved when a floating
 *  point task is switched out and restored when the next floating point
 *  task is restored.  The state of the floating point registers between
 *  those two operations is not specified.
 *
 *  If the floating point context does NOT have to be saved as part of
 *  interrupt dispatching, then it should be safe to set this to TRUE.
 *
 *  Setting this flag to TRUE results in using a different algorithm
 *  for deciding when to save and restore the floating point context.
 *  The deferred FP switch algorithm minimizes the number of times
 *  the FP context is saved and restored.  The FP context is not saved
 *  until a context switch is made to another, different FP task.
 *  Thus in a system with only one FP task, the FP context will never
 *  be saved or restored.
 *
 *  Note, however, that compilers may use floating point registers/
 *  instructions for optimization or they may save/restore FP registers
 *  on the stack.  You must not use deferred switching in these cases
 *  and on the PowerPC attempting to do so will raise a "FP unavailable"
 *  exception.
 */
/*
 *  ACB Note:  This could make debugging tricky...
 */

/* conservative setting (FALSE); probably doesn't affect performance too much */
#define CPU_USE_DEFERRED_FP_SWITCH       FALSE

/*
 *  Processor defined structures required for cpukit/score.
 */

#ifndef ASM

/*
 *  This variable is optional.  It is used on CPUs on which it is difficult
 *  to generate an "uninitialized" FP context.  It is filled in by
 *  _CPU_Initialize and copied into the task's FP context area during
 *  _CPU_Context_Initialize.
 */

/* EXTERN Context_Control_fp  _CPU_Null_fp_context; */

#endif /* ndef ASM */

/*
 *  This defines the number of levels and the mask used to pick those
 *  bits out of a thread mode.
 */

#define CPU_MODES_INTERRUPT_LEVEL  0x00000001 /* interrupt level in mode */
#define CPU_MODES_INTERRUPT_MASK   0x00000001 /* interrupt level in mode */

/*
 *  Nothing prevents the porter from declaring more CPU specific variables.
 */

#ifndef ASM

SCORE_EXTERN struct {
  uint32_t      *Disable_level;
  void          *Stack;
  volatile bool *Switch_necessary;
  bool          *Signal;

} _CPU_IRQ_info CPU_STRUCTURE_ALIGNMENT;

#endif /* ndef ASM */

/*
 *  The size of the floating point context area.  On some CPUs this
 *  will not be a "sizeof" because the format of the floating point
 *  area is not defined -- only the size is.  This is usually on
 *  CPUs with a "floating point save context" instruction.
 */

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

/*
 * (Optional) # of bytes for libmisc/stackchk to check
 * If not specified, then it defaults to something reasonable
 * for most architectures.
 */

#define CPU_STACK_CHECK_SIZE    (128)

/*
 *  Amount of extra stack (above minimum stack size) required by
 *  MPCI receive server thread.  Remember that in a multiprocessor
 *  system this thread must exist and be able to process all directives.
 */

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

/*
 *  This defines the number of entries in the ISR_Vector_table managed
 *  by RTEMS.
 *
 *  NOTE: CPU_INTERRUPT_NUMBER_OF_VECTORS and
 *        CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER are only used on
 *        Simple Vectored Architectures and thus are not defined
 *        for this architecture.
 */

/*
 *  This is defined if the port has a special way to report the ISR nesting
 *  level.  Most ports maintain the variable _ISR_Nest_level.  Note that
 *  this is not an option - RTEMS/score _relies_ on _ISR_Nest_level
 *  being maintained (e.g. watchdog queues).
 */

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/*
 *  ISR handler macros
 */

#define _CPU_Initialize_vectors()

/*
 *  Disable all interrupts for an RTEMS critical section.  The previous
 *  level is returned in _isr_cookie.
 */

#ifndef ASM

static inline uint32_t   _CPU_ISR_Get_level( void )
{
  register unsigned int msr;
  _CPU_MSR_GET(msr);
  if (msr & MSR_EE) return 0;
  else  return 1;
}

static inline void _CPU_ISR_Set_level( uint32_t   level )
{
  register unsigned int msr;
  _CPU_MSR_GET(msr);
  if (!(level & CPU_MODES_INTERRUPT_MASK)) {
    msr |= ppc_interrupt_get_disable_mask();
  }
  else {
    msr &= ~ppc_interrupt_get_disable_mask();
  }
  _CPU_MSR_SET(msr);
}
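
/*
 *  Editor's illustrative sketch -- not part of the original header.  A
 *  minimal disable/restore pair in the spirit of the critical section
 *  comment above, built only from the MSR external exception enable bit.
 *  The real port also honors the SPRG0 interrupt disable mask (see
 *  ppc_interrupt_get_disable_mask() above), so this is a simplification;
 *  the function names are hypothetical.
 */
static inline uint32_t _Example_ISR_Disable( void )
{
  unsigned int msr;
  unsigned int level;

  _CPU_MSR_GET( msr );
  level = msr;             /* previous level, returned as the cookie */
  msr &= ~MSR_EE;          /* mask external interrupts */
  _CPU_MSR_SET( msr );
  return level;
}

static inline void _Example_ISR_Enable( uint32_t level )
{
  unsigned int msr = level;

  _CPU_MSR_SET( msr );     /* restore the saved MSR */
}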

void BSP_panic(char *);

/* Fatal Error manager macros */

/*
 *  This routine copies _error into a known place -- typically a stack
 *  location or a register, optionally disables interrupts, and
 *  halts/stops the CPU.
 */

void _BSP_Fatal_error(unsigned int);

#endif /* ASM */

#define _CPU_Fatal_halt( _error ) \
  _BSP_Fatal_error(_error)

/* end of Fatal Error manager macros */

/*
 * SPRG0 was previously used to make sure that the BSP fixed the PR288 bug.
 * Now SPRG0 is devoted to the interrupt disable mask.
 */

#define PPC_BSP_HAS_FIXED_PR288 ppc_this_is_now_the_interrupt_disable_mask

/*
 *  Should be large enough to run all RTEMS tests.  This ensures
 *  that a "reasonable" small application should not have any problems.
 */

#define CPU_STACK_MINIMUM_SIZE          (1024*8)

/*
 *  CPU's worst alignment requirement for data types on a byte boundary.  This
 *  alignment does not take into account the requirements for the stack.
 */

#define CPU_ALIGNMENT              (PPC_ALIGNMENT)

/*
 *  This number corresponds to the byte alignment requirement for the
 *  heap handler.  This alignment requirement may be stricter than that
 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
 *  common for the heap to follow the same alignment requirement as
 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
 *  then this should be set to CPU_ALIGNMENT.
 *
 *  NOTE:  This does not have to be a power of 2.  It does have to
 *         be greater than or equal to CPU_ALIGNMENT.
 */

#define CPU_HEAP_ALIGNMENT         (PPC_ALIGNMENT)

/*
 *  This number corresponds to the byte alignment requirement for memory
 *  buffers allocated by the partition manager.  This alignment requirement
 *  may be stricter than that for the data types alignment specified by
 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
 *
 *  NOTE:  This does not have to be a power of 2.  It does have to
 *         be greater than or equal to CPU_ALIGNMENT.
 */

#define CPU_PARTITION_ALIGNMENT    (PPC_ALIGNMENT)

/*
 *  This number corresponds to the byte alignment requirement for the
 *  stack.  This alignment requirement may be stricter than that for the
 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
 *  is strict enough for the stack, then this should be set to 0.
 *
 *  NOTE:  This must be a power of 2, either 0 or greater than CPU_ALIGNMENT.
 */

#define CPU_STACK_ALIGNMENT        (PPC_STACK_ALIGNMENT)

#ifndef ASM
/*  The following routine swaps the endian format of an unsigned int.
 *  It must be static because it is referenced indirectly.
 *
 *  This version will work on any processor, but if there is a better
 *  way for your CPU PLEASE use it.  The most common way to do this is to:
 *
 *     swap least significant two bytes with 16-bit rotate
 *     swap upper and lower 16-bits
 *     swap most significant two bytes with 16-bit rotate
 *
 *  Some CPUs have special instructions which swap a 32-bit quantity in
 *  a single instruction (e.g. i486).  It is probably best to avoid
 *  an "endian swapping control bit" in the CPU.  One good reason is
 *  that interrupts would probably have to be disabled to ensure that
 *  an interrupt does not try to access the same "chunk" with the wrong
 *  endian.  Another good reason is that on some CPUs, the endian bit
 *  changes the endianness for ALL fetches -- both code and data -- so
 *  the code will be fetched incorrectly.
 */

static inline uint32_t CPU_swap_u32(
  uint32_t value
)
{
  uint32_t   swapped;

  __asm__ volatile("rlwimi %0,%1,8,24,31;"
               "rlwimi %0,%1,24,16,23;"
               "rlwimi %0,%1,8,8,15;"
               "rlwimi %0,%1,24,0,7;" :
               "=&r" ((swapped)) : "r" ((value)));

  return( swapped );
}

#define CPU_swap_u16( value ) \
  (((value&0xff) << 8) | ((value >> 8)&0xff))
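
/*
 *  Editor's illustrative sketch -- not part of the original header: how the
 *  endian flags defined earlier and the swap helpers above combine into a
 *  network-to-host style conversion.  The function name is hypothetical.
 */
static inline uint32_t _Example_ntohl( uint32_t value )
{
#if ( CPU_BIG_ENDIAN == TRUE )
  return value;                 /* network order is big endian */
#else
  return CPU_swap_u32( value ); /* little endian hosts must swap */
#endif
}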

#endif /* ASM */


#ifndef ASM
/* Context handler macros */

/*
 *  Initialize the context to a state suitable for starting a
 *  task after a context restore operation.  Generally, this
 *  involves:
 *
 *     - setting a starting address
 *     - preparing the stack
 *     - preparing the stack and frame pointers
 *     - setting the proper interrupt level in the context
 *     - initializing the floating point context
 *
 *  This routine generally does not set any unnecessary register
 *  in the context.  The state of the "general data" registers is
 *  undefined at task start time.
 */

void _CPU_Context_Initialize(
  Context_Control  *the_context,
  uint32_t         *stack_base,
  uint32_t          size,
  uint32_t          new_level,
  void             *entry_point,
  bool              is_fp
);

/*
 *  This routine is responsible for somehow restarting the currently
 *  executing task.  If you are lucky, then all that is necessary
 *  is restoring the context.  Otherwise, there will need to be
 *  a special assembly routine which does something special in this
 *  case.  Context_Restore should work most of the time.  It will
 *  not work if restarting self conflicts with the stack frame
 *  assumptions of restoring a context.
 */

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

/*
 *  The purpose of this macro is to allow the initial pointer into
 *  a floating point context area (used to save the floating point
 *  context) to be at an arbitrary place in the floating point
 *  context area.
 *
 *  This is necessary because some FP units are designed to have
 *  their context saved as a stack which grows into lower addresses.
 *  Other FP units can be saved by simply moving registers into offsets
 *  from the base of the context area.  Finally some FP units provide
 *  a "dump context" instruction which could fill in from high to low
 *  or low to high based on the whim of the CPU designers.
 */

#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

/*
 *  This routine initializes the FP context area passed to it.
 *  There are a few standard ways in which to initialize the
 *  floating point context.  The code included for this macro assumes
 *  that this is a CPU in which an "initial" FP context was saved into
 *  _CPU_Null_fp_context and it simply copies it to the destination
 *  context passed to it.
 *
 *  Other models include (1) not doing anything, and (2) putting
 *  a "null FP status word" in the correct place in the FP context.
 */

#define _CPU_Context_Initialize_fp( _destination ) \
  memset( *(_destination), 0, sizeof( **(_destination) ) )
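
/*
 *  Editor's illustrative sketch -- not part of the original header.  The
 *  macro above takes a pointer to the FP context pointer, hence the extra
 *  level of indirection here; the function name is hypothetical.
 */
static inline void _Example_fp_area_reset( Context_Control_fp *fp_area )
{
  _CPU_Context_Initialize_fp( &fp_area );
}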

/* end of Context handler macros */
#endif /* ASM */

#ifndef ASM
/* Bitfield handler macros */

/*
 *  This routine sets _output to the bit number of the first bit
 *  set in _value.  _value is of CPU dependent type Priority_bit_map_Control.
 *  This type may be either 16 or 32 bits wide although only the 16
 *  least significant bits will be used.
 *
 *  There are a number of variable factors in using a "find first bit" type
 *  instruction.
 *
 *    (1) What happens when run on a value of zero?
 *    (2) Bits may be numbered from MSB to LSB or vice-versa.
 *    (3) The numbering may be zero or one based.
 *    (4) The "find first bit" instruction may search from MSB or LSB.
 *
 *  RTEMS guarantees that (1) will never happen so it is not a concern.
 *  (2), (3), (4) are handled by the macros _CPU_Priority_mask() and
 *  _CPU_Priority_Bits_index().  These three form a set of routines
 *  which must logically operate together.  Bits in the _value are
 *  set and cleared based on masks built by _CPU_Priority_mask().
 *  The basic major and minor values calculated by _Priority_Major()
 *  and _Priority_Minor() are "massaged" by _CPU_Priority_Bits_index()
 *  to properly range between the values returned by the "find first bit"
 *  instruction.  This makes it possible for _Priority_Get_highest() to
 *  calculate the major and directly index into the minor table.
 *  This mapping is necessary to ensure that 0 (a high priority major/minor)
 *  is the first bit found.
 *
 *  This entire "find first bit" and mapping process depends heavily
 *  on the manner in which a priority is broken into a major and minor
 *  components with the major being the 4 MSB of a priority and minor
 *  the 4 LSB.  Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
 *  priority.  And (15 << 4) + 14 corresponds to priority 254 -- the next
 *  to the lowest priority.
 *
 *  If your CPU does not have a "find first bit" instruction, then
 *  there are ways to make do without it.  Here are a handful of ways
 *  to implement this in software:
 *
 *    - a series of 16 bit test instructions
 *    - a "binary search using if's"
 *    - _number = 0
 *      if _value > 0x00ff
 *        _value >>= 8
 *        _number = 8;
 *
 *      if _value > 0x000f
 *        _value >>= 4
 *        _number += 4
 *
 *      _number += bit_set_table[ _value ]
 *
 *    where bit_set_table[ 16 ] has values which indicate the first
 *      bit set
 */

#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
  { \
    __asm__ volatile ("cntlzw %0, %1" : "=r" ((_output)), "=r" ((_value)) : \
                  "1" ((_value))); \
  }
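
/*
 *  Editor's illustrative sketch -- not part of the original header.  A
 *  portable C version of the software fallback outlined in the comment
 *  above, for the 16-bit priority map values on CPUs without a
 *  count-leading-zeros instruction.  Unlike cntlzw it numbers bits from
 *  the LSB, so the companion mask and index macros would have to agree
 *  with that convention.  A zero argument is undefined, matching the
 *  RTEMS guarantee above; the function name is hypothetical.
 */
static inline unsigned int _Example_find_first_bit( uint32_t value )
{
  /* index of the most significant set bit of a 4-bit value */
  static const unsigned char bit_set_table[ 16 ] =
    { 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3 };
  unsigned int number = 0;

  if ( value > 0x00ff ) { value >>= 8; number = 8; }
  if ( value > 0x000f ) { value >>= 4; number += 4; }

  return number + bit_set_table[ value ];
}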

/* end of Bitfield handler macros */

/*
 *  This routine builds the mask which corresponds to the bit fields
 *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
 *  for that routine.
 */

#define _CPU_Priority_Mask( _bit_number ) \
  ( 0x80000000 >> (_bit_number) )

/*
 *  This routine translates the bit numbers returned by
 *  _CPU_Bitfield_Find_first_bit() into something suitable for use as
 *  a major or minor component of a priority.  See the discussion
 *  for that routine.
 */

#define _CPU_Priority_bits_index( _priority ) \
  (_priority)
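
/*
 *  Editor's illustrative sketch -- not part of the original header.  It
 *  shows the three routines cooperating: cntlzw numbers bits from the MSB,
 *  so feeding the found bit number back through the index and mask macros
 *  isolates the first set bit again.  The function name is hypothetical.
 */
static inline uint32_t _Example_priority_mask_roundtrip( uint32_t map )
{
  unsigned int bit;

  _CPU_Bitfield_Find_first_bit( map, bit );
  return _CPU_Priority_Mask( _CPU_Priority_bits_index( bit ) );
}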

/* end of Priority handler macros */
#endif /* ASM */

/* functions */

#ifndef ASM

/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 */

void _CPU_Initialize(void);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.
 */

void _CPU_ISR_install_vector(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_Install_interrupt_stack
 *
 *  This routine installs the hardware interrupt stack pointer.
 *
 *  NOTE:  It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
 *         is TRUE.
 */

void _CPU_Install_interrupt_stack( void );

/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 */

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;

/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 */

void _CPU_Context_save_fp(
  Context_Control_fp **fp_context_ptr
);

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 */

void _CPU_Context_restore_fp(
  Context_Control_fp **fp_context_ptr
);

/*
 * _CPU_Initialize_altivec()
 *
 * Global altivec-related initialization.
 */
void
_CPU_Initialize_altivec(void);

/*
 * _CPU_Context_switch_altivec
 *
 * This routine switches the altivec contexts passed to it.
 */

void
_CPU_Context_switch_altivec(
  Context_Control *from,
  Context_Control *to
);

/*
 * _CPU_Context_restore_altivec
 *
 * This routine restores the altivec context passed to it.
 */

void
_CPU_Context_restore_altivec(
  Context_Control *ctxt
);

/*
 * _CPU_Context_initialize_altivec
 *
 * This routine initializes the altivec context passed to it.
 */

void
_CPU_Context_initialize_altivec(
  Context_Control *ctxt
);

void _CPU_Fatal_error(
  uint32_t   _error
);

#endif /* ASM */

#ifdef __cplusplus
}
#endif

#endif /* _RTEMS_SCORE_CPU_H */