source: rtems/c/src/exec/score/cpu/powerpc/cpu.h @ 3a4ae6c

Last change on this file since 3a4ae6c was 3a4ae6c, checked in by Joel Sherrill <joel.sherrill@…>, on 09/11/95 at 19:35:39

The word "RTEMS" almost completely removed from the core.

Configuration Table Template file added and all tests
modified to use this. All gvar.h and conftbl.h files
removed from test directories.

Configuration parameter maximum_devices added.

Core semaphore and mutex handlers added and RTEMS API Semaphore
Manager updated to reflect this.

Initialization sequence changed to invoke API specific initialization
routines. Initialization tasks table now owned by RTEMS Tasks Manager.

Added user extension for post-switch.

Utilized user extensions to implement API specific functionality
like signal dispatching.

Added extensions to the System Initialization Thread so that an
API can register a function to be invoked while the system
is being initialized. These are largely equivalent to the
pre-driver and post-driver hooks.

Added the Modules file oar-go32_p5, modified oar-go32, and modified
the file make/custom/go32.cfg to look at an environment variable which
determines what CPU model is being used.

All BSPs updated to reflect named devices and clock driver's IOCTL
used by the Shared Memory Driver. Also merged clock isr into
main file and removed ckisr.c where possible.

Updated spsize to reflect new and moved variables.

Makefiles for the executive source and include files updated to show
break down of files into Core, RTEMS API, and Neither.

Header and inline files installed into a subdirectory based on whether they
are logically in the Core or a part of the RTEMS API.

1/*  cpu.h
2 *
3 *  This include file contains information pertaining to the PowerPC
4 *  processor.
5 *
6 *  Author:     Andrew Bray <andy@i-cubed.demon.co.uk>
7 *
8 *  COPYRIGHT (c) 1995 by i-cubed ltd.
9 *
10 *  To anyone who acknowledges that this file is provided "AS IS"
11 *  without any express or implied warranty:
12 *      permission to use, copy, modify, and distribute this file
13 *      for any purpose is hereby granted without fee, provided that
14 *      the above copyright notice and this notice appears in all
15 *      copies, and that the name of i-cubed limited not be used in
16 *      advertising or publicity pertaining to distribution of the
17 *      software without specific, written prior permission.
18 *      i-cubed limited makes no representations about the suitability
19 *      of this software for any purpose.
20 *
21 *  Derived from c/src/exec/cpu/no_cpu/cpu.h:
22 *
23 *  COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
24 *  On-Line Applications Research Corporation (OAR).
25 *  All rights assigned to U.S. Government, 1994.
26 *
27 *  This material may be reproduced by or for the U.S. Government pursuant
28 *  to the copyright license under the clause at DFARS 252.227-7013.  This
29 *  notice must appear in all copies of this file and its derivatives.
30 *
31 */
32
33#ifndef __CPU_h
34#define __CPU_h
35
36#ifdef __cplusplus
37extern "C" {
38#endif
39
40#include <rtems/core/ppc.h>               /* pick up machine definitions */
41#ifndef ASM
42struct CPU_Interrupt_frame;
43
44#include <rtems/core/ppctypes.h>
45#endif
46
47/* conditional compilation parameters */
48
49/*
50 *  Should the calls to _Thread_Enable_dispatch be inlined?
51 *
52 *  If TRUE, then they are inlined.
53 *  If FALSE, then a subroutine call is made.
54 *
55 *  Basically this is an example of the classic trade-off of size
56 *  versus speed.  Inlining the call (TRUE) typically increases the
57 *  size of RTEMS while speeding up the enabling of dispatching.
58 *  [NOTE: In general, the _Thread_Dispatch_disable_level will
59 *  only be 0 or 1 unless you are in an interrupt handler and that
60 *  interrupt handler invokes the executive.]  When not inlined
61 *  something calls _Thread_Enable_dispatch which in turn calls
62 *  _Thread_Dispatch.  If the enable dispatch is inlined, then
63 *  one subroutine call is avoided entirely.
64 */
65
66#define CPU_INLINE_ENABLE_DISPATCH       FALSE
67
68/*
69 *  Should the body of the search loops in _Thread_queue_Enqueue_priority
70 *  be unrolled one time?  If unrolled, each iteration of the loop examines
71 *  two "nodes" on the chain being searched.  Otherwise, only one node
72 *  is examined per iteration.
73 *
74 *  If TRUE, then the loops are unrolled.
75 *  If FALSE, then the loops are not unrolled.
76 *
77 *  The primary factor in making this decision is the cost of disabling
78 *  and enabling interrupts (_ISR_Flash) versus the cost of the rest of the
79 *  body of the loop.  On some CPUs, the flash is more expensive than
80 *  one iteration of the loop body.  In this case, it might be desirable
81 *  to unroll the loop.  It is important to note that on some CPUs, this
82 *  code is the longest interrupt disable period in RTEMS.  So it is
83 *  necessary to strike a balance when setting this parameter.
84 */
85
86#define CPU_UNROLL_ENQUEUE_PRIORITY      FALSE
87
88/*
89 *  Does RTEMS manage a dedicated interrupt stack in software?
90 *
91 *  If TRUE, then a stack is allocated in _Interrupt_Manager_initialization.
92 *  If FALSE, nothing is done.
93 *
94 *  If the CPU supports a dedicated interrupt stack in hardware,
95 *  then it is generally the responsibility of the BSP to allocate it
96 *  and set it up.
97 *
98 *  If the CPU does not support a dedicated interrupt stack, then
99 *  the porter has two options: (1) execute interrupts on the
100 *  stack of the interrupted task, and (2) have RTEMS manage a dedicated
101 *  interrupt stack.
102 *
103 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
104 *
105 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
106 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
107 *  possible that both are FALSE for a particular CPU, although it
108 *  is unclear what that would imply about the interrupt processing
109 *  procedure on that CPU.
110 */
111
112#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE
113
114/*
115 *  Does this CPU have hardware support for a dedicated interrupt stack?
116 *
117 *  If TRUE, then it must be installed during initialization.
118 *  If FALSE, then no installation is performed.
119 *
120 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
121 *
122 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
123 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
124 *  possible that both are FALSE for a particular CPU, although it
125 *  is unclear what that would imply about the interrupt processing
126 *  procedure on that CPU.
127 */
128
129/*
130 *  ACB: This is a lie, but it gets us a handle on a call to set up
131 *  a variable derived from the top of the interrupt stack.
132 */
133
134#define CPU_HAS_HARDWARE_INTERRUPT_STACK TRUE
135
136/*
137 *  Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
138 *
139 *  If TRUE, then the memory is allocated during initialization.
140 *  If FALSE, then the memory is not allocated during initialization.
141 *
142 *  This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE
143 *  or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE.
144 */
145
146#define CPU_ALLOCATE_INTERRUPT_STACK TRUE
147
148/*
149 *  Does the CPU have hardware floating point?
150 *
151 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
152 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
153 *
154 *  If there is a FP coprocessor such as the i387 or mc68881, then
155 *  the answer is TRUE.
156 *
157 *  The macro name "PPC_HAS_FPU" should be made CPU specific.
158 *  It indicates whether or not this CPU model has FP support.  For
159 *  example, it would be possible to have an i386_nofp CPU model
160 *  which sets this to FALSE to indicate that you have an i386 without
161 *  an i387 and wish to leave floating point support out of RTEMS.
162 */
163
164#if ( PPC_HAS_FPU == 1 )
165#define CPU_HARDWARE_FP     TRUE
166#else
167#define CPU_HARDWARE_FP     FALSE
168#endif
169
170/*
171 *  Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
172 *
173 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
174 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
175 *
176 *  So far, the only CPU in which this option has been used is the
177 *  HP PA-RISC.  The HP C compiler and gcc both implicitly use the
178 *  floating point registers to perform integer multiplies.  If
179 *  a function which you would not expect to utilize the FP unit DOES,
180 *  then one cannot easily predict which tasks will use the FP hardware.
181 *  In this case, this option should be TRUE.
182 *
183 *  If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
184 */
185
186#define CPU_ALL_TASKS_ARE_FP     FALSE
187
188/*
189 *  Should the IDLE task have a floating point context?
190 *
191 *  If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
192 *  and it has a floating point context which is switched in and out.
193 *  If FALSE, then the IDLE task does not have a floating point context.
194 *
195 *  Setting this to TRUE negatively impacts the time required to preempt
196 *  the IDLE task from an interrupt because the floating point context
197 *  must be saved as part of the preemption.
198 */
199
200#define CPU_IDLE_TASK_IS_FP      FALSE
201
202/*
203 *  Should the saving of the floating point registers be deferred
204 *  until a context switch is made to another different floating point
205 *  task?
206 *
207 *  If TRUE, then the floating point context will not be stored until
208 *  necessary.  It will remain in the floating point registers and not
209 *  disturbed until another floating point task is switched to.
210 *
211 *  If FALSE, then the floating point context is saved when a floating
212 *  point task is switched out and restored when the next floating point
213 *  task is restored.  The state of the floating point registers between
214 *  those two operations is not specified.
215 *
216 *  If the floating point context does NOT have to be saved as part of
217 *  interrupt dispatching, then it should be safe to set this to TRUE.
218 *
219 *  Setting this flag to TRUE results in using a different algorithm
220 *  for deciding when to save and restore the floating point context.
221 *  The deferred FP switch algorithm minimizes the number of times
222 *  the FP context is saved and restored.  The FP context is not saved
223 *  until a context switch is made to another, different FP task.
224 *  Thus in a system with only one FP task, the FP context will never
225 *  be saved or restored.
226 */
227/*
228 *  ACB Note:  This could make debugging tricky..
229 */
230
231#define CPU_USE_DEFERRED_FP_SWITCH       TRUE
232
233/*
234 *  Does this port provide a CPU dependent IDLE task implementation?
235 *
236 *  If TRUE, then the routine _CPU_Internal_threads_Idle_thread_body
237 *  must be provided and is the default IDLE thread body instead of
238 *  _Internal_threads_Idle_thread_body.
239 *
240 *  If FALSE, then use the generic IDLE thread body if the BSP does
241 *  not provide one.
242 *
243 *  This is intended to allow for supporting processors which have
244 *  a low power or idle mode.  When the IDLE thread is executed, then
245 *  the CPU can be powered down.
246 *
247 *  The order of precedence for selecting the IDLE thread body is:
248 *
249 *    1.  BSP provided
250 *    2.  CPU dependent (if provided)
251 *    3.  generic (if no BSP and no CPU dependent)
252 */
253
254#define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE
255
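/*
 *  Illustrative sketch (not part of the original header):  if a PowerPC
 *  port did set CPU_PROVIDES_IDLE_THREAD_BODY to TRUE, the routine named
 *  above would look roughly like the following.  The low power step is an
 *  assumption; this port leaves the option FALSE.
 *
 *      void _CPU_Internal_threads_Idle_thread_body( void )
 *      {
 *        for ( ; ; )
 *          ;       (or execute a CPU model specific low power sequence)
 *      }
 */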
256/*
257 *  Does the stack grow up (toward higher addresses) or down
258 *  (toward lower addresses)?
259 *
260 *  If TRUE, then the stack grows upward.
261 *  If FALSE, then the stack grows toward smaller addresses.
262 */
263
264#define CPU_STACK_GROWS_UP               FALSE
265
266/*
267 *  The following is the variable attribute used to force alignment
268 *  of critical RTEMS structures.  On some processors it may make
269 *  sense to have these aligned on tighter boundaries than
270 *  the minimum requirements of the compiler in order to have as
271 *  much of the critical data area as possible in a cache line.
272 *
273 *  The placement of this macro in the declaration of the variables
274 *  is based on the syntactic requirements of the GNU C
275 *  "__attribute__" extension.  For example, with GNU C, use
276 *  the following to force a structure to a 32 byte boundary.
277 *
278 *      __attribute__ ((aligned (32)))
279 *
280 *  NOTE:  Currently only the Priority Bit Map table uses this feature.
281 *         To benefit from using this, the data must be heavily
282 *         used so it will stay in the cache and used frequently enough
283 *         in the executive to justify turning this on.
284 */
285
286#define CPU_STRUCTURE_ALIGNMENT    __attribute__ ((aligned (PPC_CACHE_ALIGNMENT)))
287
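/*
 *  For illustration only (not from the original header):  the attribute is
 *  appended to the declaration, so a hypothetical table forced onto a cache
 *  line boundary would be declared as:
 *
 *      volatile unsigned32 _Example_bit_map[ 16 ] CPU_STRUCTURE_ALIGNMENT;
 *
 *  _Example_bit_map is a made-up name; per the NOTE above, the Priority Bit
 *  Map table is the one place this is currently applied.
 */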
288/*
289 *  The following defines the number of bits actually used in the
290 *  interrupt field of the task mode.  How those bits map to the
291 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
292 */
293/*
294 *  ACB Note: Levels are:
295 *   0: All maskable interrupts enabled
296 *   1: Other critical exceptions enabled
297 *   2: Machine check enabled
298 *   3: All maskable IRQs disabled
299 */
300
301#define CPU_MODES_INTERRUPT_MASK   0x00000003
302
303/*
304 *  Processor defined structures
305 *
306 *  Example structures include the descriptor tables from the i386
307 *  and the processor control structure on the i960ca.
308 */
309
310/* may need to put some structures here.  */
311
312/*
313 * Contexts
314 *
315 *  Generally there are 2 types of context to save.
316 *     1. Interrupt registers to save
317 *     2. Task level registers to save
318 *
319 *  This means we have the following 3 context items:
320 *     1. task level context stuff::  Context_Control
321 *     2. floating point task stuff:: Context_Control_fp
322 *     3. special interrupt level context :: Context_Control_interrupt
323 *
324 *  On some processors, it is cost-effective to save only the callee
325 *  preserved registers during a task context switch.  This means
326 *  that the ISR code needs to save those registers which do not
327 *  persist across function calls.  It is not mandatory to make this
328 *  distinction between the caller/callee saved registers for the
329 *  purpose of minimizing context saved during task switch and on interrupts.
330 *  If the cost of saving extra registers is minimal, simplicity is the
331 *  choice.  Save the same context on interrupt entry as for tasks in
332 *  this case.
333 *
334 *  Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
335 *  care should be used in designing the context area.
336 *
337 *  On some CPUs with hardware floating point support, the Context_Control_fp
338 *  structure will not be used or it simply consists of an array of a
339 *  fixed number of bytes.   This is done when the floating point context
340 *  is dumped by a "FP save context" type instruction and the format
341 *  is not really defined by the CPU.  In this case, there is no need
342 *  to figure out the exact format -- only the size.  Of course, although
343 *  this is enough information for RTEMS, it is probably not enough for
344 *  a debugger such as gdb.  But that is another problem.
345 */
346
347typedef struct {
348    unsigned32 gpr1;    /* Stack pointer for all */
349    unsigned32 gpr2;    /* TOC in PowerOpen, reserved SVR4, section ptr EABI + */
350    unsigned32 gpr13;   /* First non volatile PowerOpen, section ptr SVR4/EABI */
351    unsigned32 gpr14;   /* Non volatile for all */
352    unsigned32 gpr15;   /* Non volatile for all */
353    unsigned32 gpr16;   /* Non volatile for all */
354    unsigned32 gpr17;   /* Non volatile for all */
355    unsigned32 gpr18;   /* Non volatile for all */
356    unsigned32 gpr19;   /* Non volatile for all */
357    unsigned32 gpr20;   /* Non volatile for all */
358    unsigned32 gpr21;   /* Non volatile for all */
359    unsigned32 gpr22;   /* Non volatile for all */
360    unsigned32 gpr23;   /* Non volatile for all */
361    unsigned32 gpr24;   /* Non volatile for all */
362    unsigned32 gpr25;   /* Non volatile for all */
363    unsigned32 gpr26;   /* Non volatile for all */
364    unsigned32 gpr27;   /* Non volatile for all */
365    unsigned32 gpr28;   /* Non volatile for all */
366    unsigned32 gpr29;   /* Non volatile for all */
367    unsigned32 gpr30;   /* Non volatile for all */
368    unsigned32 gpr31;   /* Non volatile for all */
369    unsigned32 cr;      /* PART of the CR is non volatile for all */
370    unsigned32 pc;      /* Program counter/Link register */
371    unsigned32 msr;     /* Initial interrupt level */
372} Context_Control;
373
374typedef struct {
375    /* The ABIs (PowerOpen/SVR4/EABI) only require saving f14-f31 over
376     * procedure calls.  However, this would mean that the interrupt
377     * frame had to hold f0-f13, and the fpscr.  And as the majority
378     * of tasks will not have an FP context, we will save the whole
379     * context here.
380     */
381#if (PPC_HAS_DOUBLE == 1)
382    double      f[32];
383    double      fpscr;
384#else
385    float       f[32];
386    float       fpscr;
387#endif
388} Context_Control_fp;
389
390typedef struct CPU_Interrupt_frame {
391    unsigned32 stacklink;       /* Ensure this is a real frame (also reg1 save) */
392#if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27)
393    unsigned32 dummy[13];       /* Used by callees: PowerOpen ABI */
394#else
395    unsigned32 dummy[1];        /* Used by callees: SVR4/EABI */
396#endif
397    /* This is what is left out of the primary contexts */
398    unsigned32 gpr0;
399    unsigned32 gpr2;            /* play safe */
400    unsigned32 gpr3;
401    unsigned32 gpr4;
402    unsigned32 gpr5;
403    unsigned32 gpr6;
404    unsigned32 gpr7;
405    unsigned32 gpr8;
406    unsigned32 gpr9;
407    unsigned32 gpr10;
408    unsigned32 gpr11;
409    unsigned32 gpr12;
410    unsigned32 gpr13;   /* Play safe */
411    unsigned32 gpr28;   /* For internal use by the IRQ handler */
412    unsigned32 gpr29;   /* For internal use by the IRQ handler */
413    unsigned32 gpr30;   /* For internal use by the IRQ handler */
414    unsigned32 gpr31;   /* For internal use by the IRQ handler */
415    unsigned32 cr;      /* Bits of this are volatile, so no-one may save */
416    unsigned32 ctr;
417    unsigned32 xer;
418    unsigned32 lr;
419    unsigned32 pc;
420    unsigned32 msr;
421    unsigned32 pad[3];
422} CPU_Interrupt_frame;
423
424
425/*
426 *  The following table contains the information required to configure
427 *  the PowerPC processor specific parameters.
428 *
429 *  NOTE: The interrupt_stack_size field is required if
430 *        CPU_ALLOCATE_INTERRUPT_STACK is defined as TRUE.
431 *
432 *        The pretasking_hook, predriver_hook, postdriver_hook,
433 *        and do_zero_of_workspace fields are required on ALL CPUs.
434 */
435
436typedef struct {
437  void       (*pretasking_hook)( void );
438  void       (*predriver_hook)( void );
439  void       (*postdriver_hook)( void );
440  void       (*idle_task)( void );
441  boolean      do_zero_of_workspace;
442  unsigned32   interrupt_stack_size;
443  unsigned32   extra_system_initialization_stack;
444  unsigned32   clicks_per_usec; /* Timer clicks per microsecond */
445  unsigned32   serial_per_sec;  /* Serial clocks per second */
446  boolean      serial_external_clock;
447  boolean      serial_xon_xoff;
448  boolean      serial_cts_rts;
449  unsigned32   serial_rate;
450  unsigned32   timer_average_overhead; /* Average overhead of timer in ticks */
451  unsigned32   timer_least_valid; /* Least valid number from timer */
452  void (*spurious_handler)(unsigned32 vector, CPU_Interrupt_frame *);
453}   rtems_cpu_table;
454
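/*
 *  Hypothetical example (not from the original header) of a BSP filling in
 *  this table.  Every value below is a placeholder chosen for illustration;
 *  a real BSP supplies board specific numbers and hooks.
 *
 *      rtems_cpu_table Cpu_table;
 *
 *      Cpu_table.pretasking_hook      = NULL;      use default behavior
 *      Cpu_table.predriver_hook       = NULL;
 *      Cpu_table.postdriver_hook      = NULL;
 *      Cpu_table.idle_task            = NULL;      use default IDLE body
 *      Cpu_table.do_zero_of_workspace = TRUE;
 *      Cpu_table.interrupt_stack_size = 4 * 1024;
 *      Cpu_table.extra_system_initialization_stack = 0;
 *      Cpu_table.clicks_per_usec      = 66;        assumed 66 MHz timer clock
 *      Cpu_table.spurious_handler     = NULL;
 *
 *  The serial_* and timer_* fields would be filled in the same way from the
 *  board documentation.
 */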
455/*
456 *  This variable is optional.  It is used on CPUs on which it is difficult
457 *  to generate an "uninitialized" FP context.  It is filled in by
458 *  _CPU_Initialize and copied into the task's FP context area during
459 *  _CPU_Context_Initialize.
460 */
461
462/* EXTERN Context_Control_fp  _CPU_Null_fp_context; */
463
464/*
465 *  On some CPUs, RTEMS supports a software managed interrupt stack.
466 *  This stack is allocated by the Interrupt Manager and the switch
467 *  is performed in _ISR_Handler.  These variables contain pointers
468 *  to the lowest and highest addresses in the chunk of memory allocated
469 *  for the interrupt stack.  Since it is unknown whether the stack
470 *  grows up or down (in general), this gives the CPU dependent
471 *  code the option of picking the version it wants to use.
472 *
473 *  NOTE: These two variables are required if the macro
474 *        CPU_HAS_SOFTWARE_INTERRUPT_STACK is defined as TRUE.
475 */
476
477EXTERN void               *_CPU_Interrupt_stack_low;
478EXTERN void               *_CPU_Interrupt_stack_high;
479
480/*
481 *  With some compilation systems, it is difficult if not impossible to
482 *  call a high-level language routine from assembly language.  This
483 *  is especially true of commercial Ada compilers and name mangling
484 *  C++ ones.  This variable can be optionally defined by the CPU porter
485 *  and contains the address of the routine _Thread_Dispatch.  This
486 *  can make it easier to invoke that routine at the end of the interrupt
487 *  sequence (if a dispatch is necessary).
488 */
489
490/* EXTERN void           (*_CPU_Thread_dispatch_pointer)(); */
491
492/*
493 *  Nothing prevents the porter from declaring more CPU specific variables.
494 */
495
496EXTERN struct {
497#if (PPC_ABI == PPC_ABI_POWEROPEN)
498  unsigned32 Dispatch_r2;
499#else
500  unsigned32 Default_r2;
501#if (PPC_ABI != PPC_ABI_GCC27)
502  unsigned32 Default_r13;
503#endif
504#endif
505  unsigned32 *Nest_level;
506  unsigned32 *Disable_level;
507  void *Vector_table;
508  void *Stack;
509  boolean *Switch_necessary;
510  boolean *Signal;
511} _CPU_IRQ_info CPU_STRUCTURE_ALIGNMENT;
512
513/*
514 *  The size of the floating point context area.  On some CPUs this
515 *  will not be a "sizeof" because the format of the floating point
516 *  area is not defined -- only the size is.  This is usually on
517 *  CPUs with a "floating point save context" instruction.
518 */
519
520#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
521
522/*
523 * (Optional) # of bytes for libmisc/stackchk to check
524 * If not specified, then it defaults to something reasonable
525 * for most architectures.
526 */
527
528#define CPU_STACK_CHECK_SIZE    (128)
529
530/*
531 *  Amount of extra stack (above minimum stack size) required by
532 *  system initialization thread.  Remember that in a multiprocessor
533 *  system the system initialization thread becomes the MP server thread.
534 */
535
536#define CPU_SYSTEM_INITIALIZATION_THREAD_EXTRA_STACK 0
537
538/*
539 *  This defines the number of entries in the ISR_Vector_table managed
540 *  by RTEMS.
541 */
542
543#define CPU_INTERRUPT_NUMBER_OF_VECTORS  (PPC_INTERRUPT_MAX)
544
545/*
546 *  Should be large enough to run all RTEMS tests.  This ensures
547 *  that a "reasonably" small application should not have any problems.
548 */
549
550#define CPU_STACK_MINIMUM_SIZE          (1024*3)
551
552/*
553 *  CPU's worst alignment requirement for data types on a byte boundary.  This
554 *  alignment does not take into account the requirements for the stack.
555 */
556
557#define CPU_ALIGNMENT              (PPC_ALIGNMENT)
558
559/*
560 *  This number corresponds to the byte alignment requirement for the
561 *  heap handler.  This alignment requirement may be stricter than that
562 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
563 *  common for the heap to follow the same alignment requirement as
564 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
565 *  then this should be set to CPU_ALIGNMENT.
566 *
567 *  NOTE:  This does not have to be a power of 2.  It does have to
568 *         be greater than or equal to CPU_ALIGNMENT.
569 */
570
571#define CPU_HEAP_ALIGNMENT         (PPC_CACHE_ALIGNMENT)
572
573/*
574 *  This number corresponds to the byte alignment requirement for memory
575 *  buffers allocated by the partition manager.  This alignment requirement
576 *  may be stricter than that for the data types alignment specified by
577 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
578 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
579 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
580 *
581 *  NOTE:  This does not have to be a power of 2.  It does have to
582 *         be greater than or equal to CPU_ALIGNMENT.
583 */
584
585#define CPU_PARTITION_ALIGNMENT    (PPC_CACHE_ALIGNMENT)
586
587/*
588 *  This number corresponds to the byte alignment requirement for the
589 *  stack.  This alignment requirement may be stricter than that for the
590 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
591 *  is strict enough for the stack, then this should be set to 0.
592 *
593 *  NOTE:  This must be either 0 or a power of 2 greater than CPU_ALIGNMENT.
594 */
595
596#define CPU_STACK_ALIGNMENT        (PPC_STACK_ALIGNMENT)
597
598/* ISR handler macros */
599
600/*
601 *  Disable all interrupts for an RTEMS critical section.  The previous
602 *  level is returned in _isr_cookie.
603 */
604
605#define _CPU_ISR_Disable( _isr_cookie ) \
606  { \
607    asm volatile ( \
608        "mfmsr %0; andc %1,%0,%1; mtmsr %1" : \
609        "=r" ((_isr_cookie)) : "r" ((PPC_MSR_DISABLE_MASK)) \
610        ); \
611  }
612
613/*
614 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
615 *  This indicates the end of an RTEMS critical section.  The parameter
616 *  _isr_cookie is not modified.
617 */
618
619#define _CPU_ISR_Enable( _isr_cookie )  \
620  { \
621     asm volatile ( "mtmsr %0" : \
622                   "=r" ((_isr_cookie)) : "0" ((_isr_cookie))); \
623  }
624
625/*
626 *  This temporarily restores interrupts to the level in _isr_cookie before
627 *  immediately disabling them again.  This is used to divide long RTEMS
628 *  critical sections into two or more parts.  The parameter _isr_cookie is
629 *  not modified.
630 */
631
632#define _CPU_ISR_Flash( _isr_cookie ) \
633  { \
634    asm volatile ( \
635        "mtmsr %0; andc %1,%0,%1; mtmsr %1" : \
636        "=r" ((_isr_cookie)) : \
637        "r" ((PPC_MSR_DISABLE_MASK)), "0" ((_isr_cookie)) \
638        ); \
639  }
640
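/*
 *  Usage sketch (illustrative, not part of the original header):  a typical
 *  RTEMS critical section built from the macros above.  The two helper
 *  routine names are placeholders.
 *
 *      unsigned32 level;
 *
 *      _CPU_ISR_Disable( level );
 *        first_part_of_long_critical_section();
 *      _CPU_ISR_Flash( level );      briefly reenables, then disables again
 *        second_part_of_long_critical_section();
 *      _CPU_ISR_Enable( level );
 */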
641/*
642 *  Map interrupt level in task mode onto the hardware that the CPU
643 *  actually provides.  Currently, interrupt levels which do not
644 *  map onto the CPU in a generic fashion are undefined.  Someday,
645 *  it would be nice if these were "mapped" by the application
646 *  via a callout.  For example, m68k has 8 levels 0 - 7, levels
647 *  8 - 255 would be available for bsp/application specific meaning.
648 *  This could be used to manage a programmable interrupt controller
649 *  via the rtems_task_mode directive.
650 */
651
652#define _CPU_ISR_Set_level( new_level ) \
653  { \
654    register unsigned32 tmp; \
655    asm volatile ( \
656        "mfmsr %0; andc %0,%0,%1; and %2, %2, %1; or %0, %0, %2; mtmsr %0" : \
657        "=r" ((tmp)) : \
658        "r" ((PPC_MSR_DISABLE_MASK)), "r" ((_CPU_msrs[new_level])), "0" ((tmp)) \
659        ); \
660  }
661
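/*
 *  For example (illustrative only), given the ACB levels listed above,
 *  disabling all maskable IRQs for the current task mode corresponds to:
 *
 *      _CPU_ISR_Set_level( 3 );
 */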
662/* end of ISR handler macros */
663
664/* Context handler macros */
665
666/*
667 *  Initialize the context to a state suitable for starting a
668 *  task after a context restore operation.  Generally, this
669 *  involves:
670 *
671 *     - setting a starting address
672 *     - preparing the stack
673 *     - preparing the stack and frame pointers
674 *     - setting the proper interrupt level in the context
675 *     - initializing the floating point context
676 *
677 *  This routine generally does not set any unnecessary register
678 *  in the context.  The state of the "general data" registers is
679 *  undefined at task start time.
680 */
681
682#if PPC_ABI == PPC_ABI_POWEROPEN
683#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
684                                 _isr, _entry_point ) \
685  { \
686    unsigned32 sp, *desc; \
687    \
688    sp = ((unsigned32)_stack_base) + (_size) - 56; \
689    *((unsigned32 *)sp) = 0; \
690    \
691    desc = (unsigned32 *)_entry_point; \
692    \
693    (_the_context)->msr = PPC_MSR_INITIAL | \
694      _CPU_msrs[ _isr ]; \
695    (_the_context)->pc = desc[0]; \
696    (_the_context)->gpr1 = sp; \
697    (_the_context)->gpr2 = desc[1]; \
698  }
699#endif
700#if PPC_ABI == PPC_ABI_SVR4
701#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
702                                 _isr, _entry_point ) \
703  { \
704    unsigned32 sp, r13; \
705    \
706    sp = ((unsigned32)_stack_base) + (_size) - 8; \
707    *((unsigned32 *)sp) = 0; \
708    \
709    asm volatile ("mr %0, 13" : "=r" ((r13))); \
710    \
711    (_the_context->msr) = PPC_MSR_INITIAL | \
712      _CPU_msrs[ _isr ]; \
713    (_the_context->pc) = _entry_point; \
714    (_the_context->gpr1) = sp; \
715    (_the_context->gpr13) = r13; \
716  }
717#endif
718#if PPC_ABI == PPC_ABI_EABI
719#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
720                                 _isr, _entry_point ) \
721  { \
722    unsigned32 sp, r2, r13; \
723    \
724    sp = ((unsigned32)_stack_base) + (_size) - 8; \
725    *((unsigned32 *)sp) = 0; \
726    \
727    asm volatile ("mr %0,2; mr %1,13" : "=r" ((r2)), "=r" ((r13))); \
728    \
729    (_the_context)->msr = PPC_MSR_INITIAL | \
730      _CPU_msrs[ _isr ]; \
731    (_the_context->pc) = _entry_point; \
732    (_the_context->gpr1) = sp; \
733    (_the_context->gpr2) = r2; \
734    (_the_context->gpr13) = r13; \
735  }
736#endif
737
738/*
739 *  This routine is responsible for somehow restarting the currently
740 *  executing task.  If you are lucky, then all that is necessary
741 *  is restoring the context.  Otherwise, there will need to be
742 *  a special assembly routine which does something special in this
743 *  case.  Context_Restore should work most of the time.  It will
744 *  not work if restarting self conflicts with the stack frame
745 *  assumptions of restoring a context.
746 */
747
748#define _CPU_Context_Restart_self( _the_context ) \
749   _CPU_Context_restore( (_the_context) );
750
751/*
752 *  The purpose of this macro is to allow the initial pointer into
753 *  a floating point context area (used to save the floating point
754 *  context) to be at an arbitrary place in the floating point
755 *  context area.
756 *
757 *  This is necessary because some FP units are designed to have
758 *  their context saved as a stack which grows into lower addresses.
759 *  Other FP units can be saved by simply moving registers into offsets
760 *  from the base of the context area.  Finally some FP units provide
761 *  a "dump context" instruction which could fill in from high to low
762 *  or low to high based on the whim of the CPU designers.
763 */
764
765#define _CPU_Context_Fp_start( _base, _offset ) \
766   ( (void *) (_base) + (_offset) )
767
768/*
769 *  This routine initializes the FP context area passed to it.
770 *  There are a few standard ways in which to initialize the
771 *  floating point context.  The code included for this macro assumes
772 *  that this is a CPU in which an "initial" FP context was saved into
773 *  _CPU_Null_fp_context and it simply copies it to the destination
774 *  context passed to it.
775 *
776 *  Other models include (1) not doing anything, and (2) putting
777 *  a "null FP status word" in the correct place in the FP context.
778 */
779
780#define _CPU_Context_Initialize_fp( _destination ) \
781  { \
782   ((Context_Control_fp *) *((void **) _destination))->fpscr = PPC_INIT_FPSCR; \
783  }
784
785/* end of Context handler macros */
786
787/* Fatal Error manager macros */
788
789/*
790 *  This routine copies _error into a known place -- typically a stack
791 *  location or a register, optionally disables interrupts, and
792 *  halts/stops the CPU.
793 */
794
795#define _CPU_Fatal_halt( _error ) \
796  _CPU_Fatal_error(_error)
797
798/* end of Fatal Error manager macros */
799
800/* Bitfield handler macros */
801
802/*
803 *  This routine sets _output to the bit number of the first bit
804 *  set in _value.  _value is of CPU dependent type Priority_Bit_map_control.
805 *  This type may be either 16 or 32 bits wide although only the 16
806 *  least significant bits will be used.
807 *
808 *  There are a number of variables in using a "find first bit" type
809 *  instruction.
810 *
811 *    (1) What happens when run on a value of zero?
812 *    (2) Bits may be numbered from MSB to LSB or vice-versa.
813 *    (3) The numbering may be zero or one based.
814 *    (4) The "find first bit" instruction may search from MSB or LSB.
815 *
816 *  RTEMS guarantees that (1) will never happen so it is not a concern.
817 *  (2),(3), (4) are handled by the macros _CPU_Priority_mask() and
818 *  _CPU_Priority_Bits_index().  These three form a set of routines
819 *  which must logically operate together.  Bits in the _value are
820 *  set and cleared based on masks built by _CPU_Priority_mask().
821 *  The basic major and minor values calculated by _Priority_Major()
822 *  and _Priority_Minor() are "massaged" by _CPU_Priority_Bits_index()
823 *  to properly range between the values returned by the "find first bit"
824 *  instruction.  This makes it possible for _Priority_Get_highest() to
825 *  calculate the major and directly index into the minor table.
826 *  This mapping is necessary to ensure that 0 (a high priority major/minor)
827 *  is the first bit found.
828 *
829 *  This entire "find first bit" and mapping process depends heavily
830 *  on the manner in which a priority is broken into a major and minor
831 *  components with the major being the 4 MSB of a priority and minor
832 *  the 4 LSB.  Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
833 *  priority.  And (15 << 4) + 14 corresponds to priority 254 -- the next
834 *  to the lowest priority.
835 *
836 *  If your CPU does not have a "find first bit" instruction, then
837 *  there are ways to make do without it.  Here are a handful of ways
838 *  to implement this in software:
839 *
840 *    - a series of 16 bit test instructions
841 *    - a "binary search using if's"
842 *    - _number = 0
843 *      if _value > 0x00ff
844 *        _value >>=8
845 *        _number = 8;
846 *
847 *      if _value > 0x000f
848 *        _value >>= 4
849 *        _number += 4
850 *
851 *      _number += bit_set_table[ _value ]
852 *
853 *    where bit_set_table[ 16 ] has values which indicate the first
854 *      bit set
855 */
856
857#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
858  { \
859    asm volatile ("cntlzw %0, %1" : "=r" ((_output)), "=r" ((_value)) : \
860                  "1" ((_value))); \
861  }
862
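/*
 *  Portable fallback sketch (not used by this port, which has cntlzw):  the
 *  software outline in the comment above written out in C.  It yields the
 *  bit number of the most significant set bit counted from the least
 *  significant end, so matching _CPU_Priority_Mask() and
 *  _CPU_Priority_Bits_index() definitions would be required.  The table
 *  name is illustrative.
 *
 *      static const unsigned char _Example_msb_of_nibble[ 16 ] =
 *        { 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3 };
 *
 *      _number = 0;
 *      if ( _value > 0x00ff ) { _value >>= 8; _number  = 8; }
 *      if ( _value > 0x000f ) { _value >>= 4; _number += 4; }
 *      _number += _Example_msb_of_nibble[ _value ];
 */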
863/* end of Bitfield handler macros */
864
865/*
866 *  This routine builds the mask which corresponds to the bit fields
867 *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
868 *  for that routine.
869 */
870
871#define _CPU_Priority_Mask( _bit_number ) \
872  ( 0x80000000 >> (_bit_number) )
873
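/*
 *  For example, _CPU_Priority_Mask( 5 ) evaluates to 0x04000000:  the bit
 *  five positions below the most significant bit, which matches the MSB
 *  based numbering produced by cntlzw above.
 */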
874/*
875 *  This routine translates the bit numbers returned by
876 *  _CPU_Bitfield_Find_first_bit() into something suitable for use as
877 *  a major or minor component of a priority.  See the discussion
878 *  for that routine.
879 */
880
881#define _CPU_Priority_Bits_index( _priority ) \
882  (_priority)
883
884/* end of Priority handler macros */
885
886/* variables */
887
888extern const unsigned32 _CPU_msrs[4];
889
890/* functions */
891
892/*
893 *  _CPU_Initialize
894 *
895 *  This routine performs CPU dependent initialization.
896 */
897
898void _CPU_Initialize(
899  rtems_cpu_table  *cpu_table,
900  void      (*thread_dispatch)
901);
902
903/*
904 *  _CPU_ISR_install_vector
905 *
906 *  This routine installs an interrupt vector.
907 */
908
909void _CPU_ISR_install_vector(
910  unsigned32  vector,
911  proc_ptr    new_handler,
912  proc_ptr   *old_handler
913);
914
915/*
916 *  _CPU_Install_interrupt_stack
917 *
918 *  This routine installs the hardware interrupt stack pointer.
919 *
920 *  NOTE:  It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
921 *         is TRUE.
922 */
923
924void _CPU_Install_interrupt_stack( void );
925
926/*
927 *  _CPU_Context_switch
928 *
929 *  This routine switches from the run context to the heir context.
930 */
931
932void _CPU_Context_switch(
933  Context_Control  *run,
934  Context_Control  *heir
935);
936
937/*
938 *  _CPU_Context_restore
939 *
940 *  This routine is generally used only to restart self in an
941 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
942 *
943 *  NOTE: May be unnecessary to reload some registers.
944 */
945
946void _CPU_Context_restore(
947  Context_Control *new_context
948);
949
950/*
951 *  _CPU_Context_save_fp
952 *
953 *  This routine saves the floating point context passed to it.
954 */
955
956void _CPU_Context_save_fp(
957  void **fp_context_ptr
958);
959
960/*
961 *  _CPU_Context_restore_fp
962 *
963 *  This routine restores the floating point context passed to it.
964 */
965
966void _CPU_Context_restore_fp(
967  void **fp_context_ptr
968);
969
970void _CPU_Fatal_error(
971  unsigned32 _error
972);
973
974/*  The following routine swaps the endian format of an unsigned int.
975 *  It must be static because it is referenced indirectly.
976 *
977 *  This version will work on any processor, but if there is a better
978 *  way for your CPU PLEASE use it.  The most common way to do this is to:
979 *
980 *     swap least significant two bytes with 16-bit rotate
981 *     swap upper and lower 16-bits
982 *     swap most significant two bytes with 16-bit rotate
983 *
984 *  Some CPUs have special instructions which swap a 32-bit quantity in
985 *  a single instruction (e.g. i486).  It is probably best to avoid
986 *  an "endian swapping control bit" in the CPU.  One good reason is
987 *  that interrupts would probably have to be disabled to ensure that
988 *  an interrupt does not try to access the same "chunk" with the wrong
989 *  endian.  Another good reason is that on some CPUs, the endian bit
990 *  changes the endianness for ALL fetches -- both code and data -- so the code
991 *  will be fetched incorrectly.
992 */
993 
994static inline unsigned int CPU_swap_u32(
995  unsigned int value
996)
997{
998  unsigned32 swapped;
999 
1000  asm volatile("rlwimi %0,%1,8,24,31;"
1001               "rlwimi %0,%1,24,16,23;"
1002               "rlwimi %0,%1,8,8,15;"
1003               "rlwimi %0,%1,24,0,7;" :
1004               "=r" ((swapped)) : "r" ((value)));
1005
1006  return( swapped );
1007}
1008
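/*
 *  For reference (illustrative, not part of the original header):  a
 *  portable C equivalent of the swap performed above, useful for host side
 *  testing or comparison.  The name is made up.
 *
 *      static inline unsigned int _Example_swap_u32( unsigned int value )
 *      {
 *        return ((value & 0x000000ff) << 24) |
 *               ((value & 0x0000ff00) <<  8) |
 *               ((value & 0x00ff0000) >>  8) |
 *               ((value & 0xff000000) >> 24);
 *      }
 *
 *  For example, CPU_swap_u32( 0x12345678 ) yields 0x78563412.
 */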
1009#ifdef __cplusplus
1010}
1011#endif
1012
1013#endif