source: rtems/cpukit/score/cpu/powerpc/rtems/score/cpu.h @ 3084de2

Last change on this file since 3084de2 was 3084de2, checked in by Joel Sherrill <joel.sherrill@…>, on 04/07/99 at 15:57:05

MPC821 support and PPC patches from Andrew Bray <andy@…>:

In c/src/exec/score/cpu/powerpc/rtems/score/ppc.h:

A lot of hardware interrupts were omitted. Patch enclosed.
I have also added the 821.

In c/src/exec/score/cpu/powerpc/rtems/score/cpu.h:

My patch adds the 821.

In c/src/exec/score/cpu/powerpc/cpu.c:

I have added the MPC821 and also fixed up the missing hardware
interrupts. The file was also inconsistent with
c/src/lib/libcpu/powerpc/mpc860/vectors/vectors.S; this has been fixed.

In c/src/lib/libcpu/powerpc/mpc860/vectors/vectors.S:

Fixed an inconsistency with cpu.c.

I also include some new files to go with the above patches. These are the
cpu library rtems-19990331/c/src/lib/libcpu/powerpc/mpc821/* and
c/src/exec/score/cpu/powerpc/mpc821.h which are minor modifications of
the 860 equivalents.

Other comments:

The various accesses to the DPRAM on the 860 are done with a link-time
symbol. This could be done dynamically at run time by reading the IMMR
register and masking off the lower 16 bits. This takes the same amount
of time as loading an address constant, and the same number of
instructions as well (two).
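
For illustration only (this helper is not part of the patch): a run-time
lookup along those lines might look like the sketch below, assuming IMMR is
SPR 638 on the MPC860/MPC821 and that the dual-port RAM sits at a fixed
offset from the internal memory map base; the function name is hypothetical.

  static inline void *mpc8xx_internal_map_base( void )
  {
    unsigned long immr;

    asm volatile( "mfspr %0, 638" : "=r" (immr) );   /* read IMMR (SPR 638) */
    return (void *) (immr & 0xffff0000);             /* mask off lower 16 bits */
  }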

In c/src/lib/libcpu/powerpc/mpc860/console-generic/console-generic.c:

This will silently fail if you attempt to use SCC1. This is only relevant
if you are not using SCC1 for ethernet.

This file also sets one of the port B output pins for each port. This is
NOT generic; it should be in the BSP-specific console driver.

  • Property mode set to 100644
File size: 37.0 KB
1/*  cpu.h
2 *
3 *  This include file contains information pertaining to the PowerPC
4 *  processor.
5 *
6 *  Author:     Andrew Bray <andy@i-cubed.co.uk>
7 *
8 *  COPYRIGHT (c) 1995 by i-cubed ltd.
9 *
10 *  To anyone who acknowledges that this file is provided "AS IS"
11 *  without any express or implied warranty:
12 *      permission to use, copy, modify, and distribute this file
13 *      for any purpose is hereby granted without fee, provided that
14 *      the above copyright notice and this notice appears in all
15 *      copies, and that the name of i-cubed limited not be used in
16 *      advertising or publicity pertaining to distribution of the
17 *      software without specific, written prior permission.
18 *      i-cubed limited makes no representations about the suitability
19 *      of this software for any purpose.
20 *
21 *  Derived from c/src/exec/cpu/no_cpu/cpu.h:
22 *
23 *  COPYRIGHT (c) 1989-1997.
24 *  On-Line Applications Research Corporation (OAR).
25 *  Copyright assigned to U.S. Government, 1994.
26 *
27 *  The license and distribution terms for this file may in
28 *  the file LICENSE in this distribution or at
29 *  http://www.OARcorp.com/rtems/license.html.
30 *
31 *  $Id$
32 */
33
34#ifndef __CPU_h
35#define __CPU_h
36
37#ifdef __cplusplus
38extern "C" {
39#endif
40
41#include <rtems/score/ppc.h>               /* pick up machine definitions */
42#ifndef ASM
43struct CPU_Interrupt_frame;
44
45#include <rtems/score/ppctypes.h>
46#endif
47
48/* conditional compilation parameters */
49
50/*
51 *  Should the calls to _Thread_Enable_dispatch be inlined?
52 *
53 *  If TRUE, then they are inlined.
54 *  If FALSE, then a subroutine call is made.
55 *
56 *  Basically this is an example of the classic trade-off of size
57 *  versus speed.  Inlining the call (TRUE) typically increases the
58 *  size of RTEMS while speeding up the enabling of dispatching.
59 *  [NOTE: In general, the _Thread_Dispatch_disable_level will
60 *  only be 0 or 1 unless you are in an interrupt handler and that
61 *  interrupt handler invokes the executive.]  When not inlined
62 *  something calls _Thread_Enable_dispatch which in turn calls
63 *  _Thread_Dispatch.  If the enable dispatch is inlined, then
64 *  one subroutine call is avoided entirely.
65 */
66
67#define CPU_INLINE_ENABLE_DISPATCH       FALSE
68
69/*
70 *  Should the body of the search loops in _Thread_queue_Enqueue_priority
71 *  be unrolled one time?  If unrolled, each iteration of the loop examines
72 *  two "nodes" on the chain being searched.  Otherwise, only one node
73 *  is examined per iteration.
74 *
75 *  If TRUE, then the loops are unrolled.
76 *  If FALSE, then the loops are not unrolled.
77 *
78 *  The primary factor in making this decision is the cost of disabling
79 *  and enabling interrupts (_ISR_Flash) versus the cost of the rest of the
80 *  body of the loop.  On some CPUs, the flash is more expensive than
81 *  one iteration of the loop body.  In this case, it might be desirable
82 *  to unroll the loop.  It is important to note that on some CPUs, this
83 *  code is the longest interrupt disable period in RTEMS.  So it is
84 *  necessary to strike a balance when setting this parameter.
85 */
86
87#define CPU_UNROLL_ENQUEUE_PRIORITY      FALSE
88
89/*
90 *  Does RTEMS manage a dedicated interrupt stack in software?
91 *
92 *  If TRUE, then a stack is allocated in _Interrupt_Manager_initialization.
93 *  If FALSE, nothing is done.
94 *
95 *  If the CPU supports a dedicated interrupt stack in hardware,
96 *  then it is generally the responsibility of the BSP to allocate it
97 *  and set it up.
98 *
99 *  If the CPU does not support a dedicated interrupt stack, then
100 *  the porter has two options: (1) execute interrupts on the
101 *  stack of the interrupted task, and (2) have RTEMS manage a dedicated
102 *  interrupt stack.
103 *
104 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
105 *
106 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
107 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
108 *  possible that both are FALSE for a particular CPU.  Although it
109 *  is unclear what that would imply about the interrupt processing
110 *  procedure on that CPU.
111 */
112
113#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE
114
115/*
116 *  Does this CPU have hardware support for a dedicated interrupt stack?
117 *
118 *  If TRUE, then it must be installed during initialization.
119 *  If FALSE, then no installation is performed.
120 *
121 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
122 *
123 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
124 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
125 *  possible that both are FALSE for a particular CPU, although it
126 *  is unclear what that would imply about the interrupt processing
127 *  procedure on that CPU.
128 */
129
130/*
131 *  ACB: This is a lie, but it gets us a handle on a call to set up
132 *  a variable derived from the top of the interrupt stack.
133 */
134
135#define CPU_HAS_HARDWARE_INTERRUPT_STACK TRUE
136
137/*
138 *  Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
139 *
140 *  If TRUE, then the memory is allocated during initialization.
141 *  If FALSE, then the memory is not allocated during initialization.
142 *
143 *  This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE
144 *  or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE.
145 */
146
147#define CPU_ALLOCATE_INTERRUPT_STACK TRUE
148
149/*
150 *  Does RTEMS invoke the user's ISR with the vector number and
151 *  a pointer to the saved interrupt frame (1) or just the vector
152 *  number (0)?
153 */
154
155#define CPU_ISR_PASSES_FRAME_POINTER 1
156
157/*
158 *  Does the CPU have hardware floating point?
159 *
160 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
161 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
162 *
163 *  If there is a FP coprocessor such as the i387 or mc68881, then
164 *  the answer is TRUE.
165 *
166 *  The macro name "PPC_HAS_FPU" should be made CPU specific.
167 *  It indicates whether or not this CPU model has FP support.  For
168 *  example, it would be possible to have an i386_nofp CPU model
169 *  which set this to false to indicate that you have an i386 without
170 *  an i387 and wish to leave floating point support out of RTEMS.
171 */
172
173#if ( PPC_HAS_FPU == 1 )
174#define CPU_HARDWARE_FP     TRUE
175#else
176#define CPU_HARDWARE_FP     FALSE
177#endif
178
179/*
180 *  Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
181 *
182 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
183 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
184 *
185 *  So far, the only CPU in which this option has been used is the
186 *  HP PA-RISC.  The HP C compiler and gcc both implicitly use the
187 *  floating point registers to perform integer multiplies.  If
188 *  a function which you would not think utilizes the FP unit DOES,
189 *  then one cannot easily predict which tasks will use the FP hardware.
190 *  In this case, this option should be TRUE.
191 *
192 *  If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
193 */
194
195#define CPU_ALL_TASKS_ARE_FP     FALSE
196
197/*
198 *  Should the IDLE task have a floating point context?
199 *
200 *  If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
201 *  and it has a floating point context which is switched in and out.
202 *  If FALSE, then the IDLE task does not have a floating point context.
203 *
204 *  Setting this to TRUE negatively impacts the time required to preempt
205 *  the IDLE task from an interrupt because the floating point context
206 *  must be saved as part of the preemption.
207 */
208
209#define CPU_IDLE_TASK_IS_FP      FALSE
210
211/*
212 *  Should the saving of the floating point registers be deferred
213 *  until a context switch is made to another different floating point
214 *  task?
215 *
216 *  If TRUE, then the floating point context will not be stored until
217 *  necessary.  It will remain in the floating point registers and not
218 *  disturbed until another floating point task is switched to.
219 *
220 *  If FALSE, then the floating point context is saved when a floating
221 *  point task is switched out and restored when the next floating point
222 *  task is restored.  The state of the floating point registers between
223 *  those two operations is not specified.
224 *
225 *  If the floating point context does NOT have to be saved as part of
226 *  interrupt dispatching, then it should be safe to set this to TRUE.
227 *
228 *  Setting this flag to TRUE results in using a different algorithm
229 *  for deciding when to save and restore the floating point context.
230 *  The deferred FP switch algorithm minimizes the number of times
231 *  the FP context is saved and restored.  The FP context is not saved
232 *  until a context switch is made to another, different FP task.
233 *  Thus in a system with only one FP task, the FP context will never
234 *  be saved or restored.
235 */
236/*
237 *  ACB Note:  This could make debugging tricky..
238 */
239
240#define CPU_USE_DEFERRED_FP_SWITCH       TRUE
241
242/*
243 *  Does this port provide a CPU dependent IDLE task implementation?
244 *
245 *  If TRUE, then the routine _CPU_Thread_Idle_body
246 *  must be provided and is the default IDLE thread body instead of
247 *  _Thread_Idle_body.
248 *
249 *  If FALSE, then use the generic IDLE thread body if the BSP does
250 *  not provide one.
251 *
252 *  This is intended to allow for supporting processors which have
253 *  a low power or idle mode.  When the IDLE thread is executed, then
254 *  the CPU can be powered down.
255 *
256 *  The order of precedence for selecting the IDLE thread body is:
257 *
258 *    1.  BSP provided
259 *    2.  CPU dependent (if provided)
260 *    3.  generic (if no BSP and no CPU dependent)
261 */
262
263#define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE
264
265/*
266 *  Does the stack grow up (toward higher addresses) or down
267 *  (toward lower addresses)?
268 *
269 *  If TRUE, then the stack grows upward.
270 *  If FALSE, then the stack grows toward smaller addresses.
271 */
272
273#define CPU_STACK_GROWS_UP               FALSE
274
275/*
276 *  The following is the variable attribute used to force alignment
277 *  of critical RTEMS structures.  On some processors it may make
278 *  sense to have these aligned on tighter boundaries than
279 *  the minimum requirements of the compiler in order to have as
280 *  much of the critical data area as possible in a cache line.
281 *
282 *  The placement of this macro in the declaration of the variables
283 *  is based on the syntactic requirements of the GNU C
284 *  "__attribute__" extension.  For example with GNU C, use
285 *  the following to force a structure to a 32 byte boundary.
286 *
287 *      __attribute__ ((aligned (32)))
288 *
289 *  NOTE:  Currently only the Priority Bit Map table uses this feature.
290 *         To benefit from using this, the data must be heavily
291 *         used so it will stay in the cache and used frequently enough
292 *         in the executive to justify turning this on.
293 */
294
295#define CPU_STRUCTURE_ALIGNMENT \
296  __attribute__ ((aligned (PPC_CACHE_ALIGNMENT)))
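
/*
 *  Hedged usage sketch, not part of the original header: cache-aligning a
 *  heavily used table with the macro above.  The table name is hypothetical
 *  and the declaration is kept under "#if 0" because it is illustrative only.
 */
#if 0
static unsigned32 _Hypothetical_bit_map[ 16 ] CPU_STRUCTURE_ALIGNMENT;
#endif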
297
298/*
299 *  Define what is required to specify how the network to host conversion
300 *  routines are handled.
301 */
302
303#define CPU_CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES FALSE
304#define CPU_BIG_ENDIAN                           TRUE
305#define CPU_LITTLE_ENDIAN                        FALSE
306
307/*
308 *  The following defines the number of bits actually used in the
309 *  interrupt field of the task mode.  How those bits map to the
310 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
311 *
312 *  The interrupt level is bit mapped for the PowerPC family. The
313 *  bits are set to 0 to indicate that a particular exception source
314 *  is enabled and 1 if it is disabled.  This keeps with the RTEMS convention
315 *  that interrupt level 0 means all sources are enabled.
316 *
317 *  The bits are assigned to correspond to enable bits in the MSR.
318 */
319
320#define PPC_INTERRUPT_LEVEL_ME   0x01
321#define PPC_INTERRUPT_LEVEL_EE   0x02
322#define PPC_INTERRUPT_LEVEL_CE   0x04
323
324/* XXX should these be maskable? */
325#if 0
326#define PPC_INTERRUPT_LEVEL_DE   0x08
327#define PPC_INTERRUPT_LEVEL_BE   0x10
328#define PPC_INTERRUPT_LEVEL_SE   0x20
329#endif
330
331#define CPU_MODES_INTERRUPT_MASK   0x00000007
332
333/*
334 *  Processor defined structures
335 *
336 *  Example structures include the descriptor tables from the i386
337 *  and the processor control structure on the i960ca.
338 */
339
340/* may need to put some structures here.  */
341
342/*
343 * Contexts
344 *
345 *  Generally there are 2 types of context to save.
346 *     1. Interrupt registers to save
347 *     2. Task level registers to save
348 *
349 *  This means we have the following 3 context items:
350 *     1. task level context stuff::  Context_Control
351 *     2. floating point task stuff:: Context_Control_fp
352 *     3. special interrupt level context :: Context_Control_interrupt
353 *
354 *  On some processors, it is cost-effective to save only the callee
355 *  preserved registers during a task context switch.  This means
356 *  that the ISR code needs to save those registers which do not
357 *  persist across function calls.  It is not mandatory to make this
358 *  distinction between the caller/callee saved registers for the
359 *  purpose of minimizing context saved during task switch and on interrupts.
360 *  If the cost of saving extra registers is minimal, simplicity is the
361 *  choice.  Save the same context on interrupt entry as for tasks in
362 *  this case.
363 *
364 *  Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
365 *  care should be used in designing the context area.
366 *
367 *  On some CPUs with hardware floating point support, the Context_Control_fp
368 *  structure will not be used or it simply consists of an array of a
369 *  fixed number of bytes.   This is done when the floating point context
370 *  is dumped by a "FP save context" type instruction and the format
371 *  is not really defined by the CPU.  In this case, there is no need
372 *  to figure out the exact format -- only the size.  Of course, although
373 *  this is enough information for RTEMS, it is probably not enough for
374 *  a debugger such as gdb.  But that is another problem.
375 */
376
377typedef struct {
378    unsigned32 gpr1;    /* Stack pointer for all */
379    unsigned32 gpr2;    /* TOC in PowerOpen, reserved SVR4, section ptr EABI + */
380    unsigned32 gpr13;   /* First non volatile PowerOpen, section ptr SVR4/EABI */
381    unsigned32 gpr14;   /* Non volatile for all */
382    unsigned32 gpr15;   /* Non volatile for all */
383    unsigned32 gpr16;   /* Non volatile for all */
384    unsigned32 gpr17;   /* Non volatile for all */
385    unsigned32 gpr18;   /* Non volatile for all */
386    unsigned32 gpr19;   /* Non volatile for all */
387    unsigned32 gpr20;   /* Non volatile for all */
388    unsigned32 gpr21;   /* Non volatile for all */
389    unsigned32 gpr22;   /* Non volatile for all */
390    unsigned32 gpr23;   /* Non volatile for all */
391    unsigned32 gpr24;   /* Non volatile for all */
392    unsigned32 gpr25;   /* Non volatile for all */
393    unsigned32 gpr26;   /* Non volatile for all */
394    unsigned32 gpr27;   /* Non volatile for all */
395    unsigned32 gpr28;   /* Non volatile for all */
396    unsigned32 gpr29;   /* Non volatile for all */
397    unsigned32 gpr30;   /* Non volatile for all */
398    unsigned32 gpr31;   /* Non volatile for all */
399    unsigned32 cr;      /* PART of the CR is non volatile for all */
400    unsigned32 pc;      /* Program counter/Link register */
401    unsigned32 msr;     /* Initial interrupt level */
402} Context_Control;
403
404typedef struct {
405    /* The ABIs (PowerOpen/SVR4/EABI) only require saving f14-f31 over
406     * procedure calls.  However, this would mean that the interrupt
407     * frame had to hold f0-f13, and the fpscr.  And as the majority
408     * of tasks will not have an FP context, we will save the whole
409     * context here.
410     */
411#if (PPC_HAS_DOUBLE == 1)
412    double      f[32];
413    double      fpscr;
414#else
415    float       f[32];
416    float       fpscr;
417#endif
418} Context_Control_fp;
419
420typedef struct CPU_Interrupt_frame {
421    unsigned32 stacklink;       /* Ensure this is a real frame (also reg1 save) */
422#if (PPC_ABI == PPC_ABI_POWEROPEN || PPC_ABI == PPC_ABI_GCC27)
423    unsigned32 dummy[13];       /* Used by callees: PowerOpen ABI */
424#else
425    unsigned32 dummy[1];        /* Used by callees: SVR4/EABI */
426#endif
427    /* This is what is left out of the primary contexts */
428    unsigned32 gpr0;
429    unsigned32 gpr2;            /* play safe */
430    unsigned32 gpr3;
431    unsigned32 gpr4;
432    unsigned32 gpr5;
433    unsigned32 gpr6;
434    unsigned32 gpr7;
435    unsigned32 gpr8;
436    unsigned32 gpr9;
437    unsigned32 gpr10;
438    unsigned32 gpr11;
439    unsigned32 gpr12;
440    unsigned32 gpr13;   /* Play safe */
441    unsigned32 gpr28;   /* For internal use by the IRQ handler */
442    unsigned32 gpr29;   /* For internal use by the IRQ handler */
443    unsigned32 gpr30;   /* For internal use by the IRQ handler */
444    unsigned32 gpr31;   /* For internal use by the IRQ handler */
445    unsigned32 cr;      /* Bits of this are volatile, so no-one may save */
446    unsigned32 ctr;
447    unsigned32 xer;
448    unsigned32 lr;
449    unsigned32 pc;
450    unsigned32 msr;
451    unsigned32 pad[3];
452} CPU_Interrupt_frame;
453
454
455/*
456 *  The following table contains the information required to configure
457 *  the PowerPC processor specific parameters.
458 */
459
460typedef struct {
461  void       (*pretasking_hook)( void );
462  void       (*predriver_hook)( void );
463  void       (*postdriver_hook)( void );
464  void       (*idle_task)( void );
465  boolean      do_zero_of_workspace;
466  unsigned32   idle_task_stack_size;
467  unsigned32   interrupt_stack_size;
468  unsigned32   extra_mpci_receive_server_stack;
469  void *     (*stack_allocate_hook)( unsigned32 );
470  void       (*stack_free_hook)( void* );
471  /* end of fields required on all CPUs */
472
473  unsigned32   clicks_per_usec;        /* Timer clicks per microsecond */
474  void       (*spurious_handler)(unsigned32 vector, CPU_Interrupt_frame *);
475  boolean      exceptions_in_RAM;     /* TRUE if in RAM */
476
477#if (defined(ppc403) || defined(mpc860) || defined(mpc821))
478  unsigned32   serial_per_sec;         /* Serial clocks per second */
479  boolean      serial_external_clock;
480  boolean      serial_xon_xoff;
481  boolean      serial_cts_rts;
482  unsigned32   serial_rate;
483  unsigned32   timer_average_overhead; /* Average overhead of timer in ticks */
484  unsigned32   timer_least_valid;      /* Least valid number from timer      */
485  boolean      timer_internal_clock;   /* TRUE, when timer runs with CPU clk */
486#endif
487
488#if (defined(mpc860) || defined(mpc821))
489  unsigned32   clock_speed;            /* Speed of CPU in Hz */
490#endif
491}   rtems_cpu_table;
492
493/*
494 *  The following type defines an entry in the PPC's trap table.
495 *
496 *  NOTE: The instructions chosen are RTEMS dependent although one is
497 *        obligated to use two of the four instructions to perform a
498 *        long jump.  The other instructions load one register with the
499 *        trap type (a.k.a. vector) and another with the psr.
500 */
501 
502typedef struct {
503  unsigned32   stwu_r1;                       /* stwu  %r1, -(??+IP_END)(%1)*/
504  unsigned32   stw_r0;                        /* stw   %r0, IP_0(%r1)       */
505  unsigned32   li_r0_IRQ;                     /* li    %r0, _IRQ            */
506  unsigned32   b_Handler;                     /* b     PROC (_ISR_Handler)  */
507} CPU_Trap_table_entry;
508
509/*
510 *  This variable is optional.  It is used on CPUs on which it is difficult
511 *  to generate an "uninitialized" FP context.  It is filled in by
512 *  _CPU_Initialize and copied into the task's FP context area during
513 *  _CPU_Context_Initialize.
514 */
515
516/* EXTERN Context_Control_fp  _CPU_Null_fp_context; */
517
518/*
519 *  On some CPUs, RTEMS supports a software managed interrupt stack.
520 *  This stack is allocated by the Interrupt Manager and the switch
521 *  is performed in _ISR_Handler.  These variables contain pointers
522 *  to the lowest and highest addresses in the chunk of memory allocated
523 *  for the interrupt stack.  Since it is unknown whether the stack
524 *  grows up or down (in general), this gives the CPU dependent
525 *  code the option of picking the version it wants to use.
526 *
527 *  NOTE: These two variables are required if the macro
528 *        CPU_HAS_SOFTWARE_INTERRUPT_STACK is defined as TRUE.
529 */
530
531SCORE_EXTERN void               *_CPU_Interrupt_stack_low;
532SCORE_EXTERN void               *_CPU_Interrupt_stack_high;
533
534/*
535 *  With some compilation systems, it is difficult if not impossible to
536 *  call a high-level language routine from assembly language.  This
537 *  is especially true of commercial Ada compilers and name mangling
538 *  C++ ones.  This variable can be optionally defined by the CPU porter
539 *  and contains the address of the routine _Thread_Dispatch.  This
540 *  can make it easier to invoke that routine at the end of the interrupt
541 *  sequence (if a dispatch is necessary).
542 */
543
544/* EXTERN void           (*_CPU_Thread_dispatch_pointer)(); */
545
546/*
547 *  Nothing prevents the porter from declaring more CPU specific variables.
548 */
549
550
551SCORE_EXTERN struct {
552  unsigned32 *Nest_level;
553  unsigned32 *Disable_level;
554  void *Vector_table;
555  void *Stack;
556#if (PPC_ABI == PPC_ABI_POWEROPEN)
557  unsigned32 Dispatch_r2;
558#else
559  unsigned32 Default_r2;
560#if (PPC_ABI != PPC_ABI_GCC27)
561  unsigned32 Default_r13;
562#endif
563#endif
564  volatile boolean *Switch_necessary;
565  boolean *Signal;
566
567  unsigned32 msr_initial;
568} _CPU_IRQ_info CPU_STRUCTURE_ALIGNMENT;
569
570/*
571 *  The size of the floating point context area.  On some CPUs this
572 *  will not be a "sizeof" because the format of the floating point
573 *  area is not defined -- only the size is.  This is usually on
574 *  CPUs with a "floating point save context" instruction.
575 */
576
577#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
578
579/*
580 * (Optional) # of bytes for libmisc/stackchk to check
581 * If not specified, then it defaults to something reasonable
582 * for most architectures.
583 */
584
585#define CPU_STACK_CHECK_SIZE    (128)
586
587/*
588 *  Amount of extra stack (above minimum stack size) required by
589 *  MPCI receive server thread.  Remember that in a multiprocessor
590 *  system this thread must exist and be able to process all directives.
591 */
592
593#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
594
595/*
596 *  This defines the number of entries in the ISR_Vector_table managed
597 *  by RTEMS.
598 */
599
600#define CPU_INTERRUPT_NUMBER_OF_VECTORS     (PPC_INTERRUPT_MAX)
601#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER (PPC_INTERRUPT_MAX - 1)
602
603/*
604 *  Should be large enough to run all RTEMS tests.  This ensures
605 *  that a "reasonably" small application should not have any problems.
606 */
607
608#define CPU_STACK_MINIMUM_SIZE          (1024*3)
609
610/*
611 *  CPU's worst alignment requirement for data types on a byte boundary.  This
612 *  alignment does not take into account the requirements for the stack.
613 */
614
615#define CPU_ALIGNMENT              (PPC_ALIGNMENT)
616
617/*
618 *  This number corresponds to the byte alignment requirement for the
619 *  heap handler.  This alignment requirement may be stricter than that
620 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
621 *  common for the heap to follow the same alignment requirement as
622 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
623 *  then this should be set to CPU_ALIGNMENT.
624 *
625 *  NOTE:  This does not have to be a power of 2.  It does have to
626 *         be greater than or equal to CPU_ALIGNMENT.
627 */
628
629#define CPU_HEAP_ALIGNMENT         (PPC_ALIGNMENT)
630
631/*
632 *  This number corresponds to the byte alignment requirement for memory
633 *  buffers allocated by the partition manager.  This alignment requirement
634 *  may be stricter than that for the data types alignment specified by
635 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
636 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
637 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
638 *
639 *  NOTE:  This does not have to be a power of 2.  It does have to
640 *         be greater than or equal to CPU_ALIGNMENT.
641 */
642
643#define CPU_PARTITION_ALIGNMENT    (PPC_ALIGNMENT)
644
645/*
646 *  This number corresponds to the byte alignment requirement for the
647 *  stack.  This alignment requirement may be stricter than that for the
648 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
649 *  is strict enough for the stack, then this should be set to 0.
650 *
651 *  NOTE:  This must be either 0 or a power of 2 greater than CPU_ALIGNMENT.
652 */
653
654#define CPU_STACK_ALIGNMENT        (PPC_STACK_ALIGNMENT)
655
656/* ISR handler macros */
657
658/*
659 *  Disable all interrupts for an RTEMS critical section.  The previous
660 *  level is returned in _isr_cookie.
661 */
662
663#define loc_string(a,b) a " (" #b ")\n"
664
665#define _CPU_MSR_Value( _msr_value ) \
666  do { \
667    _msr_value = 0; \
668    asm volatile ("mfmsr %0" : "=&r" ((_msr_value)) : "0" ((_msr_value))); \
669  } while (0)
670
671#define _CPU_MSR_SET( _msr_value ) \
672{ asm volatile ("mtmsr %0" : "=&r" ((_msr_value)) : "0" ((_msr_value))); }
673
674#if 0
675#define _CPU_ISR_Disable( _isr_cookie ) \
676  { register unsigned int _disable_mask = PPC_MSR_DISABLE_MASK; \
677    _isr_cookie = 0; \
678    asm volatile (
679        "mfmsr %0" : \
680        "=r" ((_isr_cookie)) : \
681        "0" ((_isr_cookie)) \
682    ); \
683    asm volatile (
684        "andc %1,%0,%1" : \
685        "=r" ((_isr_cookie)), "=&r" ((_disable_mask)) : \
686        "0" ((_isr_cookie)), "1" ((_disable_mask)) \
687    ); \
688    asm volatile (
689        "mtmsr %1" : \
690        "=r" ((_disable_mask)) : \
691        "0" ((_disable_mask)) \
692    ); \
693  }
694#endif
695
696#define _CPU_ISR_Disable( _isr_cookie ) \
697  { register unsigned int _disable_mask = PPC_MSR_DISABLE_MASK; \
698    _isr_cookie = 0; \
699    asm volatile ( \
700        "mfmsr %0; andc %1,%0,%1; mtmsr %1" : \
701        "=&r" ((_isr_cookie)), "=&r" ((_disable_mask)) : \
702        "0" ((_isr_cookie)), "1" ((_disable_mask)) \
703        ); \
704  }
705
706
707#define _CPU_Data_Cache_Block_Flush( _address ) \
708  do { register void *__address = (_address); \
709       register unsigned32 _zero = 0; \
710       asm volatile ( "dcbf %0,%1" : \
711                      "=r" (_zero), "=r" (__address) : \
712                      "0" (_zero), "1" (__address) \
713       ); \
714  } while (0)
715
716
717/*
718 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
719 *  This indicates the end of an RTEMS critical section.  The parameter
720 *  _isr_cookie is not modified.
721 */
722
723#define _CPU_ISR_Enable( _isr_cookie )  \
724  { \
725     asm volatile ( "mtmsr %0" : \
726                   "=r" ((_isr_cookie)) : \
727                   "0" ((_isr_cookie))); \
728  }
729
730/*
731 *  This temporarily restores interrupts to the level in _isr_cookie before
732 *  immediately disabling them again.  This is used to divide long RTEMS critical
733 *  sections into two or more parts.  The parameter _isr_cookie is not
734 *  modified.
735 *
736 *  NOTE:  The version being used is not very optimized but it does
737 *         not trip a problem in gcc where the disable mask does not
738 *         get loaded.  Check this for future (post 10/97) gcc versions.
739 */
740
741#define _CPU_ISR_Flash( _isr_cookie ) \
742  { register unsigned int _disable_mask = PPC_MSR_DISABLE_MASK; \
743    asm volatile ( \
744      "mtmsr %0; andc %1,%0,%1; mtmsr %1" : \
745      "=r" ((_isr_cookie)), "=r" ((_disable_mask)) : \
746      "0" ((_isr_cookie)), "1" ((_disable_mask)) \
747    ); \
748  }
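
/*
 *  Hedged usage sketch, not part of the original header: the typical RTEMS
 *  critical section built from the disable/flash/enable macros above.  The
 *  cookie is just an unsigned32 here; the body is kept under "#if 0" because
 *  it is illustrative only.
 */
#if 0
  unsigned32 _level;

  _CPU_ISR_Disable( _level );       /* mask sources per PPC_MSR_DISABLE_MASK */
    /* short critical section */
  _CPU_ISR_Flash( _level );         /* briefly re-enable to bound latency */
    /* remainder of the critical section */
  _CPU_ISR_Enable( _level );        /* restore the saved MSR value */
#endif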
749
750/*
751 *  Map interrupt level in task mode onto the hardware that the CPU
752 *  actually provides.  Currently, interrupt levels which do not
753 *  map onto the CPU in a generic fashion are undefined.  Someday,
754 *  it would be nice if these were "mapped" by the application
755 *  via a callout.  For example, m68k has 8 levels 0 - 7, levels
756 *  8 - 255 would be available for bsp/application specific meaning.
757 *  This could be used to manage a programmable interrupt controller
758 *  via the rtems_task_mode directive.
759 */
760
761unsigned32 _CPU_ISR_Calculate_level(
762  unsigned32 new_level
763);
764
765void _CPU_ISR_Set_level(
766  unsigned32 new_level
767);
768 
769unsigned32 _CPU_ISR_Get_level( void );
770
771void _CPU_ISR_install_raw_handler(
772  unsigned32  vector,
773  proc_ptr    new_handler,
774  proc_ptr   *old_handler
775);
776
777/* end of ISR handler macros */
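
/*
 *  Hedged usage sketch, not part of the original header: masking external
 *  exceptions around a short region by raising the task-mode interrupt
 *  level with the routines declared above, using the PPC_INTERRUPT_LEVEL_EE
 *  bit defined earlier in this file.  Illustrative only, hence "#if 0".
 */
#if 0
  unsigned32 _previous_level;

  _previous_level = _CPU_ISR_Get_level();
  _CPU_ISR_Set_level( _previous_level | PPC_INTERRUPT_LEVEL_EE );
    /* external interrupt sources are masked here */
  _CPU_ISR_Set_level( _previous_level );
#endif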
778
779/*
780 *  Simple spin delay in microsecond units for device drivers.
781 *  This is very dependent on the clock speed of the target.
782 */
783
784#define CPU_Get_timebase_low( _value ) \
785    asm volatile( "mftb  %0" : "=r" (_value) )
786
787#define delay( _microseconds ) \
788  do { \
789    unsigned32 start, ticks, now; \
790    CPU_Get_timebase_low( start ) ; \
791    ticks = (_microseconds) * Cpu_table.clicks_per_usec; \
792    do \
793      CPU_Get_timebase_low( now ) ; \
794    while (now - start < ticks); \
795  } while (0)
796
797#define delay_in_bus_cycles( _cycles ) \
798  do { \
799    unsigned32 start, now; \
800    CPU_Get_timebase_low( start ); \
801    do \
802      CPU_Get_timebase_low( now ); \
803    while (now - start < (_cycles)); \
804  } while (0)
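
/*
 *  Hedged usage sketch, not part of the original header: polled pauses in a
 *  device driver using the two macros above.  Assumes the BSP has filled in
 *  Cpu_table.clicks_per_usec; kept under "#if 0" because it is illustrative
 *  only.
 */
#if 0
  delay( 50 );                    /* spin for roughly 50 microseconds */
  delay_in_bus_cycles( 1000 );    /* spin for 1000 timebase ticks */
#endif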
805
806
807
808/* Context handler macros */
809
810/*
811 *  Initialize the context to a state suitable for starting a
812 *  task after a context restore operation.  Generally, this
813 *  involves:
814 *
815 *     - setting a starting address
816 *     - preparing the stack
817 *     - preparing the stack and frame pointers
818 *     - setting the proper interrupt level in the context
819 *     - initializing the floating point context
820 *
821 *  This routine generally does not set any unnecessary register
822 *  in the context.  The state of the "general data" registers is
823 *  undefined at task start time.
824 *
825 *  NOTE:  Implemented as a subroutine for the SPARC port.
826 */
827
828void _CPU_Context_Initialize(
829  Context_Control  *the_context,
830  unsigned32       *stack_base,
831  unsigned32        size,
832  unsigned32        new_level,
833  void             *entry_point,
834  boolean           is_fp
835);
836
837/*
838 *  This routine is responsible for somehow restarting the currently
839 *  executing task.  If you are lucky, then all that is necessary
840 *  is restoring the context.  Otherwise, there will need to be
841 *  a special assembly routine which does something special in this
842 *  case.  Context_Restore should work most of the time.  It will
843 *  not work if restarting self conflicts with the stack frame
844 *  assumptions of restoring a context.
845 */
846
847#define _CPU_Context_Restart_self( _the_context ) \
848   _CPU_Context_restore( (_the_context) );
849
850/*
851 *  The purpose of this macro is to allow the initial pointer into
852 *  a floating point context area (used to save the floating point
853 *  context) to be at an arbitrary place in the floating point
854 *  context area.
855 *
856 *  This is necessary because some FP units are designed to have
857 *  their context saved as a stack which grows into lower addresses.
858 *  Other FP units can be saved by simply moving registers into offsets
859 *  from the base of the context area.  Finally some FP units provide
860 *  a "dump context" instruction which could fill in from high to low
861 *  or low to high based on the whim of the CPU designers.
862 */
863
864#define _CPU_Context_Fp_start( _base, _offset ) \
865   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
866
867/*
868 *  This routine initializes the FP context area passed to it.
869 *  There are a few standard ways in which to initialize the
870 *  floating point context.  The code included for this macro assumes
871 *  that this is a CPU in which an "initial" FP context was saved into
872 *  _CPU_Null_fp_context and it simply copies it to the destination
873 *  context passed to it.
874 *
875 *  Other models include (1) not doing anything, and (2) putting
876 *  a "null FP status word" in the correct place in the FP context.
877 */
878
879#define _CPU_Context_Initialize_fp( _destination ) \
880  { \
881   ((Context_Control_fp *) *((void **) _destination))->fpscr = PPC_INIT_FPSCR; \
882  }
883
884/* end of Context handler macros */
885
886/* Fatal Error manager macros */
887
888/*
889 *  This routine copies _error into a known place -- typically a stack
890 *  location or a register, optionally disables interrupts, and
891 *  halts/stops the CPU.
892 */
893
894#define _CPU_Fatal_halt( _error ) \
895  _CPU_Fatal_error(_error)
896
897/* end of Fatal Error manager macros */
898
899/* Bitfield handler macros */
900
901/*
902 *  This routine sets _output to the bit number of the first bit
903 *  set in _value.  _value is of CPU dependent type Priority_Bit_map_control.
904 *  This type may be either 16 or 32 bits wide although only the 16
905 *  least significant bits will be used.
906 *
907 *  There are a number of variables in using a "find first bit" type
908 *  instruction.
909 *
910 *    (1) What happens when run on a value of zero?
911 *    (2) Bits may be numbered from MSB to LSB or vice-versa.
912 *    (3) The numbering may be zero or one based.
913 *    (4) The "find first bit" instruction may search from MSB or LSB.
914 *
915 *  RTEMS guarantees that (1) will never happen so it is not a concern.
916 *  (2),(3), (4) are handled by the macros _CPU_Priority_mask() and
917 *  _CPU_Priority_Bits_index().  These three form a set of routines
918 *  which must logically operate together.  Bits in the _value are
919 *  set and cleared based on masks built by _CPU_Priority_mask().
920 *  The basic major and minor values calculated by _Priority_Major()
921 *  and _Priority_Minor() are "massaged" by _CPU_Priority_Bits_index()
922 *  to properly range between the values returned by the "find first bit"
923 *  instruction.  This makes it possible for _Priority_Get_highest() to
924 *  calculate the major and directly index into the minor table.
925 *  This mapping is necessary to ensure that 0 (a high priority major/minor)
926 *  is the first bit found.
927 *
928 *  This entire "find first bit" and mapping process depends heavily
929 *  on the manner in which a priority is broken into a major and minor
930 *  components with the major being the 4 MSB of a priority and minor
931 *  the 4 LSB.  Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
932 *  priority.  And (15 << 4) + 14 corresponds to priority 254 -- the next
933 *  to the lowest priority.
934 *
935 *  If your CPU does not have a "find first bit" instruction, then
936 *  there are ways to make do without it.  Here are a handful of ways
937 *  to implement this in software:
938 *
939 *    - a series of 16 bit test instructions
940 *    - a "binary search using if's"
941 *    - _number = 0
942 *      if _value > 0x00ff
943 *        _value >>=8
944 *        _number = 8;
945 *
946 *      if _value > 0x000f
947 *        _value >>= 4
948 *        _number += 4
949 *
950 *      _number += bit_set_table[ _value ]
951 *
952 *    where bit_set_table[ 16 ] has values which indicate the first
953 *      bit set
954 */
955
956#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
957  { \
958    asm volatile ("cntlzw %0, %1" : "=r" ((_output)), "=r" ((_value)) : \
959                  "1" ((_value))); \
960  }
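
/*
 *  Hedged sketch, not part of the original header: the software fallback
 *  described in the comment above, for a CPU without a "find first bit"
 *  instruction.  This port uses the cntlzw-based macro instead; note that
 *  this sketch numbers bits from the least significant end, so the companion
 *  _CPU_Priority_Mask()/_CPU_Priority_bits_index() macros would have to
 *  agree with that numbering.  The table and macro names are hypothetical.
 */
#if 0
static const unsigned char _Bit_set_table[ 16 ] =
  { 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3 };

#define _CPU_Bitfield_Find_first_bit_software( _value, _output ) \
  { unsigned32 __value = (_value); unsigned32 __number = 0; \
    if ( __value > 0x00ff ) { __value >>= 8; __number = 8; } \
    if ( __value > 0x000f ) { __value >>= 4; __number += 4; } \
    (_output) = __number + _Bit_set_table[ __value ]; \
  }
#endif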
961
962/* end of Bitfield handler macros */
963
964/*
965 *  This routine builds the mask which corresponds to the bit fields
966 *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
967 *  for that routine.
968 */
969
970#define _CPU_Priority_Mask( _bit_number ) \
971  ( 0x80000000 >> (_bit_number) )
972
973/*
974 *  This routine translates the bit numbers returned by
975 *  _CPU_Bitfield_Find_first_bit() into something suitable for use as
976 *  a major or minor component of a priority.  See the discussion
977 *  for that routine.
978 */
979
980#define _CPU_Priority_bits_index( _priority ) \
981  (_priority)
982
983/* end of Priority handler macros */
984
985/* variables */
986
987extern const unsigned32 _CPU_msrs[4];
988
989/* functions */
990
991/*
992 *  _CPU_Initialize
993 *
994 *  This routine performs CPU dependent initialization.
995 */
996
997void _CPU_Initialize(
998  rtems_cpu_table  *cpu_table,
999  void            (*thread_dispatch)
1000);
1001
1002/*
1003 *  _CPU_ISR_install_vector
1004 *
1005 *  This routine installs an interrupt vector.
1006 */
1007
1008void _CPU_ISR_install_vector(
1009  unsigned32  vector,
1010  proc_ptr    new_handler,
1011  proc_ptr   *old_handler
1012);
1013
1014/*
1015 *  _CPU_Install_interrupt_stack
1016 *
1017 *  This routine installs the hardware interrupt stack pointer.
1018 *
1019 *  NOTE:  It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
1020 *         is TRUE.
1021 */
1022
1023void _CPU_Install_interrupt_stack( void );
1024
1025/*
1026 *  _CPU_Context_switch
1027 *
1028 *  This routine switches from the run context to the heir context.
1029 */
1030
1031void _CPU_Context_switch(
1032  Context_Control  *run,
1033  Context_Control  *heir
1034);
1035
1036/*
1037 *  _CPU_Context_restore
1038 *
1039 *  This routine is generally used only to restart self in an
1040 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
1041 *
1042 *  NOTE: May be unnecessary to reload some registers.
1043 */
1044
1045void _CPU_Context_restore(
1046  Context_Control *new_context
1047);
1048
1049/*
1050 *  _CPU_Context_save_fp
1051 *
1052 *  This routine saves the floating point context passed to it.
1053 */
1054
1055void _CPU_Context_save_fp(
1056  void **fp_context_ptr
1057);
1058
1059/*
1060 *  _CPU_Context_restore_fp
1061 *
1062 *  This routine restores the floating point context passed to it.
1063 */
1064
1065void _CPU_Context_restore_fp(
1066  void **fp_context_ptr
1067);
1068
1069void _CPU_Fatal_error(
1070  unsigned32 _error
1071);
1072
1073/*  The following routine swaps the endian format of an unsigned int.
1074 *  It must be static because it is referenced indirectly.
1075 *
1076 *  This version will work on any processor, but if there is a better
1077 *  way for your CPU PLEASE use it.  The most common way to do this is to:
1078 *
1079 *     swap least significant two bytes with 16-bit rotate
1080 *     swap upper and lower 16-bits
1081 *     swap most significant two bytes with 16-bit rotate
1082 *
1083 *  Some CPUs have special instructions which swap a 32-bit quantity in
1084 *  a single instruction (e.g. i486).  It is probably best to avoid
1085 *  an "endian swapping control bit" in the CPU.  One good reason is
1086 *  that interrupts would probably have to be disabled to insure that
1087 *  an interrupt does not try to access the same "chunk" with the wrong
1088 *  endian.  Another good reason is that on some CPUs, the endian bit
1089 *  changes endianness for ALL fetches -- both code and data -- so the
1090 *  code will be fetched incorrectly.
1091 */
1092 
1093static inline unsigned int CPU_swap_u32(
1094  unsigned int value
1095)
1096{
1097  unsigned32 swapped;
1098 
1099  asm volatile("rlwimi %0,%1,8,24,31;"
1100               "rlwimi %0,%1,24,16,23;"
1101               "rlwimi %0,%1,8,8,15;"
1102               "rlwimi %0,%1,24,0,7;" :
1103               "=&r" ((swapped)) : "r" ((value)));
1104
1105  return( swapped );
1106}
1107
1108#define CPU_swap_u16( value ) \
1109  (((value&0xff) << 8) | ((value >> 8)&0xff))
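
/*
 *  Hedged usage sketch, not part of the original header: byte-swapping a
 *  32-bit and a 16-bit quantity with the routines above.  Illustrative
 *  only, hence "#if 0".
 */
#if 0
  unsigned32 swapped32 = CPU_swap_u32( 0x12345678 );   /* yields 0x78563412 */
  unsigned16 swapped16 = CPU_swap_u16( 0x1234 );       /* yields 0x3412 */
#endif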
1110
1111/*
1112 *  Routines to access the decrementer register
1113 */
1114
1115#define PPC_Set_decrementer( _clicks ) \
1116  do { \
1117    asm volatile( "mtdec %0" : "=r" ((_clicks)) : "r" ((_clicks)) ); \
1118  } while (0)
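
/*
 *  Hedged usage sketch, not part of the original header: arming the
 *  decrementer for a 10 millisecond clock tick with the macro above,
 *  assuming the BSP has set Cpu_table.clicks_per_usec to the number of
 *  decrementer ticks per microsecond.  Illustrative only, hence "#if 0".
 */
#if 0
  PPC_Set_decrementer( Cpu_table.clicks_per_usec * 10000 );
#endif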
1119
1120/*
1121 *  Routines to access the time base register
1122 */
1123
1124static inline unsigned64 PPC_Get_timebase_register( void )
1125{
1126  unsigned32 tbr_low;
1127  unsigned32 tbr_high;
1128  unsigned32 tbr_high_old;
1129  unsigned64 tbr;
1130
1131  do {
1132    asm volatile( "mftbu %0" : "=r" (tbr_high_old));
1133    asm volatile( "mftb  %0" : "=r" (tbr_low));
1134    asm volatile( "mftbu %0" : "=r" (tbr_high));
1135  } while ( tbr_high_old != tbr_high );
1136
1137  tbr = tbr_high;
1138  tbr <<= 32;
1139  tbr |= tbr_low;
1140  return tbr;
1141}
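
/*
 *  Hedged usage sketch, not part of the original header: measuring the
 *  duration of an operation in timebase ticks with the routine above.
 *  Illustrative only, hence "#if 0".
 */
#if 0
  unsigned64 _start, _elapsed_ticks;

  _start = PPC_Get_timebase_register();
    /* operation being timed */
  _elapsed_ticks = PPC_Get_timebase_register() - _start;
#endif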
1142
1143#ifdef __cplusplus
1144}
1145#endif
1146
1147#endif