source: rtems/c/src/exec/score/cpu/powerpc/mpc750/cpu.h @ 458bd34

Last change on this file since 458bd34 was 458bd34, checked in by Joel Sherrill <joel.sherrill@…>, on 11/05/99 at 16:44:02

This is another pass at making sure that nothing outside the BSP
unnecessarily uses any variables defined by the BSP. On this
sweep, use of BSP_Configuration and Cpu_table was eliminated.

A significant part of this modification was the addition of
macros to access fields in the RTEMS configuration structures.

This is necessary to strengthen the division between the BSP independent
parts of RTEMS and the BSPs themselves. This started after
comments and analysis by Ralf Corsepius <corsepiu@…>.

1/*  cpu.h
2 *
3 *  This include file contains information pertaining to the PowerPC
4 *  processor.
5 *
6 *  Author:     Andrew Bray <andy@i-cubed.co.uk>
7 *
8 *  COPYRIGHT (c) 1995 by i-cubed ltd.
9 *
10 *  To anyone who acknowledges that this file is provided "AS IS"
11 *  without any express or implied warranty:
12 *      permission to use, copy, modify, and distribute this file
13 *      for any purpose is hereby granted without fee, provided that
14 *      the above copyright notice and this notice appears in all
15 *      copies, and that the name of i-cubed limited not be used in
16 *      advertising or publicity pertaining to distribution of the
17 *      software without specific, written prior permission.
18 *      i-cubed limited makes no representations about the suitability
19 *      of this software for any purpose.
20 *
21 *  Derived from c/src/exec/cpu/no_cpu/cpu.h:
22 *
23 *  COPYRIGHT (c) 1989-1997.
24 *  On-Line Applications Research Corporation (OAR).
25 *  Copyright assigned to U.S. Government, 1994.
26 *
27 *  The license and distribution terms for this file may be found in
28 *  the file LICENSE in this distribution or at
29 *  http://www.OARcorp.com/rtems/license.html.
30 *
31 *  $Id$
32 */
33
34#ifndef __CPU_h
35#define __CPU_h
36
37#ifdef __cplusplus
38extern "C" {
39#endif
40
41#include <rtems/score/ppc.h>               /* pick up machine definitions */
42#include <libcpu/cpu.h>
43 
44#ifndef ASM
45#include <rtems/score/ppctypes.h>
46#endif
47
48/* conditional compilation parameters */
49
50/*
51 *  Should the calls to _Thread_Enable_dispatch be inlined?
52 *
53 *  If TRUE, then they are inlined.
54 *  If FALSE, then a subroutine call is made.
55 *
56 *  Basically this is an example of the classic trade-off of size
57 *  versus speed.  Inlining the call (TRUE) typically increases the
58 *  size of RTEMS while speeding up the enabling of dispatching.
59 *  [NOTE: In general, the _Thread_Dispatch_disable_level will
60 *  only be 0 or 1 unless you are in an interrupt handler and that
61 *  interrupt handler invokes the executive.]  When not inlined
62 *  something calls _Thread_Enable_dispatch which in turn calls
63 *  _Thread_Dispatch.  If the enable dispatch is inlined, then
64 *  one subroutine call is avoided entirely.
65 */
66
67#define CPU_INLINE_ENABLE_DISPATCH       FALSE
68
69/*
70 *  Should the body of the search loops in _Thread_queue_Enqueue_priority
71 *  be unrolled one time?  If unrolled, each iteration of the loop examines
72 *  two "nodes" on the chain being searched.  Otherwise, only one node
73 *  is examined per iteration.
74 *
75 *  If TRUE, then the loops are unrolled.
76 *  If FALSE, then the loops are not unrolled.
77 *
78 *  The primary factor in making this decision is the cost of disabling
79 *  and enabling interrupts (_ISR_Flash) versus the cost of the rest of the
80 *  body of the loop.  On some CPUs, the flash is more expensive than
81 *  one iteration of the loop body.  In this case, it might be desirable
82 *  to unroll the loop.  It is important to note that on some CPUs, this
83 *  code is the longest interrupt disable period in RTEMS.  So it is
84 *  necessary to strike a balance when setting this parameter.
85 */
86
87#define CPU_UNROLL_ENQUEUE_PRIORITY      FALSE
88
89/*
90 *  Does RTEMS manage a dedicated interrupt stack in software?
91 *
92 *  If TRUE, then a stack is allocated in _Interrupt_Manager_initialization.
93 *  If FALSE, nothing is done.
94 *
95 *  If the CPU supports a dedicated interrupt stack in hardware,
96 *  then it is generally the responsibility of the BSP to allocate it
97 *  and set it up.
98 *
99 *  If the CPU does not support a dedicated interrupt stack, then
100 *  the porter has two options: (1) execute interrupts on the
101 *  stack of the interrupted task, and (2) have RTEMS manage a dedicated
102 *  interrupt stack.
103 *
104 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
105 *
106 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
107 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
108 *  possible that both are FALSE for a particular CPU.  Although it
109 *  possible that both are FALSE for a particular CPU, although it
110 *  procedure on that CPU.
111 */
112
113#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
114
115/*
116 *  Does this CPU have hardware support for a dedicated interrupt stack?
117 *
118 *  If TRUE, then it must be installed during initialization.
119 *  If FALSE, then no installation is performed.
120 *
121 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
122 *
123 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
124 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
125 *  possible that both are FALSE for a particular CPU, although it
126 *  is unclear what that would imply about the interrupt processing
127 *  procedure on that CPU.
128 */
129
130#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
131
132/*
133 *  Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
134 *
135 *  If TRUE, then the memory is allocated during initialization.
136 *  If FALSE, then the memory is not allocated by RTEMS.
137 *
138 *  This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE
139 *  or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE.
140 */
141
142#define CPU_ALLOCATE_INTERRUPT_STACK FALSE
143
144/*
145 *  Does RTEMS invoke the user's ISR with the vector number and
146 *  a pointer to the saved interrupt frame (1) or just the vector
147 *  number (0)?
148 */
149
150#define CPU_ISR_PASSES_FRAME_POINTER 0
151
152/*
153 *  Does the CPU have hardware floating point?
154 *
155 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
156 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
157 *
158 *  If there is a FP coprocessor such as the i387 or mc68881, then
159 *  the answer is TRUE.
160 *
161 *  The macro name "PPC_HAS_FPU" should be made CPU specific.
162 *  It indicates whether or not this CPU model has FP support.  For
163 *  example, it would be possible to have an i386_nofp CPU model
164 *  which sets this to false to indicate that you have an i386 without
165 *  an i387 and wish to leave floating point support out of RTEMS.
166 */
167
168#if ( PPC_HAS_FPU == 1 )
169#define CPU_HARDWARE_FP     TRUE
170#else
171#define CPU_HARDWARE_FP     FALSE
172#endif
173
174/*
175 *  Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
176 *
177 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
178 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
179 *
180 *  So far, the only CPU in which this option has been used is the
181 *  HP PA-RISC.  The HP C compiler and gcc both implicitly use the
182 *  floating point registers to perform integer multiplies.  If
183 *  a function which you would not expect to use the FP unit DOES,
184 *  then one cannot easily predict which tasks will use the FP hardware.
185 *  In this case, this option should be TRUE.
186 *
187 *  If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
188 */
189
190#define CPU_ALL_TASKS_ARE_FP     FALSE
191
192/*
193 *  Should the IDLE task have a floating point context?
194 *
195 *  If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
196 *  and it has a floating point context which is switched in and out.
197 *  If FALSE, then the IDLE task does not have a floating point context.
198 *
199 *  Setting this to TRUE negatively impacts the time required to preempt
200 *  the IDLE task from an interrupt because the floating point context
201 *  must be saved as part of the preemption.
202 */
203
204#define CPU_IDLE_TASK_IS_FP      FALSE
205
206/*
207 *  Should the saving of the floating point registers be deferred
208 *  until a context switch is made to another different floating point
209 *  task?
210 *
211 *  If TRUE, then the floating point context will not be stored until
212 *  necessary.  It will remain in the floating point registers and not
213 *  disturbed until another floating point task is switched to.
214 *
215 *  If FALSE, then the floating point context is saved when a floating
216 *  point task is switched out and restored when the next floating point
217 *  task is restored.  The state of the floating point registers between
218 *  those two operations is not specified.
219 *
220 *  If the floating point context does NOT have to be saved as part of
221 *  interrupt dispatching, then it should be safe to set this to TRUE.
222 *
223 *  Setting this flag to TRUE results in using a different algorithm
224 *  for deciding when to save and restore the floating point context.
225 *  The deferred FP switch algorithm minimizes the number of times
226 *  the FP context is saved and restored.  The FP context is not saved
227 *  until a context switch is made to another, different FP task.
228 *  Thus in a system with only one FP task, the FP context will never
229 *  be saved or restored.
230 */
231/*
232 *  ACB Note:  This could make debugging tricky..
233 */
234
235#define CPU_USE_DEFERRED_FP_SWITCH       TRUE
236
237/*
238 *  Does this port provide a CPU dependent IDLE task implementation?
239 *
240 *  If TRUE, then the routine _CPU_Thread_Idle_body
241 *  must be provided and is the default IDLE thread body instead of
242 *  _Thread_Idle_body.
243 *
244 *  If FALSE, then use the generic IDLE thread body if the BSP does
245 *  not provide one.
246 *
247 *  This is intended to allow for supporting processors which have
248 *  a low power or idle mode.  When the IDLE thread is executed, then
249 *  the CPU can be powered down.
250 *
251 *  The order of precedence for selecting the IDLE thread body is:
252 *
253 *    1.  BSP provided
254 *    2.  CPU dependent (if provided)
255 *    3.  generic (if no BSP and no CPU dependent)
256 */
257
258#define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE
259
260/*
261 *  Does the stack grow up (toward higher addresses) or down
262 *  (toward lower addresses)?
263 *
264 *  If TRUE, then the stack grows upward.
265 *  If FALSE, then the stack grows toward smaller addresses.
266 */
267
268#define CPU_STACK_GROWS_UP               FALSE
269
270/*
271 *  The following is the variable attribute used to force alignment
272 *  of critical RTEMS structures.  On some processors it may make
273 *  sense to have these aligned on tighter boundaries than
274 *  the minimum requirements of the compiler in order to have as
275 *  much of the critical data area as possible in a cache line.
276 *
277 *  The placement of this macro in the declaration of the variables
278 *  is based on the syntactic requirements of the GNU C
279 *  "__attribute__" extension.  For example with GNU C, use
280 *  the following to force a structure to a 32 byte boundary.
281 *
282 *      __attribute__ ((aligned (32)))
283 *
284 *  NOTE:  Currently only the Priority Bit Map table uses this feature.
285 *         To benefit from using this, the data must be heavily
286 *         used so it will stay in the cache and be used frequently enough
287 *         in the executive to justify turning this on.
288 */
289
290#define CPU_STRUCTURE_ALIGNMENT \
291  __attribute__ ((aligned (PPC_CACHE_ALIGNMENT)))
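
/*
 *  Usage sketch (illustration only, not part of the original header):
 *  with GNU C, a declaration using this macro might look like the
 *  following, which would place the hypothetical table on a
 *  PPC_CACHE_ALIGNMENT boundary:
 *
 *      volatile unsigned32 _Example_bit_map[ 16 ] CPU_STRUCTURE_ALIGNMENT;
 */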
292
293/*
294 *  Define what is required to specify how the network to host conversion
295 *  routines are handled.
296 */
297
298#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES     FALSE
299#define CPU_BIG_ENDIAN                           TRUE
300#define CPU_LITTLE_ENDIAN                        FALSE
301
302
303/*
304 *  Processor defined structures
305 *
306 *  Example structures include the descriptor tables from the i386
307 *  and the processor control structure on the i960ca.
308 */
309
310/* may need to put some structures here.  */
311
312/*
313 * Contexts
314 *
315 *  Generally there are 2 types of context to save.
316 *     1. Interrupt registers to save
317 *     2. Task level registers to save
318 *
319 *  This means we have the following 3 context items:
320 *     1. task level context stuff::  Context_Control
321 *     2. floating point task stuff:: Context_Control_fp
322 *     3. special interrupt level context :: Context_Control_interrupt
323 *
324 *  On some processors, it is cost-effective to save only the callee
325 *  preserved registers during a task context switch.  This means
326 *  that the ISR code needs to save those registers which do not
327 *  persist across function calls.  It is not mandatory to make this
328 *  distinctions between the caller/callee saves registers for the
329 *  distinction between the caller/callee saved registers for the
330 *  If the cost of saving extra registers is minimal, simplicity is the
331 *  choice.  Save the same context on interrupt entry as for tasks in
332 *  this case.
333 *
334 *  Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
335 *  care should be used in designing the context area.
336 *
337 *  On some CPUs with hardware floating point support, the Context_Control_fp
338 *  structure will not be used or it may simply consist of an array of a
339 *  fixed number of bytes.   This is done when the floating point context
340 *  is dumped by a "FP save context" type instruction and the format
341 *  is not really defined by the CPU.  In this case, there is no need
342 *  to figure out the exact format -- only the size.  Of course, although
343 *  this is enough information for RTEMS, it is probably not enough for
344 *  a debugger such as gdb.  But that is another problem.
345 */
346
347#ifndef ASM
348
349typedef struct {
350    unsigned32 gpr1;    /* Stack pointer for all */
351    unsigned32 gpr2;    /* TOC in PowerOpen, reserved SVR4, section ptr EABI + */
352    unsigned32 gpr13;   /* First non volatile PowerOpen, section ptr SVR4/EABI */
353    unsigned32 gpr14;   /* Non volatile for all */
354    unsigned32 gpr15;   /* Non volatile for all */
355    unsigned32 gpr16;   /* Non volatile for all */
356    unsigned32 gpr17;   /* Non volatile for all */
357    unsigned32 gpr18;   /* Non volatile for all */
358    unsigned32 gpr19;   /* Non volatile for all */
359    unsigned32 gpr20;   /* Non volatile for all */
360    unsigned32 gpr21;   /* Non volatile for all */
361    unsigned32 gpr22;   /* Non volatile for all */
362    unsigned32 gpr23;   /* Non volatile for all */
363    unsigned32 gpr24;   /* Non volatile for all */
364    unsigned32 gpr25;   /* Non volatile for all */
365    unsigned32 gpr26;   /* Non volatile for all */
366    unsigned32 gpr27;   /* Non volatile for all */
367    unsigned32 gpr28;   /* Non volatile for all */
368    unsigned32 gpr29;   /* Non volatile for all */
369    unsigned32 gpr30;   /* Non volatile for all */
370    unsigned32 gpr31;   /* Non volatile for all */
371    unsigned32 cr;      /* PART of the CR is non volatile for all */
372    unsigned32 pc;      /* Program counter/Link register */
373    unsigned32 msr;     /* Initial interrupt level */
374} Context_Control;
375
376typedef struct {
377    /* The ABIs (PowerOpen/SVR4/EABI) only require saving f14-f31 over
378     * procedure calls.  However, this would mean that the interrupt
379     * frame had to hold f0-f13, and the fpscr.  And as the majority
380     * of tasks will not have an FP context, we will save the whole
381     * context here.
382     */
383#if (PPC_HAS_DOUBLE == 1)
384    double      f[32];
385    double      fpscr;
386#else
387    float       f[32];
388    float       fpscr;
389#endif
390} Context_Control_fp;
391
392typedef struct CPU_Interrupt_frame {
393    unsigned32 stacklink;       /* Ensure this is a real frame (also reg1 save) */
394    unsigned32 calleeLr;        /* link register used by callees: SVR4/EABI */
395  /* This is what is left out of the primary contexts */
396    unsigned32 gpr0;
397    unsigned32 gpr2;            /* play safe */
398    unsigned32 gpr3;
399    unsigned32 gpr4;
400    unsigned32 gpr5;
401    unsigned32 gpr6;
402    unsigned32 gpr7;
403    unsigned32 gpr8;
404    unsigned32 gpr9;
405    unsigned32 gpr10;
406    unsigned32 gpr11;
407    unsigned32 gpr12;
408    unsigned32 gpr13;   /* Play safe */
409    unsigned32 gpr28;   /* For internal use by the IRQ handler */
410    unsigned32 gpr29;   /* For internal use by the IRQ handler */
411    unsigned32 gpr30;   /* For internal use by the IRQ handler */
412    unsigned32 gpr31;   /* For internal use by the IRQ handler */
413    unsigned32 cr;      /* Bits of this are volatile, so no-one may save */
414    unsigned32 ctr;
415    unsigned32 xer;
416    unsigned32 lr;
417    unsigned32 pc;
418    unsigned32 msr;
419    unsigned32 pad[3];
420} CPU_Interrupt_frame;
421 
422/*
423 *  The following table contains the information required to configure
424 *  the PowerPC processor specific parameters.
425 */
426
427typedef struct {
428  void       (*pretasking_hook)( void );
429  void       (*predriver_hook)( void );
430  void       (*postdriver_hook)( void );
431  void       (*idle_task)( void );
432  boolean      do_zero_of_workspace;
433  unsigned32   idle_task_stack_size;
434  unsigned32   interrupt_stack_size;
435  unsigned32   extra_mpci_receive_server_stack;
436  void *     (*stack_allocate_hook)( unsigned32 );
437  void       (*stack_free_hook)( void* );
438  /* end of fields required on all CPUs */
439
440  unsigned32   clicks_per_usec;        /* Timer clicks per microsecond */
441  boolean      exceptions_in_RAM;     /* TRUE if exception vectors are in RAM */
442
443}   rtems_cpu_table;
444
445/*
446 *  Macros to access required entries in the CPU Table are in
447 *  the file rtems/system.h.
448 */
449
450/*
451 *  Macros to access PowerPC MPC750 specific additions to the CPU Table
452 */
453
454#define rtems_cpu_configuration_get_clicks_per_usec() \
455   (_CPU_Table.clicks_per_usec)
456
457#define rtems_cpu_configuration_get_exceptions_in_ram() \
458   (_CPU_Table.exceptions_in_RAM)
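
/*
 *  Usage sketch (illustration only):  BSP or driver code could read the
 *  MPC750 specific CPU Table entries through these accessors, e.g.
 *
 *      unsigned32 clicks;
 *      boolean    vectors_in_ram;
 *
 *      clicks         = rtems_cpu_configuration_get_clicks_per_usec();
 *      vectors_in_ram = rtems_cpu_configuration_get_exceptions_in_ram();
 */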
459
460/*
461 *  This variable is optional.  It is used on CPUs on which it is difficult
462 *  to generate an "uninitialized" FP context.  It is filled in by
463 *  _CPU_Initialize and copied into the task's FP context area during
464 *  _CPU_Context_Initialize.
465 */
466
467/* EXTERN Context_Control_fp  _CPU_Null_fp_context; */
468
469/*
470 *  On some CPUs, RTEMS supports a software managed interrupt stack.
471 *  This stack is allocated by the Interrupt Manager and the switch
472 *  is performed in _ISR_Handler.  These variables contain pointers
473 *  to the lowest and highest addresses in the chunk of memory allocated
474 *  for the interrupt stack.  Since it is unknown whether the stack
475 *  grows up or down (in general), this gives the CPU dependent
476 *  code the option of picking the version it wants to use.
477 *
478 *  NOTE: These two variables are required if the macro
479 *        CPU_HAS_SOFTWARE_INTERRUPT_STACK is defined as TRUE.
480 */
481
482SCORE_EXTERN void               *_CPU_Interrupt_stack_low;
483SCORE_EXTERN void               *_CPU_Interrupt_stack_high;
484
485#endif /* ndef ASM */
486
487/*
488 *  This defines the number of levels and the mask used to pick those
489 *  bits out of a thread mode.
490 */
491
492#define CPU_MODES_INTERRUPT_LEVEL  0x00000001 /* interrupt level in mode */
493#define CPU_MODES_INTERRUPT_MASK   0x00000001 /* interrupt level in mode */
494
495/*
496 *  With some compilation systems, it is difficult if not impossible to
497 *  call a high-level language routine from assembly language.  This
498 *  is especially true of commercial Ada compilers and name mangling
499 *  C++ ones.  This variable can be optionally defined by the CPU porter
500 *  and contains the address of the routine _Thread_Dispatch.  This
501 *  can make it easier to invoke that routine at the end of the interrupt
502 *  sequence (if a dispatch is necessary).
503 */
504
505/* EXTERN void           (*_CPU_Thread_dispatch_pointer)(); */
506
507/*
508 *  Nothing prevents the porter from declaring more CPU specific variables.
509 */
510
511#ifndef ASM
512 
513SCORE_EXTERN struct {
514  unsigned32 *Disable_level;
515  void *Stack;
516  volatile boolean *Switch_necessary;
517  boolean *Signal;
518
519} _CPU_IRQ_info CPU_STRUCTURE_ALIGNMENT;
520
521#endif /* ndef ASM */
522
523/*
524 *  The size of the floating point context area.  On some CPUs this
525 *  will not be a "sizeof" because the format of the floating point
526 *  area is not defined -- only the size is.  This is usually on
527 *  CPUs with a "floating point save context" instruction.
528 */
529
530#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
531
532/*
533 * (Optional) # of bytes for libmisc/stackchk to check
534 * If not specified, then it defaults to something reasonable
535 * for most architectures.
536 */
537
538#define CPU_STACK_CHECK_SIZE    (128)
539
540/*
541 *  Amount of extra stack (above minimum stack size) required by
542 *  MPCI receive server thread.  Remember that in a multiprocessor
543 *  system this thread must exist and be able to process all directives.
544 */
545
546#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
547
548/*
549 *  This defines the number of entries in the ISR_Vector_table managed
550 *  by RTEMS.
551 */
552
553#define CPU_INTERRUPT_NUMBER_OF_VECTORS     (PPC_INTERRUPT_MAX)
554#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER (PPC_INTERRUPT_MAX - 1)
555
556/*
557 *  Should be large enough to run all RTEMS tests.  This ensures
558 *  that a reasonably small application should not have any problems.
559 */
560
561#define CPU_STACK_MINIMUM_SIZE          (1024*8)
562
563/*
564 *  CPU's worst alignment requirement for data types on a byte boundary.  This
565 *  alignment does not take into account the requirements for the stack.
566 */
567
568#define CPU_ALIGNMENT              (PPC_ALIGNMENT)
569
570/*
571 *  This number corresponds to the byte alignment requirement for the
572 *  heap handler.  This alignment requirement may be stricter than that
573 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
574 *  common for the heap to follow the same alignment requirement as
575 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
576 *  then this should be set to CPU_ALIGNMENT.
577 *
578 *  NOTE:  This does not have to be a power of 2.  It does have to
579 *         be greater than or equal to CPU_ALIGNMENT.
580 */
581
582#define CPU_HEAP_ALIGNMENT         (PPC_ALIGNMENT)
583
584/*
585 *  This number corresponds to the byte alignment requirement for memory
586 *  buffers allocated by the partition manager.  This alignment requirement
587 *  may be stricter than that for the data types alignment specified by
588 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
589 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
590 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
591 *
592 *  NOTE:  This does not have to be a power of 2.  It does have to
593 *         be greater than or equal to CPU_ALIGNMENT.
594 */
595
596#define CPU_PARTITION_ALIGNMENT    (PPC_ALIGNMENT)
597
598/*
599 *  This number corresponds to the byte alignment requirement for the
600 *  stack.  This alignment requirement may be stricter than that for the
601 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
602 *  is strict enough for the stack, then this should be set to 0.
603 *
604 *  NOTE:  This must be a power of 2, either 0 or greater than CPU_ALIGNMENT.
605 */
606
607#define CPU_STACK_ALIGNMENT        (PPC_STACK_ALIGNMENT)
608
609/*
610 * Needed for Interrupt stack
611 */
612#define CPU_MINIMUM_STACK_FRAME_SIZE 8
613
614
615/* ISR handler macros */
616
617/*
618 *  Disable all interrupts for an RTEMS critical section.  The previous
619 *  level is returned in _isr_cookie.
620 */
621
622#define loc_string(a,b) a " (" #b ")\n"
623
624#ifndef ASM
625 
626static inline unsigned32 _CPU_ISR_Get_level( void )
627{
628  register unsigned int msr;
629  _CPU_MSR_GET(msr);
630  if (msr & MSR_EE) return 0;
631  else  return 1;
632}
633
634static inline void _CPU_ISR_Set_level( unsigned32 level )
635{
636  register unsigned int msr;
637  _CPU_MSR_GET(msr);
638  if (!(level & CPU_MODES_INTERRUPT_MASK)) {
639    msr |= MSR_EE;
640  }
641  else {
642    msr &= ~MSR_EE;
643  }
644  _CPU_MSR_SET(msr);
645}
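
/*
 *  Usage sketch (illustration only):  code which must run with external
 *  exceptions (MSR[EE]) masked could bracket itself with these routines:
 *
 *      unsigned32 previous_level;
 *
 *      previous_level = _CPU_ISR_Get_level();
 *      _CPU_ISR_Set_level( 1 );
 *      ... critical region ...
 *      _CPU_ISR_Set_level( previous_level );
 */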
646 
647#define _CPU_ISR_install_vector(irq, new, old) {BSP_panic("_CPU_ISR_install_vector called\n");}
648
649/* Context handler macros */
650
651/*
652 *  Initialize the context to a state suitable for starting a
653 *  task after a context restore operation.  Generally, this
654 *  involves:
655 *
656 *     - setting a starting address
657 *     - preparing the stack
658 *     - preparing the stack and frame pointers
659 *     - setting the proper interrupt level in the context
660 *     - initializing the floating point context
661 *
662 *  This routine generally does not set any unnecessary register
663 *  in the context.  The state of the "general data" registers is
664 *  undefined at task start time.
665 *
666 *  NOTE:  Implemented as a subroutine for the SPARC port.
667 */
668
669void _CPU_Context_Initialize(
670  Context_Control  *the_context,
671  unsigned32       *stack_base,
672  unsigned32        size,
673  unsigned32        new_level,
674  void             *entry_point,
675  boolean           is_fp
676);
677
678/*
679 *  This routine is responsible for somehow restarting the currently
680 *  executing task.  If you are lucky, then all that is necessary
681 *  is restoring the context.  Otherwise, there will need to be
682 *  a special assembly routine which does something special in this
683 *  case.  Context_Restore should work most of the time.  It will
684 *  not work if restarting self conflicts with the stack frame
685 *  assumptions of restoring a context.
686 */
687
688#define _CPU_Context_Restart_self( _the_context ) \
689   _CPU_Context_restore( (_the_context) );
690
691/*
692 *  The purpose of this macro is to allow the initial pointer into
693 *  a floating point context area (used to save the floating point
694 *  context) to be at an arbitrary place in the floating point
695 *  context area.
696 *
697 *  This is necessary because some FP units are designed to have
698 *  their context saved as a stack which grows into lower addresses.
699 *  Other FP units can be saved by simply moving registers into offsets
700 *  from the base of the context area.  Finally some FP units provide
701 *  a "dump context" instruction which could fill in from high to low
702 *  or low to high based on the whim of the CPU designers.
703 */
704
705#define _CPU_Context_Fp_start( _base, _offset ) \
706   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
707
708/*
709 *  This routine initializes the FP context area passed to it.
710 *  There are a few standard ways in which to initialize the
711 *  floating point context.  The code included for this macro assumes
712 *  that this is a CPU in which an "initial" FP context was saved into
713 *  _CPU_Null_fp_context and it simply copies it to the destination
714 *  context passed to it.
715 *
716 *  Other models include (1) not doing anything, and (2) putting
717 *  a "null FP status word" in the correct place in the FP context.
718 */
719
720#define _CPU_Context_Initialize_fp( _destination ) \
721  { \
722   ((Context_Control_fp *) *((void **) _destination))->fpscr = PPC_INIT_FPSCR; \
723  }
724
725/* end of Context handler macros */
726
727/* Fatal Error manager macros */
728
729/*
730 *  This routine copies _error into a known place -- typically a stack
731 *  location or a register, optionally disables interrupts, and
732 *  halts/stops the CPU.
733 */
734
735#define _CPU_Fatal_halt( _error ) \
736  _BSP_Fatal_error(_error)
737
738/* end of Fatal Error manager macros */
739
740/* Bitfield handler macros */
741
742/*
743 *  This routine sets _output to the bit number of the first bit
744 *  set in _value.  _value is of CPU dependent type Priority_Bit_map_control.
745 *  This type may be either 16 or 32 bits wide although only the 16
746 *  least significant bits will be used.
747 *
748 *  There are a number of variables in using a "find first bit" type
749 *  instruction.
750 *
751 *    (1) What happens when run on a value of zero?
752 *    (2) Bits may be numbered from MSB to LSB or vice-versa.
753 *    (3) The numbering may be zero or one based.
754 *    (4) The "find first bit" instruction may search from MSB or LSB.
755 *
756 *  RTEMS guarantees that (1) will never happen so it is not a concern.
757 *  (2),(3), (4) are handled by the macros _CPU_Priority_mask() and
758 *  _CPU_Priority_Bits_index().  These three form a set of routines
759 *  which must logically operate together.  Bits in the _value are
760 *  set and cleared based on masks built by _CPU_Priority_mask().
761 *  The basic major and minor values calculated by _Priority_Major()
762 *  and _Priority_Minor() are "massaged" by _CPU_Priority_Bits_index()
763 *  to properly range between the values returned by the "find first bit"
764 *  instruction.  This makes it possible for _Priority_Get_highest() to
765 *  calculate the major and directly index into the minor table.
766 *  This mapping is necessary to ensure that 0 (a high priority major/minor)
767 *  is the first bit found.
768 *
769 *  This entire "find first bit" and mapping process depends heavily
770 *  on the manner in which a priority is broken into a major and minor
771 *  components with the major being the 4 MSB of a priority and minor
772 *  the 4 LSB.  Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
773 *  priority.  And (15 << 4) + 14 corresponds to priority 254 -- the next
774 *  to the lowest priority.
775 *
776 *  If your CPU does not have a "find first bit" instruction, then
777 *  there are ways to make do without it.  Here are a handful of ways
778 *  to implement this in software:
779 *
780 *    - a series of 16 bit test instructions
781 *    - a "binary search using if's"
782 *    - _number = 0
783 *      if _value > 0x00ff
784 *        _value >>=8
785 *        _number = 8;
786 *
787 *      if _value > 0x000f
788 *        _value >>= 4
789 *        _number += 4
790 *
791 *      _number += bit_set_table[ _value ]
792 *
793 *    where bit_set_table[ 16 ] has values which indicate the first
794 *      bit set
795 */
796
797#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
798  { \
799    asm volatile ("cntlzw %0, %1" : "=r" ((_output)), "=r" ((_value)) : \
800                  "1" ((_value))); \
801  }
802
803/* end of Bitfield handler macros */
804
805/*
806 *  This routine builds the mask which corresponds to the bit fields
807 *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
808 *  for that routine.
809 */
810
811#define _CPU_Priority_Mask( _bit_number ) \
812  ( 0x80000000 >> (_bit_number) )
813
814/*
815 *  This routine translates the bit numbers returned by
816 *  _CPU_Bitfield_Find_first_bit() into something suitable for use as
817 *  a major or minor component of a priority.  See the discussion
818 *  for that routine.
819 */
820
821#define _CPU_Priority_bits_index( _priority ) \
822  (_priority)
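
/*
 *  Worked example (illustration only):  with the cntlzw based
 *  _CPU_Bitfield_Find_first_bit() above, bits are numbered from the MSB,
 *  so a _value of 0x00008000 yields an _output of 16, and
 *  _CPU_Priority_Mask( 16 ) rebuilds the same bit:
 *
 *      0x80000000 >> 16 == 0x00008000
 *
 *  _CPU_Priority_bits_index() is simply the identity mapping on this port.
 */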
823
824/* end of Priority handler macros */
825
826/* variables */
827
828extern const unsigned32 _CPU_msrs[4];
829
830/* functions */
831
832/*
833 *  _CPU_Initialize
834 *
835 *  This routine performs CPU dependent initialization.
836 */
837
838void _CPU_Initialize(
839  rtems_cpu_table  *cpu_table,
840  void            (*thread_dispatch)
841);
842
843
844/*
845 *  _CPU_Install_interrupt_stack
846 *
847 *  This routine installs the hardware interrupt stack pointer.
848 *
849 *  NOTE:  It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
850 *         is TRUE.
851 */
852
853void _CPU_Install_interrupt_stack( void );
854
855/*
856 *  _CPU_Context_switch
857 *
858 *  This routine switches from the run context to the heir context.
859 */
860
861void _CPU_Context_switch(
862  Context_Control  *run,
863  Context_Control  *heir
864);
865
866/*
867 *  _CPU_Context_restore
868 *
869 *  This routine is generally used only to restart self in an
870 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
871 *
872 *  NOTE: May be unnecessary to reload some registers.
873 */
874
875void _CPU_Context_restore(
876  Context_Control *new_context
877);
878
879/*
880 *  _CPU_Context_save_fp
881 *
882 *  This routine saves the floating point context passed to it.
883 */
884
885void _CPU_Context_save_fp(
886  void **fp_context_ptr
887);
888
889/*
890 *  _CPU_Context_restore_fp
891 *
892 *  This routine restores the floating point context passed to it.
893 */
894
895void _CPU_Context_restore_fp(
896  void **fp_context_ptr
897);
898
899void _CPU_Fatal_error(
900  unsigned32 _error
901);
902
903/*  The following routine swaps the endian format of an unsigned int.
904 *  It must be static because it is referenced indirectly.
905 *
906 *  This version will work on any processor, but if there is a better
907 *  way for your CPU PLEASE use it.  The most common way to do this is to:
908 *
909 *     swap least significant two bytes with 16-bit rotate
910 *     swap upper and lower 16-bits
911 *     swap most significant two bytes with 16-bit rotate
912 *
913 *  Some CPUs have special instructions which swap a 32-bit quantity in
914 *  a single instruction (e.g. i486).  It is probably best to avoid
915 *  an "endian swapping control bit" in the CPU.  One good reason is
916 *  that interrupts would probably have to be disabled to ensure that
917 *  an interrupt does not try to access the same "chunk" with the wrong
918 *  endian.  Another good reason is that on some CPUs, the endian bit
919 *  controls the endianness for ALL fetches -- both code and data -- so
920 *  the code will be fetched incorrectly.
921 */
922 
923static inline unsigned int CPU_swap_u32(
924  unsigned int value
925)
926{
927  unsigned32 swapped;
928 
929  asm volatile("rlwimi %0,%1,8,24,31;"
930               "rlwimi %0,%1,24,16,23;"
931               "rlwimi %0,%1,8,8,15;"
932               "rlwimi %0,%1,24,0,7;" :
933               "=&r" ((swapped)) : "r" ((value)));
934
935  return( swapped );
936}
937
938#define CPU_swap_u16( value ) \
939  (((value&0xff) << 8) | ((value >> 8)&0xff))
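
/*
 *  Worked example (illustration only):
 *
 *      CPU_swap_u32( 0x12345678 )  returns  0x78563412
 *      CPU_swap_u16( 0x1234 )      returns  0x3412
 */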
940
941/*
942 *  Routines to access the decrementer register
943 */
944
945#define PPC_Set_decrementer( _clicks ) \
946  do { \
947    asm volatile( "mtdec %0" : "=r" ((_clicks)) : "r" ((_clicks)) ); \
948  } while (0)
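
/*
 *  Usage sketch (illustration only):  a clock tick driver could program a
 *  periodic interrupt from the CPU Table's clicks_per_usec entry, here for
 *  an assumed 10 millisecond tick:
 *
 *      PPC_Set_decrementer(
 *        rtems_cpu_configuration_get_clicks_per_usec() * 10000 );
 */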
949
950/*
951 *  Routines to access the time base register
952 */
953
954static inline unsigned64 PPC_Get_timebase_register( void )
955{
956  unsigned32 tbr_low;
957  unsigned32 tbr_high;
958  unsigned32 tbr_high_old;
959  unsigned64 tbr;
960
961  do {
962    asm volatile( "mftbu %0" : "=r" (tbr_high_old));
963    asm volatile( "mftb  %0" : "=r" (tbr_low));
964    asm volatile( "mftbu %0" : "=r" (tbr_high));
965  } while ( tbr_high_old != tbr_high );
966
967  tbr = tbr_high;
968  tbr <<= 32;
969  tbr |= tbr_low;
970  return tbr;
971}
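
/*
 *  Usage sketch (illustration only):  the time base can be sampled around
 *  a region of interest to obtain an elapsed tick count:
 *
 *      unsigned64 start, elapsed;
 *
 *      start   = PPC_Get_timebase_register();
 *      ... work to be measured ...
 *      elapsed = PPC_Get_timebase_register() - start;
 */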
972
973#endif /* ndef ASM */
974
975#ifdef __cplusplus
976}
977#endif
978
979#endif