source: rtems/cpukit/score/cpu/powerpc/rtems/new-exceptions/cpu.h @ 19131e97

Last change on this file since 19131e97 was 19131e97, checked in by Joel Sherrill <joel.sherrill@…>, on 05/14/02 at 17:45:37

2002-05-14 Till Straumann <strauman@…>

  • rtems/new-exceptions/cpu.h: Per PR211, fix saving/restoring of the floating point context. The fpsave and fprestore routines are only used in an executing context which _is_ FP and hence has the FPU enabled. The current behavior required the FPU always to be on, which is very dangerous if lazy context switching is used. [Joel Note: Some ports explicitly enabled the FPU in the FP save and restore routines to avoid this.]

The patch also makes sure (on powerpc only) that the FPU is disabled
for integer tasks. Note that this is crucial if deferred fp context
switching is used. Otherwise, fp context corruption may go undetected!
Also note that even tasks which merely push/pop FP registers to/from
the stack without modifying them still MUST be FP tasks; otherwise,
if lazy FP context switching is used, FP register corruption of other
FP tasks may occur! (See the task creation sketch below.)

Furthermore, (on PPC) by default, lazy FP context save/restore
is _disabled_.
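
In practical terms, any task whose code touches the FPU in any way must be
created with the RTEMS_FLOATING_POINT attribute. A minimal sketch using the
classic API (the task name, priority, and error handling below are arbitrary
illustrations, not taken from this changeset):

  #include <rtems.h>

  rtems_task Init( rtems_task_argument ignored )
  {
    rtems_id          fp_task_id;
    rtems_status_code sc;

    /* RTEMS_FLOATING_POINT gives the task its own FP context, so this
     * port enables the FPU in the task's MSR and saves/restores the FP
     * registers across context switches.                               */
    sc = rtems_task_create(
      rtems_build_name( 'F', 'P', 'T', 'K' ),
      10,                        /* priority: arbitrary for this sketch */
      RTEMS_MINIMUM_STACK_SIZE,
      RTEMS_DEFAULT_MODES,
      RTEMS_FLOATING_POINT,      /* needed by any task that touches FP registers */
      &fp_task_id
    );
    /* ... check sc and start the task with rtems_task_start() ... */
  }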

  • Property mode set to 100644
File size: 32.0 KB
1/*  cpu.h
2 *
3 *  This include file contains information pertaining to the PowerPC
4 *  processor.
5 *
6 *  Modified for MPC8260 Andy Dachs <a.dachs@sstl.co.uk>
7 *  Surrey Satellite Technology Limited (SSTL), 2001
8 *
9 *  Author:     Andrew Bray <andy@i-cubed.co.uk>
10 *
11 *  COPYRIGHT (c) 1995 by i-cubed ltd.
12 *
13 *  To anyone who acknowledges that this file is provided "AS IS"
14 *  without any express or implied warranty:
15 *      permission to use, copy, modify, and distribute this file
16 *      for any purpose is hereby granted without fee, provided that
17 *      the above copyright notice and this notice appears in all
18 *      copies, and that the name of i-cubed limited not be used in
19 *      advertising or publicity pertaining to distribution of the
20 *      software without specific, written prior permission.
21 *      i-cubed limited makes no representations about the suitability
22 *      of this software for any purpose.
23 *
24 *  Derived from c/src/exec/cpu/no_cpu/cpu.h:
25 *
26 *  COPYRIGHT (c) 1989-1997.
27 *  On-Line Applications Research Corporation (OAR).
28 *
29 *  The license and distribution terms for this file may be found in
30 *  the file LICENSE in this distribution or at
31 *  http://www.OARcorp.com/rtems/license.html.
32 *
33 *  $Id$
34 */
35
36#ifndef __CPU_h
37#define __CPU_h
38
39#ifndef _rtems_score_cpu_h
40#error "You should include <rtems/score/cpu.h>"
41#endif
42
43#include <rtems/powerpc/registers.h>
44
45#ifdef __cplusplus
46extern "C" {
47#endif
48
49/* conditional compilation parameters */
50
51/*
52 *  Should the calls to _Thread_Enable_dispatch be inlined?
53 *
54 *  If TRUE, then they are inlined.
55 *  If FALSE, then a subroutine call is made.
56 *
57 *  Basically this is an example of the classic trade-off of size
58 *  versus speed.  Inlining the call (TRUE) typically increases the
59 *  size of RTEMS while speeding up the enabling of dispatching.
60 *  [NOTE: In general, the _Thread_Dispatch_disable_level will
61 *  only be 0 or 1 unless you are in an interrupt handler and that
62 *  interrupt handler invokes the executive.]  When not inlined
63 *  something calls _Thread_Enable_dispatch which in turn calls
64 *  _Thread_Dispatch.  If the enable dispatch is inlined, then
65 *  one subroutine call is avoided entirely.
66 */
67
68#define CPU_INLINE_ENABLE_DISPATCH       FALSE
69
70/*
71 *  Should the body of the search loops in _Thread_queue_Enqueue_priority
72 *  be unrolled one time?  If unrolled, each iteration of the loop examines
73 *  two "nodes" on the chain being searched.  Otherwise, only one node
74 *  is examined per iteration.
75 *
76 *  If TRUE, then the loops are unrolled.
77 *  If FALSE, then the loops are not unrolled.
78 *
79 *  The primary factor in making this decision is the cost of disabling
80 *  and enabling interrupts (_ISR_Flash) versus the cost of the rest of the
81 *  body of the loop.  On some CPUs, the flash is more expensive than
82 *  one iteration of the loop body.  In this case, it might be desirable
83 *  to unroll the loop.  It is important to note that on some CPUs, this
84 *  code is the longest interrupt disable period in RTEMS.  So it is
85 *  necessary to strike a balance when setting this parameter.
86 */
87
88#define CPU_UNROLL_ENQUEUE_PRIORITY      FALSE
89
90/*
91 *  Does RTEMS manage a dedicated interrupt stack in software?
92 *
93 *  If TRUE, then a stack is allocated in _ISR_Handler_initialization.
94 *  If FALSE, nothing is done.
95 *
96 *  If the CPU supports a dedicated interrupt stack in hardware,
97 *  then it is generally the responsibility of the BSP to allocate it
98 *  and set it up.
99 *
100 *  If the CPU does not support a dedicated interrupt stack, then
101 *  the porter has two options: (1) execute interrupts on the
102 *  stack of the interrupted task, and (2) have RTEMS manage a dedicated
103 *  interrupt stack.
104 *
105 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
106 *
107 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
108 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
109 *  possible that both are FALSE for a particular CPU, although it
110 *  is unclear what that would imply about the interrupt processing
111 *  procedure on that CPU.
112 */
113
114#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
115
116/*
117 *  Does this CPU have hardware support for a dedicated interrupt stack?
118 *
119 *  If TRUE, then it must be installed during initialization.
120 *  If FALSE, then no installation is performed.
121 *
122 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
123 *
124 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
125 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
126 *  possible that both are FALSE for a particular CPU, although it
127 *  is unclear what that would imply about the interrupt processing
128 *  procedure on that CPU.
129 */
130
131#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
132
133/*
134 *  Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
135 *
136 *  If TRUE, then the memory is allocated during initialization.
137 *  If FALSE, then the memory is not allocated.
138 *
139 *  This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE
140 *  or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE.
141 */
142
143#define CPU_ALLOCATE_INTERRUPT_STACK FALSE
144
145/*
146 *  Does RTEMS invoke the user's ISR with the vector number and
147 *  a pointer to the saved interrupt frame (1) or just the vector
148 *  number (0)?
149 */
150
151#define CPU_ISR_PASSES_FRAME_POINTER 0
152
153/*
154 *  Does the CPU have hardware floating point?
155 *
156 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
157 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
158 *
159 *  If there is a FP coprocessor such as the i387 or mc68881, then
160 *  the answer is TRUE.
161 *
162 *  The macro name "PPC_HAS_FPU" should be made CPU specific.
163 *  It indicates whether or not this CPU model has FP support.  For
164 *  example, it would be possible to have an i386_nofp CPU model
165 *  which sets this to FALSE to indicate that you have an i386 without
166 *  an i387 and wish to leave floating point support out of RTEMS.
167 */
168
169#if ( PPC_HAS_FPU == 1 )
170#define CPU_HARDWARE_FP     TRUE
171#else
172#define CPU_HARDWARE_FP     FALSE
173#endif
174
175/*
176 *  Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
177 *
178 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
179 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
180 *
181 *  So far, the only CPU in which this option has been used is the
182 *  HP PA-RISC.  The HP C compiler and gcc both implicitly use the
183 *  floating point registers to perform integer multiplies.  If
184 *  a function which you would not think utilizes the FP unit DOES,
185 *  then one can not easily predict which tasks will use the FP hardware.
186 *  In this case, this option should be TRUE.
187 *
188 *  If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
189 */
190
191#define CPU_ALL_TASKS_ARE_FP     FALSE
192
193/*
194 *  Should the IDLE task have a floating point context?
195 *
196 *  If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
197 *  and it has a floating point context which is switched in and out.
198 *  If FALSE, then the IDLE task does not have a floating point context.
199 *
200 *  Setting this to TRUE negatively impacts the time required to preempt
201 *  the IDLE task from an interrupt because the floating point context
202 *  must be saved as part of the preemption.
203 */
204
205#define CPU_IDLE_TASK_IS_FP      FALSE
206
207/*
208 *  Should the saving of the floating point registers be deferred
209 *  until a context switch is made to another different floating point
210 *  task?
211 *
212 *  If TRUE, then the floating point context will not be stored until
213 *  necessary.  It will remain in the floating point registers and not
214 *  disturbed until another floating point task is switched to.
215 *
216 *  If FALSE, then the floating point context is saved when a floating
217 *  point task is switched out and restored when the next floating point
218 *  task is restored.  The state of the floating point registers between
219 *  those two operations is not specified.
220 *
221 *  If the floating point context does NOT have to be saved as part of
222 *  interrupt dispatching, then it should be safe to set this to TRUE.
223 *
224 *  Setting this flag to TRUE results in using a different algorithm
225 *  for deciding when to save and restore the floating point context.
226 *  The deferred FP switch algorithm minimizes the number of times
227 *  the FP context is saved and restored.  The FP context is not saved
228 *  until a context switch is made to another, different FP task.
229 *  Thus in a system with only one FP task, the FP context will never
230 *  be saved or restored.
231 *
232 *  Note, however, that compilers may use floating point registers/
233 *  instructions for optimization or they may save/restore FP registers
234 *  on the stack. You must not use deferred switching in these cases
235 *  and on the PowerPC attempting to do so will raise a "FP unavailable"
236 *  exception.
237 */
238/*
239 *  ACB Note:  This could make debugging tricky..
240 */
241
242/* conservative setting (FALSE); probably doesn't affect performance too much */
243#define CPU_USE_DEFERRED_FP_SWITCH       FALSE
244
245/*
246 *  Does this port provide a CPU dependent IDLE task implementation?
247 *
248 *  If TRUE, then the routine _CPU_Thread_Idle_body
249 *  must be provided and is the default IDLE thread body instead of
251 *  _Thread_Idle_body.
251 *
252 *  If FALSE, then use the generic IDLE thread body if the BSP does
253 *  not provide one.
254 *
255 *  This is intended to allow for supporting processors which have
256 *  a low power or idle mode.  When the IDLE thread is executed, then
257 *  the CPU can be powered down.
258 *
259 *  The order of precedence for selecting the IDLE thread body is:
260 *
261 *    1.  BSP provided
262 *    2.  CPU dependent (if provided)
263 *    3.  generic (if no BSP and no CPU dependent)
264 */
265
266#define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE
267
268
269/*
270 *  Does the stack grow up (toward higher addresses) or down
271 *  (toward lower addresses)?
272 *
273 *  If TRUE, then the stack grows upward.
274 *  If FALSE, then the stack grows toward smaller addresses.
275 */
276
277#define CPU_STACK_GROWS_UP               FALSE
278
279/*
280 *  The following is the variable attribute used to force alignment
281 *  of critical RTEMS structures.  On some processors it may make
282 *  sense to have these aligned on tighter boundaries than
283 *  the minimum requirements of the compiler in order to have as
284 *  much of the critical data area as possible in a cache line.
285 *
286 *  The placement of this macro in the declaration of the variables
287 *  is based on the syntactic requirements of the GNU C
288 *  "__attribute__" extension.  For example with GNU C, use
289 *  the following to force a structure to a 32 byte boundary.
290 *
291 *      __attribute__ ((aligned (32)))
292 *
293 *  NOTE:  Currently only the Priority Bit Map table uses this feature.
294 *         To benefit from using this, the data must be heavily
295 *         used so it will stay in the cache and used frequently enough
296 *         in the executive to justify turning this on.
297 */
298
299#define CPU_STRUCTURE_ALIGNMENT \
300  __attribute__ ((aligned (PPC_CACHE_ALIGNMENT)))
301
302/*
303 *  Define what is required to specify how the network to host conversion
304 *  routines are handled.
305 */
306
307#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES     FALSE
308#define CPU_BIG_ENDIAN                           TRUE
309#define CPU_LITTLE_ENDIAN                        FALSE
310
311
312/*
313 *  Processor defined structures
314 *
315 *  Example structures include the descriptor tables from the i386
316 *  and the processor control structure on the i960ca.
317 */
318
319/* may need to put some structures here.  */
320
321/*
322 * Contexts
323 *
324 *  Generally there are 2 types of context to save.
325 *     1. Interrupt registers to save
326 *     2. Task level registers to save
327 *
328 *  This means we have the following 3 context items:
329 *     1. task level context stuff::  Context_Control
330 *     2. floating point task stuff:: Context_Control_fp
331 *     3. special interrupt level context :: Context_Control_interrupt
332 *
333 *  On some processors, it is cost-effective to save only the callee
334 *  preserved registers during a task context switch.  This means
335 *  that the ISR code needs to save those registers which do not
336 *  persist across function calls.  It is not mandatory to make this
337 *  distinction between the caller/callee saved registers for the
338 *  purpose of minimizing context saved during task switch and on interrupts.
339 *  If the cost of saving extra registers is minimal, simplicity is the
340 *  choice.  Save the same context on interrupt entry as for tasks in
341 *  this case.
342 *
343 *  Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
344 *  care should be used in designing the context area.
345 *
346 *  On some CPUs with hardware floating point support, the Context_Control_fp
347 *  structure will not be used or it simply consists of an array of a
348 *  fixed number of bytes.   This is done when the floating point context
349 *  is dumped by a "FP save context" type instruction and the format
350 *  is not really defined by the CPU.  In this case, there is no need
351 *  to figure out the exact format -- only the size.  Of course, although
352 *  this is enough information for RTEMS, it is probably not enough for
353 *  a debugger such as gdb.  But that is another problem.
354 */
355
356#ifndef ASM
357
358typedef struct {
359    unsigned32 gpr1;    /* Stack pointer for all */
360    unsigned32 gpr2;    /* TOC in PowerOpen, reserved SVR4, section ptr EABI + */
361    unsigned32 gpr13;   /* First non volatile PowerOpen, section ptr SVR4/EABI */
362    unsigned32 gpr14;   /* Non volatile for all */
363    unsigned32 gpr15;   /* Non volatile for all */
364    unsigned32 gpr16;   /* Non volatile for all */
365    unsigned32 gpr17;   /* Non volatile for all */
366    unsigned32 gpr18;   /* Non volatile for all */
367    unsigned32 gpr19;   /* Non volatile for all */
368    unsigned32 gpr20;   /* Non volatile for all */
369    unsigned32 gpr21;   /* Non volatile for all */
370    unsigned32 gpr22;   /* Non volatile for all */
371    unsigned32 gpr23;   /* Non volatile for all */
372    unsigned32 gpr24;   /* Non volatile for all */
373    unsigned32 gpr25;   /* Non volatile for all */
374    unsigned32 gpr26;   /* Non volatile for all */
375    unsigned32 gpr27;   /* Non volatile for all */
376    unsigned32 gpr28;   /* Non volatile for all */
377    unsigned32 gpr29;   /* Non volatile for all */
378    unsigned32 gpr30;   /* Non volatile for all */
379    unsigned32 gpr31;   /* Non volatile for all */
380    unsigned32 cr;      /* PART of the CR is non volatile for all */
381    unsigned32 pc;      /* Program counter/Link register */
382    unsigned32 msr;     /* Initial interrupt level */
383} Context_Control;
384
385typedef struct {
386    /* The ABIs (PowerOpen/SVR4/EABI) only require saving f14-f31 over
387     * procedure calls.  However, this would mean that the interrupt
388     * frame had to hold f0-f13, and the fpscr.  And as the majority
389     * of tasks will not have an FP context, we will save the whole
390     * context here.
391     */
392#if (PPC_HAS_DOUBLE == 1)
393    double      f[32];
394    double      fpscr;
395#else
396    float       f[32];
397    float       fpscr;
398#endif
399} Context_Control_fp;
400
401typedef struct CPU_Interrupt_frame {
402    unsigned32 stacklink;       /* Ensure this is a real frame (also reg1 save) */
403    unsigned32 calleeLr;        /* link register used by callees: SVR4/EABI */
404  /* This is what is left out of the primary contexts */
405    unsigned32 gpr0;
406    unsigned32 gpr2;            /* play safe */
407    unsigned32 gpr3;
408    unsigned32 gpr4;
409    unsigned32 gpr5;
410    unsigned32 gpr6;
411    unsigned32 gpr7;
412    unsigned32 gpr8;
413    unsigned32 gpr9;
414    unsigned32 gpr10;
415    unsigned32 gpr11;
416    unsigned32 gpr12;
417    unsigned32 gpr13;   /* Play safe */
418    unsigned32 gpr28;   /* For internal use by the IRQ handler */
419    unsigned32 gpr29;   /* For internal use by the IRQ handler */
420    unsigned32 gpr30;   /* For internal use by the IRQ handler */
421    unsigned32 gpr31;   /* For internal use by the IRQ handler */
422    unsigned32 cr;      /* Bits of this are volatile, so no-one may save */
423    unsigned32 ctr;
424    unsigned32 xer;
425    unsigned32 lr;
426    unsigned32 pc;
427    unsigned32 msr;
428    unsigned32 pad[3];
429} CPU_Interrupt_frame;
430 
431/*
432 *  The following table contains the information required to configure
433 *  the PowerPC processor specific parameters.
434 */
435
436typedef struct {
437  void       (*pretasking_hook)( void );
438  void       (*predriver_hook)( void );
439  void       (*postdriver_hook)( void );
440  void       (*idle_task)( void );
441  boolean      do_zero_of_workspace;
442  unsigned32   idle_task_stack_size;
443  unsigned32   interrupt_stack_size;
444  unsigned32   extra_mpci_receive_server_stack;
445  void *     (*stack_allocate_hook)( unsigned32 );
446  void       (*stack_free_hook)( void* );
447  /* end of fields required on all CPUs */
448
449  unsigned32   clicks_per_usec;        /* Timer clicks per microsecond */
450  boolean      exceptions_in_RAM;     /* TRUE if in RAM */
451
452#if (defined(ppc403) || defined(mpc860) || defined(mpc821) || defined(mpc8260))
453  unsigned32   serial_per_sec;         /* Serial clocks per second */
454  boolean      serial_external_clock;
455  boolean      serial_xon_xoff;
456  boolean      serial_cts_rts;
457  unsigned32   serial_rate;
458  unsigned32   timer_average_overhead; /* Average overhead of timer in ticks */
459  unsigned32   timer_least_valid;      /* Least valid number from timer      */
460  boolean      timer_internal_clock;   /* TRUE, when timer runs with CPU clk */
461#endif
462
463#if (defined(mpc860) || defined(mpc821) || defined(mpc8260))
464  unsigned32   clock_speed;            /* Speed of CPU in Hz */
465#endif
466}   rtems_cpu_table;
467
468/*
469 *  Macros to access required entries in the CPU Table are in
470 *  the file rtems/system.h.
471 */
472
473/*
474 *  Macros to access PowerPC MPC750 specific additions to the CPU Table
475 */
476
477#define rtems_cpu_configuration_get_clicks_per_usec() \
478   (_CPU_Table.clicks_per_usec)
479
480#define rtems_cpu_configuration_get_exceptions_in_ram() \
481   (_CPU_Table.exceptions_in_RAM)
482
483/*
484 *  This variable is optional.  It is used on CPUs on which it is difficult
485 *  to generate an "uninitialized" FP context.  It is filled in by
486 *  _CPU_Initialize and copied into the task's FP context area during
487 *  _CPU_Context_Initialize.
488 */
489
490/* EXTERN Context_Control_fp  _CPU_Null_fp_context; */
491
492/*
493 *  On some CPUs, RTEMS supports a software managed interrupt stack.
494 *  This stack is allocated by the Interrupt Manager and the switch
495 *  is performed in _ISR_Handler.  These variables contain pointers
496 *  to the lowest and highest addresses in the chunk of memory allocated
497 *  for the interrupt stack.  Since it is unknown whether the stack
498 *  grows up or down (in general), this gives the CPU dependent
499 *  code the option of picking the version it wants to use.
500 *
501 *  NOTE: These two variables are required if the macro
502 *        CPU_HAS_SOFTWARE_INTERRUPT_STACK is defined as TRUE.
503 */
504
505SCORE_EXTERN void               *_CPU_Interrupt_stack_low;
506SCORE_EXTERN void               *_CPU_Interrupt_stack_high;
507
508#endif /* ndef ASM */
509
510/*
511 *  This defines the number of levels and the mask used to pick those
512 *  bits out of a thread mode.
513 */
514
515#define CPU_MODES_INTERRUPT_LEVEL  0x00000001 /* interrupt level in mode */
516#define CPU_MODES_INTERRUPT_MASK   0x00000001 /* interrupt level in mode */
517
518/*
519 *  With some compilation systems, it is difficult if not impossible to
520 *  call a high-level language routine from assembly language.  This
521 *  is especially true of commercial Ada compilers and name mangling
522 *  C++ ones.  This variable can be optionally defined by the CPU porter
523 *  and contains the address of the routine _Thread_Dispatch.  This
524 *  can make it easier to invoke that routine at the end of the interrupt
525 *  sequence (if a dispatch is necessary).
526 */
527
528/* EXTERN void           (*_CPU_Thread_dispatch_pointer)(); */
529
530/*
531 *  Nothing prevents the porter from declaring more CPU specific variables.
532 */
533
534#ifndef ASM
535 
536SCORE_EXTERN struct {
537  unsigned32 *Disable_level;
538  void *Stack;
539  volatile boolean *Switch_necessary;
540  boolean *Signal;
541
542} _CPU_IRQ_info CPU_STRUCTURE_ALIGNMENT;
543
544#endif /* ndef ASM */
545
546/*
547 *  The size of the floating point context area.  On some CPUs this
548 *  will not be a "sizeof" because the format of the floating point
549 *  area is not defined -- only the size is.  This is usually on
550 *  CPUs with a "floating point save context" instruction.
551 */
552
553#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
554
555/*
556 * (Optional) # of bytes for libmisc/stackchk to check
557 * If not specified, then it defaults to something reasonable
558 * for most architectures.
559 */
560
561#define CPU_STACK_CHECK_SIZE    (128)
562
563/*
564 *  Amount of extra stack (above minimum stack size) required by
565 *  MPCI receive server thread.  Remember that in a multiprocessor
566 *  system this thread must exist and be able to process all directives.
567 */
568
569#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
570
571/*
572 *  This defines the number of entries in the ISR_Vector_table managed
573 *  by RTEMS.
574 */
575
576#define CPU_INTERRUPT_NUMBER_OF_VECTORS     (PPC_INTERRUPT_MAX)
577#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER (PPC_INTERRUPT_MAX - 1)
578
579/*
580 *  This is defined if the port has a special way to report the ISR nesting
581 *  level.  Most ports maintain the variable _ISR_Nest_level.
582 */
583
584#define CPU_PROVIDES_ISR_IS_IN_PROGRESS TRUE
585
586/*
587 *  Should be large enough to run all RTEMS tests.  This ensures
588 *  that a "reasonable" small application should not have any problems.
589 */
590
591#define CPU_STACK_MINIMUM_SIZE          (1024*8)
592
593/*
594 *  CPU's worst alignment requirement for data types on a byte boundary.  This
595 *  alignment does not take into account the requirements for the stack.
596 */
597
598#define CPU_ALIGNMENT              (PPC_ALIGNMENT)
599
600/*
601 *  This number corresponds to the byte alignment requirement for the
602 *  heap handler.  This alignment requirement may be stricter than that
603 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
604 *  common for the heap to follow the same alignment requirement as
605 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
606 *  then this should be set to CPU_ALIGNMENT.
607 *
608 *  NOTE:  This does not have to be a power of 2.  It does have to
609 *         be greater than or equal to CPU_ALIGNMENT.
610 */
611
612#define CPU_HEAP_ALIGNMENT         (PPC_ALIGNMENT)
613
614/*
615 *  This number corresponds to the byte alignment requirement for memory
616 *  buffers allocated by the partition manager.  This alignment requirement
617 *  may be stricter than that for the data types alignment specified by
618 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
619 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
620 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
621 *
622 *  NOTE:  This does not have to be a power of 2.  It does have to
623 *         be greater than or equal to CPU_ALIGNMENT.
624 */
625
626#define CPU_PARTITION_ALIGNMENT    (PPC_ALIGNMENT)
627
628/*
629 *  This number corresponds to the byte alignment requirement for the
630 *  stack.  This alignment requirement may be stricter than that for the
631 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
632 *  is strict enough for the stack, then this should be set to 0.
633 *
634 *  NOTE:  This must be a power of 2, either 0 or greater than CPU_ALIGNMENT.
635 */
636
637#define CPU_STACK_ALIGNMENT        (PPC_STACK_ALIGNMENT)
638
639/*
640 * Needed for Interrupt stack
641 */
642#define CPU_MINIMUM_STACK_FRAME_SIZE 8
643
644
645/*
646 *  ISR handler macros
647 */
648
649#define _CPU_Initialize_vectors()
650
651/*
652 *  Disable all interrupts for an RTEMS critical section.  The previous
653 *  level is returned in _isr_cookie.
654 */
655
656#ifndef ASM
657 
658static inline unsigned32 _CPU_ISR_Get_level( void )
659{
660  register unsigned int msr;
661  _CPU_MSR_GET(msr);
662  if (msr & MSR_EE) return 0;
663  else  return 1;
664}
665
666static inline void _CPU_ISR_Set_level( unsigned32 level )
667{
668  register unsigned int msr;
669  _CPU_MSR_GET(msr);
670  if (!(level & CPU_MODES_INTERRUPT_MASK)) {
671    msr |= MSR_EE;
672  }
673  else {
674    msr &= ~MSR_EE;
675  }
676  _CPU_MSR_SET(msr);
677}
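
/*
 *  Usage sketch (illustrative only, not part of this port's API):  the two
 *  routines above map RTEMS's single PowerPC interrupt level onto the MSR
 *  EE bit -- level 0 means external exceptions are enabled and any non-zero
 *  level means they are masked.  The hypothetical helper below shows how a
 *  short critical region could be bracketed with them.
 */

static inline void _CPU_ISR_Level_usage_example( void )
{
  unsigned32 previous = _CPU_ISR_Get_level();

  _CPU_ISR_Set_level( 1 );          /* non-zero level: MSR_EE cleared */
  /* ... code which must not be interrupted externally ... */
  _CPU_ISR_Set_level( previous );   /* restore the caller's level     */
}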
678 
679#define _CPU_ISR_install_vector(irq, new, old) {BSP_panic("_CPU_ISR_install_vector called\n");}
680
681/* Context handler macros */
682
683/*
684 *  Initialize the context to a state suitable for starting a
685 *  task after a context restore operation.  Generally, this
686 *  involves:
687 *
688 *     - setting a starting address
689 *     - preparing the stack
690 *     - preparing the stack and frame pointers
691 *     - setting the proper interrupt level in the context
692 *     - initializing the floating point context
693 *
694 *  This routine generally does not set any unnecessary register
695 *  in the context.  The state of the "general data" registers is
696 *  undefined at task start time.
697 *
698 *  NOTE:  Implemented as a subroutine for the SPARC port.
699 */
700
701void _CPU_Context_Initialize(
702  Context_Control  *the_context,
703  unsigned32       *stack_base,
704  unsigned32        size,
705  unsigned32        new_level,
706  void             *entry_point,
707  boolean           is_fp
708);
709
710/*
711 *  This routine is responsible for somehow restarting the currently
712 *  executing task.  If you are lucky, then all that is necessary
713 *  is restoring the context.  Otherwise, there will need to be
714 *  a special assembly routine which does something special in this
715 *  case.  Context_Restore should work most of the time.  It will
716 *  not work if restarting self conflicts with the stack frame
717 *  assumptions of restoring a context.
718 */
719
720#define _CPU_Context_Restart_self( _the_context ) \
721   _CPU_Context_restore( (_the_context) );
722
723/*
724 *  The purpose of this macro is to allow the initial pointer into
725 *  a floating point context area (used to save the floating point
726 *  context) to be at an arbitrary place in the floating point
727 *  context area.
728 *
729 *  This is necessary because some FP units are designed to have
730 *  their context saved as a stack which grows into lower addresses.
731 *  Other FP units can be saved by simply moving registers into offsets
732 *  from the base of the context area.  Finally some FP units provide
733 *  a "dump context" instruction which could fill in from high to low
734 *  or low to high based on the whim of the CPU designers.
735 */
736
737#define _CPU_Context_Fp_start( _base, _offset ) \
738   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
739
740/*
741 *  This routine initializes the FP context area passed to it.
742 *  There are a few standard ways in which to initialize the
743 *  floating point context.  The code included for this macro assumes
744 *  that this is a CPU in which an "initial" FP context was saved into
745 *  _CPU_Null_fp_context and it simply copies it to the destination
746 *  context passed to it.
747 *
748 *  Other models include (1) not doing anything, and (2) putting
749 *  a "null FP status word" in the correct place in the FP context.
750 */
751
752#define _CPU_Context_Initialize_fp( _destination ) \
753  { \
754   ((Context_Control_fp *) *((void **) _destination))->fpscr = PPC_INIT_FPSCR; \
755  }
756
757/* end of Context handler macros */
758
759/* Fatal Error manager macros */
760
761/*
762 *  This routine copies _error into a known place -- typically a stack
763 *  location or a register, optionally disables interrupts, and
764 *  halts/stops the CPU.
765 */
766
767#define _CPU_Fatal_halt( _error ) \
768  _BSP_Fatal_error(_error)
769
770/* end of Fatal Error manager macros */
771
772/* Bitfield handler macros */
773
774/*
775 *  This routine sets _output to the bit number of the first bit
776 *  set in _value.  _value is of CPU dependent type Priority_Bit_map_control.
777 *  This type may be either 16 or 32 bits wide although only the 16
778 *  least significant bits will be used.
779 *
780 *  There are a number of variables in using a "find first bit" type
781 *  instruction.
782 *
783 *    (1) What happens when run on a value of zero?
784 *    (2) Bits may be numbered from MSB to LSB or vice-versa.
785 *    (3) The numbering may be zero or one based.
786 *    (4) The "find first bit" instruction may search from MSB or LSB.
787 *
788 *  RTEMS guarantees that (1) will never happen so it is not a concern.
789 *  (2),(3), (4) are handled by the macros _CPU_Priority_mask() and
790 *  _CPU_Priority_Bits_index().  These three form a set of routines
791 *  which must logically operate together.  Bits in the _value are
792 *  set and cleared based on masks built by _CPU_Priority_mask().
793 *  The basic major and minor values calculated by _Priority_Major()
794 *  and _Priority_Minor() are "massaged" by _CPU_Priority_Bits_index()
795 *  to properly range between the values returned by the "find first bit"
796 *  instruction.  This makes it possible for _Priority_Get_highest() to
797 *  calculate the major and directly index into the minor table.
798 *  This mapping is necessary to ensure that 0 (a high priority major/minor)
799 *  is the first bit found.
800 *
801 *  This entire "find first bit" and mapping process depends heavily
802 *  on the manner in which a priority is broken into a major and minor
803 *  components with the major being the 4 MSB of a priority and minor
804 *  the 4 LSB.  Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
805 *  priority.  And (15 << 4) + 14 corresponds to priority 254 -- the next
806 *  to the lowest priority.
807 *
808 *  If your CPU does not have a "find first bit" instruction, then
809 *  there are ways to make do without it.  Here are a handful of ways
810 *  to implement this in software:
811 *
812 *    - a series of 16 bit test instructions
813 *    - a "binary search using if's"
814 *    - _number = 0
815 *      if _value > 0x00ff
816 *        _value >>=8
817 *        _number = 8;
818 *
819 *      if _value > 0x000f
820 *        _value >>= 4
821 *        _number += 4
822 *
823 *      _number += bit_set_table[ _value ]
824 *
825 *    where bit_set_table[ 16 ] has values which indicate the first
826 *      bit set
827 */
828
829#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
830  { \
831    asm volatile ("cntlzw %0, %1" : "=r" ((_output)), "=r" ((_value)) : \
832                  "1" ((_value))); \
833  }
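
/*
 *  Illustrative sketch only (this port uses the cntlzw instruction above):
 *  a portable software fallback for "find first bit" along the lines of the
 *  binary search described in the comment.  Note that it numbers bits from
 *  the least significant end, so matching _CPU_Priority_Mask() and
 *  _CPU_Priority_bits_index() definitions would be needed.  The names below
 *  are hypothetical.
 */

static const unsigned char _CPU_Bit_set_table[16] = {
  0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
};

static inline unsigned int _CPU_Find_first_bit_sw( unsigned int value )
{
  unsigned int number = 0;          /* RTEMS guarantees value is non-zero */

  if ( value > 0x00ff ) { value >>= 8; number = 8;  }
  if ( value > 0x000f ) { value >>= 4; number += 4; }
  return number + _CPU_Bit_set_table[ value ];
}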
834
835/* end of Bitfield handler macros */
836
837/*
838 *  This routine builds the mask which corresponds to the bit fields
839 *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
840 *  for that routine.
841 */
842
843#define _CPU_Priority_Mask( _bit_number ) \
844  ( 0x80000000 >> (_bit_number) )
845
846/*
847 *  This routine translates the bit numbers returned by
848 *  _CPU_Bitfield_Find_first_bit() into something suitable for use as
849 *  a major or minor component of a priority.  See the discussion
850 *  for that routine.
851 */
852
853#define _CPU_Priority_bits_index( _priority ) \
854  (_priority)
855
856/* end of Priority handler macros */
857
858/* variables */
859
860extern const unsigned32 _CPU_msrs[4];
861
862/* functions */
863
864/*
865 *  _CPU_Initialize
866 *
867 *  This routine performs CPU dependent initialization.
868 */
869
870void _CPU_Initialize(
871  rtems_cpu_table  *cpu_table,
872  void            (*thread_dispatch)
873);
874
875
876/*
877 *  _CPU_Install_interrupt_stack
878 *
879 *  This routine installs the hardware interrupt stack pointer.
880 *
881 *  NOTE:  It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
882 *         is TRUE.
883 */
884
885void _CPU_Install_interrupt_stack( void );
886
887/*
888 *  _CPU_Context_switch
889 *
890 *  This routine switches from the run context to the heir context.
891 */
892
893void _CPU_Context_switch(
894  Context_Control  *run,
895  Context_Control  *heir
896);
897
898/*
899 *  _CPU_Context_restore
900 *
901 *  This routine is generally used only to restart self in an
902 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
903 *
904 *  NOTE: May be unnecessary to reload some registers.
905 */
906
907void _CPU_Context_restore(
908  Context_Control *new_context
909);
910
911/*
912 *  _CPU_Context_save_fp
913 *
914 *  This routine saves the floating point context passed to it.
915 */
916
917void _CPU_Context_save_fp(
918  void **fp_context_ptr
919);
920
921/*
922 *  _CPU_Context_restore_fp
923 *
924 *  This routine restores the floating point context passed to it.
925 */
926
927void _CPU_Context_restore_fp(
928  void **fp_context_ptr
929);
930
931void _CPU_Fatal_error(
932  unsigned32 _error
933);
934
935/*  The following routine swaps the endian format of an unsigned int.
936 *  It must be static because it is referenced indirectly.
937 *
938 *  This version will work on any processor, but if there is a better
939 *  way for your CPU PLEASE use it.  The most common way to do this is to:
940 *
941 *     swap least significant two bytes with 16-bit rotate
942 *     swap upper and lower 16-bits
943 *     swap most significant two bytes with 16-bit rotate
944 *
945 *  Some CPUs have special instructions which swap a 32-bit quantity in
946 *  a single instruction (e.g. i486).  It is probably best to avoid
947 *  an "endian swapping control bit" in the CPU.  One good reason is
948 *  that interrupts would probably have to be disabled to ensure that
949 *  an interrupt does not try to access the same "chunk" with the wrong
950 *  endian.  Another good reason is that on some CPUs, the endian bit
951 *  changes the endianness for ALL fetches -- both code and data -- so the code
952 *  will be fetched incorrectly.
953 */
954 
955static inline unsigned int CPU_swap_u32(
956  unsigned int value
957)
958{
959  unsigned32 swapped;
960 
961  asm volatile("rlwimi %0,%1,8,24,31;"
962               "rlwimi %0,%1,24,16,23;"
963               "rlwimi %0,%1,8,8,15;"
964               "rlwimi %0,%1,24,0,7;" :
965               "=&r" ((swapped)) : "r" ((value)));
966
967  return( swapped );
968}
969
970#define CPU_swap_u16( value ) \
971  (((value&0xff) << 8) | ((value >> 8)&0xff))
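
/*
 *  Illustrative sketch only:  a portable shift-and-mask equivalent of the
 *  rlwimi-based CPU_swap_u32() above, as would be used on a CPU without a
 *  rotate/swap instruction.  This port does not use it; the name is
 *  hypothetical.
 */

static inline unsigned int CPU_swap_u32_portable( unsigned int value )
{
  unsigned32 byte1, byte2, byte3, byte4;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >>  8) & 0xff;
  byte1 =  value        & 0xff;

  return ( byte1 << 24 ) | ( byte2 << 16 ) | ( byte3 << 8 ) | byte4;
}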
972
973#endif /* ndef ASM */
974
975#ifdef __cplusplus
976}
977#endif
978
979#endif