source: rtems/c/src/exec/score/cpu/sparc/cpu.h @ ea74482

Last change on this file since ea74482 was c62d36f, checked in by Joel Sherrill <joel.sherrill@…>, on 10/06/95 at 20:01:20

SPARC merged and successfully tested w/o interrupt support

/*  cpu.h
 *
 *  This include file contains information pertaining to the SPARC
 *  processor.
 *
 *  $Id$
 */

#ifndef __CPU_h
#define __CPU_h

#ifdef __cplusplus
extern "C" {
#endif

#include <rtems/score/sparc.h>               /* pick up machine definitions */
#ifndef ASM
#include <rtems/score/sparctypes.h>
#endif

/* conditional compilation parameters */

/*
 *  Should the calls to _Thread_Enable_dispatch be inlined?
 *
 *  If TRUE, then they are inlined.
 *  If FALSE, then a subroutine call is made.
 *
 *  Basically this is an example of the classic trade-off of size
 *  versus speed.  Inlining the call (TRUE) typically increases the
 *  size of the executive while speeding up the enabling of dispatching.
 *  [NOTE: In general, the _Thread_Dispatch_disable_level will
 *  only be 0 or 1 unless you are in an interrupt handler and that
 *  interrupt handler invokes the executive.]  When not inlined,
 *  something calls _Thread_Enable_dispatch which in turn calls
 *  _Thread_Dispatch.  If the enable dispatch is inlined, then
 *  one subroutine call is avoided entirely.
 */

#define CPU_INLINE_ENABLE_DISPATCH       TRUE

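/*
 *  For illustration only -- a hedged sketch of what this parameter
 *  selects between.  The simplified inline body below is an assumption
 *  based on the NOTE above, not the actual executive source:
 *
 *      #if ( CPU_INLINE_ENABLE_DISPATCH == TRUE )
 *      #define _Thread_Enable_dispatch() \
 *        { if ( --_Thread_Dispatch_disable_level == 0 ) \
 *            _Thread_Dispatch(); }
 *      #else
 *      void _Thread_Enable_dispatch( void );   -- one extra subroutine call
 *      #endif
 */
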
/*
 *  Should the body of the search loops in _Thread_queue_Enqueue_priority
 *  be unrolled one time?  If unrolled, each iteration of the loop examines
 *  two "nodes" on the chain being searched.  Otherwise, only one node
 *  is examined per iteration.
 *
 *  If TRUE, then the loops are unrolled.
 *  If FALSE, then the loops are not unrolled.
 *
 *  The primary factor in making this decision is the cost of disabling
 *  and enabling interrupts (_ISR_Flash) versus the cost of the rest of
 *  the body of the loop.  On some CPUs, the flash is more expensive than
 *  one iteration of the loop body.  In this case, it might be desirable
 *  to unroll the loop.  It is important to note that on some CPUs, this
 *  code is the longest interrupt disable period in the executive.  So it is
 *  necessary to strike a balance when setting this parameter.
 */

#define CPU_UNROLL_ENQUEUE_PRIORITY      TRUE

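/*
 *  For illustration only -- a hedged sketch of the loop shape this
 *  parameter controls; the names and structure are assumptions, not
 *  the actual _Thread_queue_Enqueue_priority source:
 *
 *      while ( the_node != tail ) {
 *        examine( the_node );                -- always one node
 *      #if ( CPU_UNROLL_ENQUEUE_PRIORITY == TRUE )
 *        examine( the_node->next );          -- second node per flash
 *        the_node = the_node->next;
 *      #endif
 *        the_node = the_node->next;
 *        _ISR_Flash( level );                -- briefly reenable interrupts
 *      }
 */
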
/*
 *  Does the executive manage a dedicated interrupt stack in software?
 *
 *  If TRUE, then a stack is allocated in _Interrupt_Manager_initialization.
 *  If FALSE, nothing is done.
 *
 *  If the CPU supports a dedicated interrupt stack in hardware,
 *  then it is generally the responsibility of the BSP to allocate it
 *  and set it up.
 *
 *  If the CPU does not support a dedicated interrupt stack, then
 *  the porter has two options: (1) execute interrupts on the stack of
 *  the interrupted task, or (2) have the executive manage a dedicated
 *  interrupt stack.
 *
 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
 *
 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
 *  possible that both are FALSE for a particular CPU, although it
 *  is unclear what that would imply about the interrupt processing
 *  procedure on that CPU.
 */

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK   FALSE /* XXX */

/*
 *  Does this CPU have hardware support for a dedicated interrupt stack?
 *
 *  If TRUE, then it must be installed during initialization.
 *  If FALSE, then no installation is performed.
 *
 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
 *
 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
 *  possible that both are FALSE for a particular CPU, although it
 *  is unclear what that would imply about the interrupt processing
 *  procedure on that CPU.
 */

#define CPU_HAS_HARDWARE_INTERRUPT_STACK TRUE /* XXX */

/*
 *  Do we allocate a dedicated interrupt stack in the Interrupt Manager?
 *
 *  If TRUE, then the memory is allocated during initialization.
 *  If FALSE, then the memory is not allocated during initialization.
 *
 *  This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE
 *  or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE.
 */

#define CPU_ALLOCATE_INTERRUPT_STACK      TRUE

/*
 *  Does the CPU have hardware floating point?
 *
 *  If TRUE, then the FLOATING_POINT task attribute is supported.
 *  If FALSE, then the FLOATING_POINT task attribute is ignored.
 *
 *  If there is an FP coprocessor such as the i387 or mc68881, then
 *  the answer is TRUE.
 *
 *  The macro name "SPARC_HAS_FPU" should be made CPU specific.
 *  It indicates whether or not this CPU model has FP support.  For
 *  example, it would be possible to have an i386_nofp CPU model
 *  which sets this to FALSE to indicate that you have an i386 without
 *  an i387 and wish to leave floating point support out.
 */

#if ( SPARC_HAS_FPU == 1 )
#define CPU_HARDWARE_FP     TRUE
#else
#define CPU_HARDWARE_FP     FALSE
#endif

/*
 *  Are all tasks FLOATING_POINT tasks implicitly?
 *
 *  If TRUE, then the FLOATING_POINT task attribute is assumed.
 *  If FALSE, then the FLOATING_POINT task attribute is followed.
 *
 *  So far, the only CPU in which this option has been used is the
 *  HP PA-RISC.  The HP C compiler and gcc both implicitly use the
 *  floating point registers to perform integer multiplies.  If
 *  a function which you would not expect to utilize the FP unit DOES,
 *  then one cannot easily predict which tasks will use the FP hardware.
 *  In this case, this option should be TRUE.
 *
 *  If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
 */

#define CPU_ALL_TASKS_ARE_FP     FALSE

/*
 *  Should the IDLE task have a floating point context?
 *
 *  If TRUE, then the IDLE task is created as a FLOATING_POINT task
 *  and it has a floating point context which is switched in and out.
 *  If FALSE, then the IDLE task does not have a floating point context.
 *
 *  Setting this to TRUE negatively impacts the time required to preempt
 *  the IDLE task from an interrupt because the floating point context
 *  must be saved as part of the preemption.
 */

#define CPU_IDLE_TASK_IS_FP      FALSE

/*
 *  Should the saving of the floating point registers be deferred
 *  until a context switch is made to another, different floating point
 *  task?
 *
 *  If TRUE, then the floating point context will not be stored until
 *  necessary.  It will remain in the floating point registers and not
 *  be disturbed until another floating point task is switched to.
 *
 *  If FALSE, then the floating point context is saved when a floating
 *  point task is switched out and restored when the next floating point
 *  task is restored.  The state of the floating point registers between
 *  those two operations is not specified.
 *
 *  If the floating point context does NOT have to be saved as part of
 *  interrupt dispatching, then it should be safe to set this to TRUE.
 *
 *  Setting this flag to TRUE results in using a different algorithm
 *  for deciding when to save and restore the floating point context.
 *  The deferred FP switch algorithm minimizes the number of times
 *  the FP context is saved and restored.  The FP context is not saved
 *  until a context switch is made to another, different FP task.
 *  Thus in a system with only one FP task, the FP context will never
 *  be saved or restored.
 */

#define CPU_USE_DEFERRED_FP_SWITCH       TRUE

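/*
 *  For illustration only -- a hedged sketch of the deferred algorithm
 *  described above.  The variable and field names here are assumptions:
 *
 *      -- at dispatch time, before "heir" runs:
 *      if ( heir->is_fp && heir != _Thread_Allocated_fp ) {
 *        if ( _Thread_Allocated_fp != NULL )
 *          _CPU_Context_save_fp( &_Thread_Allocated_fp->fp_context );
 *        _CPU_Context_restore_fp( &heir->fp_context );
 *        _Thread_Allocated_fp = heir;
 *      }
 *
 *  With only one FP task, the guard is never true after the first
 *  dispatch to it, so the FP context is never saved or restored.
 */
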
/*
 *  Does this port provide a CPU dependent IDLE task implementation?
 *
 *  If TRUE, then the routine _CPU_Internal_threads_Idle_thread_body
 *  must be provided and is the default IDLE thread body instead of
 *  _Internal_threads_Idle_thread_body.
 *
 *  If FALSE, then use the generic IDLE thread body if the BSP does
 *  not provide one.
 *
 *  This is intended to allow for supporting processors which have
 *  a low power or idle mode.  When the IDLE thread is executed, then
 *  the CPU can be powered down.
 *
 *  The order of precedence for selecting the IDLE thread body is:
 *
 *    1.  BSP provided
 *    2.  CPU dependent (if provided)
 *    3.  generic (if no BSP and no CPU dependent)
 */

#define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE

/*
 *  Does the stack grow up (toward higher addresses) or down
 *  (toward lower addresses)?
 *
 *  If TRUE, then the stack grows upward.
 *  If FALSE, then the stack grows toward smaller addresses.
 */

#define CPU_STACK_GROWS_UP               FALSE

/*
 *  The following is the variable attribute used to force alignment
 *  of critical data structures.  On some processors it may make
 *  sense to have these aligned on tighter boundaries than
 *  the minimum requirements of the compiler in order to have as
 *  much of the critical data area as possible in a cache line.
 *
 *  The placement of this macro in the declaration of the variables
 *  is based on the syntactic requirements of the GNU C
 *  "__attribute__" extension.  For example with GNU C, use
 *  the following to force a structure to a 32 byte boundary.
 *
 *      __attribute__ ((aligned (32)))
 *
 *  NOTE:  Currently only the Priority Bit Map table uses this feature.
 *         To benefit from using this, the data must be heavily
 *         used so it will stay in the cache and used frequently enough
 *         in the executive to justify turning this on.
 */

#define CPU_STRUCTURE_ALIGNMENT          __attribute__ ((aligned (16)))

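/*
 *  For illustration only -- how this macro is meant to be placed in a
 *  declaration (the variable name is hypothetical):
 *
 *      volatile unsigned32 _Example_bit_map[ 16 ] CPU_STRUCTURE_ALIGNMENT;
 *
 *  A toolchain without the GNU C "__attribute__" extension would have
 *  to define CPU_STRUCTURE_ALIGNMENT as empty instead.
 */
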
/*
 *  The following defines the number of bits actually used in the
 *  interrupt field of the task mode.  How those bits map to the
 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
 */

#define CPU_MODES_INTERRUPT_MASK   0x0000000F

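/*
 *  For illustration only -- a hedged example of how the executive can
 *  extract the interrupt level from a task mode word using this mask:
 *
 *      level = mode & CPU_MODES_INTERRUPT_MASK;   -- yields 0x0 - 0xF
 *
 *  Four bits are enough to express the SPARC's interrupt levels 0 - 15.
 */
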
/*
 *  Processor defined structures
 *
 *  Example structures include the descriptor tables from the i386
 *  and the processor control structure on the i960ca.
 */

/* XXX may need to put some structures here.  */

/*
 * Contexts
 *
 *  Generally there are 2 types of context to save.
 *     1. Interrupt registers to save
 *     2. Task level registers to save
 *
 *  This means we have the following 3 context items:
 *     1. task level context stuff::  Context_Control
 *     2. floating point task stuff:: Context_Control_fp
 *     3. special interrupt level context :: Context_Control_interrupt
 *
 *  On some processors, it is cost-effective to save only the callee
 *  preserved registers during a task context switch.  This means
 *  that the ISR code needs to save those registers which do not
 *  persist across function calls.  It is not mandatory to make this
 *  distinction between the caller/callee saved registers for the
 *  purpose of minimizing context saved during task switch and on interrupts.
 *  If the cost of saving extra registers is minimal, simplicity is the
 *  choice.  Save the same context on interrupt entry as for tasks in
 *  this case.
 *
 *  Additionally, if gdb is to be made aware of tasks for this CPU, then
 *  care should be used in designing the context area.
 *
 *  On some CPUs with hardware floating point support, the Context_Control_fp
 *  structure will not be used or it will simply consist of an array of a
 *  fixed number of bytes.   This is done when the floating point context
 *  is dumped by a "FP save context" type instruction and the format
 *  is not really defined by the CPU.  In this case, there is no need
 *  to figure out the exact format -- only the size.  Of course, although
 *  this is enough information for context switches, it is probably not
 *  enough for a debugger such as gdb.  But that is another problem.
 */

#ifndef ASM

/* XXX */
typedef struct {
    unsigned32 g0;
    unsigned32 g1;
    unsigned32 g2;
    unsigned32 g3;
    unsigned32 g4;
    unsigned32 g5;
    unsigned32 g6;
    unsigned32 g7;

    unsigned32 l0;
    unsigned32 l1;
    unsigned32 l2;
    unsigned32 l3;
    unsigned32 l4;
    unsigned32 l5;
    unsigned32 l6;
    unsigned32 l7;

    unsigned32 i0;
    unsigned32 i1;
    unsigned32 i2;
    unsigned32 i3;
    unsigned32 i4;
    unsigned32 i5;
    unsigned32 i6;
    unsigned32 i7;

    unsigned32 o0;
    unsigned32 o1;
    unsigned32 o2;
    unsigned32 o3;
    unsigned32 o4;
    unsigned32 o5;
    unsigned32 o6;
    unsigned32 o7;

    unsigned32 wim;
    unsigned32 psr;
} Context_Control;

#endif /* ASM */

/*
 *  Offsets of fields within Context_Control for assembly routines.
 */

#define G0_OFFSET    0x00
#define G1_OFFSET    0x04
#define G2_OFFSET    0x08
#define G3_OFFSET    0x0C
#define G4_OFFSET    0x10
#define G5_OFFSET    0x14
#define G6_OFFSET    0x18
#define G7_OFFSET    0x1C

#define L0_OFFSET    0x20
#define L1_OFFSET    0x24
#define L2_OFFSET    0x28
#define L3_OFFSET    0x2C
#define L4_OFFSET    0x30
#define L5_OFFSET    0x34
#define L6_OFFSET    0x38
#define L7_OFFSET    0x3C

#define I0_OFFSET    0x40
#define I1_OFFSET    0x44
#define I2_OFFSET    0x48
#define I3_OFFSET    0x4C
#define I4_OFFSET    0x50
#define I5_OFFSET    0x54
#define I6_OFFSET    0x58
#define I7_OFFSET    0x5C

#define O0_OFFSET    0x60
#define O1_OFFSET    0x64
#define O2_OFFSET    0x68
#define O3_OFFSET    0x6C
#define O4_OFFSET    0x70
#define O5_OFFSET    0x74
#define O6_OFFSET    0x78
#define O7_OFFSET    0x7C

#define WIM_OFFSET   0x80
#define PSR_OFFSET   0x84

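/*
 *  For illustration only -- these offsets must track the layout of
 *  Context_Control above.  A port could sanity-check one from C with a
 *  compile-time assertion along these lines (hypothetical, not part of
 *  the executive):
 *
 *      #include <stddef.h>
 *      typedef char _Check_psr_offset
 *        [ ( offsetof( Context_Control, psr ) == PSR_OFFSET ) ? 1 : -1 ];
 */
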
#ifndef ASM

/* XXX */
typedef struct {
    double      f0_f1;
    double      f2_f3;
    double      f4_f5;
    double      f6_f7;
    double      f8_f9;
    double      f10_f11;
    double      f12_f13;
    double      f14_f15;
    double      f16_f17;
    double      f18_f19;
    double      f20_f21;
    double      f22_f23;
    double      f24_f25;
    double      f26_f27;
    double      f28_f29;
    double      f30_f31;
    unsigned32  fsr;
} Context_Control_fp;

#endif /* ASM */

/*
 *  Offsets of fields within Context_Control_fp for assembly routines.
 */

#define FO_F1_OFFSET     0x00
#define F2_F3_OFFSET     0x08
#define F4_F5_OFFSET     0x10
#define F6_F7_OFFSET     0x18
#define F8_F9_OFFSET     0x20
#define F1O_F11_OFFSET   0x28
#define F12_F13_OFFSET   0x30
#define F14_F15_OFFSET   0x38
#define F16_F17_OFFSET   0x40
#define F18_F19_OFFSET   0x48
#define F2O_F21_OFFSET   0x50
#define F22_F23_OFFSET   0x58
#define F24_F25_OFFSET   0x60
#define F26_F27_OFFSET   0x68
#define F28_F29_OFFSET   0x70
#define F3O_F31_OFFSET   0x78
#define FSR_OFFSET       0x80

#ifndef ASM

typedef struct {
    unsigned32 special_interrupt_register_XXX;
} CPU_Interrupt_frame;

#endif /* ASM */

/*
 *  Offsets of fields within CPU_Interrupt_frame for assembly routines.
 */

#ifndef ASM

/*
 *  The following table contains the information required to configure
 *  the XXX processor specific parameters.
 *
 *  NOTE: The interrupt_stack_size field is required if
 *        CPU_ALLOCATE_INTERRUPT_STACK is defined as TRUE.
 *
 *        The pretasking_hook, predriver_hook, postdriver_hook,
 *        and do_zero_of_workspace fields are required on ALL CPUs.
 */

typedef struct {
  void       (*pretasking_hook)( void );
  void       (*predriver_hook)( void );
  void       (*postdriver_hook)( void );
  void       (*idle_task)( void );
  boolean      do_zero_of_workspace;
  unsigned32   interrupt_stack_size;
  unsigned32   extra_system_initialization_stack;
  unsigned32   some_other_cpu_dependent_info_XXX;
}   rtems_cpu_table;

/*
 *  This variable is optional.  It is used on CPUs on which it is difficult
 *  to generate an "uninitialized" FP context.  It is filled in by
 *  _CPU_Initialize and copied into the task's FP context area during
 *  _CPU_Context_Initialize.
 */

EXTERN Context_Control_fp  _CPU_Null_fp_context CPU_STRUCTURE_ALIGNMENT;

/*
 *  On some CPUs, a software managed interrupt stack is supported.
 *  This stack is allocated by the Interrupt Manager and the switch
 *  is performed in _ISR_Handler.  These variables contain pointers
 *  to the lowest and highest addresses in the chunk of memory allocated
 *  for the interrupt stack.  Since it is unknown whether the stack
 *  grows up or down (in general), this gives the CPU dependent
 *  code the option of picking the version it wants to use.
 *
 *  NOTE: These two variables are required if the macro
 *        CPU_HAS_SOFTWARE_INTERRUPT_STACK is defined as TRUE.
 */

EXTERN void               *_CPU_Interrupt_stack_low;
EXTERN void               *_CPU_Interrupt_stack_high;

/*
 *  With some compilation systems, it is difficult if not impossible to
 *  call a high-level language routine from assembly language.  This
 *  is especially true of commercial Ada compilers and name mangling
 *  C++ compilers.  This variable can be optionally defined by the CPU
 *  porter and contains the address of the routine _Thread_Dispatch.  This
 *  can make it easier to invoke that routine at the end of the interrupt
 *  sequence (if a dispatch is necessary).
 */

EXTERN void           (*_CPU_Thread_dispatch_pointer)();

/*
 *  Nothing prevents the porter from declaring more CPU specific variables.
 */

/* XXX: if needed, put more variables here */

/*
 *  The size of the floating point context area.  On some CPUs this
 *  will not be a "sizeof" because the format of the floating point
 *  area is not defined -- only the size is.  This is usually on
 *  CPUs with a "floating point save context" instruction.
 */

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

/*
 *  Amount of extra stack (above minimum stack size) required by
 *  system initialization thread.  Remember that in a multiprocessor
 *  system the system initialization thread becomes the MP server thread.
 */

#define CPU_SYSTEM_INITIALIZATION_THREAD_EXTRA_STACK 1024

/*
 *  This defines the number of entries in the ISR_Vector_table managed
 *  by the executive.
 */

#define CPU_INTERRUPT_NUMBER_OF_VECTORS  255

/*
 *  Should be large enough to run all tests.  This ensures
 *  that a "reasonable" small application should not have any problems.
 */

#define CPU_STACK_MINIMUM_SIZE          (1024*2)

/*
 *  CPU's worst alignment requirement for data types on a byte boundary.  This
 *  alignment does not take into account the requirements for the stack.
 */

#define CPU_ALIGNMENT              8

/*
 *  This number corresponds to the byte alignment requirement for the
 *  heap handler.  This alignment requirement may be stricter than that
 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
 *  common for the heap to follow the same alignment requirement as
 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
 *  then this should be set to CPU_ALIGNMENT.
 *
 *  NOTE:  This does not have to be a power of 2.  It does have to
 *         be greater than or equal to CPU_ALIGNMENT.
 */

#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT

/*
 *  This number corresponds to the byte alignment requirement for memory
 *  buffers allocated by the partition manager.  This alignment requirement
 *  may be stricter than that for the data types alignment specified by
 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
 *
 *  NOTE:  This does not have to be a power of 2.  It does have to
 *         be greater than or equal to CPU_ALIGNMENT.
 */

#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT

/*
 *  This number corresponds to the byte alignment requirement for the
 *  stack.  This alignment requirement may be stricter than that for the
 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
 *  is strict enough for the stack, then this should be set to 0.
 *
 *  NOTE:  This must be either 0 or a power of 2 greater than or equal
 *         to CPU_ALIGNMENT.
 */

#define CPU_STACK_ALIGNMENT        16

#endif  /* ASM */

#ifndef ASM

/* ISR handler macros */

/*
 *  Disable all interrupts for a critical section.  The previous
 *  level is returned in _level.
 */

#define _CPU_ISR_Disable( _level ) \
  sparc_disable_interrupts( _level )

/*
 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
 *  This indicates the end of a critical section.  The parameter
 *  _level is not modified.
 */

#define _CPU_ISR_Enable( _level ) \
  sparc_enable_interrupts( _level )

/*
 *  This temporarily restores interrupts to _level before immediately
 *  disabling them again.  This is used to divide long critical
 *  sections into two or more parts.  The parameter _level is not
 *  modified.
 */

#define _CPU_ISR_Flash( _level ) \
  sparc_flash_interrupts( _level )

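/*
 *  For illustration only -- the usual pattern these three macros form;
 *  the body statements are hypothetical:
 *
 *      unsigned32 level;
 *
 *      _CPU_ISR_Disable( level );     -- enter critical section
 *        ... first half of a long critical section ...
 *      _CPU_ISR_Flash( level );       -- let pending interrupts occur
 *        ... second half of the critical section ...
 *      _CPU_ISR_Enable( level );      -- exit critical section
 */
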
/*
 *  Map interrupt level in task mode onto the hardware that the CPU
 *  actually provides.  Currently, interrupt levels which do not
 *  map onto the CPU in a generic fashion are undefined.  Someday,
 *  it would be nice if these were "mapped" by the application
 *  via a callout.  For example, m68k has 8 levels 0 - 7, levels
 *  8 - 255 would be available for bsp/application specific meaning.
 *  This could be used to manage a programmable interrupt controller
 *  via the rtems_task_mode directive.
 */

#define _CPU_ISR_Set_level( _newlevel ) \
   sparc_set_interrupt_level( _newlevel )

unsigned32 _CPU_ISR_Get_level( void );

/* end of ISR handler macros */

/* Context handler macros */

/*
 *  Initialize the context to a state suitable for starting a
 *  task after a context restore operation.  Generally, this
 *  involves:
 *
 *     - setting a starting address
 *     - preparing the stack
 *     - preparing the stack and frame pointers
 *     - setting the proper interrupt level in the context
 *     - initializing the floating point context
 *
 *  This routine generally does not set any unnecessary register
 *  in the context.  The state of the "general data" registers is
 *  undefined at task start time.
 *
 *  NOTE:  Implemented as a subroutine for the SPARC port.
 */

void _CPU_Context_Initialize(
  Context_Control  *_the_context,
  unsigned32       *_stack_base,
  unsigned32        _size,
  unsigned32        _new_level,
  void             *_entry_point
);

/*
 *  This routine is responsible for somehow restarting the currently
 *  executing task.  If you are lucky, then all that is necessary
 *  is restoring the context.  Otherwise, there will need to be
 *  a special assembly routine which does something special in this
 *  case.  Context_Restore should work most of the time.  It will
 *  not work if restarting self conflicts with the stack frame
 *  assumptions of restoring a context.
 */

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

/*
 *  The purpose of this macro is to allow the initial pointer into
 *  a floating point context area (used to save the floating point
 *  context) to be at an arbitrary place in the floating point
 *  context area.
 *
 *  This is necessary because some FP units are designed to have
 *  their context saved as a stack which grows into lower addresses.
 *  Other FP units can be saved by simply moving registers into offsets
 *  from the base of the context area.  Finally some FP units provide
 *  a "dump context" instruction which could fill in from high to low
 *  or low to high based on the whim of the CPU designers.
 */

#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) (_base) + (_offset) )

/*
 *  This routine initializes the FP context area passed to it.
 *  There are a few standard ways in which to initialize the
 *  floating point context.  The code included for this macro assumes
 *  that this is a CPU in which an "initial" FP context was saved into
 *  _CPU_Null_fp_context and it simply copies it to the destination
 *  context passed to it.
 *
 *  Other models include (1) not doing anything, and (2) putting
 *  a "null FP status word" in the correct place in the FP context.
 */

#define _CPU_Context_Initialize_fp( _destination ) \
  { \
   *((Context_Control_fp *) *((void **) _destination)) = _CPU_Null_fp_context; \
  }

/* end of Context handler macros */

/* Fatal Error manager macros */

/*
 *  This routine copies _error into a known place -- typically a stack
 *  location or a register, optionally disables interrupts, and
 *  halts/stops the CPU.
 */

#define _CPU_Fatal_halt( _error ) \
  { \
  }

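/*
 *  The macro body above is still empty.  For illustration only, a
 *  hedged sketch of what ports typically do here -- this is not the
 *  final SPARC implementation:
 *
 *      #define _CPU_Fatal_halt( _error ) \
 *        { \
 *          unsigned32 _level; \
 *          unsigned32 _error_code = (_error); \
 *          _CPU_ISR_Disable( _level ); \
 *          while ( 1 ); \
 *        }
 */
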
/* end of Fatal Error manager macros */

/* Bitfield handler macros */

/*
 *  This routine sets _output to the bit number of the first bit
 *  set in _value.  _value is of CPU dependent type Priority_Bit_map_control.
 *  This type may be either 16 or 32 bits wide although only the 16
 *  least significant bits will be used.
 *
 *  There are a number of considerations in using a "find first bit" type
 *  instruction.
 *
 *    (1) What happens when run on a value of zero?
 *    (2) Bits may be numbered from MSB to LSB or vice-versa.
 *    (3) The numbering may be zero or one based.
 *    (4) The "find first bit" instruction may search from MSB or LSB.
 *
 *  The executive guarantees that (1) will never happen so it is not a concern.
 *  (2), (3), and (4) are handled by the macros _CPU_Priority_Mask() and
 *  _CPU_Priority_Bits_index().  These three form a set of routines
 *  which must logically operate together.  Bits in the _value are
 *  set and cleared based on masks built by _CPU_Priority_Mask().
 *  The basic major and minor values calculated by _Priority_Major()
 *  and _Priority_Minor() are "massaged" by _CPU_Priority_Bits_index()
 *  to properly range between the values returned by the "find first bit"
 *  instruction.  This makes it possible for _Priority_Get_highest() to
 *  calculate the major and directly index into the minor table.
 *  This mapping is necessary to ensure that 0 (a high priority major/minor)
 *  is the first bit found.
 *
 *  This entire "find first bit" and mapping process depends heavily
 *  on the manner in which a priority is broken into major and minor
 *  components with the major being the 4 MSB of a priority and minor
 *  the 4 LSB.  Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
 *  priority.  And (15 << 4) + 14 corresponds to priority 254 -- the next
 *  to the lowest priority.
 *
 *  If your CPU does not have a "find first bit" instruction, then
 *  there are ways to make do without it.  Here are a handful of ways
 *  to implement this in software:
 *
 *    - a series of 16 bit test instructions
 *    - a "binary search using if's"
 *    - _number = 0
 *      if _value > 0x00ff
 *        _value >>= 8
 *        _number = 8;
 *
 *      if _value > 0x000f
 *        _value >>= 4
 *        _number += 4
 *
 *      _number += bit_set_table[ _value ]
 *
 *    where bit_set_table[ 16 ] has values which indicate the first
 *      bit set
 */

#ifndef INIT
  extern const unsigned char __log2table[256];
#else
const unsigned char __log2table[256] = {
    0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
};
#endif

#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
  { \
    register unsigned32 __value = (_value); \
    \
    if ( !(__value & 0xff00) ) \
      (_output) = __log2table[ __value ]; \
    else \
      (_output) = __log2table[ __value >> 8 ] + 8; \
  }

/* end of Bitfield handler macros */

/*
 *  This routine builds the mask which corresponds to the bit fields
 *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
 *  for that routine.
 */

#define _CPU_Priority_Mask( _bit_number ) \
  ( 0x8000 >> (_bit_number) )

/*
 *  This routine translates the bit numbers returned by
 *  _CPU_Bitfield_Find_first_bit() into something suitable for use as
 *  a major or minor component of a priority.  See the discussion
 *  for that routine.
 */

#define _CPU_Priority_Bits_index( _priority ) \
  (15 - (_priority))

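/*
 *  For illustration only -- a hedged worked example of how the three
 *  routines above cooperate.  Suppose minors 5 and 12 are set in one
 *  bit map word:
 *
 *      value = _CPU_Priority_Mask( 5 ) | _CPU_Priority_Mask( 12 );
 *            -- (0x8000 >> 5) | (0x8000 >> 12) == 0x0408
 *
 *      _CPU_Bitfield_Find_first_bit( value, bit );
 *            -- high byte 0x04 is nonzero, so bit == __log2table[4] + 8 == 10
 *
 *      minor = _CPU_Priority_Bits_index( bit );   -- 15 - 10 == 5
 *
 *  Minor 5 outranks minor 12, so the mapping finds the higher priority.
 */
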
/* end of Priority handler macros */

/* functions */

/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 */

void _CPU_Initialize(
  rtems_cpu_table  *cpu_table,
  void            (*thread_dispatch)
);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.
 */

void _CPU_ISR_install_vector(
  unsigned32  vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_Install_interrupt_stack
 *
 *  This routine installs the hardware interrupt stack pointer.
 *
 *  NOTE:  It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
 *         is TRUE.
 */

void _CPU_Install_interrupt_stack( void );

/*
 *  _CPU_Internal_threads_Idle_thread_body
 *
 *  This routine is the CPU dependent IDLE thread body.
 *
 *  NOTE:  It need only be provided if CPU_PROVIDES_IDLE_THREAD_BODY
 *         is TRUE.
 */

void _CPU_Internal_threads_Idle_thread_body( void );

/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 */

void _CPU_Context_restore(
  Context_Control *new_context
);

/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 */

void _CPU_Context_save_fp(
  void **fp_context_ptr
);

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 */

void _CPU_Context_restore_fp(
  void **fp_context_ptr
);

/*  The following routine swaps the endian format of an unsigned int.
 *  It must be static because it is referenced indirectly.
 *
 *  This version will work on any processor, but if there is a better
 *  way for your CPU PLEASE use it.  The most common way to do this is to:
 *
 *     swap least significant two bytes with 16-bit rotate
 *     swap upper and lower 16-bits
 *     swap most significant two bytes with 16-bit rotate
 *
 *  Some CPUs have special instructions which swap a 32-bit quantity in
 *  a single instruction (e.g. i486).  It is probably best to avoid
 *  an "endian swapping control bit" in the CPU.  One good reason is
 *  that interrupts would probably have to be disabled to ensure that
 *  an interrupt does not try to access the same "chunk" with the wrong
 *  endian.  Another good reason is that on some CPUs, the endian bit
 *  changes the endianness for ALL fetches -- both code and data -- so
 *  the code will be fetched incorrectly.
 */

static inline unsigned int CPU_swap_u32(
  unsigned int value
)
{
  unsigned32 byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value        & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return( swapped );
}

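/*
 *  For illustration only -- a hedged usage example:
 *
 *      unsigned32 big_endian_word = 0x12345678;
 *      unsigned32 flipped = CPU_swap_u32( big_endian_word );
 *
 *  flipped is 0x78563412.
 */
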
#endif /* ASM */

#ifdef __cplusplus
}
#endif

#endif