source: rtems/c/src/exec/score/cpu/sh/rtems/score/cpu.h @ 458bd34

Last change on this file was 458bd34, checked in by Joel Sherrill <joel.sherrill@…> on 11/05/99 at 16:44:02

This is another pass at making sure that nothing outside the BSP
unnecessarily uses any variables defined by the BSP. On this
sweep, use of BSP_Configuration and Cpu_table was eliminated.

A significant part of this modification was the addition of
macros to access fields in the RTEMS configuration structures.

This is necessary to strengthen the division between the BSP independent
parts of RTEMS and the BSPs themselves. This started after
comments and analysis by Ralf Corsepius <corsepiu@…>.

  • Property mode set to 100644
File size: 27.9 KB
/*
 *  This include file contains information pertaining to the Hitachi SH
 *  processor.
 *
 *  Authors: Ralf Corsepius (corsepiu@faw.uni-ulm.de) and
 *           Bernd Becker (becker@faw.uni-ulm.de)
 *
 *  COPYRIGHT (c) 1997-1998, FAW Ulm, Germany
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 *
 *  COPYRIGHT (c) 1998.
 *  On-Line Applications Research Corporation (OAR).
 *  Copyright assigned to U.S. Government, 1994.
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.OARcorp.com/rtems/license.html.
 *
 *  $Id$
 */

#ifndef _SH_CPU_h
#define _SH_CPU_h

#ifdef __cplusplus
extern "C" {
#endif

#include <rtems/score/sh.h>              /* pick up machine definitions */
#ifndef ASM
#include <rtems/score/shtypes.h>
#endif

/* conditional compilation parameters */

/*
 *  Should the calls to _Thread_Enable_dispatch be inlined?
 *
 *  If TRUE, then they are inlined.
 *  If FALSE, then a subroutine call is made.
 *
 *  Basically this is an example of the classic trade-off of size
 *  versus speed.  Inlining the call (TRUE) typically increases the
 *  size of RTEMS while speeding up the enabling of dispatching.
 *  [NOTE: In general, the _Thread_Dispatch_disable_level will
 *  only be 0 or 1 unless you are in an interrupt handler and that
 *  interrupt handler invokes the executive.]  When not inlined,
 *  something calls _Thread_Enable_dispatch which in turn calls
 *  _Thread_Dispatch.  If the enable dispatch is inlined, then
 *  one subroutine call is avoided entirely.
 */

#define CPU_INLINE_ENABLE_DISPATCH       FALSE

/*
 *  Should the body of the search loops in _Thread_queue_Enqueue_priority
 *  be unrolled one time?  If unrolled, each iteration of the loop examines
 *  two "nodes" on the chain being searched.  Otherwise, only one node
 *  is examined per iteration.
 *
 *  If TRUE, then the loops are unrolled.
 *  If FALSE, then the loops are not unrolled.
 *
 *  The primary factor in making this decision is the cost of disabling
 *  and enabling interrupts (_ISR_Flash) versus the cost of the rest of the
 *  body of the loop.  On some CPUs, the flash is more expensive than
 *  one iteration of the loop body.  In this case, it might be desirable
 *  to unroll the loop.  It is important to note that on some CPUs, this
 *  code is the longest interrupt disable period in RTEMS.  So it is
 *  necessary to strike a balance when setting this parameter.
 */

#define CPU_UNROLL_ENQUEUE_PRIORITY      TRUE

/*
 *  Does RTEMS manage a dedicated interrupt stack in software?
 *
 *  If TRUE, then a stack is allocated in _Interrupt_Manager_initialization.
 *  If FALSE, nothing is done.
 *
 *  If the CPU supports a dedicated interrupt stack in hardware,
 *  then it is generally the responsibility of the BSP to allocate it
 *  and set it up.
 *
 *  If the CPU does not support a dedicated interrupt stack, then
 *  the porter has two options: (1) execute interrupts on the
 *  stack of the interrupted task, and (2) have RTEMS manage a dedicated
 *  interrupt stack.
 *
 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
 *
 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
 *  possible that both are FALSE for a particular CPU, although it
 *  is unclear what that would imply about the interrupt processing
 *  procedure on that CPU.
 */

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE

/*
 * We define the interrupt stack in the linker script.
 */
#define CPU_ALLOCATE_INTERRUPT_STACK FALSE

/*
 *  Does RTEMS invoke the user's ISR with the vector number and
 *  a pointer to the saved interrupt frame (1) or just the vector
 *  number (0)?
 */

#define CPU_ISR_PASSES_FRAME_POINTER 0

/*
 *  Does the CPU have hardware floating point?
 *
 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
 *
 *  We currently support only sh1, which has no FPU; other SHs have an FPU.
 *
 *  The macro name "NO_CPU_HAS_FPU" should be made CPU specific.
 *  It indicates whether or not this CPU model has FP support.  For
 *  example, it would be possible to have an i386_nofp CPU model
 *  which set this to false to indicate that you have an i386 without
 *  an i387 and wish to leave floating point support out of RTEMS.
 */

#define CPU_HARDWARE_FP     FALSE

/*
 *  Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
 *
 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
 *
 *  So far, the only CPU in which this option has been used is the
 *  HP PA-RISC.  The HP C compiler and gcc both implicitly use the
 *  floating point registers to perform integer multiplies.  If
 *  a function which you would not expect to use the FP unit DOES,
 *  then one cannot easily predict which tasks will use the FP hardware.
 *  In this case, this option should be TRUE.
 *
 *  If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
 */

#define CPU_ALL_TASKS_ARE_FP     FALSE

/*
 *  Should the IDLE task have a floating point context?
 *
 *  If TRUE, then the IDLE task is created as an RTEMS_FLOATING_POINT task
 *  and it has a floating point context which is switched in and out.
 *  If FALSE, then the IDLE task does not have a floating point context.
 *
 *  Setting this to TRUE negatively impacts the time required to preempt
 *  the IDLE task from an interrupt because the floating point context
 *  must be saved as part of the preemption.
 */

#define CPU_IDLE_TASK_IS_FP      FALSE

/*
 *  Should the saving of the floating point registers be deferred
 *  until a context switch is made to another, different floating point
 *  task?
 *
 *  If TRUE, then the floating point context will not be stored until
 *  necessary.  It will remain in the floating point registers and not
 *  be disturbed until another floating point task is switched to.
 *
 *  If FALSE, then the floating point context is saved when a floating
 *  point task is switched out and restored when the next floating point
 *  task is restored.  The state of the floating point registers between
 *  those two operations is not specified.
 *
 *  If the floating point context does NOT have to be saved as part of
 *  interrupt dispatching, then it should be safe to set this to TRUE.
 *
 *  Setting this flag to TRUE results in using a different algorithm
 *  for deciding when to save and restore the floating point context.
 *  The deferred FP switch algorithm minimizes the number of times
 *  the FP context is saved and restored.  The FP context is not saved
 *  until a context switch is made to another, different FP task.
 *  Thus in a system with only one FP task, the FP context will never
 *  be saved or restored.
 */

#define CPU_USE_DEFERRED_FP_SWITCH       TRUE

/*
 *  Does this port provide a CPU dependent IDLE task implementation?
 *
 *  If TRUE, then the routine _CPU_Thread_Idle_body
 *  must be provided and is used as the IDLE thread body instead of
 *  the generic _Thread_Idle_body.
 *
 *  If FALSE, then use the generic IDLE thread body if the BSP does
 *  not provide one.
 *
 *  This is intended to allow for supporting processors which have
 *  a low power or idle mode.  When the IDLE thread is executed, then
 *  the CPU can be powered down.
 *
 *  The order of precedence for selecting the IDLE thread body is:
 *
 *    1.  BSP provided
 *    2.  CPU dependent (if provided)
 *    3.  generic (if no BSP and no CPU dependent)
 */

#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE

/*
 *  Does the stack grow up (toward higher addresses) or down
 *  (toward lower addresses)?
 *
 *  If TRUE, then the stack grows upward.
 *  If FALSE, then the stack grows toward smaller addresses.
 */

#define CPU_STACK_GROWS_UP               FALSE

/*
 *  The following is the variable attribute used to force alignment
 *  of critical RTEMS structures.  On some processors it may make
 *  sense to have these aligned on tighter boundaries than
 *  the minimum requirements of the compiler in order to have as
 *  much of the critical data area as possible in a cache line.
 *
 *  The placement of this macro in the declaration of the variables
 *  is based on the syntactic requirements of the GNU C
 *  "__attribute__" extension.  For example with GNU C, use
 *  the following to force a structure to a 32 byte boundary.
 *
 *      __attribute__ ((aligned (32)))
 *
 *  NOTE:  Currently only the Priority Bit Map table uses this feature.
 *         To benefit from using this, the data must be heavily
 *         used so it will stay in the cache and used frequently enough
 *         in the executive to justify turning this on.
 */

#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned(16)))

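/*
 *  Illustrative sketch only (not part of this port): CPU_STRUCTURE_ALIGNMENT
 *  is placed after the declarator, as required by the GNU C attribute
 *  syntax described above.  The variable name below is hypothetical.
 */
#if 0
volatile unsigned32 _Example_bit_map_table[ 16 ] CPU_STRUCTURE_ALIGNMENT;
#endif
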
/*
 *  Define what is required to specify how the network to host conversion
 *  routines are handled.
 *
 *  NOTE: SHs can be big or little endian; the default is big endian.
 */

#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES     FALSE

/* __LITTLE_ENDIAN__ is defined if -ml is given to gcc */
#if defined(__LITTLE_ENDIAN__)
#define CPU_BIG_ENDIAN                           FALSE
#define CPU_LITTLE_ENDIAN                        TRUE
#else
#define CPU_BIG_ENDIAN                           TRUE
#define CPU_LITTLE_ENDIAN                        FALSE
#endif

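/*
 *  Illustrative sketch only (not part of this port): since
 *  CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES is FALSE, the generic conversion
 *  routines are used.  A port that set it to TRUE would supply its own
 *  byte swapping; a little endian build might use something like the
 *  hypothetical macro below (value assumed to be a 32-bit unsigned).
 */
#if 0
#if (CPU_LITTLE_ENDIAN == TRUE)
#define _Example_swap_u32( _value ) \
  ( ((_value) << 24) | (((_value) & 0x0000ff00) << 8) | \
    (((_value) >> 8) & 0x0000ff00) | ((_value) >> 24) )
#endif
#endif
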
/*
 *  The following defines the number of bits actually used in the
 *  interrupt field of the task mode.  How those bits map to the
 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
 */

#define CPU_MODES_INTERRUPT_MASK   0x0000000f

/*
 *  Processor defined structures
 *
 *  Example structures include the descriptor tables from the i386
 *  and the processor control structure on the i960ca.
 */

/* may need to put some structures here.  */

/*
 * Contexts
 *
 *  Generally there are 2 types of context to save.
 *     1. Interrupt registers to save
 *     2. Task level registers to save
 *
 *  This means we have the following 3 context items:
 *     1. task level context stuff::  Context_Control
 *     2. floating point task stuff:: Context_Control_fp
 *     3. special interrupt level context :: Context_Control_interrupt
 *
 *  On some processors, it is cost-effective to save only the callee
 *  preserved registers during a task context switch.  This means
 *  that the ISR code needs to save those registers which do not
 *  persist across function calls.  It is not mandatory to make this
 *  distinction between the caller/callee saved registers for the
 *  purpose of minimizing context saved during task switch and on interrupts.
 *  If the cost of saving extra registers is minimal, simplicity is the
 *  choice.  Save the same context on interrupt entry as for tasks in
 *  this case.
 *
 *  Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
 *  care should be used in designing the context area.
 *
 *  On some CPUs with hardware floating point support, the Context_Control_fp
 *  structure will not be used or it may simply consist of an array of a
 *  fixed number of bytes.   This is done when the floating point context
 *  is dumped by a "FP save context" type instruction and the format
 *  is not really defined by the CPU.  In this case, there is no need
 *  to figure out the exact format -- only the size.  Of course, although
 *  this is enough information for RTEMS, it is probably not enough for
 *  a debugger such as gdb.  But that is another problem.
 */

typedef struct {
  unsigned32 *r15;      /* stack pointer */

  unsigned32 macl;
  unsigned32 mach;
  unsigned32 *pr;

  unsigned32 *r14;      /* frame pointer/call saved */

  unsigned32 r13;       /* call saved */
  unsigned32 r12;       /* call saved */
  unsigned32 r11;       /* call saved */
  unsigned32 r10;       /* call saved */
  unsigned32 r9;        /* call saved */
  unsigned32 r8;        /* call saved */

  unsigned32 *r7;       /* arg in */
  unsigned32 *r6;       /* arg in */

#if 0
  unsigned32 *r5;       /* arg in */
  unsigned32 *r4;       /* arg in */
#endif

  unsigned32 *r3;       /* scratch */
  unsigned32 *r2;       /* scratch */
  unsigned32 *r1;       /* scratch */

  unsigned32 *r0;       /* arg return */

  unsigned32 gbr;
  unsigned32 sr;

} Context_Control;

typedef struct {
} Context_Control_fp;

typedef struct {
} CPU_Interrupt_frame;

/*
 *  The following table contains the information required to configure
 *  the SH processor specific parameters.
 */

typedef struct {
  void       (*pretasking_hook)( void );
  void       (*predriver_hook)( void );
  void       (*postdriver_hook)( void );
  void       (*idle_task)( void );
  boolean      do_zero_of_workspace;
  unsigned32   idle_task_stack_size;
  unsigned32   interrupt_stack_size;
  unsigned32   extra_mpci_receive_server_stack;
  void *     (*stack_allocate_hook)( unsigned32 );
  void       (*stack_free_hook)( void* );
  /* end of fields required on all CPUs */
}   rtems_cpu_table;

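/*
 *  Illustrative sketch only (not part of this port): a BSP might describe
 *  itself with an initializer along these lines.  Every value below is
 *  hypothetical and not taken from any real BSP.
 */
#if 0
const rtems_cpu_table _Example_cpu_table = {
  NULL,        /* pretasking_hook                  */
  NULL,        /* predriver_hook                   */
  NULL,        /* postdriver_hook                  */
  NULL,        /* idle_task -- use the port's body */
  TRUE,        /* do_zero_of_workspace             */
  0,           /* idle_task_stack_size             */
  4096,        /* interrupt_stack_size             */
  0,           /* extra_mpci_receive_server_stack  */
  NULL,        /* stack_allocate_hook              */
  NULL         /* stack_free_hook                  */
};
#endif
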
/*
 *  Macros to access required entries in the CPU Table are in
 *  the file rtems/system.h.
 */

/*
 *  Macros to access SH specific additions to the CPU Table
 */

/* There are no CPU specific additions to the CPU Table for this port. */

/*
 *  This variable is optional.  It is used on CPUs on which it is difficult
 *  to generate an "uninitialized" FP context.  It is filled in by
 *  _CPU_Initialize and copied into the task's FP context area during
 *  _CPU_Context_Initialize.
 */

/*
SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context;
*/

/*
 *  On some CPUs, RTEMS supports a software managed interrupt stack.
 *  This stack is allocated by the Interrupt Manager and the switch
 *  is performed in _ISR_Handler.  These variables contain pointers
 *  to the lowest and highest addresses in the chunk of memory allocated
 *  for the interrupt stack.  Since it is unknown whether the stack
 *  grows up or down (in general), this gives the CPU dependent
 *  code the option of picking the version it wants to use.
 *
 *  NOTE: These two variables are required if the macro
 *        CPU_HAS_SOFTWARE_INTERRUPT_STACK is defined as TRUE.
 */

SCORE_EXTERN void               *_CPU_Interrupt_stack_low;
SCORE_EXTERN void               *_CPU_Interrupt_stack_high;

/*
 *  With some compilation systems, it is difficult if not impossible to
 *  call a high-level language routine from assembly language.  This
 *  is especially true of commercial Ada compilers and name mangling
 *  C++ ones.  This variable can be optionally defined by the CPU porter
 *  and contains the address of the routine _Thread_Dispatch.  This
 *  can make it easier to invoke that routine at the end of the interrupt
 *  sequence (if a dispatch is necessary).
 */

SCORE_EXTERN void           (*_CPU_Thread_dispatch_pointer)();

/*
 *  Nothing prevents the porter from declaring more CPU specific variables.
 */

/* XXX: if needed, put more variables here */

/*
 *  The size of the floating point context area.  On some CPUs this
 *  will not be a "sizeof" because the format of the floating point
 *  area is not defined -- only the size is.  This is usually on
 *  CPUs with a "floating point save context" instruction.
 */

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

/*
 *  Amount of extra stack (above minimum stack size) required by
 *  MPCI receive server thread.  Remember that in a multiprocessor
 *  system this thread must exist and be able to process all directives.
 */

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

/*
 *  This defines the number of entries in the ISR_Vector_table managed
 *  by RTEMS.
 */

#define CPU_INTERRUPT_NUMBER_OF_VECTORS      256
#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER  (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1)

/*
 *  Should be large enough to run all RTEMS tests.  This ensures
 *  that a "reasonable" small application should not have any problems.
 *
 *  We have been able to run the sptests with this value, but have not
 *  been able to run the tmtest suite.
 */

#define CPU_STACK_MINIMUM_SIZE          4096

/*
 *  CPU's worst alignment requirement for data types on a byte boundary.  This
 *  alignment does not take into account the requirements for the stack.
 */

#define CPU_ALIGNMENT              4

/*
 *  This number corresponds to the byte alignment requirement for the
 *  heap handler.  This alignment requirement may be stricter than that
 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
 *  common for the heap to follow the same alignment requirement as
 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
 *  then this should be set to CPU_ALIGNMENT.
 *
 *  NOTE:  This does not have to be a power of 2.  It does have to
 *         be greater than or equal to CPU_ALIGNMENT.
 */

#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT

/*
 *  This number corresponds to the byte alignment requirement for memory
 *  buffers allocated by the partition manager.  This alignment requirement
 *  may be stricter than that for the data types alignment specified by
 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
 *
 *  NOTE:  This does not have to be a power of 2.  It does have to
 *         be greater than or equal to CPU_ALIGNMENT.
 */

#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT

/*
 *  This number corresponds to the byte alignment requirement for the
 *  stack.  This alignment requirement may be stricter than that for the
 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
 *  is strict enough for the stack, then this should be set to 0.
 *
 *  NOTE:  This must be either 0 or a power of 2 greater than or equal to
 *         CPU_ALIGNMENT.
 */

#define CPU_STACK_ALIGNMENT        CPU_ALIGNMENT

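/*
 *  Illustrative sketch only (not part of this port): because the stack
 *  alignment on this port is a power of two, a raw stack address can be
 *  rounded up with the usual mask trick.  The macro name is hypothetical.
 */
#if 0
#define _Example_Stack_align_up( _address ) \
  ( ((_address) + CPU_STACK_ALIGNMENT - 1) & ~(CPU_STACK_ALIGNMENT - 1) )
#endif
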
/* ISR handler macros */

/*
 *  Disable all interrupts for an RTEMS critical section.  The previous
 *  level is returned in _level.
 */

#define _CPU_ISR_Disable( _level) \
  sh_disable_interrupts( _level )

/*
 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
 *  This indicates the end of an RTEMS critical section.  The parameter
 *  _level is not modified.
 */

#define _CPU_ISR_Enable( _level) \
   sh_enable_interrupts( _level)

/*
 *  This temporarily restores interrupts to _level before immediately
 *  disabling them again.  This is used to divide long RTEMS critical
 *  sections into two or more parts.  The parameter _level is not
 *  modified.
 */

#define _CPU_ISR_Flash( _level) \
  sh_flash_interrupts( _level)

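/*
 *  Illustrative sketch only (not part of this port): typical use of the
 *  three macros above to bracket a critical section, with a flash point
 *  in the middle of a longer one.  The worker routines are hypothetical.
 */
#if 0
static void _Example_critical_section( void )
{
  unsigned32 level;

  _CPU_ISR_Disable( level );      /* enter the critical section           */
  _Example_do_first_half();
  _CPU_ISR_Flash( level );        /* briefly let pending interrupts in    */
  _Example_do_second_half();
  _CPU_ISR_Enable( level );       /* restore the previous interrupt level */
}
#endif
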
/*
 *  Map interrupt level in task mode onto the hardware that the CPU
 *  actually provides.  Currently, interrupt levels which do not
 *  map onto the CPU in a generic fashion are undefined.  Someday,
 *  it would be nice if these were "mapped" by the application
 *  via a callout.  For example, m68k has 8 levels 0 - 7; levels
 *  8 - 255 would be available for bsp/application specific meaning.
 *  This could be used to manage a programmable interrupt controller
 *  via the rtems_task_mode directive.
 */

#define _CPU_ISR_Set_level( _newlevel) \
  sh_set_interrupt_level(_newlevel)

unsigned32 _CPU_ISR_Get_level( void );

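/*
 *  Illustrative sketch only (not part of this port): raising the task-mode
 *  interrupt level and later restoring it.  CPU_MODES_INTERRUPT_MASK above
 *  uses 4 bits, so levels 0 - 15 are meaningful here.
 */
#if 0
static void _Example_mask_all_interrupts( void )
{
  unsigned32 previous;

  previous = _CPU_ISR_Get_level();
  _CPU_ISR_Set_level( 15 );          /* mask all maskable interrupts */
  /* ...work that must not be interrupted... */
  _CPU_ISR_Set_level( previous );    /* back to the previous level   */
}
#endif
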
/* end of ISR handler macros */

/* Context handler macros */

/*
 *  Initialize the context to a state suitable for starting a
 *  task after a context restore operation.  Generally, this
 *  involves:
 *
 *     - setting a starting address
 *     - preparing the stack
 *     - preparing the stack and frame pointers
 *     - setting the proper interrupt level in the context
 *     - initializing the floating point context
 *
 *  This routine generally does not set any unnecessary register
 *  in the context.  The state of the "general data" registers is
 *  undefined at task start time.
 *
 *  NOTE: The _is_fp parameter is TRUE if the thread is to be a floating
 *        point thread.  This is typically only used on CPUs where the
 *        FPU may be easily disabled by software such as on the SPARC
 *        where the PSR contains an enable FPU bit.
 */

/*
 * FIXME: defined as a function for debugging - should be a macro
 */
SCORE_EXTERN void _CPU_Context_Initialize(
  Context_Control       *_the_context,
  void                  *_stack_base,
  unsigned32            _size,
  unsigned32            _isr,
  void    (*_entry_point)(void),
  int                   _is_fp );

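/*
 *  Illustrative sketch only (not this port's implementation, which lives in
 *  cpu.c): one plausible way such a routine could fill in a Context_Control.
 *  The placement of the interrupt mask in SR bits 4 - 7 and the stack layout
 *  are assumptions made for this sketch.
 */
#if 0
void _Example_Context_Initialize(
  Context_Control  *_the_context,
  void             *_stack_base,
  unsigned32        _size,
  unsigned32        _isr,
  void            (*_entry_point)(void),
  int               _is_fp )
{
  unsigned32 stack_top;

  /* start the stack at the aligned top of the allocated area */
  stack_top = ((unsigned32) _stack_base + _size) & ~(CPU_STACK_ALIGNMENT - 1);

  _the_context->r15 = (unsigned32 *) stack_top;      /* stack pointer        */
  _the_context->r14 = (unsigned32 *) stack_top;      /* frame pointer        */
  _the_context->pr  = (unsigned32 *) _entry_point;   /* "return" to entry    */
  _the_context->sr  = (_isr << 4) & 0x000000f0;      /* requested ISR level  */
}
#endif
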
/*
 *  This routine is responsible for somehow restarting the currently
 *  executing task.  If you are lucky, then all that is necessary
 *  is restoring the context.  Otherwise, there will need to be
 *  a special assembly routine which does something special in this
 *  case.  Context_Restore should work most of the time.  It will
 *  not work if restarting self conflicts with the stack frame
 *  assumptions of restoring a context.
 */

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

/*
 *  The purpose of this macro is to allow the initial pointer into
 *  a floating point context area (used to save the floating point
 *  context) to be at an arbitrary place in the floating point
 *  context area.
 *
 *  This is necessary because some FP units are designed to have
 *  their context saved as a stack which grows into lower addresses.
 *  Other FP units can be saved by simply moving registers into offsets
 *  from the base of the context area.  Finally some FP units provide
 *  a "dump context" instruction which could fill in from high to low
 *  or low to high based on the whim of the CPU designers.
 */

#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

/*
 *  This routine initializes the FP context area passed to it.
 *  There are a few standard ways in which to initialize the
 *  floating point context.  The code included for this macro assumes
 *  that this is a CPU in which an "initial" FP context was saved into
 *  _CPU_Null_fp_context and it simply copies it to the destination
 *  context passed to it.
 *
 *  Other models include (1) not doing anything, and (2) putting
 *  a "null FP status word" in the correct place in the FP context.
 *  The sh1 has no FPU, so nothing needs to be done here.
 */

#define _CPU_Context_Initialize_fp( _destination ) \
  {  }

/* end of Context handler macros */

/* Fatal Error manager macros */

/*
 * FIXME: Trap32 ???
 *
 *  This routine copies _error into a known place -- typically a stack
 *  location or a register, optionally disables interrupts, and
 *  invokes a Trap32 instruction which returns to the breakpoint
 *  routine of cmon.
 */

#ifdef BSP_FATAL_HALT
  /* we manage the fatal error in the board support package */
  void bsp_fatal_halt( unsigned32 _error);
#define _CPU_Fatal_halt( _error ) bsp_fatal_halt( _error)
#else
#define _CPU_Fatal_halt( _error)\
{ \
  asm volatile("mov.l %0,r0"::"m" (_error)); \
  asm volatile("trapa #34"); \
}
#endif

/* end of Fatal Error manager macros */

/* Bitfield handler macros */

/*
 *  This routine sets _output to the bit number of the first bit
 *  set in _value.  _value is of CPU dependent type Priority_Bit_map_control.
 *  This type may be either 16 or 32 bits wide although only the 16
 *  least significant bits will be used.
 *
 *  There are a number of variables in using a "find first bit" type
 *  instruction.
 *
 *    (1) What happens when run on a value of zero?
 *    (2) Bits may be numbered from MSB to LSB or vice-versa.
 *    (3) The numbering may be zero or one based.
 *    (4) The "find first bit" instruction may search from MSB or LSB.
 *
 *  RTEMS guarantees that (1) will never happen so it is not a concern.
 *  (2), (3), and (4) are handled by the macros _CPU_Priority_mask() and
 *  _CPU_Priority_bits_index().  These three form a set of routines
 *  which must logically operate together.  Bits in the _value are
 *  set and cleared based on masks built by _CPU_Priority_mask().
 *  The basic major and minor values calculated by _Priority_Major()
 *  and _Priority_Minor() are "massaged" by _CPU_Priority_bits_index()
 *  to properly range between the values returned by the "find first bit"
 *  instruction.  This makes it possible for _Priority_Get_highest() to
 *  calculate the major and directly index into the minor table.
 *  This mapping is necessary to ensure that 0 (a high priority major/minor)
 *  is the first bit found.
 *
 *  This entire "find first bit" and mapping process depends heavily
 *  on the manner in which a priority is broken into major and minor
 *  components with the major being the 4 MSB of a priority and minor
 *  the 4 LSB.  Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
 *  priority.  And (15 << 4) + 14 corresponds to priority 254 -- the next
 *  to the lowest priority.
 *
 *  If your CPU does not have a "find first bit" instruction, then
 *  there are ways to make do without it.  Here are a handful of ways
 *  to implement this in software:
 *
 *    - a series of 16 bit test instructions
 *    - a "binary search using if's"
 *    - _number = 0
 *      if _value > 0x00ff
 *        _value >>= 8
 *        _number = 8;
 *
 *      if _value > 0x000f
 *        _value >>= 4
 *        _number += 4
 *
 *      _number += bit_set_table[ _value ]
 *
 *    where bit_set_table[ 16 ] has values which indicate the first
 *      bit set
 */

#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
#define CPU_USE_GENERIC_BITFIELD_DATA TRUE

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

extern unsigned8 _bit_set_table[];

#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
  { \
      _output = 0;\
      if(_value > 0x00ff) \
      { _value >>= 8; _output = 8; } \
      if(_value > 0x000f) \
        { _output += 4; _value >>= 4; } \
      _output += _bit_set_table[ _value]; }

#endif
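
/*
 *  Illustrative sketch only (not used by this port, which relies on the
 *  generic bitfield code above): if CPU_USE_GENERIC_BITFIELD_CODE were
 *  FALSE, a table such as the hypothetical one below could back
 *  _bit_set_table.  Each entry is the bit number (LSB = 0) of the most
 *  significant set bit of its 4-bit index; index 0 is unused because
 *  RTEMS never searches a value of zero.
 */
#if 0
const unsigned8 _Example_bit_set_table[ 16 ] = {
  0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
};
#endif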

/* end of Bitfield handler macros */

/*
 *  This routine builds the mask which corresponds to the bit fields
 *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
 *  for that routine.
 */

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Priority_Mask( _bit_number ) \
  ( 1 << (_bit_number) )

#endif

/*
 *  This routine translates the bit numbers returned by
 *  _CPU_Bitfield_Find_first_bit() into something suitable for use as
 *  a major or minor component of a priority.  See the discussion
 *  for that routine.
 */

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Priority_bits_index( _priority ) \
  (_priority)

#endif

/* end of Priority handler macros */

/* functions */

/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 */

void _CPU_Initialize(
  rtems_cpu_table  *cpu_table,
  void      (*thread_dispatch)
);

/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs a "raw" interrupt handler directly into the
 *  processor's vector table.
 */

void _CPU_ISR_install_raw_handler(
  unsigned32  vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.
 */

void _CPU_ISR_install_vector(
  unsigned32  vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_Install_interrupt_stack
 *
 *  This routine installs the hardware interrupt stack pointer.
 *
 *  NOTE:  It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
 *         is TRUE.
 */

void _CPU_Install_interrupt_stack( void );

/*
 *  _CPU_Thread_Idle_body
 *
 *  This routine is the CPU dependent IDLE thread body.
 *
 *  NOTE:  It need only be provided if CPU_PROVIDES_IDLE_THREAD_BODY
 *         is TRUE.
 */

void _CPU_Thread_Idle_body( void );

/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 */

void _CPU_Context_restore(
  Context_Control *new_context
);

/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 */

void _CPU_Context_save_fp(
  void **fp_context_ptr
);

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 */

void _CPU_Context_restore_fp(
  void **fp_context_ptr
);


#ifdef __cplusplus
}
#endif

#endif