source: rtems/cpukit/score/cpu/arm/rtems/score/cpu.h @ da215ded

4.104.114.84.95
Last change on this file since da215ded was da215ded, checked in by Ralf Corsepius <ralf.corsepius@…>, on 11/02/04 at 07:38:50

2004-10-02 Ralf Corsepius <ralf_corsepius@…>

  • rtems/score/arm.h: Add doxygen preamble.
  • rtems/score/cpu.h: Add doxygen preamble.
  • rtems/score/cpu_asm.h: Add doxygen preamble.
  • rtems/score/types.h: Add doxygen preamble.
  • Property mode set to 100644
File size: 29.7 KB
Line 
1/**
2 * @file rtems/score/cpu.h
3 */
4
5/*
6 * $Id$
7 *
8 *  This include file contains information pertaining to the ARM
9 *  processor.
10 *
11 *  Copyright (c) 2002 Advent Networks, Inc.
12 *        Jay Monkman <jmonkman@adventnetworks.com>
13 *
14 *  COPYRIGHT (c) 2000 Canon Research Centre France SA.
15 *  Emmanuel Raguet, mailto:raguet@crf.canon.fr
16 *
17 *  The license and distribution terms for this file may be
18 *  found in the file LICENSE in this distribution or at
19 *  http://www.rtems.com/license/LICENSE.
20 *
21 */
22
23/* FIXME: finish commenting/cleaning up this file */
24#ifndef __CPU_h
25#define __CPU_h
26
27#ifdef __cplusplus
28extern "C" {
29#endif
30
31#include <rtems/score/arm.h>            /* pick up machine definitions */
32#ifndef ASM
33#include <rtems/score/types.h>
34#endif
35
36/* conditional compilation parameters */
37
38/*
39 *  Should the calls to _Thread_Enable_dispatch be inlined?
40 *
41 *  If TRUE, then they are inlined.
42 *  If FALSE, then a subroutine call is made.
43 *
44 *  Basically this is an example of the classic trade-off of size
45 *  versus speed.  Inlining the call (TRUE) typically increases the
46 *  size of RTEMS while speeding up the enabling of dispatching.
47 *  [NOTE: In general, the _Thread_Dispatch_disable_level will
48 *  only be 0 or 1 unless you are in an interrupt handler and that
49 *  interrupt handler invokes the executive.]  When not inlined
50 *  something calls _Thread_Enable_dispatch which in turns calls
51 *  _Thread_Dispatch.  If the enable dispatch is inlined, then
52 *  one subroutine call is avoided entirely.]
53 */
54
55#define CPU_INLINE_ENABLE_DISPATCH       TRUE
56
57/*
58 *  Should the body of the search loops in _Thread_queue_Enqueue_priority
59 *  be unrolled one time?  In unrolled each iteration of the loop examines
60 *  two "nodes" on the chain being searched.  Otherwise, only one node
61 *  is examined per iteration.
62 *
63 *  If TRUE, then the loops are unrolled.
64 *  If FALSE, then the loops are not unrolled.
65 *
66 *  The primary factor in making this decision is the cost of disabling
67 *  and enabling interrupts (_ISR_Flash) versus the cost of rest of the
68 *  body of the loop.  On some CPUs, the flash is more expensive than
69 *  one iteration of the loop body.  In this case, it might be desirable
70 *  to unroll the loop.  It is important to note that on some CPUs, this
71 *  code is the longest interrupt disable period in RTEMS.  So it is
72 *  necessary to strike a balance when setting this parameter.
73 */
74
75#define CPU_UNROLL_ENQUEUE_PRIORITY      TRUE
76
77/*
78 *  Does RTEMS manage a dedicated interrupt stack in software?
79 *
80 *  If TRUE, then a stack is allocated in _Interrupt_Manager_initialization.
81 *  If FALSE, nothing is done.
82 *
83 *  If the CPU supports a dedicated interrupt stack in hardware,
84 *  then it is generally the responsibility of the BSP to allocate it
85 *  and set it up.
86 *
87 *  If the CPU does not support a dedicated interrupt stack, then
88 *  the porter has two options: (1) execute interrupts on the
89 *  stack of the interrupted task, and (2) have RTEMS manage a dedicated
90 *  interrupt stack.
91 *
92 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
93 *
94 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
95 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
96 *  possible that both are FALSE for a particular CPU.  Although it
97 *  is unclear what that would imply about the interrupt processing
98 *  procedure on that CPU.
99 */
100
101#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE
102
103/*
104 *  Does this CPU have hardware support for a dedicated interrupt stack?
105 *
106 *  If TRUE, then it must be installed during initialization.
107 *  If FALSE, then no installation is performed.
108 *
109 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
110 *
111 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
112 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
113 *  possible that both are FALSE for a particular CPU.  Although it
114 *  is unclear what that would imply about the interrupt processing
115 *  procedure on that CPU.
116 */
117
118#define CPU_HAS_HARDWARE_INTERRUPT_STACK TRUE
119
120/*
121 *  Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
122 *
123 *  If TRUE, then the memory is allocated during initialization.
124 *  If FALSE, then the interrupt stack is not allocated by RTEMS.
125 *
126 *  This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE
127 *  or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE.
128 */
129
130#define CPU_ALLOCATE_INTERRUPT_STACK FALSE
131
132/*
133 *  Does the RTEMS invoke the user's ISR with the vector number and
134 *  a pointer to the saved interrupt frame (1) or just the vector
135 *  number (0)?
136 */
137
138#define CPU_ISR_PASSES_FRAME_POINTER 0
139
140/*
141 *  Does the CPU have hardware floating point?
142 *
143 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
144 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
145 *
146 *  If there is a FP coprocessor such as the i387 or mc68881, then
147 *  the answer is TRUE.
148 *
149 *  The macro name "ARM_HAS_FPU" should be made CPU specific.
150 *  It indicates whether or not this CPU model has FP support.  For
151 *  example, it would be possible to have an i386_nofp CPU model
152 *  which set this to false to indicate that you have an i386 without
153 *  an i387 and wish to leave floating point support out of RTEMS.
154 */
155
156#if ( ARM_HAS_FPU == 1 )
157#define CPU_HARDWARE_FP     TRUE
158#else
159#define CPU_HARDWARE_FP     FALSE
160#endif
161
162#define CPU_SOFTWARE_FP     FALSE
163
164/*
165 *  Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
166 *
167 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
168 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
169 *
170 *  If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
171 */
172
173#define CPU_ALL_TASKS_ARE_FP     FALSE
174
175/*
176 *  Should the IDLE task have a floating point context?
177 *
178 *  If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
179 *  and it has a floating point context which is switched in and out.
180 *  If FALSE, then the IDLE task does not have a floating point context.
181 *
182 *  Setting this to TRUE negatively impacts the time required to preempt
183 *  the IDLE task from an interrupt because the floating point context
184 *  must be saved as part of the preemption.
185 */
186
187#define CPU_IDLE_TASK_IS_FP      FALSE
188
189/*
190 *  Should the saving of the floating point registers be deferred
191 *  until a context switch is made to another different floating point
192 *  task?
193 *
194 *  If TRUE, then the floating point context will not be stored until
195 *  necessary.  It will remain in the floating point registers and not
196 *  disturbed until another floating point task is switched to.
197 *
198 *  If FALSE, then the floating point context is saved when a floating
199 *  point task is switched out and restored when the next floating point
200 *  task is restored.  The state of the floating point registers between
201 *  those two operations is not specified.
202 *
203 *  If the floating point context does NOT have to be saved as part of
204 *  interrupt dispatching, then it should be safe to set this to TRUE.
205 *
206 *  Setting this flag to TRUE results in using a different algorithm
207 *  for deciding when to save and restore the floating point context.
208 *  The deferred FP switch algorithm minimizes the number of times
209 *  the FP context is saved and restored.  The FP context is not saved
210 *  until a context switch is made to another, different FP task.
211 *  Thus in a system with only one FP task, the FP context will never
212 *  be saved or restored.
213 */
214
215#define CPU_USE_DEFERRED_FP_SWITCH   FALSE
216
217/*
218 *  Does this port provide a CPU dependent IDLE task implementation?
219 *
220 *  If TRUE, then the routine _CPU_Thread_Idle_body
221 *  must be provided and is the default IDLE thread body instead of
222 *  _Thread_Idle_body.
223 *
224 *  If FALSE, then use the generic IDLE thread body if the BSP does
225 *  not provide one.
226 *
227 *  This is intended to allow for supporting processors which have
228 *  a low power or idle mode.  When the IDLE thread is executed, then
229 *  the CPU can be powered down.
230 *
231 *  The order of precedence for selecting the IDLE thread body is:
232 *
233 *    1.  BSP provided
234 *    2.  CPU dependent (if provided)
235 *    3.  generic (if no BSP and no CPU dependent)
236 */
237
238#define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE
239
240/*
241 *  Does the stack grow up (toward higher addresses) or down
242 *  (toward lower addresses)?
243 *
244 *  If TRUE, then the stack grows upward (toward higher addresses).
245 *  If FALSE, then the stack grows toward smaller addresses.
246 */
247
248#define CPU_STACK_GROWS_UP               FALSE
249
250/*
251 *  The following is the variable attribute used to force alignment
252 *  of critical RTEMS structures.  On some processors it may make
253 *  sense to have these aligned on tighter boundaries than
254 *  the minimum requirements of the compiler in order to have as
255 *  much of the critical data area as possible in a cache line.
256 *
257 *  The placement of this macro in the declaration of the variables
258 *  is based on the syntactic requirements of the GNU C
259 *  "__attribute__" extension.  For example with GNU C, use
260 *  the following to force a structures to a 32 byte boundary.
261 *
262 *      __attribute__ ((aligned (32)))
263 *
264 *  NOTE:  Currently only the Priority Bit Map table uses this feature.
265 *         To benefit from using this, the data must be heavily
266 *         used so it will stay in the cache and used frequently enough
267 *         in the executive to justify turning this on.
268 */
269
270#define CPU_STRUCTURE_ALIGNMENT  __attribute__ ((aligned (32)))
271
272/*
273 *  Define what is required to specify how the network to host conversion
274 *  routines are handled.
275 */
276
277#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES     FALSE
278
279#if defined(__ARMEL__)
280#define CPU_BIG_ENDIAN          FALSE
281#define CPU_LITTLE_ENDIAN       TRUE
282#elif defined(__ARMEB__)
283#define CPU_BIG_ENDIAN          TRUE
284#define CPU_LITTLE_ENDIAN       FALSE
285#else
286#error "Unknown endianness"
287#endif
288                       
289/*
290 *  The following defines the number of bits actually used in the
291 *  interrupt field of the task mode.  How those bits map to the
292 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
293 */
294
295#define CPU_MODES_INTERRUPT_MASK   0x000000c0
296
297/*
298 *  Processor defined structures required by cpukit/score.
299 */
300
301/* may need to put some structures here.  */
302
303/*
304 * Contexts
305 *
306 *  Generally there are 2 types of context to save.
307 *     1. Interrupt registers to save
308 *     2. Task level registers to save
309 *
310 *  This means we have the following 3 context items:
311 *     1. task level context stuff::  Context_Control
312 *     2. floating point task stuff:: Context_Control_fp
313 *     3. special interrupt level context :: Context_Control_interrupt
314 *
315 *  On some processors, it is cost-effective to save only the callee
316 *  preserved registers during a task context switch.  This means
317 *  that the ISR code needs to save those registers which do not
318 *  persist across function calls.  It is not mandatory to make this
319 *  distinctions between the caller/callee saves registers for the
320 *  purpose of minimizing context saved during task switch and on interrupts.
321 *  If the cost of saving extra registers is minimal, simplicity is the
322 *  choice.  Save the same context on interrupt entry as for tasks in
323 *  this case.
324 *
325 *  Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
326 *  care should be used in designing the context area.
327 *
328 *  On some CPUs with hardware floating point support, the Context_Control_fp
329 *  structure will not be used or it simply consist of an array of a
330 *  fixed number of bytes.   This is done when the floating point context
331 *  is dumped by a "FP save context" type instruction and the format
332 *  is not really defined by the CPU.  In this case, there is no need
333 *  to figure out the exact format -- only the size.  Of course, although
334 *  this is enough information for RTEMS, it is probably not enough for
335 *  a debugger such as gdb.  But that is another problem.
336 */
337typedef struct {
338    uint32_t   register_cpsr;
339    uint32_t   register_r4;
340    uint32_t   register_r5;
341    uint32_t   register_r6;
342    uint32_t   register_r7;
343    uint32_t   register_r8;
344    uint32_t   register_r9;
345    uint32_t   register_r10;
346    uint32_t   register_fp;
347    uint32_t   register_sp;
348    uint32_t   register_lr;
349    uint32_t   register_pc;
350} Context_Control;
351
352typedef struct {
353    double      some_float_register;
354} Context_Control_fp;
355
356typedef struct {
357    uint32_t   register_r0;
358    uint32_t   register_r1;
359    uint32_t   register_r2;
360    uint32_t   register_r3;
361    uint32_t   register_ip;
362    uint32_t   register_lr;
363} CPU_Exception_frame;
364
365typedef void (*cpuExcHandlerType) (CPU_Exception_frame*);
366extern cpuExcHandlerType _currentExcHandler;
367extern void rtems_exception_init_mngt();
368 
369/*
370 *  The following structure defines the set of information saved
371 *  on the current stack by RTEMS upon receipt of each interrupt
372 *  that will lead to re-enter the kernel to signal the thread.
373 */
374
375typedef CPU_Exception_frame CPU_Interrupt_frame;
376
377/*
378 *  The following table contains the information required to configure
379 *  the XXX processor specific parameters.
380 */
381
382typedef struct {
383  void       (*pretasking_hook)( void );
384  void       (*predriver_hook)( void );
385  void       (*postdriver_hook)( void );
386  void       (*idle_task)( void );
387  boolean      do_zero_of_workspace;
388  uint32_t     idle_task_stack_size;
389  uint32_t     interrupt_stack_size;
390  uint32_t     extra_mpci_receive_server_stack;
391  void *     (*stack_allocate_hook)( uint32_t   );
392  void       (*stack_free_hook)( void* );
393  /* end of fields required on all CPUs */
394
395}   rtems_cpu_table;
396
397/*
398 *  Macros to access required entires in the CPU Table are in
399 *  the file rtems/system.h.
400 */
401
402/*
403 *  Macros to access ARM specific additions to the CPU Table
404 *
405 *  none required
406 */
407
408/* There are no CPU specific additions to the CPU Table for this port. */
409
410/*
411 *  This variable is optional.  It is used on CPUs on which it is difficult
412 *  to generate an "uninitialized" FP context.  It is filled in by
413 *  _CPU_Initialize and copied into the task's FP context area during
414 *  _CPU_Context_Initialize.
415 */
416
417SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context;
418
419/*
420 *  The size of the floating point context area.  On some CPUs this
421 *  will not be a "sizeof" because the format of the floating point
422 *  area is not defined -- only the size is.  This is usually on
423 *  CPUs with a "floating point save context" instruction.
424 */
425
426#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
427
428/*
429 *  Amount of extra stack (above minimum stack size) required by
430 *  MPCI receive server thread.  Remember that in a multiprocessor
431 *  system this thread must exist and be able to process all directives.
432 */
433
434#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
435
436/*
437 *  This defines the number of entries in the ISR_Vector_table managed
438 *  by RTEMS.
439 */
440
441#define CPU_INTERRUPT_NUMBER_OF_VECTORS      8
442#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER  (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1)
443
444/*
445 *  This is defined if the port has a special way to report the ISR nesting
446 *  level.  Most ports maintain the variable _ISR_Nest_level.
447 */
448
449#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
450
451/*
452 *  Should be large enough to run all RTEMS tests.  This insures
453 *  that a "reasonable" small application should not have any problems.
454 */
455
456#define CPU_STACK_MINIMUM_SIZE          (1024*4)
457
458/*
459 *  CPU's worst alignment requirement for data types on a byte boundary.  This
460 *  alignment does not take into account the requirements for the stack.
461 */
462
463#define CPU_ALIGNMENT              4
464
465/*
466 *  This number corresponds to the byte alignment requirement for the
467 *  heap handler.  This alignment requirement may be stricter than that
468 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
469 *  common for the heap to follow the same alignment requirement as
470 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
471 *  then this should be set to CPU_ALIGNMENT.
472 *
473 *  NOTE:  This does not have to be a power of 2.  It does have to
474 *         be greater than or equal to CPU_ALIGNMENT.
475 */
476
477#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT
478
479/*
480 *  This number corresponds to the byte alignment requirement for memory
481 *  buffers allocated by the partition manager.  This alignment requirement
482 *  may be stricter than that for the data types alignment specified by
483 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
484 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
485 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
486 *
487 *  NOTE:  This does not have to be a power of 2.  It does have to
488 *         be greater than or equal to CPU_ALIGNMENT.
489 */
490
491#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT
492
493/*
494 *  This number corresponds to the byte alignment requirement for the
495 *  stack.  This alignment requirement may be stricter than that for the
496 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
497 *  is strict enough for the stack, then this should be set to 0.
498 *
499 *  NOTE:  This must be a power of 2 either 0 or greater than CPU_ALIGNMENT.
500 */
501
502#define CPU_STACK_ALIGNMENT        4
503
504/* ISR handler macros */
505
506/*
507 *  Support routine to initialize the RTEMS vector table after it is allocated.
508 */
509
510#define _CPU_Initialize_vectors()
511
512/*
513 *  Disable all interrupts for an RTEMS critical section.  The previous
514 *  level is returned in _level.
515 */
516
517#define _CPU_ISR_Disable( _level )                \
518  {                                               \
519    int reg;                                       \
520    asm volatile ("MRS  %0, cpsr \n"               \
521                  "ORR  %1, %0, #0xc0 \n"          \
522                  "MSR  cpsr, %1 \n"               \
523                   : "=&r" (_level), "=&r" (reg)); \
524  }
525
526/*
527 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
528 *  This indicates the end of an RTEMS critical section.  The parameter
529 *  _level is not modified.
530 */
531
532#define _CPU_ISR_Enable( _level )               \
533  {                                             \
534    asm volatile ("MSR  cpsr, %0 \n"            \
535                  : : "r" (_level));            \
536  }
537
538/*
539 *  This temporarily restores the interrupt to _level before immediately
540 *  disabling them again.  This is used to divide long RTEMS critical
541 *  sections into two or more parts.  The parameter _level is not
542 * modified.
543 */
544
545#define _CPU_ISR_Flash( _level ) \
546  { \
547    int reg;                                    \
548    asm volatile ("MRS  %0, cpsr \n"            \
549                  "MSR  cpsr, %1 \n"            \
550                  "MSR  cpsr, %0 \n"            \
551                  : "=&r" (reg)                 \
552                  : "r" (_level));              \
553  }
554
555/*
556 *  Map interrupt level in task mode onto the hardware that the CPU
557 *  actually provides.  Currently, interrupt levels which do not
558 *  map onto the CPU in a generic fashion are undefined.  Someday,
559 *  it would be nice if these were "mapped" by the application
560 *  via a callout.  For example, m68k has 8 levels 0 - 7, levels
561 *  8 - 255 would be available for bsp/application specific meaning.
562 *  This could be used to manage a programmable interrupt controller
563 *  via the rtems_task_mode directive.
564 *
565 *  The get routine usually must be implemented as a subroutine.
566 */
567
568#define _CPU_ISR_Set_level( new_level )         \
569  {                                             \
570    int reg = 0; /* to avoid warning */         \
571    asm volatile ("MRS  %0, cpsr \n"            \
572                  "BIC  %0, %0, #0xc0 \n"       \
573                  "ORR  %0, %0, %2 \n"          \
574                  "MSR  cpsr_c, %0 \n"          \
575                  : "=r" (reg)                  \
576                  : "0" (reg), "r" (new_level)); \
577  }
578
579
580uint32_t   _CPU_ISR_Get_level( void );
581
582/* end of ISR handler macros */
583
584/* Context handler macros */
585
586/*
587 *  Initialize the context to a state suitable for starting a
588 *  task after a context restore operation.  Generally, this
589 *  involves:
590 *
591 *     - setting a starting address
592 *     - preparing the stack
593 *     - preparing the stack and frame pointers
594 *     - setting the proper interrupt level in the context
595 *     - initializing the floating point context
596 *
597 *  This routine generally does not set any unnecessary register
598 *  in the context.  The state of the "general data" registers is
599 *  undefined at task start time.
600 *
601 *  NOTE: This is_fp parameter is TRUE if the thread is to be a floating
602 *        point thread.  This is typically only used on CPUs where the
603 *        FPU may be easily disabled by software such as on the SPARC
604 *        where the PSR contains an enable FPU bit.
605 */
606
607void _CPU_Context_Initialize(
608  Context_Control  *the_context,
609  uint32_t         *stack_base,
610  uint32_t          size,
611  uint32_t          new_level,
612  void             *entry_point,
613  boolean           is_fp
614);
615
616/*
617 *  This routine is responsible for somehow restarting the currently
618 *  executing task.  If you are lucky, then all that is necessary
619 *  is restoring the context.  Otherwise, there will need to be
620 *  a special assembly routine which does something special in this
621 *  case.  Context_Restore should work most of the time.  It will
622 *  not work if restarting self conflicts with the stack frame
623 *  assumptions of restoring a context.
624 */
625
626#define _CPU_Context_Restart_self( _the_context ) \
627   _CPU_Context_restore( (_the_context) );
628
629/*
630 *  The purpose of this macro is to allow the initial pointer into
631 *  a floating point context area (used to save the floating point
632 *  context) to be at an arbitrary place in the floating point
633 *  context area.
634 *
635 *  This is necessary because some FP units are designed to have
636 *  their context saved as a stack which grows into lower addresses.
637 *  Other FP units can be saved by simply moving registers into offsets
638 *  from the base of the context area.  Finally some FP units provide
639 *  a "dump context" instruction which could fill in from high to low
640 *  or low to high based on the whim of the CPU designers.
641 */
642
643#define _CPU_Context_Fp_start( _base, _offset ) \
644   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
645
646/*
647 *  This routine initializes the FP context area passed to it to.
648 *  There are a few standard ways in which to initialize the
649 *  floating point context.  The code included for this macro assumes
650 *  that this is a CPU in which a "initial" FP context was saved into
651 *  _CPU_Null_fp_context and it simply copies it to the destination
652 *  context passed to it.
653 *
654 *  Other models include (1) not doing anything, and (2) putting
655 *  a "null FP status word" in the correct place in the FP context.
656 */
657
658#define _CPU_Context_Initialize_fp( _destination ) \
659  { \
660   *((Context_Control_fp *) *((void **) _destination)) = _CPU_Null_fp_context; \
661  }
662
663/* end of Context handler macros */
664
665/* Fatal Error manager macros */
666
667/*
668 *  This routine copies _error into a known place -- typically a stack
669 *  location or a register, optionally disables interrupts, and
670 *  halts/stops the CPU.
671 */
672
673#define _CPU_Fatal_halt( _error )           \
674   do {                                     \
675     int _level;                            \
676     _CPU_ISR_Disable( _level );            \
677     asm volatile ("mov r0, %0\n"           \
678                   : "=r" (_error)          \
679                   : "0" (_error)           \
680                   : "r0" );                \
681     while(1) ;                             \
682   } while(0);
683 
684
685/* end of Fatal Error manager macros */
686
687/* Bitfield handler macros */
688
689/*
690 *  This routine sets _output to the bit number of the first bit
691 *  set in _value.  _value is of CPU dependent type Priority_Bit_map_control.
692 *  This type may be either 16 or 32 bits wide although only the 16
693 *  least significant bits will be used.
694 *
695 *  There are a number of variables in using a "find first bit" type
696 *  instruction.
697 *
698 *    (1) What happens when run on a value of zero?
699 *    (2) Bits may be numbered from MSB to LSB or vice-versa.
700 *    (3) The numbering may be zero or one based.
701 *    (4) The "find first bit" instruction may search from MSB or LSB.
702 *
703 *  RTEMS guarantees that (1) will never happen so it is not a concern.
704 *  (2),(3), (4) are handled by the macros _CPU_Priority_mask() and
705 *  _CPU_Priority_bits_index().  These three form a set of routines
706 *  which must logically operate together.  Bits in the _value are
707 *  set and cleared based on masks built by _CPU_Priority_mask().
708 *  The basic major and minor values calculated by _Priority_Major()
709 *  and _Priority_Minor() are "massaged" by _CPU_Priority_bits_index()
710 *  to properly range between the values returned by the "find first bit"
711 *  instruction.  This makes it possible for _Priority_Get_highest() to
712 *  calculate the major and directly index into the minor table.
713 *  This mapping is necessary to ensure that 0 (a high priority major/minor)
714 *  is the first bit found.
715 *
716 *  This entire "find first bit" and mapping process depends heavily
717 *  on the manner in which a priority is broken into a major and minor
718 *  components with the major being the 4 MSB of a priority and minor
719 *  the 4 LSB.  Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
720 *  priority.  And (15 << 4) + 14 corresponds to priority 254 -- the next
721 *  to the lowest priority.
722 *
723 *  If your CPU does not have a "find first bit" instruction, then
724 *  there are ways to make do without it.  Here are a handful of ways
725 *  to implement this in software:
726 *
727 *    - a series of 16 bit test instructions
728 *    - a "binary search using if's"
729 *    - _number = 0
730 *      if _value > 0x00ff
731 *        _value >>=8
732 *        _number = 8;
733 *
734 *      if _value > 0x000f
735 *        _value >>= 4
736 *        _number += 4
737 *
738 *      _number += bit_set_table[ _value ]
739 *
740 *    where bit_set_table[ 16 ] has values which indicate the first
741 *      bit set
742 */
743#if (ARM_HAS_CLZ == 0)
744#  define CPU_USE_GENERIC_BITFIELD_CODE TRUE
745#  define CPU_USE_GENERIC_BITFIELD_DATA TRUE
746#else
747#  define CPU_USE_GENERIC_BITFIELD_CODE FALSE
748#  define CPU_USE_GENERIC_BITFIELD_DATA FALSE
749
750#  define _CPU_Bitfield_Find_first_bit( _value, _output ) \
751   { \
752     (_output) = 0;   /* do something to prevent warnings */ \
753   }
754
755/* end of Bitfield handler macros */
756
757/*
758 *  This routine builds the mask which corresponds to the bit fields
759 *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
760 *  for that routine.
761 */
762
763
764#  define _CPU_Priority_Mask( _bit_number ) \
765   ( 1 << (_bit_number) )
766
767
768/*
769 *  This routine translates the bit numbers returned by
770 *  _CPU_Bitfield_Find_first_bit() into something suitable for use as
771 *  a major or minor component of a priority.  See the discussion
772 *  for that routine.
773 */
774
775
776#  define _CPU_Priority_bits_index( _priority ) \
777   (_priority)
778
779#  error "Implement CLZ version of priority bit functions for ARMv5"
780#endif
781
782/* end of Priority handler macros */
783
784/* functions */
785
786/*
787 *  _CPU_Initialize
788 *
789 *  This routine performs CPU dependent initialization.
790 */
791
792void _CPU_Initialize(
793  rtems_cpu_table  *cpu_table,
794  void      (*thread_dispatch)
795);
796
797typedef enum {
798  ARM_EXCEPTION_RESET      = 0,
799  ARM_EXCEPTION_UNDEF      = 1,
800  ARM_EXCEPTION_SWI        = 2,
801  ARM_EXCEPTION_PREF_ABORT = 3,
802  ARM_EXCEPTION_DATA_ABORT = 4,
803  ARM_EXCEPTION_RESERVED   = 5,
804  ARM_EXCEPTION_IRQ        = 6,
805  ARM_EXCEPTION_FIQ        = 7,
806  MAX_EXCEPTIONS           = 8
807} Arm_symbolic_exception_name;
808
809/*
810 *  _CPU_ISR_install_vector
811 *
812 *  This routine installs an interrupt vector.
813 */
814
815void _CPU_ISR_install_vector(
816  uint32_t    vector,
817  proc_ptr    new_handler,
818  proc_ptr   *old_handler
819);
820
821/*
822 *  _CPU_Install_interrupt_stack
823 *
824 *  This routine installs the hardware interrupt stack pointer.
825 *
826 *  NOTE:  It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
827 *         is TRUE.
828 */
829
830void _CPU_Install_interrupt_stack( void );
831
832/*
833 *  _CPU_Context_switch
834 *
835 *  This routine switches from the run context to the heir context.
836 */
837
838void _CPU_Context_switch(
839  Context_Control  *run,
840  Context_Control  *heir
841);
842
843/*
844 *  _CPU_Context_restore
845 *
846 *  This routine is generally used only to restart self in an
847 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
848 *
849 *  NOTE: May be unnecessary to reload some registers.
850 */
851
852void _CPU_Context_restore(
853  Context_Control *new_context
854);
855
856#if (ARM_HAS_FPU == 1)
857/*
858 *  _CPU_Context_save_fp
859 *
860 *  This routine saves the floating point context passed to it.
861 */
862
863void _CPU_Context_save_fp(
864  void **fp_context_ptr
865);
866
867/*
868 *  _CPU_Context_restore_fp
869 *
870 *  This routine restores the floating point context passed to it.
871 */
872
873void _CPU_Context_restore_fp(
874  void **fp_context_ptr
875);
876#endif /* (ARM_HAS_FPU == 1) */
877
878/*  The following routine swaps the endian format of an unsigned int.
879 *  It must be static because it is referenced indirectly.
880 *
881 *  This version will work on any processor, but if there is a better
882 *  way for your CPU PLEASE use it.  The most common way to do this is to:
883 *
884 *     swap least significant two bytes with 16-bit rotate
885 *     swap upper and lower 16-bits
886 *     swap most significant two bytes with 16-bit rotate
887 *
888 *  Some CPUs have special instructions which swap a 32-bit quantity in
889 *  a single instruction (e.g. i486).  It is probably best to avoid
890 *  an "endian swapping control bit" in the CPU.  One good reason is
891 *  that interrupts would probably have to be disabled to insure that
892 *  an interrupt does not try to access the same "chunk" with the wrong
893 *  endian.  Another good reason is that on some CPUs, the endian bit
894 *  changes the endianness for ALL fetches -- both code and data -- so the code
895 *  will be fetched incorrectly.
896 */
897 
898static inline unsigned int CPU_swap_u32(
899  unsigned int value
900)
901{
902    uint32_t   tmp = value; /* make compiler warnings go away */
903    asm volatile ("EOR   %1, %0, %0, ROR #16\n"
904                  "BIC   %1, %1, #0xff0000\n"   
905                  "MOV   %0, %0, ROR #8\n"     
906                  "EOR   %0, %0, %1, LSR #8\n" 
907                  : "=r" (value), "=r" (tmp) 
908                  : "0" (value), "1" (tmp));
909
910    return value;
911}
912
913static inline uint16_t   CPU_swap_u16(uint16_t   value)
914{
915    uint16_t   lower;
916    uint16_t   upper;
917
918    value = value & (uint16_t  ) 0xffff;
919    lower = (value >> 8) ;
920    upper = (value << 8) ;
921
922    return (lower | upper);
923}
924
925#ifdef __cplusplus
926}
927#endif
928
929#endif
Note: See TracBrowser for help on using the repository browser.