source: rtems/cpukit/score/cpu/or32/rtems/score/cpu.h @ 86c827bd

Last change on this file since 86c827bd was ec8973ed, checked in by Ralf Corsepius <ralf.corsepius@…>, on 01/24/05 at 05:43:40

2005-01-24 Ralf Corsepius <ralf.corsepius@…>

  • rtems/score/cpu.h: *_swap_u32( uint32_t ).
  • Property mode set to 100644
File size: 33.6 KB
1/**
2 * @file rtems/score/cpu.h
3 */
4
5/*
6 *  This include file contains macros pertaining to the Opencores
7 *  or1k processor family.
8 *
9 *  COPYRIGHT (c) 1989-1999.
10 *  On-Line Applications Research Corporation (OAR).
11 *
12 *  The license and distribution terms for this file may be
13 *  found in the file LICENSE in this distribution or at
14 *  http://www.rtems.com/license/LICENSE.
15 *
16 *  This file adapted from no_cpu example of the RTEMS distribution.
17 *  The body has been modified for the Opencores Or1k implementation by
18 *  Chris Ziomkowski. <chris@asics.ws>
19 *
20 */
21
22#ifndef _OR1K_CPU_h
23#define _OR1K_CPU_h
24
25#ifdef __cplusplus
26extern "C" {
27#endif
28
29#include "rtems/score/or32.h"            /* pick up machine definitions */
30#ifndef ASM
31#include "rtems/score/types.h"
32#endif
33
34/* conditional compilation parameters */
35
36/*
37 *  Should the calls to _Thread_Enable_dispatch be inlined?
38 *
39 *  If TRUE, then they are inlined.
40 *  If FALSE, then a subroutine call is made.
41 *
42 *  Basically this is an example of the classic trade-off of size
43 *  versus speed.  Inlining the call (TRUE) typically increases the
44 *  size of RTEMS while speeding up the enabling of dispatching.
45 *  [NOTE: In general, the _Thread_Dispatch_disable_level will
46 *  only be 0 or 1 unless you are in an interrupt handler and that
47 *  interrupt handler invokes the executive.]  When not inlined
48 *  something calls _Thread_Enable_dispatch which in turn calls
49 *  _Thread_Dispatch.  If the enable dispatch is inlined, then
50 *  one subroutine call is avoided entirely.
51 *
52 */
53
54#define CPU_INLINE_ENABLE_DISPATCH       FALSE
55
56/*
57 *  Should the body of the search loops in _Thread_queue_Enqueue_priority
58 *  be unrolled one time?  If unrolled, each iteration of the loop examines
59 *  two "nodes" on the chain being searched.  Otherwise, only one node
60 *  is examined per iteration.
61 *
62 *  If TRUE, then the loops are unrolled.
63 *  If FALSE, then the loops are not unrolled.
64 *
65 *  The primary factor in making this decision is the cost of disabling
66 *  and enabling interrupts (_ISR_Flash) versus the cost of the rest of the
67 *  body of the loop.  On some CPUs, the flash is more expensive than
68 *  one iteration of the loop body.  In this case, it might be desirable
69 *  to unroll the loop.  It is important to note that on some CPUs, this
70 *  code is the longest interrupt disable period in RTEMS.  So it is
71 *  necessary to strike a balance when setting this parameter.
72 *
73 */
74
75#define CPU_UNROLL_ENQUEUE_PRIORITY      TRUE
76
77/*
78 *  Does RTEMS manage a dedicated interrupt stack in software?
79 *
80 *  If TRUE, then a stack is allocated in _ISR_Handler_initialization.
81 *  If FALSE, nothing is done.
82 *
83 *  If the CPU supports a dedicated interrupt stack in hardware,
84 *  then it is generally the responsibility of the BSP to allocate it
85 *  and set it up.
86 *
87 *  If the CPU does not support a dedicated interrupt stack, then
88 *  the porter has two options: (1) execute interrupts on the
89 *  stack of the interrupted task, and (2) have RTEMS manage a dedicated
90 *  interrupt stack.
91 *
92 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
93 *
94 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
95 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
96 *  possible that both are FALSE for a particular CPU, although it
97 *  is unclear what that would imply about the interrupt processing
98 *  procedure on that CPU.
99 *
100 *  For the first cut of an Or1k implementation, let's not worry
101 *  about this, and assume that our C code will automatically perform any
102 *  frame/stack allocation for us when the procedure is entered.
103 *  If we write assembly code, we may have to deal with this manually.
104 *  This can be changed later if we find it is impossible. This
105 *  behavior is desirable as it allows us to work in low memory
106 *  environments where we don't have room for a dedicated stack.
107 */
108
109#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE
110
111/*
112 *  Does this CPU have hardware support for a dedicated interrupt stack?
113 *
114 *  If TRUE, then it must be installed during initialization.
115 *  If FALSE, then no installation is performed.
116 *
117 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
118 *
119 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
120 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
121 *  possible that both are FALSE for a particular CPU, although it
122 *  is unclear what that would imply about the interrupt processing
123 *  procedure on that CPU.
124 *
125 */
126
127#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
128
129/*
130 *  Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
131 *
132 *  If TRUE, then the memory is allocated during initialization.
133 *  If FALSE, then the memory is not allocated during initialization.
134 *
135 *  This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE
136 *  or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE.
137 *
138 */
139
140#define CPU_ALLOCATE_INTERRUPT_STACK FALSE
141
142/*
143 *  Does RTEMS invoke the user's ISR with the vector number and
144 *  a pointer to the saved interrupt frame (1) or just the vector
145 *  number (0)?
146 *
147 */
148
149#define CPU_ISR_PASSES_FRAME_POINTER 0
150
151/*
152 *  Does the CPU have hardware floating point?
153 *
154 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
155 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
156 *
157 *  If there is a FP coprocessor such as the i387 or mc68881, then
158 *  the answer is TRUE.
159 *
160 *  The macro name "OR1K_HAS_FPU" should be made CPU specific.
161 *  It indicates whether or not this CPU model has FP support.  For
162 *  example, it would be possible to have an i386_nofp CPU model
163 *  which set this to false to indicate that you have an i386 without
164 *  an i387 and wish to leave floating point support out of RTEMS.
165 *
166 *  The CPU_SOFTWARE_FP is used to indicate whether or not there
167 *  is software implemented floating point that must be context
168 *  switched.  The determination of whether or not this applies
169 *  is very tool specific and the state saved/restored is also
170 *  compiler specific.
171 *
172 *  Or1k Specific Information:
173 *
174 *  At this time there are no implementations of Or1k that are
175 *  expected to implement floating point. More importantly, the
176 *  floating point architecture is expected to change significantly
177 *  before such chips are fabricated.
178 */
179
180#if ( OR1K_HAS_FPU == 1 )
181#define CPU_HARDWARE_FP     TRUE
182#define CPU_SOFTWARE_FP     FALSE
183#else
184#define CPU_HARDWARE_FP     FALSE
185#define CPU_SOFTWARE_FP     TRUE
186#endif
187
188
189/*
190 *  Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
191 *
192 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
193 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
194 *
195 *  If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
196 *
197 */
198
199#define CPU_ALL_TASKS_ARE_FP     FALSE
200
201/*
202 *  Should the IDLE task have a floating point context?
203 *
204 *  If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
205 *  and it has a floating point context which is switched in and out.
206 *  If FALSE, then the IDLE task does not have a floating point context.
207 *
208 *  Setting this to TRUE negatively impacts the time required to preempt
209 *  the IDLE task from an interrupt because the floating point context
210 *  must be saved as part of the preemption.
211 *
212 */
213
214#define CPU_IDLE_TASK_IS_FP      FALSE
215
216/*
217 *  Should the saving of the floating point registers be deferred
218 *  until a context switch is made to another different floating point
219 *  task?
220 *
221 *  If TRUE, then the floating point context will not be stored until
222 *  necessary.  It will remain in the floating point registers and not
223 *  disturbed until another floating point task is switched to.
224 *
225 *  If FALSE, then the floating point context is saved when a floating
226 *  point task is switched out and restored when the next floating point
227 *  task is restored.  The state of the floating point registers between
228 *  those two operations is not specified.
229 *
230 *  If the floating point context does NOT have to be saved as part of
231 *  interrupt dispatching, then it should be safe to set this to TRUE.
232 *
233 *  Setting this flag to TRUE results in using a different algorithm
234 *  for deciding when to save and restore the floating point context.
235 *  The deferred FP switch algorithm minimizes the number of times
236 *  the FP context is saved and restored.  The FP context is not saved
237 *  until a context switch is made to another, different FP task.
238 *  Thus in a system with only one FP task, the FP context will never
239 *  be saved or restored.
240 *
241 */
242
243#define CPU_USE_DEFERRED_FP_SWITCH       TRUE
244
245/*
246 *  Does this port provide a CPU dependent IDLE task implementation?
247 *
248 *  If TRUE, then the routine _CPU_Thread_Idle_body
249 *  must be provided and is the default IDLE thread body instead of
250 *  _Thread_Idle_body.
251 *
252 *  If FALSE, then use the generic IDLE thread body if the BSP does
253 *  not provide one.
254 *
255 *  This is intended to allow for supporting processors which have
256 *  a low power or idle mode.  When the IDLE thread is executed, then
257 *  the CPU can be powered down.
258 *
259 *  The order of precedence for selecting the IDLE thread body is:
260 *
261 *    1.  BSP provided
262 *    2.  CPU dependent (if provided)
263 *    3.  generic (if no BSP and no CPU dependent)
264 *
265 */
266
267#define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE
268
269/*
270 *  Does the stack grow up (toward higher addresses) or down
271 *  (toward lower addresses)?
272 *
273 *  If TRUE, then the stack grows upward.
274 *  If FALSE, then the stack grows toward smaller addresses.
275 *
276 *  Or1k Specific Information:
277 * 
278 *  Previously I had misread the documentation and set this
279 *  to true. Surprisingly, it seemed to work anyway. I'm
280 *  therefore not 100% sure exactly what this does. It should
281 *  be correct as it is now, however.
282 */
283
284#define CPU_STACK_GROWS_UP               FALSE
285
286/*
287 *  The following is the variable attribute used to force alignment
288 *  of critical RTEMS structures.  On some processors it may make
289 *  sense to have these aligned on tighter boundaries than
290 *  the minimum requirements of the compiler in order to have as
291 *  much of the critical data area as possible in a cache line.
292 *
293 *  The placement of this macro in the declaration of the variables
294 *  is based on the syntactic requirements of the GNU C
295 *  "__attribute__" extension.  For example, with GNU C, use
296 *  the following to force a structure to a 32 byte boundary.
297 *
298 *      __attribute__ ((aligned (32)))
299 *
300 *  NOTE:  Currently only the Priority Bit Map table uses this feature.
301 *         To benefit from using this, the data must be heavily
302 *         used so it will stay in the cache and used frequently enough
303 *         in the executive to justify turning this on.
304 *
305 */
306
307#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (32)))
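/*
 *  Purely illustrative sketch (the structure and variable names below are
 *  hypothetical, not part of RTEMS): with GNU C the macro is placed after
 *  the declarator of the variable being aligned.
 *
 *      typedef struct {
 *        uint32_t  major_bit_map;
 *        uint16_t  bit_maps[ 16 ];
 *      } Example_bit_map_area;
 *
 *      static Example_bit_map_area _Example_area CPU_STRUCTURE_ALIGNMENT;
 */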
308
309/*
310 *  Define what is required to specify how the network to host conversion
311 *  routines are handled.
312 *
313 *  Or1k Specific Information:
314 *
315 *  This version of RTEMS is designed specifically to run with
316 *  big endian architectures. If you want little endian, you'll
317 *  have to make the appropriate adjustments here and write
318 *  efficient routines for byte swapping. The Or1k architecture
319 *  doesn't do this very well.
320 */
321
322#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES     FALSE
323#define CPU_BIG_ENDIAN                           TRUE
324#define CPU_LITTLE_ENDIAN                        FALSE
325
326/*
327 *  The following defines the number of bits actually used in the
328 *  interrupt field of the task mode.  How those bits map to the
329 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
330 *
331 */
332
333#define CPU_MODES_INTERRUPT_MASK   0x00000001
334
335/*
336 *  Processor defined structures required for cpukit/score.
337 */
338
339
340/*
341 * Contexts
342 *
343 *  Generally there are 2 types of context to save.
344 *     1. Interrupt registers to save
345 *     2. Task level registers to save
346 *
347 *  This means we have the following 3 context items:
348 *     1. task level context stuff::  Context_Control
349 *     2. floating point task stuff:: Context_Control_fp
350 *     3. special interrupt level context :: Context_Control_interrupt
351 *
352 *  On some processors, it is cost-effective to save only the callee
353 *  preserved registers during a task context switch.  This means
354 *  that the ISR code needs to save those registers which do not
355 *  persist across function calls.  It is not mandatory to make this
356 *  distinction between the caller/callee saved registers for the
357 *  purpose of minimizing context saved during task switch and on interrupts.
358 *  If the cost of saving extra registers is minimal, simplicity is the
359 *  choice.  Save the same context on interrupt entry as for tasks in
360 *  this case.
361 *
362 *  Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
363 *  care should be used in designing the context area.
364 *
365 *  On some CPUs with hardware floating point support, the Context_Control_fp
366 *  structure will not be used or it simply consists of an array of a
367 *  fixed number of bytes.   This is done when the floating point context
368 *  is dumped by a "FP save context" type instruction and the format
369 *  is not really defined by the CPU.  In this case, there is no need
370 *  to figure out the exact format -- only the size.  Of course, although
371 *  this is enough information for RTEMS, it is probably not enough for
372 *  a debugger such as gdb.  But that is another problem.
373 *
374 * 
375 */
376
377#ifdef OR1K_64BIT_ARCH
378#define or1kreg uint64_t 
379#else
380#define or1kreg uint32_t 
381#endif
382
383/* SR_MASK is the mask of values that will be copied to/from the status
384   register on a context switch. Some values, like the flag state, are
385    specific to the context, while others, such as interrupt enables,
386   are global. The currently defined global bits are:
387
388   0x00001 SUPV:     Supervisor mode
389   0x00002 EXR:      Exceptions on/off
390   0x00004 EIR:      Interrupts enabled/disabled
391   0x00008 DCE:      Data cache enabled/disabled
392   0x00010 ICE:      Instruction cache enabled/disabled
393   0x00020 DME:      Data MMU enabled/disabled
394   0x00040 IME:      Instruction MMU enabled/disabled
395   0x00080 LEE:      Little/Big Endian enable
396   0x00100 CE:       Context ID/shadow regs enabled/disabled
397   0x01000 OVE:      Overflow causes exception
398   0x04000 EP:       Exceptions @ 0x0 or 0xF0000000
399   0x08000 PXR:      Partial exception recognition enabled/disabled
400   0x10000 SUMRA:    SPR's accessible/inaccessible
401
402   The context specific bits are:
403
404   0x00200 F         Branch flag indicator
405   0x00400 CY        Carry flag indicator
406   0x00800 OV        Overflow flag indicator
407   0x02000 DSX       Delay slot exception occurred
408   0xF8000000 CID    Current Context ID
409*/
410
411#define SR_MASK 0xF8002E00
412
413typedef enum {
414  SR_SUPV = 0x00001,
415  SR_EXR = 0x00002,
416  SR_EIR = 0x00004,
417  SR_DCE = 0x00008,
418  SR_ICE = 0x00010,
419  SR_DME = 0x00020,
420  SR_IME = 0x00040,
421  SR_LEE = 0x00080,
422  SR_CE = 0x00100,
423  SR_F = 0x00200,
424  SR_CY = 0x00400,
425  SR_OV = 0x00800,
426  SR_OVE = 0x01000,
427  SR_DSX = 0x02000,
428  SR_EP = 0x04000,
429  SR_PXR = 0x08000,
430  SR_SUMRA = 0x10000,
431  SR_CID = 0xF8000000,
432} StatusRegisterBits;
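/*
 *  Illustrative sketch only (the helper name is hypothetical; the real work
 *  is done in the port's assembly code): SR_MASK is intended to be used so
 *  that a context switch keeps the global SR bits of the running CPU and
 *  takes only the per-context bits (F, CY, OV, DSX, CID) from the heir's
 *  saved context.
 */
#if 0
static inline uint32_t _OR1K_Merge_status(
  uint32_t current_sr,   /* SR value currently programmed in the CPU  */
  uint32_t context_sr    /* sr field saved in a Context_Control       */
)
{
  return ( current_sr & ~SR_MASK ) | ( context_sr & SR_MASK );
}
#endif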
433
434typedef struct {
435  uint32_t    sr;     /* Current status register non persistent values */
436  uint32_t    esr;    /* Saved exception status register */
437  uint32_t    ear;    /* Saved exception effective address register */
438  uint32_t    epc;    /* Saved exception PC register    */
439  or1kreg     r[31];  /* Registers */
440  or1kreg     pc;     /* Context PC 4 or 8 bytes for 64 bit alignment */
441} Context_Control;
442
443typedef int Context_Control_fp;
444typedef Context_Control CPU_Interrupt_frame;
445#define _CPU_Null_fp_context 0
446#define _CPU_Interrupt_stack_low 0
447#define _CPU_Interrupt_stack_high 0
448
449/*
450 *  The following table contains the information required to configure
451 *  the Or1k processor specific parameters.
452 *
453 */
454
455typedef struct {
456  void       (*pretasking_hook)( void );
457  void       (*predriver_hook)( void );
458  void       (*postdriver_hook)( void );
459  void       (*idle_task)( void );
460  boolean      do_zero_of_workspace;
461  uint32_t     idle_task_stack_size;
462  uint32_t     interrupt_stack_size;
463  uint32_t     extra_mpci_receive_server_stack;
464  void *     (*stack_allocate_hook)( uint32_t   );
465  void       (*stack_free_hook)( void* );
466  /* end of fields required on all CPUs */
467}   rtems_cpu_table;
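/*
 *  Illustrative sketch only: a BSP (not this header) would normally provide
 *  an instance of this table.  All values below are hypothetical
 *  placeholders showing the field order, nothing more.
 */
#if 0
rtems_cpu_table Cpu_table = {
  NULL,      /* pretasking_hook                          */
  NULL,      /* predriver_hook                           */
  NULL,      /* postdriver_hook                          */
  NULL,      /* idle_task -- NULL selects a default body */
  TRUE,      /* do_zero_of_workspace                     */
  0,         /* idle_task_stack_size                     */
  4096,      /* interrupt_stack_size                     */
  0,         /* extra_mpci_receive_server_stack          */
  NULL,      /* stack_allocate_hook                      */
  NULL       /* stack_free_hook                          */
};
#endif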
468
469/*
470 *  Macros to access required entries in the CPU Table are in
471 *  the file rtems/system.h.
472 *
473 */
474
475/*
476 *  Macros to access OR1K specific additions to the CPU Table
477 *
478 */
479
480/* There are no CPU specific additions to the CPU Table for this port. */
481
482/*
483 *  This variable is optional.  It is used on CPUs on which it is difficult
484 *  to generate an "uninitialized" FP context.  It is filled in by
485 *  _CPU_Initialize and copied into the task's FP context area during
486 *  _CPU_Context_Initialize.
487 *
488 */
489
490/* SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context; */
491
492/*
493 *  On some CPUs, RTEMS supports a software managed interrupt stack.
494 *  This stack is allocated by the Interrupt Manager and the switch
495 *  is performed in _ISR_Handler.  These variables contain pointers
496 *  to the lowest and highest addresses in the chunk of memory allocated
497 *  for the interrupt stack.  Since it is unknown whether the stack
498 *  grows up or down (in general), this gives the CPU dependent
499 *  code the option of picking the version it wants to use.
500 *
501 *  NOTE: These two variables are required if the macro
502 *        CPU_HAS_SOFTWARE_INTERRUPT_STACK is defined as TRUE.
503 *
504 */
505
506/*
507SCORE_EXTERN void               *_CPU_Interrupt_stack_low;
508SCORE_EXTERN void               *_CPU_Interrupt_stack_high;
509*/
510
511/*
512 *  With some compilation systems, it is difficult if not impossible to
513 *  call a high-level language routine from assembly language.  This
514 *  is especially true of commercial Ada compilers and name mangling
515 *  C++ ones.  This variable can be optionally defined by the CPU porter
516 *  and contains the address of the routine _Thread_Dispatch.  This
517 *  can make it easier to invoke that routine at the end of the interrupt
518 *  sequence (if a dispatch is necessary).
519 *
520 */
521
522SCORE_EXTERN void           (*_CPU_Thread_dispatch_pointer)();
523
524/*
525 *  Nothing prevents the porter from declaring more CPU specific variables.
526 *
527 */
528
529/* XXX: if needed, put more variables here */
530
531/*
532 *  The size of the floating point context area.  On some CPUs this
533 *  will not be a "sizeof" because the format of the floating point
534 *  area is not defined -- only the size is.  This is usually on
535 *  CPUs with a "floating point save context" instruction.
536 *
537 *  Or1k Specific Information:
538 *
539 *  We don't support floating point in this version, so the size is 0
540 */
541
542#define CPU_CONTEXT_FP_SIZE 0
543
544/*
545 *  Amount of extra stack (above minimum stack size) required by
546 *  MPCI receive server thread.  Remember that in a multiprocessor
547 *  system this thread must exist and be able to process all directives.
548 *
549 */
550
551#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
552
553/*
554 *  This defines the number of entries in the ISR_Vector_table managed
555 *  by RTEMS.
556 *
557 */
558
559#define CPU_INTERRUPT_NUMBER_OF_VECTORS      16
560#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER  (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1)
561
562/*
563 *  Should be large enough to run all RTEMS tests.  This ensures
564 *  that a "reasonable" small application should not have any problems.
565 *
566 */
567
568#define CPU_STACK_MINIMUM_SIZE          4096
569
570/*
571 *  CPU's worst alignment requirement for data types on a byte boundary.  This
572 *  alignment does not take into account the requirements for the stack.
573 *
574 */
575
576#define CPU_ALIGNMENT              8
577
578/*
579 *  This number corresponds to the byte alignment requirement for the
580 *  heap handler.  This alignment requirement may be stricter than that
581 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
582 *  common for the heap to follow the same alignment requirement as
583 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
584 *  then this should be set to CPU_ALIGNMENT.
585 *
586 *  NOTE:  This does not have to be a power of 2 although it should be
587 *         a multiple of 2 greater than or equal to 2.  The requirement
588 *         to be a multiple of 2 is because the heap uses the least
589 *         significant field of the front and back flags to indicate
590 *         that a block is in use or free.  So you do not want any odd
591 *         length blocks really putting length data in that bit.
592 *
593 *         On byte oriented architectures, CPU_HEAP_ALIGNMENT normally will
594 *         have to be greater than or equal to CPU_ALIGNMENT to ensure that
595 *         elements allocated from the heap meet all restrictions.
596 *
597 */
598
599#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT
600
601/*
602 *  This number corresponds to the byte alignment requirement for memory
603 *  buffers allocated by the partition manager.  This alignment requirement
604 *  may be stricter than that for the data types alignment specified by
605 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
606 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
607 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
608 *
609 *  NOTE:  This does not have to be a power of 2.  It does have to
610 *         be greater than or equal to CPU_ALIGNMENT.
611 *
612 */
613
614#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT
615
616/*
617 *  This number corresponds to the byte alignment requirement for the
618 *  stack.  This alignment requirement may be stricter than that for the
619 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
620 *  is strict enough for the stack, then this should be set to 0.
621 *
622 *  NOTE:  This must be a power of 2, either 0 or greater than CPU_ALIGNMENT.
623 *
624 */
625
626#define CPU_STACK_ALIGNMENT        0
627
628/* ISR handler macros */
629
630/*
631 *  Support routine to initialize the RTEMS vector table after it is allocated.
632 * 
633 *  Or1k Specific Information:
634 *
635 *  XXX document implementation including references if appropriate
636 */
637
638#define _CPU_Initialize_vectors()
639
640
641/*
642 *  Disable all interrupts for an RTEMS critical section.  The previous
643 *  level is returned in _level.
644 *
645 */
646
647#define _CPU_ISR_Disable( _isr_cookie ) \
648  { \
649    (_isr_cookie) = 0;   /* do something to prevent warnings */ \
650  }
651
652/*
653 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
654 *  This indicates the end of an RTEMS critical section.  The parameter
655 *  _level is not modified.
656 *
657 */
658
659#define _CPU_ISR_Enable( _isr_cookie )  \
660  { \
661  }
662
663/*
664 *  This temporarily restores interrupts to _level before immediately
665 *  disabling them again.  This is used to divide long RTEMS critical
666 *  sections into two or more parts.  The parameter _level is not
667 *  modified.
668 *
669 */
670
671#define _CPU_ISR_Flash( _isr_cookie ) \
672  { \
673  }
674
675/*
676 *  Map interrupt level in task mode onto the hardware that the CPU
677 *  actually provides.  Currently, interrupt levels which do not
678 *  map onto the CPU in a generic fashion are undefined.  Someday,
679 *  it would be nice if these were "mapped" by the application
680 *  via a callout.  For example, m68k has 8 levels 0 - 7, levels
681 *  8 - 255 would be available for bsp/application specific meaning.
682 *  This could be used to manage a programmable interrupt controller
683 *  via the rtems_task_mode directive.
684 *
685 *  The get routine usually must be implemented as a subroutine.
686 *
687 */
688
689#define _CPU_ISR_Set_level( new_level ) \
690  { \
691  }
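/*
 *  Illustrative sketch only of what non-stub versions of the macros above
 *  might look like.  _OR1K_Read_sr()/_OR1K_Write_sr() are hypothetical
 *  helpers standing in for l.mfspr/l.mtspr accesses to the or1k status
 *  register; the stub versions above are what this port actually uses.
 *
 *      #define _CPU_ISR_Disable( _isr_cookie ) \
 *        { \
 *          (_isr_cookie) = _OR1K_Read_sr();              \
 *          _OR1K_Write_sr( (_isr_cookie) & ~SR_EIR );    \
 *        }
 *
 *      #define _CPU_ISR_Enable( _isr_cookie ) \
 *        { \
 *          _OR1K_Write_sr( (_isr_cookie) );              \
 *        }
 *
 *      #define _CPU_ISR_Flash( _isr_cookie ) \
 *        { \
 *          _OR1K_Write_sr( (_isr_cookie) );              \
 *          _OR1K_Write_sr( (_isr_cookie) & ~SR_EIR );    \
 *        }
 */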
692
693uint32_t   _CPU_ISR_Get_level( void );
694
695/* end of ISR handler macros */
696
697/* Context handler macros */
698
699/*
700 *  Initialize the context to a state suitable for starting a
701 *  task after a context restore operation.  Generally, this
702 *  involves:
703 *
704 *     - setting a starting address
705 *     - preparing the stack
706 *     - preparing the stack and frame pointers
707 *     - setting the proper interrupt level in the context
708 *     - initializing the floating point context
709 *
710 *  This routine generally does not set any unnecessary register
711 *  in the context.  The state of the "general data" registers is
712 *  undefined at task start time.
713 *
714 *  NOTE: This is_fp parameter is TRUE if the thread is to be a floating
715 *        point thread.  This is typically only used on CPUs where the
716 *        FPU may be easily disabled by software such as on the SPARC
717 *        where the PSR contains an enable FPU bit.
718 *
719 */
720
721#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
722                                 _isr, _entry_point, _is_fp ) \
723  { \
724  memset(_the_context,'\0',sizeof(Context_Control)); \
725  (_the_context)->r[1] = (or1kreg) ((uint32_t) (_stack_base) + (_size) ); \
726  (_the_context)->r[2] = (or1kreg) ((uint32_t) (_stack_base)); \
727  (_the_context)->sr  = (_isr) ? 0x0000001B : 0x0000001F; \
728  (_the_context)->pc  = (or1kreg) (_entry_point); \
729  }
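/*
 *  For readability, the two status register constants above decode, in
 *  terms of the StatusRegisterBits enum, as follows (sketch only):
 *
 *      0x0000001F == (SR_SUPV | SR_EXR | SR_EIR | SR_DCE | SR_ICE)
 *      0x0000001B == (SR_SUPV | SR_EXR |          SR_DCE | SR_ICE)
 *
 *  i.e. the task starts in supervisor mode with exceptions and both caches
 *  enabled; a non-zero _isr argument clears EIR so the task starts with
 *  external interrupts disabled.
 */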
730
731/*
732 *  This routine is responsible for somehow restarting the currently
733 *  executing task.  If you are lucky, then all that is necessary
734 *  is restoring the context.  Otherwise, there will need to be
735 *  a special assembly routine which does something special in this
736 *  case.  Context_Restore should work most of the time.  It will
737 *  not work if restarting self conflicts with the stack frame
738 *  assumptions of restoring a context.
739 *
740 */
741
742#define _CPU_Context_Restart_self( _the_context ) \
743   _CPU_Context_restore( (_the_context) );
744
745/*
746 *  The purpose of this macro is to allow the initial pointer into
747 *  a floating point context area (used to save the floating point
748 *  context) to be at an arbitrary place in the floating point
749 *  context area.
750 *
751 *  This is necessary because some FP units are designed to have
752 *  their context saved as a stack which grows into lower addresses.
753 *  Other FP units can be saved by simply moving registers into offsets
754 *  from the base of the context area.  Finally some FP units provide
755 *  a "dump context" instruction which could fill in from high to low
756 *  or low to high based on the whim of the CPU designers.
757 *
758 */
759
760#define _CPU_Context_Fp_start( _base, _offset ) \
761   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
762
763/*
764 *  This routine initializes the FP context area passed to it.
765 *  There are a few standard ways in which to initialize the
766 *  floating point context.  The code included for this macro assumes
767 *  that this is a CPU in which an "initial" FP context was saved into
768 *  _CPU_Null_fp_context and it simply copies it to the destination
769 *  context passed to it.
770 *
771 *  Other models include (1) not doing anything, and (2) putting
772 *  a "null FP status word" in the correct place in the FP context.
773 *
774 */
775
776#define _CPU_Context_Initialize_fp( _destination ) \
777  { \
778   *((Context_Control_fp *) *((void **) _destination)) = _CPU_Null_fp_context; \
779  }
780
781/* end of Context handler macros */
782
783/* Fatal Error manager macros */
784
785/*
786 *  This routine copies _error into a known place -- typically a stack
787 *  location or a register, optionally disables interrupts, and
788 *  halts/stops the CPU.
789 *
790 */
791
792#define _CPU_Fatal_halt( _error ) \
793  { \
794  }
795
796/* end of Fatal Error manager macros */
797
798/* Bitfield handler macros */
799
800/*
801 *  This routine sets _output to the bit number of the first bit
802 *  set in _value.  _value is of CPU dependent type Priority_Bit_map_control.
803 *  This type may be either 16 or 32 bits wide although only the 16
804 *  least significant bits will be used.
805 *
806 *  There are a number of variables in using a "find first bit" type
807 *  instruction.
808 *
809 *    (1) What happens when run on a value of zero?
810 *    (2) Bits may be numbered from MSB to LSB or vice-versa.
811 *    (3) The numbering may be zero or one based.
812 *    (4) The "find first bit" instruction may search from MSB or LSB.
813 *
814 *  RTEMS guarantees that (1) will never happen so it is not a concern.
815 *  (2),(3), (4) are handled by the macros _CPU_Priority_mask() and
816 *  _CPU_Priority_bits_index().  These three form a set of routines
817 *  which must logically operate together.  Bits in the _value are
818 *  set and cleared based on masks built by _CPU_Priority_mask().
819 *  The basic major and minor values calculated by _Priority_Major()
820 *  and _Priority_Minor() are "massaged" by _CPU_Priority_bits_index()
821 *  to properly range between the values returned by the "find first bit"
822 *  instruction.  This makes it possible for _Priority_Get_highest() to
823 *  calculate the major and directly index into the minor table.
824 *  This mapping is necessary to ensure that 0 (a high priority major/minor)
825 *  is the first bit found.
826 *
827 *  This entire "find first bit" and mapping process depends heavily
828 *  on the manner in which a priority is broken into a major and minor
829 *  components with the major being the 4 MSB of a priority and minor
830 *  the 4 LSB.  Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
831 *  priority.  And (15 << 4) + 14 corresponds to priority 254 -- the next
832 *  to the lowest priority.
833 *
834 *  If your CPU does not have a "find first bit" instruction, then
835 *  there are ways to make do without it.  Here are a handful of ways
836 *  to implement this in software:
837 *
838 *    - a series of 16 bit test instructions
839 *    - a "binary search using if's"
840 *    - _number = 0
841 *      if _value > 0x00ff
842 *        _value >>=8
843 *        _number = 8;
844 *
845 *      if _value > 0x000f
846 *        _value >>= 4
847 *        _number += 4
848 *
849 *      _number += bit_set_table[ _value ]
850 *
851 *    where bit_set_table[ 16 ] has values which indicate the first
852 *      bit set
853 *
854 */
855
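/*
 *  Purely as an illustration of the software scheme sketched above (this
 *  port uses the generic bitfield code, so nothing below is required): a
 *  table driven "find first bit".  The numbering convention and the table
 *  are illustrative only and would have to agree with _CPU_Priority_Mask()
 *  and _CPU_Priority_bits_index().
 */
#if 0
static const unsigned char bit_set_table[ 16 ] = {
  0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
};

static inline unsigned int _Example_Find_first_bit( unsigned int _value )
{
  unsigned int _number = 0;

  if ( _value > 0x00ff ) {   /* highest set bit is in the upper byte   */
    _value >>= 8;
    _number = 8;
  }
  if ( _value > 0x000f ) {   /* ... and in the upper nibble of that byte */
    _value >>= 4;
    _number += 4;
  }
  return _number + bit_set_table[ _value ];  /* index of highest set bit */
}
#endif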
856  /* #define CPU_USE_GENERIC_BITFIELD_CODE FALSE */
857#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
858#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
859
860#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
861
862  /* Get a value between 0 and N where N is the bit size */
863  /* This routine makes use of the fact that CPUCFGR defines
864     OB32S to have value 32, and OB64S to have value 64. If
865     this ever changes then this routine will fail. */
866#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
867     asm volatile ("l.mfspr %0,r0,0x2   \n\t"\
868                   "l.andi  %0,%0,0x60  \n\t"\
869                   "l.ff1   %1,%1,r0    \n\t"\
870                   "l.sub   %0,%0,%1    \n\t" : "=&r" (_output), "+r" (_value));
871
872#endif
873   
874/* end of Bitfield handler macros */
875
876/*
877 *  This routine builds the mask which corresponds to the bit fields
878 *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
879 *  for that routine.
880 *
881 */
882
883#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
884
885#define _CPU_Priority_Mask( _bit_number ) \
886    (1 << _bit_number)
887
888#endif
889
890/*
891 *  This routine translates the bit numbers returned by
892 *  _CPU_Bitfield_Find_first_bit() into something suitable for use as
893 *  a major or minor component of a priority.  See the discussion
894 *  for that routine.
895 *
896 */
897
898#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
899
900#define _CPU_Priority_bits_index( _priority ) \
901  (_priority)
902
903#endif
904
905/* end of Priority handler macros */
906
907/* functions */
908
909/*
910 *  _CPU_Initialize
911 *
912 *  This routine performs CPU dependent initialization.
913 *
914 */
915
916void _CPU_Initialize(
917  rtems_cpu_table  *cpu_table,
918  void      (*thread_dispatch)
919);
920
921/*
922 *  _CPU_ISR_install_raw_handler
923 *
924 *  This routine installs a "raw" interrupt handler directly into the
925 *  processor's vector table.
926 *
927 */
928 
929void _CPU_ISR_install_raw_handler(
930  uint32_t    vector,
931  proc_ptr    new_handler,
932  proc_ptr   *old_handler
933);
934
935/*
936 *  _CPU_ISR_install_vector
937 *
938 *  This routine installs an interrupt vector.
939 *
940 *  Or1k Specific Information:
941 *
942 *  XXX document implementation including references if appropriate
943 */
944
945void _CPU_ISR_install_vector(
946  uint32_t    vector,
947  proc_ptr    new_handler,
948  proc_ptr   *old_handler
949);
950
951/*
952 *  _CPU_Install_interrupt_stack
953 *
954 *  This routine installs the hardware interrupt stack pointer.
955 *
956 *  NOTE:  It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
957 *         is TRUE.
958 *
959 */
960
961void _CPU_Install_interrupt_stack( void );
962
963/*
964 *  _CPU_Thread_Idle_body
965 *
966 *  This routine is the CPU dependent IDLE thread body.
967 *
968 *  NOTE:  It need only be provided if CPU_PROVIDES_IDLE_THREAD_BODY
969 *         is TRUE.
970 *
971 */
972
973void _CPU_Thread_Idle_body( void );
974
975/*
976 *  _CPU_Context_switch
977 *
978 *  This routine switches from the run context to the heir context.
979 *
980 *  Or1k Specific Information:
981 *
982 *  Please see the comments in the .c file for a description of how
983 *  this function works. There are several things to be aware of.
984 */
985
986void _CPU_Context_switch(
987  Context_Control  *run,
988  Context_Control  *heir
989);
990
991/*
992 *  _CPU_Context_restore
993 *
994 *  This routine is generally used only to restart self in an
995 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
996 *
997 *  NOTE: May be unnecessary to reload some registers.
998 *
999 */
1000
1001void _CPU_Context_restore(
1002  Context_Control *new_context
1003);
1004
1005/*
1006 *  _CPU_Context_save_fp
1007 *
1008 *  This routine saves the floating point context passed to it.
1009 *
1010 */
1011
1012void _CPU_Context_save_fp(
1013  void **fp_context_ptr
1014);
1015
1016/*
1017 *  _CPU_Context_restore_fp
1018 *
1019 *  This routine restores the floating point context passed to it.
1020 *
1021 */
1022
1023void _CPU_Context_restore_fp(
1024  void **fp_context_ptr
1025);
1026
1027/*  The following routine swaps the endian format of an unsigned int.
1028 *  It must be static because it is referenced indirectly.
1029 *
1030 *  This version will work on any processor, but if there is a better
1031 *  way for your CPU PLEASE use it.  The most common way to do this is to:
1032 *
1033 *     swap least significant two bytes with 16-bit rotate
1034 *     swap upper and lower 16-bits
1035 *     swap most significant two bytes with 16-bit rotate
1036 *
1037 *  Some CPUs have special instructions which swap a 32-bit quantity in
1038 *  a single instruction (e.g. i486).  It is probably best to avoid
1039 *  an "endian swapping control bit" in the CPU.  One good reason is
1040 *  that interrupts would probably have to be disabled to insure that
1041 *  an interrupt does not try to access the same "chunk" with the wrong
1042 *  endian.  Another good reason is that on some CPUs, the endian bit
1043 *  endianness for ALL fetches -- both code and data -- so the code
1044 *  changes the endianness for ALL fetches -- both code and data -- so the code
1045 *
1046 */
1047 
1048static inline uint32_t CPU_swap_u32(
1049  uint32_t value
1050)
1051{
1052  uint32_t   byte1, byte2, byte3, byte4, swapped;
1053 
1054  byte4 = (value >> 24) & 0xff;
1055  byte3 = (value >> 16) & 0xff;
1056  byte2 = (value >> 8)  & 0xff;
1057  byte1 =  value        & 0xff;
1058 
1059  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
1060  return( swapped );
1061}
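/*
 *  Illustrative alternative only: on toolchains that provide it (GCC 4.3
 *  and later), the generic byte-by-byte sequence above could be replaced
 *  by the compiler builtin, letting GCC pick the best instruction sequence
 *  for the target:
 *
 *      static inline uint32_t CPU_swap_u32( uint32_t value )
 *      {
 *        return __builtin_bswap32( value );
 *      }
 */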
1062
1063#define CPU_swap_u16( value ) \
1064  ((((value) & 0xff) << 8) | (((value) >> 8) & 0xff))
1065
1066#ifdef __cplusplus
1067}
1068#endif
1069
1070#endif