source: rtems/cpukit/score/cpu/or1k/rtems/score/cpu.h @ 94d45f6

4.115
Last change on this file since 94d45f6 was 94d45f6, checked in by Hesham ALMatary <heshamelmatary@…>, on 08/12/14 at 15:57:42

Add support for OpenRISC - Fixed issues

This work is based on the old or32 port (that has been
removed back in 2005) authored by Chris Ziomkowski. The patch includes the
basic functions every port should implement like: context switch, exception
handling, OpenRISC ABI and machine definitions and configurations.

  • Property mode set to 100644
File size: 31.6 KB
Line 
1/**
2 * @file rtems/score/cpu.h
3 */
4
5/*
6 *  This include file contains macros pertaining to the Opencores
7 *  or1k processor family.
8 *
9 *  COPYRIGHT (c) 2014 Hesham ALMatary <heshamelmatary@gmail.com>
10 *  COPYRIGHT (c) 1989-1999.
11 *  On-Line Applications Research Corporation (OAR).
12 *
13 *  The license and distribution terms for this file may be
14 *  found in the file LICENSE in this distribution or at
15 *  http://www.rtems.com/license/LICENSE.
16 *
17 *  This file adapted from no_cpu example of the RTEMS distribution.
18 *  The body has been modified for the Opencores OR1k implementation by
19 *  Chris Ziomkowski. <chris@asics.ws>
20 *
21 */
22
23#ifndef _OR1K_CPU_H
24#define _OR1K_CPU_H
25
26#ifdef __cplusplus
27extern "C" {
28#endif
29
30
31#include <rtems/score/or1k.h>            /* pick up machine definitions */
32#include <rtems/score/or1k-utility.h>
33#include <rtems/score/types.h>
34#ifndef ASM
35#include <rtems/bspIo.h>
36#include <stdint.h>
37#include <stdio.h> /* for printk */
38#endif
39
40/* conditional compilation parameters */
41
42/*
43 *  Should the calls to _Thread_Enable_dispatch be inlined?
44 *
45 *  If TRUE, then they are inlined.
46 *  If FALSE, then a subroutine call is made.
47 *
48 *  Basically this is an example of the classic trade-off of size
49 *  versus speed.  Inlining the call (TRUE) typically increases the
50 *  size of RTEMS while speeding up the enabling of dispatching.
51 *  [NOTE: In general, the _Thread_Dispatch_disable_level will
52 *  only be 0 or 1 unless you are in an interrupt handler and that
53 *  interrupt handler invokes the executive.]  When not inlined
54 *  something calls _Thread_Enable_dispatch which in turns calls
55 *  _Thread_Dispatch.  If the enable dispatch is inlined, then
56 *  one subroutine call is avoided entirely.]
57 *
58 */
59
60#define CPU_INLINE_ENABLE_DISPATCH       FALSE
61
62/*
63 *  Should the body of the search loops in _Thread_queue_Enqueue_priority
64 *  be unrolled one time?  In unrolled each iteration of the loop examines
65 *  two "nodes" on the chain being searched.  Otherwise, only one node
66 *  is examined per iteration.
67 *
68 *  If TRUE, then the loops are unrolled.
69 *  If FALSE, then the loops are not unrolled.
70 *
71 *  The primary factor in making this decision is the cost of disabling
72 *  and enabling interrupts (_ISR_Flash) versus the cost of rest of the
73 *  body of the loop.  On some CPUs, the flash is more expensive than
74 *  one iteration of the loop body.  In this case, it might be desirable
75 *  to unroll the loop.  It is important to note that on some CPUs, this
76 *  code is the longest interrupt disable period in RTEMS.  So it is
77 *  necessary to strike a balance when setting this parameter.
78 *
79 */
80
81#define CPU_UNROLL_ENQUEUE_PRIORITY      TRUE
82
83/*
84 *  Does RTEMS manage a dedicated interrupt stack in software?
85 *
86 *  If TRUE, then a stack is allocated in _ISR_Handler_initialization.
87 *  If FALSE, nothing is done.
88 *
89 *  If the CPU supports a dedicated interrupt stack in hardware,
90 *  then it is generally the responsibility of the BSP to allocate it
91 *  and set it up.
92 *
93 *  If the CPU does not support a dedicated interrupt stack, then
94 *  the porter has two options: (1) execute interrupts on the
95 *  stack of the interrupted task, and (2) have RTEMS manage a dedicated
96 *  interrupt stack.
97 *
98 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
99 *
100 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
101 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
102 *  possible that both are FALSE for a particular CPU.  Although it
103 *  is unclear what that would imply about the interrupt processing
104 *  procedure on that CPU.
105 *
106 *  Currently, for or1k port, _ISR_Handler is responsible for switching to
107 *  RTEMS dedicated interrupt task.
108 *
109 */
110
111#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
112
113/*
114 *  Does this CPU have hardware support for a dedicated interrupt stack?
115 *
116 *  If TRUE, then it must be installed during initialization.
117 *  If FALSE, then no installation is performed.
118 *
119 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
120 *
121 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
122 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
123 *  possible that both are FALSE for a particular CPU.  Although it
124 *  is unclear what that would imply about the interrupt processing
125 *  procedure on that CPU.
126 *
127 */
128
129#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
130
131/*
132 *  Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
133 *
134 *  If TRUE, then the memory is allocated during initialization.
135 *  If FALSE, then the memory is not allocated during initialization.
136 *
137 *  This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE
138 *  or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE.
139 *
140 */
141
142#define CPU_ALLOCATE_INTERRUPT_STACK TRUE
143
144/*
145 *  Does the RTEMS invoke the user's ISR with the vector number and
146 *  a pointer to the saved interrupt frame (1) or just the vector
147 *  number (0)?
148 *
149 */
150
151#define CPU_ISR_PASSES_FRAME_POINTER 1
152
153/*
154 *  Does the CPU have hardware floating point?
155 *
156 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
157 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
158 *
159 *  If there is a FP coprocessor such as the i387 or mc68881, then
160 *  the answer is TRUE.
161 *
162 *  The macro name "OR1K_HAS_FPU" should be made CPU specific.
163 *  It indicates whether or not this CPU model has FP support.  For
164 *  example, it would be possible to have an i386_nofp CPU model
165 *  which set this to false to indicate that you have an i386 without
166 *  an i387 and wish to leave floating point support out of RTEMS.
167 *
168 *  The CPU_SOFTWARE_FP is used to indicate whether or not there
169 *  is software implemented floating point that must be context
170 *  switched.  The determination of whether or not this applies
171 *  is very tool specific and the state saved/restored is also
172 *  compiler specific.
173 *
174 *  Or1k Specific Information:
175 *
176 *  At this time there are no implementations of Or1k that are
177 *  expected to implement floating point. More importantly, the
178 *  floating point architecture is expected to change significantly
179 *  before such chips are fabricated.
180 */
181
182#define CPU_HARDWARE_FP     FALSE
183#define CPU_SOFTWARE_FP     FALSE
184
185/*
186 *  Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
187 *
188 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
189 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
190 *
191 *  If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
192 *
193 */
194
195#define CPU_ALL_TASKS_ARE_FP     FALSE
196
197/*
198 *  Should the IDLE task have a floating point context?
199 *
200 *  If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
201 *  and it has a floating point context which is switched in and out.
202 *  If FALSE, then the IDLE task does not have a floating point context.
203 *
204 *  Setting this to TRUE negatively impacts the time required to preempt
205 *  the IDLE task from an interrupt because the floating point context
206 *  must be saved as part of the preemption.
207 *
208 */
209
210#define CPU_IDLE_TASK_IS_FP      FALSE
211
212/*
213 *  Should the saving of the floating point registers be deferred
214 *  until a context switch is made to another different floating point
215 *  task?
216 *
217 *  If TRUE, then the floating point context will not be stored until
218 *  necessary.  It will remain in the floating point registers and not
219 *  disturbed until another floating point task is switched to.
220 *
221 *  If FALSE, then the floating point context is saved when a floating
222 *  point task is switched out and restored when the next floating point
223 *  task is restored.  The state of the floating point registers between
224 *  those two operations is not specified.
225 *
226 *  If the floating point context does NOT have to be saved as part of
227 *  interrupt dispatching, then it should be safe to set this to TRUE.
228 *
229 *  Setting this flag to TRUE results in using a different algorithm
230 *  for deciding when to save and restore the floating point context.
231 *  The deferred FP switch algorithm minimizes the number of times
232 *  the FP context is saved and restored.  The FP context is not saved
233 *  until a context switch is made to another, different FP task.
234 *  Thus in a system with only one FP task, the FP context will never
235 *  be saved or restored.
236 *
237 */
238
239#define CPU_USE_DEFERRED_FP_SWITCH       TRUE
240
241/*
242 *  Does this port provide a CPU dependent IDLE task implementation?
243 *
244 *  If TRUE, then the routine _CPU_Thread_Idle_body
245 *  must be provided and is the default IDLE thread body instead of
246 *  _Thread_Idle_body.
247 *
248 *  If FALSE, then use the generic IDLE thread body if the BSP does
249 *  not provide one.
250 *
251 *  This is intended to allow for supporting processors which have
252 *  a low power or idle mode.  When the IDLE thread is executed, then
253 *  the CPU can be powered down.
254 *
255 *  The order of precedence for selecting the IDLE thread body is:
256 *
257 *    1.  BSP provided
258 *    2.  CPU dependent (if provided)
259 *    3.  generic (if no BSP and no CPU dependent)
260 *
261 */
262
263#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE
264
265/*
266 *  Does the stack grow up (toward higher addresses) or down
267 *  (toward lower addresses)?
268 *
269 *  If TRUE, then the stack grows upward.
270 *  If FALSE, then the stack grows toward smaller addresses.
271 *
272 */
273
274#define CPU_STACK_GROWS_UP               FALSE
275
276/*
277 *  The following is the variable attribute used to force alignment
278 *  of critical RTEMS structures.  On some processors it may make
279 *  sense to have these aligned on tighter boundaries than
280 *  the minimum requirements of the compiler in order to have as
281 *  much of the critical data area as possible in a cache line.
282 *
283 *  The placement of this macro in the declaration of the variables
284 *  is based on the syntactic requirements of the GNU C
285 *  "__attribute__" extension.  For example with GNU C, use
286 *  the following to force a structures to a 32 byte boundary.
287 *
288 *      __attribute__ ((aligned (32)))
289 *
290 *  NOTE:  Currently only the Priority Bit Map table uses this feature.
291 *         To benefit from using this, the data must be heavily
292 *         used so it will stay in the cache and used frequently enough
293 *         in the executive to justify turning this on.
294 *
295 */
296
297#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (32)))
298
299/*
300 *  Define what is required to specify how the network to host conversion
301 *  routines are handled.
302 *
303 *  Or1k Specific Information:
304 *
305 *  This version of RTEMS is designed specifically to run with
306 *  big endian architectures. If you want little endian, you'll
307 *  have to make the appropriate adjustments here and write
308 *  efficient routines for byte swapping. The Or1k architecture
309 *  doesn't do this very well.
310 */
311
312#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES     FALSE
313#define CPU_BIG_ENDIAN                           TRUE
314#define CPU_LITTLE_ENDIAN                        FALSE
315
316/*
317 *  The following defines the number of bits actually used in the
318 *  interrupt field of the task mode.  How those bits map to the
319 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
320 *
321 */
322
323#define CPU_MODES_INTERRUPT_MASK   0x00000001
324
325/*
326 *  Processor defined structures required for cpukit/score.
327 */
328
329
330/*
331 * Contexts
332 *
333 *  Generally there are 2 types of context to save.
334 *     1. Interrupt registers to save
335 *     2. Task level registers to save
336 *
337 *  This means we have the following 3 context items:
338 *     1. task level context stuff::  Context_Control
339 *     2. floating point task stuff:: Context_Control_fp
340 *     3. special interrupt level context :: Context_Control_interrupt
341 *
342 *  On some processors, it is cost-effective to save only the callee
343 *  preserved registers during a task context switch.  This means
344 *  that the ISR code needs to save those registers which do not
345 *  persist across function calls.  It is not mandatory to make this
346 *  distinctions between the caller/callee saves registers for the
347 *  purpose of minimizing context saved during task switch and on interrupts.
348 *  If the cost of saving extra registers is minimal, simplicity is the
349 *  choice.  Save the same context on interrupt entry as for tasks in
350 *  this case.
351 *
352 *  Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
353 *  care should be used in designing the context area.
354 *
355 *  On some CPUs with hardware floating point support, the Context_Control_fp
356 *  structure will not be used or it simply consist of an array of a
357 *  fixed number of bytes.   This is done when the floating point context
358 *  is dumped by a "FP save context" type instruction and the format
359 *  is not really defined by the CPU.  In this case, there is no need
360 *  to figure out the exact format -- only the size.  Of course, although
361 *  this is enough information for RTEMS, it is probably not enough for
362 *  a debugger such as gdb.  But that is another problem.
363 *
364 *
365 */
366#ifndef ASM
367#ifdef OR1K_64BIT_ARCH
368#define or1kreg uint64_t
369#else
370#define or1kreg uint32_t
371#endif
372
/*
 *  Task-level register context saved and restored by _CPU_Context_switch().
 *
 *  NOTE(review): members are declared uint32_t rather than or1kreg, so an
 *  OR1K_64BIT_ARCH build would truncate register values -- confirm intended.
 */
typedef struct {
  uint32_t  r1;     /* Stack pointer */
  uint32_t  r2;     /* Frame pointer */
  uint32_t  r3;
  uint32_t  r4;
  uint32_t  r5;
  uint32_t  r6;
  uint32_t  r7;
  uint32_t  r8;
  uint32_t  r9;
  uint32_t  r10;
  uint32_t  r11;
  uint32_t  r12;
  uint32_t  r13;
  uint32_t  r14;
  uint32_t  r15;
  uint32_t  r16;
  uint32_t  r17;
  uint32_t  r18;
  uint32_t  r19;
  uint32_t  r20;
  uint32_t  r21;
  uint32_t  r22;
  uint32_t  r23;
  uint32_t  r24;
  uint32_t  r25;
  uint32_t  r26;
  uint32_t  r27;
  uint32_t  r28;
  uint32_t  r29;
  uint32_t  r30;
  uint32_t  r31;

  uint32_t  sr;    /* Current supervision register non persistent values */
  uint32_t  epcr;  /* exception program counter */
  uint32_t  eear;  /* exception effective address register */
  uint32_t  esr;   /* exception supervision register */
} Context_Control;
411
/* r1 is the OR1K stack pointer register (see Context_Control above). */
#define _CPU_Context_Get_SP( _context ) \
  (_context)->r1
414
/*
 *  Floating point context.  This port has no hardware FPU support
 *  (CPU_HARDWARE_FP is FALSE), so this is a placeholder only.
 */
typedef struct {
  /** Placeholder FPU register slot. */
  double      some_float_register;
} Context_Control_fp;
419
420typedef Context_Control CPU_Interrupt_frame;
421
422/*
423 *  The size of the floating point context area.  On some CPUs this
424 *  will not be a "sizeof" because the format of the floating point
425 *  area is not defined -- only the size is.  This is usually on
426 *  CPUs with a "floating point save context" instruction.
427 *
428 *  Or1k Specific Information:
429 *
430 */
431
432#define CPU_CONTEXT_FP_SIZE  0
433SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context;
434
435/*
436 *  Amount of extra stack (above minimum stack size) required by
437 *  MPCI receive server thread.  Remember that in a multiprocessor
438 *  system this thread must exist and be able to process all directives.
439 *
440 */
441
442#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
443
444/*
445 *  Should be large enough to run all RTEMS tests.  This ensures
446 *  that a "reasonable" small application should not have any problems.
447 *
448 */
449
450#define CPU_STACK_MINIMUM_SIZE  4096
451
452/*
453 *  CPU's worst alignment requirement for data types on a byte boundary.  This
454 *  alignment does not take into account the requirements for the stack.
455 *
456 */
457
458#define CPU_ALIGNMENT  8
459
460/*
461 *  This is defined if the port has a special way to report the ISR nesting
462 *  level.  Most ports maintain the variable _ISR_Nest_level.
463 */
464#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
465
466/**
467 * Size of a pointer.
468 *
469 * This must be an integer literal that can be used by the assembler.  This
470 * value will be used to calculate offsets of structure members.  These
471 * offsets will be used in assembler code.
472 */
473#define CPU_SIZEOF_POINTER         4
474
475/*
476 *  This number corresponds to the byte alignment requirement for the
477 *  heap handler.  This alignment requirement may be stricter than that
478 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
479 *  common for the heap to follow the same alignment requirement as
480 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
481 *  then this should be set to CPU_ALIGNMENT.
482 *
483 *  NOTE:  This does not have to be a power of 2 although it should be
484 *         a multiple of 2 greater than or equal to 2.  The requirement
485 *         to be a multiple of 2 is because the heap uses the least
486 *         significant field of the front and back flags to indicate
487 *         that a block is in use or free.  So you do not want any odd
488 *         length blocks really putting length data in that bit.
489 *
490 *         On byte oriented architectures, CPU_HEAP_ALIGNMENT normally will
491 *         have to be greater than or equal to CPU_ALIGNMENT to ensure that
492 *         elements allocated from the heap meet all restrictions.
493 *
494 */
495
496#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT
497
498/*
499 *  This number corresponds to the byte alignment requirement for memory
500 *  buffers allocated by the partition manager.  This alignment requirement
501 *  may be stricter than that for the data types alignment specified by
502 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
503 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
504 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
505 *
506 *  NOTE:  This does not have to be a power of 2.  It does have to
507 *         be greater than or equal to CPU_ALIGNMENT.
508 *
509 */
510
511#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT
512
513/*
514 *  This number corresponds to the byte alignment requirement for the
515 *  stack.  This alignment requirement may be stricter than that for the
516 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
517 *  is strict enough for the stack, then this should be set to 0.
518 *
519 *  NOTE:  This must be a power of 2 either 0 or greater than CPU_ALIGNMENT.
520 *
521 */
522
523#define CPU_STACK_ALIGNMENT        0
524
525/* ISR handler macros */
526
527/*
528 *  Support routine to initialize the RTEMS vector table after it is allocated.
529 *
530 *  NO_CPU Specific Information:
531 *
532 *  XXX document implementation including references if appropriate
533 */
534
535#define _CPU_Initialize_vectors()
536
537/*
538 *  Disable all interrupts for an RTEMS critical section.  The previous
539 *  level is returned in _level.
540 *
541 */
542
543static inline uint32_t or1k_interrupt_disable( void )
544{
545  uint32_t sr;
546  sr = _OR1K_mfspr(CPU_OR1K_SPR_SR);
547
548  _OR1K_mtspr(CPU_OR1K_SPR_SR, (sr & ~CPU_OR1K_SPR_SR_IEE));
549
550  return sr;
551}
552
553static inline void or1k_interrupt_enable(uint32_t level)
554{
555  uint32_t sr;
556
557  /* Enable interrupts and restore rs */
558  sr = level | CPU_OR1K_SPR_SR_IEE | CPU_OR1K_SPR_SR_TEE;
559  _OR1K_mtspr(CPU_OR1K_SPR_SR, sr);
560
561}
562
/* Disable interrupts, saving the previous SR level in _level.
   The parameter is parenthesized for macro hygiene. */
#define _CPU_ISR_Disable( _level ) \
    (_level) = or1k_interrupt_disable()
565
566
567/*
568 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
569 *  This indicates the end of an RTEMS critical section.  The parameter
570 *  _level is not modified.
571 *
572 */
573
/* Restore the interrupt level saved by _CPU_ISR_Disable(). */
#define _CPU_ISR_Enable( _level ) \
  or1k_interrupt_enable( _level )
576
577/*
578 *  This temporarily restores the interrupt to _level before immediately
579 *  disabling them again.  This is used to divide long RTEMS critical
580 *  sections into two or more parts.  The parameter _level is not
581 *  modified.
582 *
583 */
584
/* Briefly re-enable then re-disable interrupts at _level.  The
   parameter is parenthesized before masking for macro hygiene. */
#define _CPU_ISR_Flash( _level ) \
  do { \
      _CPU_ISR_Enable( _level ); \
      _OR1K_mtspr(CPU_OR1K_SPR_SR, ((_level) & ~CPU_OR1K_SPR_SR_IEE)); \
    } while (0)
590
591/*
592 *  Map interrupt level in task mode onto the hardware that the CPU
593 *  actually provides.  Currently, interrupt levels which do not
594 *  map onto the CPU in a generic fashion are undefined.  Someday,
595 *  it would be nice if these were "mapped" by the application
596 *  via a callout.  For example, m68k has 8 levels 0 - 7, levels
597 *  8 - 255 would be available for bsp/application specific meaning.
598 *  This could be used to manage a programmable interrupt controller
599 *  via the rtems_task_mode directive.
600 *
601 *  The get routine usually must be implemented as a subroutine.
602 *
603 */
604
/* Map the task-mode interrupt level onto the hardware SR. */
void _CPU_ISR_Set_level( uint32_t level );

/* Return the current interrupt level in task-mode terms. */
uint32_t _CPU_ISR_Get_level( void );
608
609/* end of ISR handler macros */
610
611/* Context handler macros */
612
613#define OR1K_FAST_CONTEXT_SWITCH_ENABLED FALSE
614/*
615 *  Initialize the context to a state suitable for starting a
616 *  task after a context restore operation.  Generally, this
617 *  involves:
618 *
619 *     - setting a starting address
620 *     - preparing the stack
621 *     - preparing the stack and frame pointers
622 *     - setting the proper interrupt level in the context
623 *     - initializing the floating point context
624 *
625 *  This routine generally does not set any unnecessary register
626 *  in the context.  The state of the "general data" registers is
627 *  undefined at task start time.
628 *
629 *  NOTE: This is_fp parameter is TRUE if the thread is to be a floating
630 *        point thread.  This is typically only used on CPUs where the
631 *        FPU may be easily disabled by software such as on the SPARC
632 *        where the PSR contains an enable FPU bit.
633 *
634 */
635
636/**
637 * @brief Initializes the CPU context.
638 *
639 * The following steps are performed:
640 *  - setting a starting address
641 *  - preparing the stack
642 *  - preparing the stack and frame pointers
643 *  - setting the proper interrupt level in the context
644 *
645 * @param[in] context points to the context area
646 * @param[in] stack_area_begin is the low address of the allocated stack area
647 * @param[in] stack_area_size is the size of the stack area in bytes
648 * @param[in] new_level is the interrupt level for the task
649 * @param[in] entry_point is the task's entry point
650 * @param[in] is_fp is set to @c true if the task is a floating point task
651 * @param[in] tls_area is the thread-local storage (TLS) area
652 */
653void _CPU_Context_Initialize(
654  Context_Control *context,
655  void *stack_area_begin,
656  size_t stack_area_size,
657  uint32_t new_level,
658  void (*entry_point)( void ),
659  bool is_fp,
660  void *tls_area
661);
662
663/*
664 *  This routine is responsible for somehow restarting the currently
665 *  executing task.  If you are lucky, then all that is necessary
666 *  is restoring the context.  Otherwise, there will need to be
667 *  a special assembly routine which does something special in this
668 *  case.  Context_Restore should work most of the time.  It will
669 *  not work if restarting self conflicts with the stack frame
670 *  assumptions of restoring a context.
671 *
672 */
673
/* Restart the currently executing task by restoring its context.
   No trailing semicolon in the expansion: the invocation supplies it,
   so the macro stays safe inside unbraced if/else bodies. */
#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) )
676
677/*
678 *  The purpose of this macro is to allow the initial pointer into
679 *  a floating point context area (used to save the floating point
680 *  context) to be at an arbitrary place in the floating point
681 *  context area.
682 *
683 *  This is necessary because some FP units are designed to have
684 *  their context saved as a stack which grows into lower addresses.
685 *  Other FP units can be saved by simply moving registers into offsets
686 *  from the base of the context area.  Finally some FP units provide
687 *  a "dump context" instruction which could fill in from high to low
688 *  or low to high based on the whim of the CPU designers.
689 *
690 */
691
/* Return the initial save pointer into an FP context area at _offset
   bytes from _base (arbitrary start point, see comment above). */
#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
694
695/*
696 *  This routine initializes the FP context area passed to it to.
697 *  There are a few standard ways in which to initialize the
698 *  floating point context.  The code included for this macro assumes
699 *  that this is a CPU in which a "initial" FP context was saved into
700 *  _CPU_Null_fp_context and it simply copies it to the destination
701 *  context passed to it.
702 *
703 *  Other models include (1) not doing anything, and (2) putting
704 *  a "null FP status word" in the correct place in the FP context.
705 *
706 */
707
/* Initialize the FP context at *(_destination) by copying the "null"
   FP context saved in _CPU_Null_fp_context.  Wrapped in do/while(0)
   so the macro behaves as a single statement in if/else bodies.
   NOTE(review): the double dereference assumes _destination is a
   Context_Control_fp ** -- confirm against callers. */
#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
    *(*(_destination)) = _CPU_Null_fp_context; \
  } while (0)
712
713/* end of Context handler macros */
714
715/* Fatal Error manager macros */
716
717/*
718 *  This routine copies _error into a known place -- typically a stack
719 *  location or a register, optionally disables interrupts, and
720 *  halts/stops the CPU.
721 *
722 */
723
/* Report the fatal error code and halt by spinning forever.
   Wrapped in do/while(0): the original two-statement form would run
   the for(;;) unconditionally when used as an unbraced if body. */
#define _CPU_Fatal_halt( _error ) \
  do { \
    printk("Fatal Error %d Halted\n", (_error)); \
    for(;;); \
  } while (0)
727
728/* end of Fatal Error manager macros */
729
730/* Bitfield handler macros */
731
732/*
733 *  This routine sets _output to the bit number of the first bit
734 *  set in _value.  _value is of CPU dependent type Priority_Bit_map_control.
735 *  This type may be either 16 or 32 bits wide although only the 16
736 *  least significant bits will be used.
737 *
738 *  There are a number of variables in using a "find first bit" type
739 *  instruction.
740 *
741 *    (1) What happens when run on a value of zero?
742 *    (2) Bits may be numbered from MSB to LSB or vice-versa.
743 *    (3) The numbering may be zero or one based.
744 *    (4) The "find first bit" instruction may search from MSB or LSB.
745 *
746 *  RTEMS guarantees that (1) will never happen so it is not a concern.
747 *  (2),(3), (4) are handled by the macros _CPU_Priority_mask() and
748 *  _CPU_Priority_bits_index().  These three form a set of routines
749 *  which must logically operate together.  Bits in the _value are
750 *  set and cleared based on masks built by _CPU_Priority_mask().
751 *  The basic major and minor values calculated by _Priority_Major()
752 *  and _Priority_Minor() are "massaged" by _CPU_Priority_bits_index()
753 *  to properly range between the values returned by the "find first bit"
754 *  instruction.  This makes it possible for _Priority_Get_highest() to
755 *  calculate the major and directly index into the minor table.
756 *  This mapping is necessary to ensure that 0 (a high priority major/minor)
757 *  is the first bit found.
758 *
759 *  This entire "find first bit" and mapping process depends heavily
760 *  on the manner in which a priority is broken into a major and minor
761 *  components with the major being the 4 MSB of a priority and minor
762 *  the 4 LSB.  Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
763 *  priority.  And (15 << 4) + 14 corresponds to priority 254 -- the next
764 *  to the lowest priority.
765 *
766 *  If your CPU does not have a "find first bit" instruction, then
767 *  there are ways to make do without it.  Here are a handful of ways
768 *  to implement this in software:
769 *
770 *    - a series of 16 bit test instructions
771 *    - a "binary search using if's"
772 *    - _number = 0
773 *      if _value > 0x00ff
774 *        _value >>=8
775 *        _number = 8;
776 *
777 *      if _value > 0x0000f
778 *        _value >>= 8
779 *        _number += 4
780 *
781 *      _number += bit_set_table[ _value ]
782 *
783 *    where bit_set_table[ 16 ] has values which indicate the first
784 *      bit set
785 *
786 */
787
788  /* #define CPU_USE_GENERIC_BITFIELD_CODE FALSE */
789#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
790#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
791
792#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
793
794  /* Get a value between 0 and N where N is the bit size */
795  /* This routine makes use of the fact that CPUCFGR defines
796     OB32S to have value 32, and OB64S to have value 64. If
797     this ever changes then this routine will fail. */
  /*
   * Inline assembly walk-through:
   *   l.mfspr %0,r0,0x2  -- read SPR 0x2 (CPUCFGR) into _output
   *   l.andi  %0,%0,0x60 -- mask the OB32S/OB64S capability bits
   *   l.ff1   %1,%1,r0   -- find-first-one of _value (1-based from LSB)
   *   l.sub   %0,%0,%1   -- _output = masked-CPUCFGR - ff1(_value)
   * NOTE(review): this relies on the masked CPUCFGR bits equaling the
   * register width (32 or 64) as described above -- not verifiable from
   * this file alone.
   */
798#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
799     asm volatile ("l.mfspr %0,r0,0x2   \n\t"\
800                   "l.andi  %0,%0,0x60  \n\t"\
801                   "l.ff1   %1,%1,r0    \n\t"\
802                   "l.sub   %0,%0,%1    \n\t" : "=&r" (_output), "+r" (_value));
803
804#endif
805
806/* end of Bitfield handler macros */
807
808/*
809 *  This routine builds the mask which corresponds to the bit fields
810 *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
811 *  for that routine.
812 *
813 */
814
815#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
816
817#define _CPU_Priority_Mask( _bit_number ) \
818    (1 << _bit_number)
819
820#endif
821
822/*
823 *  This routine translates the bit numbers returned by
824 *  _CPU_Bitfield_Find_first_bit() into something suitable for use as
825 *  a major or minor component of a priority.  See the discussion
826 *  for that routine.
827 *
828 */
829
830#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
831
832#define _CPU_Priority_bits_index( _priority ) \
833  (_priority)
834
835#endif
836
837#define CPU_TIMESTAMP_USE_STRUCT_TIMESPEC FALSE
838#define CPU_TIMESTAMP_USE_INT64 TRUE
839#define CPU_TIMESTAMP_USE_INT64_INLINE FALSE
840
typedef struct {
  /*
   * This port keeps no CPU-specific per-CPU state.
   * NOTE(review): an empty struct is a GCC extension rather than strict
   * ISO C, and CPU_PER_CPU_CONTROL_SIZE is 0 accordingly -- confirm the
   * toolchain guarantees sizeof == 0.
   */
} CPU_Per_CPU_control;
844#endif /* ASM */
845
/* NOTE(review): CPU_SIZEOF_POINTER is already defined identically
   earlier in this file; an identical redefinition is legal C but the
   duplicate should probably be removed. */
846#define CPU_SIZEOF_POINTER 4
/* Byte size of CPU_Per_CPU_control; zero because this port keeps no
   per-CPU state. */
847#define CPU_PER_CPU_CONTROL_SIZE 0
848
849#ifndef ASM
/* Unsigned type of the free-running CPU counter used for timestamps. */
typedef uint32_t CPU_Counter_ticks;

/* One word of the priority bit map (16 priority bits per word). */
typedef uint16_t Priority_bit_map_Word;
852
/*
 *  Register snapshot captured on an exception and handed to
 *  _CPU_Exception_frame_print().
 */
typedef struct {
  uint32_t r[32];  /* general purpose registers r0..r31 */

  /*
   * The exception special-purpose registers must also be saved when
   * fast context switch is disabled and nested interrupt levels are
   * enabled.
   */
#if !OR1K_FAST_CONTEXT_SWITCH_ENABLED
  uint32_t epcr; /* exception PC register */
  uint32_t eear; /* exception effective address register */
  uint32_t esr;  /* exception supervision register */
#endif

} CPU_Exception_frame;
867
868/**
869 * @brief Prints the exception frame via printk().
870 *
871 * @see rtems_fatal() and RTEMS_FATAL_SOURCE_EXCEPTION.
872 */
873void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
874
875
876/* end of Priority handler macros */
877
878/* functions */
879
880/*
881 *  _CPU_Initialize
882 *
883 *  This routine performs CPU dependent initialization.
884 *
885 */
886
/* Performs CPU-dependent initialization (see comment above). */
void _CPU_Initialize( void );
890
/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs a "raw" interrupt handler directly into the
 *  processor's vector table, bypassing any RTEMS interrupt framework
 *  processing.
 *
 *  vector      - exception/interrupt vector number
 *  new_handler - handler to install
 *  old_handler - receives the previously installed handler
 */

void _CPU_ISR_install_raw_handler(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector (RTEMS-level handler).
 *
 *  vector      - interrupt vector number
 *  new_handler - handler routine to install
 *  old_handler - receives the previously installed handler
 */

void _CPU_ISR_install_vector(
  uint32_t    vector,
  proc_ptr   new_handler,
  proc_ptr   *old_handler
);
920
/*
 *  _CPU_Install_interrupt_stack
 *
 *  This routine installs the hardware interrupt stack pointer.
 *
 *  NOTE:  It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
 *         is TRUE.
 */

void _CPU_Install_interrupt_stack( void );

/*
 *  _CPU_Thread_Idle_body
 *
 *  This routine is the CPU dependent IDLE thread body.  It never
 *  returns.
 *
 *  NOTE:  It need only be provided if CPU_PROVIDES_IDLE_THREAD_BODY
 *         is TRUE.
 */

void _CPU_Thread_Idle_body( void );
944
/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 *
 *  run  - context of the currently executing thread (state saved here)
 *  heir - context of the thread to switch to (state restored from here)
 *
 *  Or1k Specific Information:
 *
 *  Please see the comments in the .c file for a description of how
 *  this function works. There are several things to be aware of.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  new_context - context to load; this call does not return.
 *
 *  NOTE: May be unnecessary to reload some registers.
 */

void _CPU_Context_restore(
  Context_Control *new_context
);
974
/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 *
 *  fp_context_ptr - address of the pointer to the FP context area
 *
 *  NOTE(review): may be a stub if this OpenRISC configuration carries
 *  no hardware FP context -- confirm against the port's .c file.
 */

void _CPU_Context_save_fp(
  void **fp_context_ptr
);

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 *
 *  fp_context_ptr - address of the pointer to the FP context area
 */

void _CPU_Context_restore_fp(
  void **fp_context_ptr
);
996
/*  CPU_swap_u32
 *
 *  Swaps the endian format of an unsigned 32-bit integer.  It must be
 *  static because it is referenced indirectly.
 *
 *  This is the portable fallback: each byte is isolated with a mask
 *  and shifted to its mirrored position.  Processors with a dedicated
 *  byte-swap instruction (or a 16-bit rotate sequence) can implement
 *  this more efficiently; use such an instruction if available rather
 *  than any "endian swapping control bit", which would affect all
 *  fetches -- including code -- and require interrupt protection.
 */

static inline unsigned int CPU_swap_u32(
  unsigned int value
)
{
  uint32_t swapped;

  swapped  = (value & 0x000000ffU) << 24;
  swapped |= (value & 0x0000ff00U) <<  8;
  swapped |= (value & 0x00ff0000U) >>  8;
  swapped |= (value & 0xff000000U) >> 24;

  return swapped;
}
1032
/*  Swaps the two bytes of an unsigned 16-bit value.
 *
 *  The argument is fully parenthesized so that expressions containing
 *  low-precedence operators (e.g. CPU_swap_u16(a | b)) expand
 *  correctly; the previous expansion bound the mask/shift operators
 *  tighter than the argument's own operators.
 *
 *  NOTE: the argument is evaluated twice -- do not pass expressions
 *  with side effects.
 */
#define CPU_swap_u16( value ) \
  ((((value) & 0xff) << 8) | (((value) >> 8) & 0xff))
1035
/* Tick type of the free-running CPU counter.
   NOTE(review): this typedef also appears earlier in this header; the
   duplicate is redundant (tolerated by C11, rejected by C99). */
typedef uint32_t CPU_Counter_ticks;

/* Returns the current value of the free-running CPU counter. */
CPU_Counter_ticks _CPU_Counter_read( void );

/* Returns the elapsed ticks between two counter reads (second taken
   after first); presumably modulo arithmetic handles counter wrap --
   see the port implementation. */
CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
);
1044
1045#endif /* ASM */
1046
1047#ifdef __cplusplus
1048}
1049#endif
1050
1051#endif
Note: See TracBrowser for help on using the repository browser.