source: rtems/cpukit/score/cpu/or1k/rtems/score/cpu.h @ 18d0a49

4.115
Last change on this file since 18d0a49 was 8ac3549, checked in by Sebastian Huber <sebastian.huber@…>, on 03/04/15 at 15:13:49

score: Delete unused CPU_UNROLL_ENQUEUE_PRIORITY

  • Property mode set to 100644
File size: 30.7 KB
Line 
1/**
2 * @file rtems/score/cpu.h
3 */
4
5/*
6 *  This include file contains macros pertaining to the Opencores
7 *  or1k processor family.
8 *
9 *  COPYRIGHT (c) 2014 Hesham ALMatary <heshamelmatary@gmail.com>
10 *  COPYRIGHT (c) 1989-1999.
11 *  On-Line Applications Research Corporation (OAR).
12 *
13 *  The license and distribution terms for this file may be
14 *  found in the file LICENSE in this distribution or at
15 *  http://www.rtems.com/license/LICENSE.
16 *
17 *  This file adapted from no_cpu example of the RTEMS distribution.
18 *  The body has been modified for the Opencores OR1k implementation by
19 *  Chris Ziomkowski. <chris@asics.ws>
20 *
21 */
22
23#ifndef _OR1K_CPU_H
24#define _OR1K_CPU_H
25
26#ifdef __cplusplus
27extern "C" {
28#endif
29
30
31#include <rtems/score/or1k.h>            /* pick up machine definitions */
32#include <rtems/score/or1k-utility.h>
33#include <rtems/score/types.h>
34#ifndef ASM
35#include <rtems/bspIo.h>
36#include <stdint.h>
37#include <stdio.h> /* for printk */
38#endif
39
40/* conditional compilation parameters */
41
42/*
43 *  Should the calls to _Thread_Enable_dispatch be inlined?
44 *
45 *  If TRUE, then they are inlined.
46 *  If FALSE, then a subroutine call is made.
47 *
48 *  Basically this is an example of the classic trade-off of size
49 *  versus speed.  Inlining the call (TRUE) typically increases the
50 *  size of RTEMS while speeding up the enabling of dispatching.
51 *  [NOTE: In general, the _Thread_Dispatch_disable_level will
52 *  only be 0 or 1 unless you are in an interrupt handler and that
53 *  interrupt handler invokes the executive.]  When not inlined
54 *  something calls _Thread_Enable_dispatch which in turns calls
55 *  _Thread_Dispatch.  If the enable dispatch is inlined, then
56 *  one subroutine call is avoided entirely.]
57 *
58 */
59
60#define CPU_INLINE_ENABLE_DISPATCH       FALSE
61
62/*
63 *  Does RTEMS manage a dedicated interrupt stack in software?
64 *
65 *  If TRUE, then a stack is allocated in _ISR_Handler_initialization.
66 *  If FALSE, nothing is done.
67 *
68 *  If the CPU supports a dedicated interrupt stack in hardware,
69 *  then it is generally the responsibility of the BSP to allocate it
70 *  and set it up.
71 *
72 *  If the CPU does not support a dedicated interrupt stack, then
73 *  the porter has two options: (1) execute interrupts on the
74 *  stack of the interrupted task, and (2) have RTEMS manage a dedicated
75 *  interrupt stack.
76 *
77 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
78 *
79 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
80 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
81 *  possible that both are FALSE for a particular CPU.  Although it
82 *  is unclear what that would imply about the interrupt processing
83 *  procedure on that CPU.
84 *
85 *  Currently, for or1k port, _ISR_Handler is responsible for switching to
86 *  RTEMS dedicated interrupt task.
87 *
88 */
89
90#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
91
92/*
93 *  Does this CPU have hardware support for a dedicated interrupt stack?
94 *
95 *  If TRUE, then it must be installed during initialization.
96 *  If FALSE, then no installation is performed.
97 *
98 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
99 *
100 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
101 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
102 *  possible that both are FALSE for a particular CPU.  Although it
103 *  is unclear what that would imply about the interrupt processing
104 *  procedure on that CPU.
105 *
106 */
107
108#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
109
110/*
111 *  Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
112 *
 *  If TRUE, then the memory is allocated during initialization.
 *  If FALSE, then the memory is NOT allocated by RTEMS and must be
 *  provided by the BSP or application.
 *
 *  This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE
 *  or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE.
118 *
119 */
120
121#define CPU_ALLOCATE_INTERRUPT_STACK TRUE
122
123/*
124 *  Does the RTEMS invoke the user's ISR with the vector number and
125 *  a pointer to the saved interrupt frame (1) or just the vector
126 *  number (0)?
127 *
128 */
129
130#define CPU_ISR_PASSES_FRAME_POINTER 1
131
132/*
133 *  Does the CPU have hardware floating point?
134 *
135 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
136 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
137 *
138 *  If there is a FP coprocessor such as the i387 or mc68881, then
139 *  the answer is TRUE.
140 *
141 *  The macro name "OR1K_HAS_FPU" should be made CPU specific.
142 *  It indicates whether or not this CPU model has FP support.  For
143 *  example, it would be possible to have an i386_nofp CPU model
144 *  which set this to false to indicate that you have an i386 without
145 *  an i387 and wish to leave floating point support out of RTEMS.
146 *
147 *  The CPU_SOFTWARE_FP is used to indicate whether or not there
148 *  is software implemented floating point that must be context
149 *  switched.  The determination of whether or not this applies
150 *  is very tool specific and the state saved/restored is also
151 *  compiler specific.
152 *
153 *  Or1k Specific Information:
154 *
155 *  At this time there are no implementations of Or1k that are
156 *  expected to implement floating point. More importantly, the
157 *  floating point architecture is expected to change significantly
158 *  before such chips are fabricated.
159 */
160
161#define CPU_HARDWARE_FP     FALSE
162#define CPU_SOFTWARE_FP     FALSE
163
164/*
165 *  Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
166 *
167 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
168 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
169 *
170 *  If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
171 *
172 */
173
174#define CPU_ALL_TASKS_ARE_FP     FALSE
175
176/*
177 *  Should the IDLE task have a floating point context?
178 *
179 *  If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
180 *  and it has a floating point context which is switched in and out.
181 *  If FALSE, then the IDLE task does not have a floating point context.
182 *
183 *  Setting this to TRUE negatively impacts the time required to preempt
184 *  the IDLE task from an interrupt because the floating point context
185 *  must be saved as part of the preemption.
186 *
187 */
188
189#define CPU_IDLE_TASK_IS_FP      FALSE
190
191/*
192 *  Should the saving of the floating point registers be deferred
193 *  until a context switch is made to another different floating point
194 *  task?
195 *
196 *  If TRUE, then the floating point context will not be stored until
197 *  necessary.  It will remain in the floating point registers and not
 *  disturbed until another floating point task is switched to.
199 *
200 *  If FALSE, then the floating point context is saved when a floating
201 *  point task is switched out and restored when the next floating point
202 *  task is restored.  The state of the floating point registers between
203 *  those two operations is not specified.
204 *
205 *  If the floating point context does NOT have to be saved as part of
206 *  interrupt dispatching, then it should be safe to set this to TRUE.
207 *
208 *  Setting this flag to TRUE results in using a different algorithm
209 *  for deciding when to save and restore the floating point context.
210 *  The deferred FP switch algorithm minimizes the number of times
211 *  the FP context is saved and restored.  The FP context is not saved
212 *  until a context switch is made to another, different FP task.
213 *  Thus in a system with only one FP task, the FP context will never
214 *  be saved or restored.
215 *
216 */
217
218#define CPU_USE_DEFERRED_FP_SWITCH       TRUE
219
220/*
221 *  Does this port provide a CPU dependent IDLE task implementation?
222 *
 *  If TRUE, then the routine _CPU_Thread_Idle_body
 *  must be provided and is used as the IDLE thread body instead of
 *  the generic _Thread_Idle_body.
226 *
227 *  If FALSE, then use the generic IDLE thread body if the BSP does
228 *  not provide one.
229 *
230 *  This is intended to allow for supporting processors which have
231 *  a low power or idle mode.  When the IDLE thread is executed, then
232 *  the CPU can be powered down.
233 *
234 *  The order of precedence for selecting the IDLE thread body is:
235 *
236 *    1.  BSP provided
237 *    2.  CPU dependent (if provided)
238 *    3.  generic (if no BSP and no CPU dependent)
239 *
240 */
241
242#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE
243
244/*
245 *  Does the stack grow up (toward higher addresses) or down
246 *  (toward lower addresses)?
247 *
 *  If TRUE, then the stack grows upward.
 *  If FALSE, then the stack grows toward smaller addresses.
250 *
251 */
252
253#define CPU_STACK_GROWS_UP               FALSE
254
255/*
256 *  The following is the variable attribute used to force alignment
257 *  of critical RTEMS structures.  On some processors it may make
258 *  sense to have these aligned on tighter boundaries than
259 *  the minimum requirements of the compiler in order to have as
260 *  much of the critical data area as possible in a cache line.
261 *
262 *  The placement of this macro in the declaration of the variables
 *  is based on the syntactic requirements of the GNU C
264 *  "__attribute__" extension.  For example with GNU C, use
265 *  the following to force a structures to a 32 byte boundary.
266 *
267 *      __attribute__ ((aligned (32)))
268 *
269 *  NOTE:  Currently only the Priority Bit Map table uses this feature.
270 *         To benefit from using this, the data must be heavily
271 *         used so it will stay in the cache and used frequently enough
272 *         in the executive to justify turning this on.
273 *
274 */
275
276#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (32)))
277
278/*
279 *  Define what is required to specify how the network to host conversion
280 *  routines are handled.
281 *
282 *  Or1k Specific Information:
283 *
284 *  This version of RTEMS is designed specifically to run with
285 *  big endian architectures. If you want little endian, you'll
286 *  have to make the appropriate adjustments here and write
287 *  efficient routines for byte swapping. The Or1k architecture
288 *  doesn't do this very well.
289 */
290
291#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES     FALSE
292#define CPU_BIG_ENDIAN                           TRUE
293#define CPU_LITTLE_ENDIAN                        FALSE
294
295/*
296 *  The following defines the number of bits actually used in the
297 *  interrupt field of the task mode.  How those bits map to the
298 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
299 *
300 */
301
302#define CPU_MODES_INTERRUPT_MASK   0x00000001
303
304/*
305 *  Processor defined structures required for cpukit/score.
306 */
307
308
309/*
310 * Contexts
311 *
312 *  Generally there are 2 types of context to save.
313 *     1. Interrupt registers to save
314 *     2. Task level registers to save
315 *
316 *  This means we have the following 3 context items:
317 *     1. task level context stuff::  Context_Control
318 *     2. floating point task stuff:: Context_Control_fp
319 *     3. special interrupt level context :: Context_Control_interrupt
320 *
321 *  On some processors, it is cost-effective to save only the callee
322 *  preserved registers during a task context switch.  This means
323 *  that the ISR code needs to save those registers which do not
324 *  persist across function calls.  It is not mandatory to make this
325 *  distinctions between the caller/callee saves registers for the
326 *  purpose of minimizing context saved during task switch and on interrupts.
327 *  If the cost of saving extra registers is minimal, simplicity is the
328 *  choice.  Save the same context on interrupt entry as for tasks in
329 *  this case.
330 *
331 *  Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
332 *  care should be used in designing the context area.
333 *
334 *  On some CPUs with hardware floating point support, the Context_Control_fp
335 *  structure will not be used or it simply consist of an array of a
336 *  fixed number of bytes.   This is done when the floating point context
337 *  is dumped by a "FP save context" type instruction and the format
338 *  is not really defined by the CPU.  In this case, there is no need
339 *  to figure out the exact format -- only the size.  Of course, although
340 *  this is enough information for RTEMS, it is probably not enough for
341 *  a debugger such as gdb.  But that is another problem.
342 *
343 *
344 */
345#ifndef ASM
346#ifdef OR1K_64BIT_ARCH
347#define or1kreg uint64_t
348#else
349#define or1kreg uint32_t
350#endif
351
352typedef struct {
353  uint32_t  r1;     /* Stack pointer */
354  uint32_t  r2;     /* Frame pointer */
355  uint32_t  r3;
356  uint32_t  r4;
357  uint32_t  r5;
358  uint32_t  r6;
359  uint32_t  r7;
360  uint32_t  r8;
361  uint32_t  r9;
362  uint32_t  r10;
363  uint32_t  r11;
364  uint32_t  r12;
365  uint32_t  r13;
366  uint32_t  r14;
367  uint32_t  r15;
368  uint32_t  r16;
369  uint32_t  r17;
370  uint32_t  r18;
371  uint32_t  r19;
372  uint32_t  r20;
373  uint32_t  r21;
374  uint32_t  r22;
375  uint32_t  r23;
376  uint32_t  r24;
377  uint32_t  r25;
378  uint32_t  r26;
379  uint32_t  r27;
380  uint32_t  r28;
381  uint32_t  r29;
382  uint32_t  r30;
383  uint32_t  r31;
384
385  uint32_t  sr;  /* Current supervision register non persistent values */
386  uint32_t  epcr;
387  uint32_t  eear;
388  uint32_t  esr;
389} Context_Control;
390
391#define _CPU_Context_Get_SP( _context ) \
392  (_context)->r1
393
394typedef struct {
395  /** FPU registers are listed here */
396  double      some_float_register;
397} Context_Control_fp;
398
399typedef Context_Control CPU_Interrupt_frame;
400
401/*
402 *  The size of the floating point context area.  On some CPUs this
403 *  will not be a "sizeof" because the format of the floating point
404 *  area is not defined -- only the size is.  This is usually on
405 *  CPUs with a "floating point save context" instruction.
406 *
407 *  Or1k Specific Information:
408 *
409 */
410
411#define CPU_CONTEXT_FP_SIZE  0
412SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context;
413
414/*
415 *  Amount of extra stack (above minimum stack size) required by
416 *  MPCI receive server thread.  Remember that in a multiprocessor
417 *  system this thread must exist and be able to process all directives.
418 *
419 */
420
421#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
422
423/*
424 *  Should be large enough to run all RTEMS tests.  This insures
425 *  that a "reasonable" small application should not have any problems.
426 *
427 */
428
429#define CPU_STACK_MINIMUM_SIZE  4096
430
431/*
432 *  CPU's worst alignment requirement for data types on a byte boundary.  This
433 *  alignment does not take into account the requirements for the stack.
434 *
435 */
436
437#define CPU_ALIGNMENT  8
438
439/*
440 *  This is defined if the port has a special way to report the ISR nesting
441 *  level.  Most ports maintain the variable _ISR_Nest_level.
442 */
443#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
444
445/**
446 * Size of a pointer.
447 *
448 * This must be an integer literal that can be used by the assembler.  This
449 * value will be used to calculate offsets of structure members.  These
450 * offsets will be used in assembler code.
451 */
452#define CPU_SIZEOF_POINTER         4
453
454/*
455 *  This number corresponds to the byte alignment requirement for the
456 *  heap handler.  This alignment requirement may be stricter than that
457 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
458 *  common for the heap to follow the same alignment requirement as
459 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
460 *  then this should be set to CPU_ALIGNMENT.
461 *
462 *  NOTE:  This does not have to be a power of 2 although it should be
463 *         a multiple of 2 greater than or equal to 2.  The requirement
464 *         to be a multiple of 2 is because the heap uses the least
465 *         significant field of the front and back flags to indicate
466 *         that a block is in use or free.  So you do not want any odd
467 *         length blocks really putting length data in that bit.
468 *
469 *         On byte oriented architectures, CPU_HEAP_ALIGNMENT normally will
470 *         have to be greater or equal to than CPU_ALIGNMENT to ensure that
471 *         elements allocated from the heap meet all restrictions.
472 *
473 */
474
475#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT
476
477/*
478 *  This number corresponds to the byte alignment requirement for memory
479 *  buffers allocated by the partition manager.  This alignment requirement
480 *  may be stricter than that for the data types alignment specified by
481 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
482 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
483 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
484 *
485 *  NOTE:  This does not have to be a power of 2.  It does have to
486 *         be greater or equal to than CPU_ALIGNMENT.
487 *
488 */
489
490#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT
491
492/*
493 *  This number corresponds to the byte alignment requirement for the
494 *  stack.  This alignment requirement may be stricter than that for the
495 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
496 *  is strict enough for the stack, then this should be set to 0.
497 *
498 *  NOTE:  This must be a power of 2 either 0 or greater than CPU_ALIGNMENT.
499 *
500 */
501
502#define CPU_STACK_ALIGNMENT        0
503
504/* ISR handler macros */
505
506/*
507 *  Support routine to initialize the RTEMS vector table after it is allocated.
508 *
509 *  NO_CPU Specific Information:
510 *
511 *  XXX document implementation including references if appropriate
512 */
513
514#define _CPU_Initialize_vectors()
515
516/*
517 *  Disable all interrupts for an RTEMS critical section.  The previous
518 *  level is returned in _level.
519 *
520 */
521
522static inline uint32_t or1k_interrupt_disable( void )
523{
524  uint32_t sr;
525  sr = _OR1K_mfspr(CPU_OR1K_SPR_SR);
526
527  _OR1K_mtspr(CPU_OR1K_SPR_SR, (sr & ~CPU_OR1K_SPR_SR_IEE));
528
529  return sr;
530}
531
532static inline void or1k_interrupt_enable(uint32_t level)
533{
534  uint32_t sr;
535
536  /* Enable interrupts and restore rs */
537  sr = level | CPU_OR1K_SPR_SR_IEE | CPU_OR1K_SPR_SR_TEE;
538  _OR1K_mtspr(CPU_OR1K_SPR_SR, sr);
539
540}
541
542#define _CPU_ISR_Disable( _level ) \
543    _level = or1k_interrupt_disable()
544
545
546/*
547 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
548 *  This indicates the end of an RTEMS critical section.  The parameter
549 *  _level is not modified.
550 *
551 */
552
553#define _CPU_ISR_Enable( _level )  \
554  or1k_interrupt_enable( _level )
555
556/*
557 *  This temporarily restores the interrupt to _level before immediately
558 *  disabling them again.  This is used to divide long RTEMS critical
559 *  sections into two or more parts.  The parameter _level is not
560 *  modified.
561 *
562 */
563
564#define _CPU_ISR_Flash( _level ) \
565  do{ \
566      _CPU_ISR_Enable( _level ); \
567      _OR1K_mtspr(CPU_OR1K_SPR_SR, (_level & ~CPU_OR1K_SPR_SR_IEE)); \
568    } while(0)
569
570/*
571 *  Map interrupt level in task mode onto the hardware that the CPU
572 *  actually provides.  Currently, interrupt levels which do not
573 *  map onto the CPU in a generic fashion are undefined.  Someday,
574 *  it would be nice if these were "mapped" by the application
575 *  via a callout.  For example, m68k has 8 levels 0 - 7, levels
576 *  8 - 255 would be available for bsp/application specific meaning.
577 *  This could be used to manage a programmable interrupt controller
578 *  via the rtems_task_mode directive.
579 *
580 *  The get routine usually must be implemented as a subroutine.
581 *
582 */
583
584void _CPU_ISR_Set_level( uint32_t level );
585
586uint32_t _CPU_ISR_Get_level( void );
587
588/* end of ISR handler macros */
589
590/* Context handler macros */
591
592#define OR1K_FAST_CONTEXT_SWITCH_ENABLED FALSE
593/*
594 *  Initialize the context to a state suitable for starting a
595 *  task after a context restore operation.  Generally, this
596 *  involves:
597 *
598 *     - setting a starting address
599 *     - preparing the stack
600 *     - preparing the stack and frame pointers
601 *     - setting the proper interrupt level in the context
602 *     - initializing the floating point context
603 *
604 *  This routine generally does not set any unnecessary register
605 *  in the context.  The state of the "general data" registers is
606 *  undefined at task start time.
607 *
608 *  NOTE: This is_fp parameter is TRUE if the thread is to be a floating
609 *        point thread.  This is typically only used on CPUs where the
610 *        FPU may be easily disabled by software such as on the SPARC
611 *        where the PSR contains an enable FPU bit.
612 *
613 */
614
615/**
616 * @brief Initializes the CPU context.
617 *
618 * The following steps are performed:
619 *  - setting a starting address
620 *  - preparing the stack
621 *  - preparing the stack and frame pointers
622 *  - setting the proper interrupt level in the context
623 *
624 * @param[in] context points to the context area
625 * @param[in] stack_area_begin is the low address of the allocated stack area
626 * @param[in] stack_area_size is the size of the stack area in bytes
627 * @param[in] new_level is the interrupt level for the task
628 * @param[in] entry_point is the task's entry point
629 * @param[in] is_fp is set to @c true if the task is a floating point task
630 * @param[in] tls_area is the thread-local storage (TLS) area
631 */
632void _CPU_Context_Initialize(
633  Context_Control *context,
634  void *stack_area_begin,
635  size_t stack_area_size,
636  uint32_t new_level,
637  void (*entry_point)( void ),
638  bool is_fp,
639  void *tls_area
640);
641
642/*
643 *  This routine is responsible for somehow restarting the currently
644 *  executing task.  If you are lucky, then all that is necessary
645 *  is restoring the context.  Otherwise, there will need to be
646 *  a special assembly routine which does something special in this
647 *  case.  Context_Restore should work most of the time.  It will
648 *  not work if restarting self conflicts with the stack frame
649 *  assumptions of restoring a context.
650 *
651 */
652
653#define _CPU_Context_Restart_self( _the_context ) \
654   _CPU_Context_restore( (_the_context) );
655
656/*
657 *  The purpose of this macro is to allow the initial pointer into
658 *  a floating point context area (used to save the floating point
659 *  context) to be at an arbitrary place in the floating point
660 *  context area.
661 *
662 *  This is necessary because some FP units are designed to have
663 *  their context saved as a stack which grows into lower addresses.
664 *  Other FP units can be saved by simply moving registers into offsets
665 *  from the base of the context area.  Finally some FP units provide
666 *  a "dump context" instruction which could fill in from high to low
667 *  or low to high based on the whim of the CPU designers.
668 *
669 */
670
671#define _CPU_Context_Fp_start( _base, _offset ) \
672   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
673
674/*
675 *  This routine initializes the FP context area passed to it to.
676 *  There are a few standard ways in which to initialize the
677 *  floating point context.  The code included for this macro assumes
678 *  that this is a CPU in which a "initial" FP context was saved into
679 *  _CPU_Null_fp_context and it simply copies it to the destination
680 *  context passed to it.
681 *
682 *  Other models include (1) not doing anything, and (2) putting
683 *  a "null FP status word" in the correct place in the FP context.
684 *
685 */
686
687#define _CPU_Context_Initialize_fp( _destination ) \
688  { \
689   *(*(_destination)) = _CPU_Null_fp_context; \
690  }
691
692/* end of Context handler macros */
693
694/* Fatal Error manager macros */
695
696/*
697 *  This routine copies _error into a known place -- typically a stack
698 *  location or a register, optionally disables interrupts, and
699 *  halts/stops the CPU.
700 *
701 */
702
703#define _CPU_Fatal_halt(_source, _error ) \
704        printk("Fatal Error %d.%d Halted\n",_source, _error); \
705        for(;;)
706
707/* end of Fatal Error manager macros */
708
709/* Bitfield handler macros */
710
711/*
712 *  This routine sets _output to the bit number of the first bit
713 *  set in _value.  _value is of CPU dependent type Priority_Bit_map_control.
714 *  This type may be either 16 or 32 bits wide although only the 16
715 *  least significant bits will be used.
716 *
717 *  There are a number of variables in using a "find first bit" type
718 *  instruction.
719 *
720 *    (1) What happens when run on a value of zero?
721 *    (2) Bits may be numbered from MSB to LSB or vice-versa.
722 *    (3) The numbering may be zero or one based.
723 *    (4) The "find first bit" instruction may search from MSB or LSB.
724 *
725 *  RTEMS guarantees that (1) will never happen so it is not a concern.
726 *  (2),(3), (4) are handled by the macros _CPU_Priority_mask() and
727 *  _CPU_Priority_bits_index().  These three form a set of routines
728 *  which must logically operate together.  Bits in the _value are
729 *  set and cleared based on masks built by _CPU_Priority_mask().
730 *  The basic major and minor values calculated by _Priority_Major()
731 *  and _Priority_Minor() are "massaged" by _CPU_Priority_bits_index()
732 *  to properly range between the values returned by the "find first bit"
733 *  instruction.  This makes it possible for _Priority_Get_highest() to
734 *  calculate the major and directly index into the minor table.
735 *  This mapping is necessary to ensure that 0 (a high priority major/minor)
736 *  is the first bit found.
737 *
738 *  This entire "find first bit" and mapping process depends heavily
739 *  on the manner in which a priority is broken into a major and minor
740 *  components with the major being the 4 MSB of a priority and minor
741 *  the 4 LSB.  Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
742 *  priority.  And (15 << 4) + 14 corresponds to priority 254 -- the next
743 *  to the lowest priority.
744 *
745 *  If your CPU does not have a "find first bit" instruction, then
746 *  there are ways to make do without it.  Here are a handful of ways
747 *  to implement this in software:
748 *
749 *    - a series of 16 bit test instructions
750 *    - a "binary search using if's"
751 *    - _number = 0
752 *      if _value > 0x00ff
753 *        _value >>=8
754 *        _number = 8;
755 *
 *      if _value > 0x000f
 *        _value >>= 4
 *        _number += 4
759 *
760 *      _number += bit_set_table[ _value ]
761 *
762 *    where bit_set_table[ 16 ] has values which indicate the first
763 *      bit set
764 *
765 */
766
767  /* #define CPU_USE_GENERIC_BITFIELD_CODE FALSE */
768#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
769#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
770
771#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
772
773  /* Get a value between 0 and N where N is the bit size */
774  /* This routine makes use of the fact that CPUCFGR defines
775     OB32S to have value 32, and OB64S to have value 64. If
776     this ever changes then this routine will fail. */
777#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
778     asm volatile ("l.mfspr %0,r0,0x2   \n\t"\
779                   "l.andi  %0,%0,0x60  \n\t"\
780                   "l.ff1   %1,%1,r0    \n\t"\
781                   "l.sub   %0,%0,%1    \n\t" : "=&r" (_output), "+r" (_value));
782
783#endif
784
785/* end of Bitfield handler macros */
786
787/*
788 *  This routine builds the mask which corresponds to the bit fields
789 *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
790 *  for that routine.
791 *
792 */
793
794#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
795
796#define _CPU_Priority_Mask( _bit_number ) \
797    (1 << _bit_number)
798
799#endif
800
801/*
802 *  This routine translates the bit numbers returned by
803 *  _CPU_Bitfield_Find_first_bit() into something suitable for use as
804 *  a major or minor component of a priority.  See the discussion
805 *  for that routine.
806 *
807 */
808
809#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
810
811#define _CPU_Priority_bits_index( _priority ) \
812  (_priority)
813
814#endif
815
816#define CPU_TIMESTAMP_USE_STRUCT_TIMESPEC FALSE
817#define CPU_TIMESTAMP_USE_INT64 TRUE
818#define CPU_TIMESTAMP_USE_INT64_INLINE FALSE
819
820typedef struct {
821/* There is no CPU specific per-CPU state */
822} CPU_Per_CPU_control;
823#endif /* ASM */
824
825#define CPU_SIZEOF_POINTER 4
826#define CPU_PER_CPU_CONTROL_SIZE 0
827
828#ifndef ASM
829typedef uint32_t CPU_Counter_ticks;
830typedef uint16_t Priority_bit_map_Word;
831
832typedef struct {
833  uint32_t r[32];
834
835  /* The following registers must be saved if we have
836  fast context switch disabled and nested interrupt
837  levels are enabled.
838  */
839#if !OR1K_FAST_CONTEXT_SWITCH_ENABLED
840  uint32_t epcr; /* exception PC register */
841  uint32_t eear; /* exception effective address register */
842  uint32_t esr; /* exception supervision register */
843#endif
844
845} CPU_Exception_frame;
846
847/**
848 * @brief Prints the exception frame via printk().
849 *
850 * @see rtems_fatal() and RTEMS_FATAL_SOURCE_EXCEPTION.
851 */
852void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
853
854
855/* end of Priority handler macros */
856
857/* functions */
858
859/*
860 *  _CPU_Initialize
861 *
862 *  This routine performs CPU dependent initialization.
863 *
864 */
865
866void _CPU_Initialize(
867  void
868);
869
870/*
871 *  _CPU_ISR_install_raw_handler
872 *
873 *  This routine installs a "raw" interrupt handler directly into the
874 *  processor's vector table.
875 *
876 */
877
878void _CPU_ISR_install_raw_handler(
879  uint32_t    vector,
880  proc_ptr    new_handler,
881  proc_ptr   *old_handler
882);
883
884/*
885 *  _CPU_ISR_install_vector
886 *
887 *  This routine installs an interrupt vector.
888 *
889 *  NO_CPU Specific Information:
890 *
891 *  XXX document implementation including references if appropriate
892 */
893
894void _CPU_ISR_install_vector(
895  uint32_t    vector,
896  proc_ptr   new_handler,
897  proc_ptr   *old_handler
898);
899
900/*
901 *  _CPU_Install_interrupt_stack
902 *
903 *  This routine installs the hardware interrupt stack pointer.
904 *
905 *  NOTE:  It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
906 *         is TRUE.
907 *
908 */
909
910void _CPU_Install_interrupt_stack( void );
911
912/*
913 *  _CPU_Thread_Idle_body
914 *
915 *  This routine is the CPU dependent IDLE thread body.
916 *
917 *  NOTE:  It need only be provided if CPU_PROVIDES_IDLE_THREAD_BODY
918 *         is TRUE.
919 *
920 */
921
922void _CPU_Thread_Idle_body( void );
923
924/*
925 *  _CPU_Context_switch
926 *
927 *  This routine switches from the run context to the heir context.
928 *
929 *  Or1k Specific Information:
930 *
931 *  Please see the comments in the .c file for a description of how
932 *  this function works. There are several things to be aware of.
933 */
934
935void _CPU_Context_switch(
936  Context_Control  *run,
937  Context_Control  *heir
938);
939
940/*
941 *  _CPU_Context_restore
942 *
943 *  This routine is generally used only to restart self in an
944 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
945 *
946 *  NOTE: May be unnecessary to reload some registers.
947 *
948 */
949
950void _CPU_Context_restore(
951  Context_Control *new_context
952);
953
954/*
955 *  _CPU_Context_save_fp
956 *
957 *  This routine saves the floating point context passed to it.
958 *
959 */
960
961void _CPU_Context_save_fp(
962  void **fp_context_ptr
963);
964
965/*
966 *  _CPU_Context_restore_fp
967 *
968 *  This routine restores the floating point context passed to it.
969 *
970 */
971
972void _CPU_Context_restore_fp(
973  void **fp_context_ptr
974);
975
976/*  The following routine swaps the endian format of an unsigned int.
977 *  It must be static because it is referenced indirectly.
978 *
979 *  This version will work on any processor, but if there is a better
980 *  way for your CPU PLEASE use it.  The most common way to do this is to:
981 *
982 *     swap least significant two bytes with 16-bit rotate
983 *     swap upper and lower 16-bits
984 *     swap most significant two bytes with 16-bit rotate
985 *
986 *  Some CPUs have special instructions which swap a 32-bit quantity in
987 *  a single instruction (e.g. i486).  It is probably best to avoid
988 *  an "endian swapping control bit" in the CPU.  One good reason is
989 *  that interrupts would probably have to be disabled to insure that
990 *  an interrupt does not try to access the same "chunk" with the wrong
991 *  endian.  Another good reason is that on some CPUs, the endian bit
 *  changes the endianness for ALL fetches -- both code and data -- so the code
993 *  will be fetched incorrectly.
994 *
995 */
996
997static inline unsigned int CPU_swap_u32(
998  unsigned int value
999)
1000{
1001  uint32_t   byte1, byte2, byte3, byte4, swapped;
1002
1003  byte4 = (value >> 24) & 0xff;
1004  byte3 = (value >> 16) & 0xff;
1005  byte2 = (value >> 8)  & 0xff;
1006  byte1 =  value        & 0xff;
1007
1008  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
1009  return( swapped );
1010}
1011
1012#define CPU_swap_u16( value ) \
1013  (((value&0xff) << 8) | ((value >> 8)&0xff))
1014
1015typedef uint32_t CPU_Counter_ticks;
1016
1017CPU_Counter_ticks _CPU_Counter_read( void );
1018
1019CPU_Counter_ticks _CPU_Counter_difference(
1020  CPU_Counter_ticks second,
1021  CPU_Counter_ticks first
1022);
1023
1024#endif /* ASM */
1025
1026#ifdef __cplusplus
1027}
1028#endif
1029
1030#endif
Note: See TracBrowser for help on using the repository browser.