source: rtems/cpukit/score/cpu/or1k/rtems/score/cpu.h @ a8865f8

5
Last change on this file since a8865f8 was a8865f8, checked in by Sebastian Huber <sebastian.huber@…>, on 01/25/16 at 09:20:28

score: Introduce CPU_CACHE_LINE_BYTES

Add CPU_CACHE_LINE_BYTES for the maximum cache line size in bytes. The
actual processor may use no cache or a smaller cache line size.

  • Property mode set to 100644
File size: 30.0 KB
Line 
1/**
2 * @file rtems/score/cpu.h
3 */
4
5/*
6 *  This include file contains macros pertaining to the Opencores
7 *  or1k processor family.
8 *
9 *  COPYRIGHT (c) 2014 Hesham ALMatary <heshamelmatary@gmail.com>
10 *  COPYRIGHT (c) 1989-1999.
11 *  On-Line Applications Research Corporation (OAR).
12 *
13 *  The license and distribution terms for this file may be
14 *  found in the file LICENSE in this distribution or at
15 *  http://www.rtems.org/license/LICENSE.
16 *
17 *  This file adapted from no_cpu example of the RTEMS distribution.
18 *  The body has been modified for the Opencores OR1k implementation by
19 *  Chris Ziomkowski. <chris@asics.ws>
20 *
21 */
22
23#ifndef _OR1K_CPU_H
24#define _OR1K_CPU_H
25
26#ifdef __cplusplus
27extern "C" {
28#endif
29
30
31#include <rtems/score/or1k.h>            /* pick up machine definitions */
32#include <rtems/score/or1k-utility.h>
33#include <rtems/score/types.h>
34#ifndef ASM
35#include <rtems/bspIo.h>
36#include <stdint.h>
37#include <stdio.h> /* for printk */
38#endif
39
40/* conditional compilation parameters */
41
42/*
43 *  Should the calls to _Thread_Enable_dispatch be inlined?
44 *
45 *  If TRUE, then they are inlined.
46 *  If FALSE, then a subroutine call is made.
47 *
48 *  Basically this is an example of the classic trade-off of size
49 *  versus speed.  Inlining the call (TRUE) typically increases the
50 *  size of RTEMS while speeding up the enabling of dispatching.
51 *  [NOTE: In general, the _Thread_Dispatch_disable_level will
52 *  only be 0 or 1 unless you are in an interrupt handler and that
53 *  interrupt handler invokes the executive.]  When not inlined
54 *  something calls _Thread_Enable_dispatch which in turns calls
55 *  _Thread_Dispatch.  If the enable dispatch is inlined, then
56 *  one subroutine call is avoided entirely.]
57 *
58 */
59
60#define CPU_INLINE_ENABLE_DISPATCH       FALSE
61
62/*
63 *  Does RTEMS manage a dedicated interrupt stack in software?
64 *
65 *  If TRUE, then a stack is allocated in _ISR_Handler_initialization.
66 *  If FALSE, nothing is done.
67 *
68 *  If the CPU supports a dedicated interrupt stack in hardware,
69 *  then it is generally the responsibility of the BSP to allocate it
70 *  and set it up.
71 *
72 *  If the CPU does not support a dedicated interrupt stack, then
73 *  the porter has two options: (1) execute interrupts on the
74 *  stack of the interrupted task, and (2) have RTEMS manage a dedicated
75 *  interrupt stack.
76 *
77 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
78 *
79 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
80 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
81 *  possible that both are FALSE for a particular CPU.  Although it
82 *  is unclear what that would imply about the interrupt processing
83 *  procedure on that CPU.
84 *
85 *  Currently, for or1k port, _ISR_Handler is responsible for switching to
86 *  RTEMS dedicated interrupt task.
87 *
88 */
89
90#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
91
92/*
93 *  Does this CPU have hardware support for a dedicated interrupt stack?
94 *
95 *  If TRUE, then it must be installed during initialization.
96 *  If FALSE, then no installation is performed.
97 *
98 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
99 *
100 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
101 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
102 *  possible that both are FALSE for a particular CPU.  Although it
103 *  is unclear what that would imply about the interrupt processing
104 *  procedure on that CPU.
105 *
106 */
107
108#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
109
110/*
111 *  Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
112 *
113 *  If TRUE, then the memory is allocated during initialization.
 *  If FALSE, then the memory is not allocated.
115 *
 *  This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE
117 *  or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE.
118 *
119 */
120
121#define CPU_ALLOCATE_INTERRUPT_STACK TRUE
122
123/*
124 *  Does the RTEMS invoke the user's ISR with the vector number and
125 *  a pointer to the saved interrupt frame (1) or just the vector
126 *  number (0)?
127 *
128 */
129
130#define CPU_ISR_PASSES_FRAME_POINTER 1
131
132/*
133 *  Does the CPU have hardware floating point?
134 *
135 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
136 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
137 *
138 *  If there is a FP coprocessor such as the i387 or mc68881, then
139 *  the answer is TRUE.
140 *
141 *  The macro name "OR1K_HAS_FPU" should be made CPU specific.
142 *  It indicates whether or not this CPU model has FP support.  For
143 *  example, it would be possible to have an i386_nofp CPU model
144 *  which set this to false to indicate that you have an i386 without
145 *  an i387 and wish to leave floating point support out of RTEMS.
146 *
147 *  The CPU_SOFTWARE_FP is used to indicate whether or not there
148 *  is software implemented floating point that must be context
149 *  switched.  The determination of whether or not this applies
150 *  is very tool specific and the state saved/restored is also
151 *  compiler specific.
152 *
153 *  Or1k Specific Information:
154 *
155 *  At this time there are no implementations of Or1k that are
156 *  expected to implement floating point. More importantly, the
157 *  floating point architecture is expected to change significantly
158 *  before such chips are fabricated.
159 */
160
161#define CPU_HARDWARE_FP     FALSE
162#define CPU_SOFTWARE_FP     FALSE
163
164/*
165 *  Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
166 *
167 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
168 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
169 *
170 *  If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
171 *
172 */
173
174#define CPU_ALL_TASKS_ARE_FP     FALSE
175
176/*
177 *  Should the IDLE task have a floating point context?
178 *
179 *  If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
180 *  and it has a floating point context which is switched in and out.
181 *  If FALSE, then the IDLE task does not have a floating point context.
182 *
183 *  Setting this to TRUE negatively impacts the time required to preempt
184 *  the IDLE task from an interrupt because the floating point context
185 *  must be saved as part of the preemption.
186 *
187 */
188
189#define CPU_IDLE_TASK_IS_FP      FALSE
190
191/*
192 *  Should the saving of the floating point registers be deferred
193 *  until a context switch is made to another different floating point
194 *  task?
195 *
196 *  If TRUE, then the floating point context will not be stored until
197 *  necessary.  It will remain in the floating point registers and not
 *  disturbed until another floating point task is switched to.
199 *
200 *  If FALSE, then the floating point context is saved when a floating
201 *  point task is switched out and restored when the next floating point
202 *  task is restored.  The state of the floating point registers between
203 *  those two operations is not specified.
204 *
205 *  If the floating point context does NOT have to be saved as part of
206 *  interrupt dispatching, then it should be safe to set this to TRUE.
207 *
208 *  Setting this flag to TRUE results in using a different algorithm
209 *  for deciding when to save and restore the floating point context.
210 *  The deferred FP switch algorithm minimizes the number of times
211 *  the FP context is saved and restored.  The FP context is not saved
212 *  until a context switch is made to another, different FP task.
213 *  Thus in a system with only one FP task, the FP context will never
214 *  be saved or restored.
215 *
216 */
217
218#define CPU_USE_DEFERRED_FP_SWITCH       TRUE
219
220/*
221 *  Does this port provide a CPU dependent IDLE task implementation?
222 *
 *  If TRUE, then the routine _CPU_Thread_Idle_body
 *  must be provided and is the default IDLE thread body instead of
 *  the generic _Thread_Idle_body.
226 *
227 *  If FALSE, then use the generic IDLE thread body if the BSP does
228 *  not provide one.
229 *
230 *  This is intended to allow for supporting processors which have
231 *  a low power or idle mode.  When the IDLE thread is executed, then
232 *  the CPU can be powered down.
233 *
234 *  The order of precedence for selecting the IDLE thread body is:
235 *
236 *    1.  BSP provided
237 *    2.  CPU dependent (if provided)
238 *    3.  generic (if no BSP and no CPU dependent)
239 *
240 */
241
242#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE
243
244/*
245 *  Does the stack grow up (toward higher addresses) or down
246 *  (toward lower addresses)?
247 *
 *  If TRUE, then the stack grows upward.
 *  If FALSE, then the stack grows toward smaller addresses.
250 *
251 */
252
253#define CPU_STACK_GROWS_UP               FALSE
254
255/* FIXME: Is this the right value? */
256#define CPU_CACHE_LINE_BYTES 32
257
258#define CPU_STRUCTURE_ALIGNMENT RTEMS_ALIGNED( CPU_CACHE_LINE_BYTES )
259
260/*
261 *  Define what is required to specify how the network to host conversion
262 *  routines are handled.
263 *
264 *  Or1k Specific Information:
265 *
266 *  This version of RTEMS is designed specifically to run with
267 *  big endian architectures. If you want little endian, you'll
268 *  have to make the appropriate adjustments here and write
269 *  efficient routines for byte swapping. The Or1k architecture
270 *  doesn't do this very well.
271 */
272
273#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES     FALSE
274#define CPU_BIG_ENDIAN                           TRUE
275#define CPU_LITTLE_ENDIAN                        FALSE
276
277/*
278 *  The following defines the number of bits actually used in the
279 *  interrupt field of the task mode.  How those bits map to the
280 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
281 *
282 */
283
284#define CPU_MODES_INTERRUPT_MASK   0x00000001
285
286/*
287 *  Processor defined structures required for cpukit/score.
288 */
289
290
291/*
292 * Contexts
293 *
294 *  Generally there are 2 types of context to save.
295 *     1. Interrupt registers to save
296 *     2. Task level registers to save
297 *
298 *  This means we have the following 3 context items:
299 *     1. task level context stuff::  Context_Control
300 *     2. floating point task stuff:: Context_Control_fp
301 *     3. special interrupt level context :: Context_Control_interrupt
302 *
303 *  On some processors, it is cost-effective to save only the callee
304 *  preserved registers during a task context switch.  This means
305 *  that the ISR code needs to save those registers which do not
306 *  persist across function calls.  It is not mandatory to make this
307 *  distinctions between the caller/callee saves registers for the
308 *  purpose of minimizing context saved during task switch and on interrupts.
309 *  If the cost of saving extra registers is minimal, simplicity is the
310 *  choice.  Save the same context on interrupt entry as for tasks in
311 *  this case.
312 *
313 *  Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
314 *  care should be used in designing the context area.
315 *
316 *  On some CPUs with hardware floating point support, the Context_Control_fp
317 *  structure will not be used or it simply consist of an array of a
318 *  fixed number of bytes.   This is done when the floating point context
319 *  is dumped by a "FP save context" type instruction and the format
320 *  is not really defined by the CPU.  In this case, there is no need
321 *  to figure out the exact format -- only the size.  Of course, although
322 *  this is enough information for RTEMS, it is probably not enough for
323 *  a debugger such as gdb.  But that is another problem.
324 *
325 *
326 */
327#ifndef ASM
328#ifdef OR1K_64BIT_ARCH
329#define or1kreg uint64_t
330#else
331#define or1kreg uint32_t
332#endif
333
/*
 *  Task level register context saved and restored by
 *  _CPU_Context_switch() / _CPU_Context_restore().
 *
 *  Holds the 31 general purpose registers r1..r31 (r1 doubles as the
 *  stack pointer, r2 as the frame pointer) plus the supervision
 *  register and the exception state registers.
 *
 *  NOTE(review): the members use uint32_t rather than the or1kreg
 *  macro defined just above -- a 64-bit (OR1K_64BIT_ARCH) build would
 *  presumably need or1kreg here; confirm this is intentional.
 */
typedef struct {
  uint32_t  r1;     /* Stack pointer */
  uint32_t  r2;     /* Frame pointer */
  uint32_t  r3;
  uint32_t  r4;
  uint32_t  r5;
  uint32_t  r6;
  uint32_t  r7;
  uint32_t  r8;
  uint32_t  r9;
  uint32_t  r10;
  uint32_t  r11;
  uint32_t  r12;
  uint32_t  r13;
  uint32_t  r14;
  uint32_t  r15;
  uint32_t  r16;
  uint32_t  r17;
  uint32_t  r18;
  uint32_t  r19;
  uint32_t  r20;
  uint32_t  r21;
  uint32_t  r22;
  uint32_t  r23;
  uint32_t  r24;
  uint32_t  r25;
  uint32_t  r26;
  uint32_t  r27;
  uint32_t  r28;
  uint32_t  r29;
  uint32_t  r30;
  uint32_t  r31;

  uint32_t  sr;  /* Current supervision register non persistent values */
  uint32_t  epcr;  /* Exception PC register */
  uint32_t  eear;  /* Exception effective address register */
  uint32_t  esr;   /* Exception supervision register */
} Context_Control;
372
/* Yields the stack pointer (r1) stored in a saved task context. */
#define _CPU_Context_Get_SP( _context ) \
  (_context)->r1

/*
 *  Floating point context area.  This port has no hardware floating
 *  point (CPU_HARDWARE_FP is FALSE above); the single member is a
 *  placeholder so the score interfaces stay satisfied.
 */
typedef struct {
  /** FPU registers are listed here */
  double      some_float_register;
} Context_Control_fp;

/* Interrupts save the full task level context on this port. */
typedef Context_Control CPU_Interrupt_frame;
382
383/*
384 *  The size of the floating point context area.  On some CPUs this
385 *  will not be a "sizeof" because the format of the floating point
386 *  area is not defined -- only the size is.  This is usually on
387 *  CPUs with a "floating point save context" instruction.
388 *
389 *  Or1k Specific Information:
390 *
391 */
392
393#define CPU_CONTEXT_FP_SIZE  0
394SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context;
395
396/*
397 *  Amount of extra stack (above minimum stack size) required by
398 *  MPCI receive server thread.  Remember that in a multiprocessor
399 *  system this thread must exist and be able to process all directives.
400 *
401 */
402
403#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
404
405/*
406 *  Should be large enough to run all RTEMS tests.  This insures
407 *  that a "reasonable" small application should not have any problems.
408 *
409 */
410
411#define CPU_STACK_MINIMUM_SIZE  4096
412
413/*
414 *  CPU's worst alignment requirement for data types on a byte boundary.  This
415 *  alignment does not take into account the requirements for the stack.
416 *
417 */
418
419#define CPU_ALIGNMENT  8
420
421/*
422 *  This is defined if the port has a special way to report the ISR nesting
423 *  level.  Most ports maintain the variable _ISR_Nest_level.
424 */
425#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
426
427/**
428 * Size of a pointer.
429 *
430 * This must be an integer literal that can be used by the assembler.  This
431 * value will be used to calculate offsets of structure members.  These
432 * offsets will be used in assembler code.
433 */
434#define CPU_SIZEOF_POINTER         4
435
436/*
437 *  This number corresponds to the byte alignment requirement for the
438 *  heap handler.  This alignment requirement may be stricter than that
439 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
440 *  common for the heap to follow the same alignment requirement as
441 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
442 *  then this should be set to CPU_ALIGNMENT.
443 *
444 *  NOTE:  This does not have to be a power of 2 although it should be
445 *         a multiple of 2 greater than or equal to 2.  The requirement
446 *         to be a multiple of 2 is because the heap uses the least
447 *         significant field of the front and back flags to indicate
448 *         that a block is in use or free.  So you do not want any odd
449 *         length blocks really putting length data in that bit.
450 *
451 *         On byte oriented architectures, CPU_HEAP_ALIGNMENT normally will
452 *         have to be greater or equal to than CPU_ALIGNMENT to ensure that
453 *         elements allocated from the heap meet all restrictions.
454 *
455 */
456
457#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT
458
459/*
460 *  This number corresponds to the byte alignment requirement for memory
461 *  buffers allocated by the partition manager.  This alignment requirement
462 *  may be stricter than that for the data types alignment specified by
463 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
464 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
465 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
466 *
467 *  NOTE:  This does not have to be a power of 2.  It does have to
468 *         be greater or equal to than CPU_ALIGNMENT.
469 *
470 */
471
472#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT
473
474/*
475 *  This number corresponds to the byte alignment requirement for the
476 *  stack.  This alignment requirement may be stricter than that for the
477 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
478 *  is strict enough for the stack, then this should be set to 0.
479 *
480 *  NOTE:  This must be a power of 2 either 0 or greater than CPU_ALIGNMENT.
481 *
482 */
483
484#define CPU_STACK_ALIGNMENT        0
485
486/* ISR handler macros */
487
488/*
489 *  Support routine to initialize the RTEMS vector table after it is allocated.
490 *
491 *  NO_CPU Specific Information:
492 *
493 *  XXX document implementation including references if appropriate
494 */
495
496#define _CPU_Initialize_vectors()
497
498/*
499 *  Disable all interrupts for an RTEMS critical section.  The previous
500 *  level is returned in _level.
501 *
502 */
503
504static inline uint32_t or1k_interrupt_disable( void )
505{
506  uint32_t sr;
507  sr = _OR1K_mfspr(CPU_OR1K_SPR_SR);
508
509  _OR1K_mtspr(CPU_OR1K_SPR_SR, (sr & ~CPU_OR1K_SPR_SR_IEE));
510
511  return sr;
512}
513
514static inline void or1k_interrupt_enable(uint32_t level)
515{
516  uint32_t sr;
517
518  /* Enable interrupts and restore rs */
519  sr = level | CPU_OR1K_SPR_SR_IEE | CPU_OR1K_SPR_SR_TEE;
520  _OR1K_mtspr(CPU_OR1K_SPR_SR, sr);
521
522}
523
524#define _CPU_ISR_Disable( _level ) \
525    _level = or1k_interrupt_disable()
526
527
528/*
529 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
530 *  This indicates the end of an RTEMS critical section.  The parameter
531 *  _level is not modified.
532 *
533 */
534
535#define _CPU_ISR_Enable( _level )  \
536  or1k_interrupt_enable( _level )
537
538/*
539 *  This temporarily restores the interrupt to _level before immediately
540 *  disabling them again.  This is used to divide long RTEMS critical
541 *  sections into two or more parts.  The parameter _level is not
542 *  modified.
543 *
544 */
545
/*
 *  Momentarily opens the critical section: restores interrupts to
 *  _level via _CPU_ISR_Enable() and then immediately masks external
 *  interrupts (IEE) again.  _level itself is not modified.
 */
#define _CPU_ISR_Flash( _level ) \
  do{ \
      _CPU_ISR_Enable( _level ); \
      _OR1K_mtspr(CPU_OR1K_SPR_SR, (_level & ~CPU_OR1K_SPR_SR_IEE)); \
    } while(0)
551
552/*
553 *  Map interrupt level in task mode onto the hardware that the CPU
554 *  actually provides.  Currently, interrupt levels which do not
555 *  map onto the CPU in a generic fashion are undefined.  Someday,
556 *  it would be nice if these were "mapped" by the application
557 *  via a callout.  For example, m68k has 8 levels 0 - 7, levels
558 *  8 - 255 would be available for bsp/application specific meaning.
559 *  This could be used to manage a programmable interrupt controller
560 *  via the rtems_task_mode directive.
561 *
562 *  The get routine usually must be implemented as a subroutine.
563 *
564 */
565
566void _CPU_ISR_Set_level( uint32_t level );
567
568uint32_t _CPU_ISR_Get_level( void );
569
570/* end of ISR handler macros */
571
572/* Context handler macros */
573
574#define OR1K_FAST_CONTEXT_SWITCH_ENABLED FALSE
575/*
576 *  Initialize the context to a state suitable for starting a
577 *  task after a context restore operation.  Generally, this
578 *  involves:
579 *
580 *     - setting a starting address
581 *     - preparing the stack
582 *     - preparing the stack and frame pointers
583 *     - setting the proper interrupt level in the context
584 *     - initializing the floating point context
585 *
586 *  This routine generally does not set any unnecessary register
587 *  in the context.  The state of the "general data" registers is
588 *  undefined at task start time.
589 *
590 *  NOTE: This is_fp parameter is TRUE if the thread is to be a floating
591 *        point thread.  This is typically only used on CPUs where the
592 *        FPU may be easily disabled by software such as on the SPARC
593 *        where the PSR contains an enable FPU bit.
594 *
595 */
596
597/**
598 * @brief Initializes the CPU context.
599 *
600 * The following steps are performed:
601 *  - setting a starting address
602 *  - preparing the stack
603 *  - preparing the stack and frame pointers
604 *  - setting the proper interrupt level in the context
605 *
606 * @param[in] context points to the context area
607 * @param[in] stack_area_begin is the low address of the allocated stack area
608 * @param[in] stack_area_size is the size of the stack area in bytes
609 * @param[in] new_level is the interrupt level for the task
610 * @param[in] entry_point is the task's entry point
611 * @param[in] is_fp is set to @c true if the task is a floating point task
612 * @param[in] tls_area is the thread-local storage (TLS) area
613 */
614void _CPU_Context_Initialize(
615  Context_Control *context,
616  void *stack_area_begin,
617  size_t stack_area_size,
618  uint32_t new_level,
619  void (*entry_point)( void ),
620  bool is_fp,
621  void *tls_area
622);
623
624/*
625 *  This routine is responsible for somehow restarting the currently
626 *  executing task.  If you are lucky, then all that is necessary
627 *  is restoring the context.  Otherwise, there will need to be
628 *  a special assembly routine which does something special in this
629 *  case.  Context_Restore should work most of the time.  It will
630 *  not work if restarting self conflicts with the stack frame
631 *  assumptions of restoring a context.
632 *
633 */
634
635#define _CPU_Context_Restart_self( _the_context ) \
636   _CPU_Context_restore( (_the_context) );
637
638/*
639 *  The purpose of this macro is to allow the initial pointer into
640 *  a floating point context area (used to save the floating point
641 *  context) to be at an arbitrary place in the floating point
642 *  context area.
643 *
644 *  This is necessary because some FP units are designed to have
645 *  their context saved as a stack which grows into lower addresses.
646 *  Other FP units can be saved by simply moving registers into offsets
647 *  from the base of the context area.  Finally some FP units provide
648 *  a "dump context" instruction which could fill in from high to low
649 *  or low to high based on the whim of the CPU designers.
650 *
651 */
652
653#define _CPU_Context_Fp_start( _base, _offset ) \
654   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
655
656/*
657 *  This routine initializes the FP context area passed to it to.
658 *  There are a few standard ways in which to initialize the
659 *  floating point context.  The code included for this macro assumes
660 *  that this is a CPU in which a "initial" FP context was saved into
661 *  _CPU_Null_fp_context and it simply copies it to the destination
662 *  context passed to it.
663 *
664 *  Other models include (1) not doing anything, and (2) putting
665 *  a "null FP status word" in the correct place in the FP context.
666 *
667 */
668
/*
 *  Initializes an FP context area by copying the pristine
 *  _CPU_Null_fp_context into it.
 *
 *  NOTE(review): _destination is dereferenced twice, so it must have
 *  type Context_Control_fp ** (other ports cast through void **) --
 *  confirm against the callers.  Also note the plain braces are not
 *  the do { } while ( 0 ) idiom, so this expands unsafely in
 *  unbraced if/else bodies.  In practice this macro should be unused
 *  here because CPU_HARDWARE_FP is FALSE.
 */
#define _CPU_Context_Initialize_fp( _destination ) \
  { \
   *(*(_destination)) = _CPU_Null_fp_context; \
  }
673
674/* end of Context handler macros */
675
676/* Fatal Error manager macros */
677
678/*
679 *  This routine copies _error into a known place -- typically a stack
680 *  location or a register, optionally disables interrupts, and
681 *  halts/stops the CPU.
682 *
683 */
684
/*
 *  Reports a fatal error via printk(), halts the simulated CPU and
 *  then spins forever so control never returns to the caller.
 *
 *  Wrapped in do { ... } while ( 0 ) so the macro expands as a single
 *  statement; the previous expansion was three bare statements, which
 *  silently broke inside unbraced if/else bodies.  The arguments are
 *  now parenthesized as well.
 */
#define _CPU_Fatal_halt(_source, _error ) \
  do { \
    printk( "Fatal Error %d.%d Halted\n", (_source), (_error) ); \
    _OR1KSIM_CPU_Halt(); \
    for ( ;; ) \
      ; \
  } while ( 0 )
689
690/* end of Fatal Error manager macros */
691
692/* Bitfield handler macros */
693
694/*
695 *  This routine sets _output to the bit number of the first bit
696 *  set in _value.  _value is of CPU dependent type Priority_Bit_map_control.
697 *  This type may be either 16 or 32 bits wide although only the 16
698 *  least significant bits will be used.
699 *
700 *  There are a number of variables in using a "find first bit" type
701 *  instruction.
702 *
703 *    (1) What happens when run on a value of zero?
704 *    (2) Bits may be numbered from MSB to LSB or vice-versa.
705 *    (3) The numbering may be zero or one based.
706 *    (4) The "find first bit" instruction may search from MSB or LSB.
707 *
708 *  RTEMS guarantees that (1) will never happen so it is not a concern.
709 *  (2),(3), (4) are handled by the macros _CPU_Priority_mask() and
710 *  _CPU_Priority_bits_index().  These three form a set of routines
711 *  which must logically operate together.  Bits in the _value are
712 *  set and cleared based on masks built by _CPU_Priority_mask().
713 *  The basic major and minor values calculated by _Priority_Major()
714 *  and _Priority_Minor() are "massaged" by _CPU_Priority_bits_index()
715 *  to properly range between the values returned by the "find first bit"
716 *  instruction.  This makes it possible for _Priority_Get_highest() to
717 *  calculate the major and directly index into the minor table.
718 *  This mapping is necessary to ensure that 0 (a high priority major/minor)
719 *  is the first bit found.
720 *
721 *  This entire "find first bit" and mapping process depends heavily
722 *  on the manner in which a priority is broken into a major and minor
723 *  components with the major being the 4 MSB of a priority and minor
724 *  the 4 LSB.  Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
725 *  priority.  And (15 << 4) + 14 corresponds to priority 254 -- the next
726 *  to the lowest priority.
727 *
728 *  If your CPU does not have a "find first bit" instruction, then
729 *  there are ways to make do without it.  Here are a handful of ways
730 *  to implement this in software:
731 *
732 *    - a series of 16 bit test instructions
733 *    - a "binary search using if's"
734 *    - _number = 0
735 *      if _value > 0x00ff
736 *        _value >>=8
737 *        _number = 8;
738 *
 *      if _value > 0x000f
 *        _value >>= 8
741 *        _number += 4
742 *
743 *      _number += bit_set_table[ _value ]
744 *
745 *    where bit_set_table[ 16 ] has values which indicate the first
746 *      bit set
747 *
748 */
749
750  /* #define CPU_USE_GENERIC_BITFIELD_CODE FALSE */
751#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
752#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
753
754#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
755
756  /* Get a value between 0 and N where N is the bit size */
757  /* This routine makes use of the fact that CPUCFGR defines
758     OB32S to have value 32, and OB64S to have value 64. If
759     this ever changes then this routine will fail. */
760#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
761     asm volatile ("l.mfspr %0,r0,0x2   \n\t"\
762                   "l.andi  %0,%0,0x60  \n\t"\
763                   "l.ff1   %1,%1,r0    \n\t"\
764                   "l.sub   %0,%0,%1    \n\t" : "=&r" (_output), "+r" (_value));
765
766#endif
767
768/* end of Bitfield handler macros */
769
770/*
771 *  This routine builds the mask which corresponds to the bit fields
772 *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
773 *  for that routine.
774 *
775 */
776
777#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
778
779#define _CPU_Priority_Mask( _bit_number ) \
780    (1 << _bit_number)
781
782#endif
783
784/*
785 *  This routine translates the bit numbers returned by
786 *  _CPU_Bitfield_Find_first_bit() into something suitable for use as
787 *  a major or minor component of a priority.  See the discussion
788 *  for that routine.
789 *
790 */
791
792#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
793
794#define _CPU_Priority_bits_index( _priority ) \
795  (_priority)
796
797#endif
798
/*
 *  CPU port specific per-processor state; the or1k port keeps none
 *  (see CPU_PER_CPU_CONTROL_SIZE of 0 below).
 *
 *  NOTE(review): an empty struct is a GCC extension rather than ISO
 *  C; other ports add a dummy member when strict conformance matters.
 */
typedef struct {
/* There is no CPU specific per-CPU state */
} CPU_Per_CPU_control;
802#endif /* ASM */
803
804#define CPU_SIZEOF_POINTER 4
805#define CPU_PER_CPU_CONTROL_SIZE 0
806
807#ifndef ASM
808typedef uint32_t CPU_Counter_ticks;
809typedef uint16_t Priority_bit_map_Word;
810
/*
 *  Register frame laid down by the exception entry code and consumed
 *  by _CPU_Exception_frame_print().
 */
typedef struct {
  uint32_t r[32];  /* general purpose registers (32 entries) */

  /* The following registers must be saved if we have
  fast context switch disabled and nested interrupt
  levels are enabled.
  */
#if !OR1K_FAST_CONTEXT_SWITCH_ENABLED
  uint32_t epcr; /* exception PC register */
  uint32_t eear; /* exception effective address register */
  uint32_t esr; /* exception supervision register */
#endif

} CPU_Exception_frame;
825
826/**
827 * @brief Prints the exception frame via printk().
828 *
829 * @see rtems_fatal() and RTEMS_FATAL_SOURCE_EXCEPTION.
830 */
831void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
832
833
834/* end of Priority handler macros */
835
836/* functions */
837
838/*
839 *  _CPU_Initialize
840 *
841 *  This routine performs CPU dependent initialization.
842 *
843 */
844
845void _CPU_Initialize(
846  void
847);
848
849/*
850 *  _CPU_ISR_install_raw_handler
851 *
852 *  This routine installs a "raw" interrupt handler directly into the
853 *  processor's vector table.
854 *
855 */
856
857void _CPU_ISR_install_raw_handler(
858  uint32_t    vector,
859  proc_ptr    new_handler,
860  proc_ptr   *old_handler
861);
862
863/*
864 *  _CPU_ISR_install_vector
865 *
866 *  This routine installs an interrupt vector.
867 *
868 *  NO_CPU Specific Information:
869 *
870 *  XXX document implementation including references if appropriate
871 */
872
873void _CPU_ISR_install_vector(
874  uint32_t    vector,
875  proc_ptr   new_handler,
876  proc_ptr   *old_handler
877);
878
879/*
880 *  _CPU_Install_interrupt_stack
881 *
882 *  This routine installs the hardware interrupt stack pointer.
883 *
884 *  NOTE:  It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
885 *         is TRUE.
886 *
887 */
888
889void _CPU_Install_interrupt_stack( void );
890
891/*
892 *  _CPU_Thread_Idle_body
893 *
894 *  This routine is the CPU dependent IDLE thread body.
895 *
896 *  NOTE:  It need only be provided if CPU_PROVIDES_IDLE_THREAD_BODY
897 *         is TRUE.
898 *
899 */
900
901void *_CPU_Thread_Idle_body( uintptr_t ignored );
902
903/*
904 *  _CPU_Context_switch
905 *
906 *  This routine switches from the run context to the heir context.
907 *
908 *  Or1k Specific Information:
909 *
910 *  Please see the comments in the .c file for a description of how
911 *  this function works. There are several things to be aware of.
912 */
913
914void _CPU_Context_switch(
915  Context_Control  *run,
916  Context_Control  *heir
917);
918
919/*
920 *  _CPU_Context_restore
921 *
922 *  This routine is generally used only to restart self in an
923 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
924 *
925 *  NOTE: May be unnecessary to reload some registers.
926 *
927 */
928
929void _CPU_Context_restore(
930  Context_Control *new_context
931) RTEMS_NO_RETURN;
932
933/*
934 *  _CPU_Context_save_fp
935 *
936 *  This routine saves the floating point context passed to it.
937 *
938 */
939
940void _CPU_Context_save_fp(
941  void **fp_context_ptr
942);
943
944/*
945 *  _CPU_Context_restore_fp
946 *
947 *  This routine restores the floating point context passed to it.
948 *
949 */
950
951void _CPU_Context_restore_fp(
952  void **fp_context_ptr
953);
954
955/*  The following routine swaps the endian format of an unsigned int.
956 *  It must be static because it is referenced indirectly.
957 *
958 *  This version will work on any processor, but if there is a better
959 *  way for your CPU PLEASE use it.  The most common way to do this is to:
960 *
961 *     swap least significant two bytes with 16-bit rotate
962 *     swap upper and lower 16-bits
963 *     swap most significant two bytes with 16-bit rotate
964 *
965 *  Some CPUs have special instructions which swap a 32-bit quantity in
966 *  a single instruction (e.g. i486).  It is probably best to avoid
967 *  an "endian swapping control bit" in the CPU.  One good reason is
968 *  that interrupts would probably have to be disabled to insure that
969 *  an interrupt does not try to access the same "chunk" with the wrong
970 *  endian.  Another good reason is that on some CPUs, the endian bit
971 *  endianness for ALL fetches -- both code and data -- so the code
972 *  will be fetched incorrectly.
973 *
974 */
975
976void _CPU_Context_volatile_clobber( uintptr_t pattern );
977
978void _CPU_Context_validate( uintptr_t pattern );
979
/*
 *  Swaps the endian format of a 32-bit quantity.
 *
 *  Portable fallback built from masks and shifts; kept static because
 *  it is referenced indirectly (see the comment above).
 */
static inline unsigned int CPU_swap_u32(
  unsigned int value
)
{
  uint32_t v = value;

  return ( (v & 0x000000ffU) << 24 ) |
         ( (v & 0x0000ff00U) <<  8 ) |
         ( (v & 0x00ff0000U) >>  8 ) |
         ( (v & 0xff000000U) >> 24 );
}
994
/*
 *  Swaps the two bytes of a 16-bit quantity.
 *
 *  The argument is fully parenthesized; the previous expansion used
 *  the bare argument, so CPU_swap_u16( a + b ) mis-expanded to
 *  a + (b & 0xff) because & binds tighter than nothing but looser
 *  than +.
 */
#define CPU_swap_u16( value ) \
  (((((value) & 0xff) << 8) | (((value) >> 8) & 0xff)))
997
/*
 *  NOTE(review): CPU_Counter_ticks is also typedef'd earlier in this
 *  file (inside the same #ifndef ASM region).  Redefining a typedef
 *  is only valid from C11 on -- consider removing one of the two.
 */
typedef uint32_t CPU_Counter_ticks;

/* Reads the current value of the free running CPU counter. */
CPU_Counter_ticks _CPU_Counter_read( void );

/*
 *  Returns the number of counter ticks elapsed between the two
 *  samples (presumably second - first with wrap around -- confirm
 *  against the implementation in the port's .c file).
 */
CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
);
1006
1007#endif /* ASM */
1008
1009#ifdef __cplusplus
1010}
1011#endif
1012
1013#endif
Note: See TracBrowser for help on using the repository browser.