source: rtems/cpukit/score/cpu/or1k/rtems/score/cpu.h @ d4e81e3

Last change on this file since d4e81e3 was d4e81e3, checked in by Sebastian Huber <sebastian.huber@…>, on 02/03/16 at 10:05:07

or1k: Delete superfluous _CPU_Null_fp_context

Update #2559.

/**
 * @file rtems/score/cpu.h
 */

/*
 *  This include file contains macros pertaining to the Opencores
 *  or1k processor family.
 *
 *  COPYRIGHT (c) 2014 Hesham ALMatary <heshamelmatary@gmail.com>
 *  COPYRIGHT (c) 1989-1999.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 *
 *  This file is adapted from the no_cpu example of the RTEMS distribution.
 *  The body has been modified for the Opencores OR1k implementation by
 *  Chris Ziomkowski. <chris@asics.ws>
 *
 */

#ifndef _OR1K_CPU_H
#define _OR1K_CPU_H

#ifdef __cplusplus
extern "C" {
#endif


#include <rtems/score/or1k.h>            /* pick up machine definitions */
#include <rtems/score/or1k-utility.h>
#include <rtems/score/types.h>
#ifndef ASM
#include <rtems/bspIo.h>
#include <stdint.h>
#include <stdio.h> /* for printk */
#endif

/* conditional compilation parameters */

/*
 *  Should the calls to _Thread_Enable_dispatch be inlined?
 *
 *  If TRUE, then they are inlined.
 *  If FALSE, then a subroutine call is made.
 *
 *  Basically this is an example of the classic trade-off of size
 *  versus speed.  Inlining the call (TRUE) typically increases the
 *  size of RTEMS while speeding up the enabling of dispatching.
 *  [NOTE: In general, the _Thread_Dispatch_disable_level will
 *  only be 0 or 1 unless you are in an interrupt handler and that
 *  interrupt handler invokes the executive.]  When not inlined,
 *  something calls _Thread_Enable_dispatch which in turn calls
 *  _Thread_Dispatch.  If the enable dispatch is inlined, then
 *  one subroutine call is avoided entirely.
 *
 */

#define CPU_INLINE_ENABLE_DISPATCH       FALSE

/*
 *  Does RTEMS manage a dedicated interrupt stack in software?
 *
 *  If TRUE, then a stack is allocated in _ISR_Handler_initialization.
 *  If FALSE, nothing is done.
 *
 *  If the CPU supports a dedicated interrupt stack in hardware,
 *  then it is generally the responsibility of the BSP to allocate it
 *  and set it up.
 *
 *  If the CPU does not support a dedicated interrupt stack, then
 *  the porter has two options: (1) execute interrupts on the
 *  stack of the interrupted task, or (2) have RTEMS manage a dedicated
 *  interrupt stack.
 *
 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
 *
 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
 *  possible that both are FALSE for a particular CPU, although it
 *  is unclear what that would imply about the interrupt processing
 *  procedure on that CPU.
 *
 *  Currently, for the or1k port, _ISR_Handler is responsible for switching
 *  to the RTEMS dedicated interrupt stack.
 *
 */

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE

/*
 *  Does this CPU have hardware support for a dedicated interrupt stack?
 *
 *  If TRUE, then it must be installed during initialization.
 *  If FALSE, then no installation is performed.
 *
 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
 *
 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
 *  possible that both are FALSE for a particular CPU, although it
 *  is unclear what that would imply about the interrupt processing
 *  procedure on that CPU.
 *
 */

#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE

/*
 *  Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
 *
 *  If TRUE, then the memory is allocated during initialization.
 *  If FALSE, then the memory is not allocated during initialization.
 *
 *  This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE
 *  or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE.
 *
 */

#define CPU_ALLOCATE_INTERRUPT_STACK TRUE

/*
 *  Does RTEMS invoke the user's ISR with the vector number and
 *  a pointer to the saved interrupt frame (1) or just the vector
 *  number (0)?
 *
 */

#define CPU_ISR_PASSES_FRAME_POINTER 1

/*
 *  Does the CPU have hardware floating point?
 *
 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
 *
 *  If there is a FP coprocessor such as the i387 or mc68881, then
 *  the answer is TRUE.
 *
 *  The macro name "OR1K_HAS_FPU" should be made CPU specific.
 *  It indicates whether or not this CPU model has FP support.  For
 *  example, it would be possible to have an i386_nofp CPU model
 *  which sets this to false to indicate that you have an i386 without
 *  an i387 and wish to leave floating point support out of RTEMS.
 *
 *  The CPU_SOFTWARE_FP is used to indicate whether or not there
 *  is software implemented floating point that must be context
 *  switched.  The determination of whether or not this applies
 *  is very tool specific and the state saved/restored is also
 *  compiler specific.
 *
 *  Or1k Specific Information:
 *
 *  At this time there are no implementations of Or1k that are
 *  expected to implement floating point. More importantly, the
 *  floating point architecture is expected to change significantly
 *  before such chips are fabricated.
 */

#define CPU_HARDWARE_FP     FALSE
#define CPU_SOFTWARE_FP     FALSE

/*
 *  Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
 *
 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
 *
 *  If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
 *
 */

#define CPU_ALL_TASKS_ARE_FP     FALSE

/*
 *  Should the IDLE task have a floating point context?
 *
 *  If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
 *  and it has a floating point context which is switched in and out.
 *  If FALSE, then the IDLE task does not have a floating point context.
 *
 *  Setting this to TRUE negatively impacts the time required to preempt
 *  the IDLE task from an interrupt because the floating point context
 *  must be saved as part of the preemption.
 *
 */

#define CPU_IDLE_TASK_IS_FP      FALSE

/*
 *  Should the saving of the floating point registers be deferred
 *  until a context switch is made to another different floating point
 *  task?
 *
 *  If TRUE, then the floating point context will not be stored until
 *  necessary.  It will remain in the floating point registers and not
 *  disturbed until another floating point task is switched to.
 *
 *  If FALSE, then the floating point context is saved when a floating
 *  point task is switched out and restored when the next floating point
 *  task is restored.  The state of the floating point registers between
 *  those two operations is not specified.
 *
 *  If the floating point context does NOT have to be saved as part of
 *  interrupt dispatching, then it should be safe to set this to TRUE.
 *
 *  Setting this flag to TRUE results in using a different algorithm
 *  for deciding when to save and restore the floating point context.
 *  The deferred FP switch algorithm minimizes the number of times
 *  the FP context is saved and restored.  The FP context is not saved
 *  until a context switch is made to another, different FP task.
 *  Thus in a system with only one FP task, the FP context will never
 *  be saved or restored.
 *
 */

#define CPU_USE_DEFERRED_FP_SWITCH       TRUE

/*
 *  Does this port provide a CPU dependent IDLE task implementation?
 *
 *  If TRUE, then the routine _CPU_Thread_Idle_body
 *  must be provided and is used as the IDLE thread body instead of
 *  the generic implementation.
 *
 *  If FALSE, then use the generic IDLE thread body if the BSP does
 *  not provide one.
 *
 *  This is intended to allow for supporting processors which have
 *  a low power or idle mode.  When the IDLE thread is executed, then
 *  the CPU can be powered down.
 *
 *  The order of precedence for selecting the IDLE thread body is:
 *
 *    1.  BSP provided
 *    2.  CPU dependent (if provided)
 *    3.  generic (if no BSP and no CPU dependent)
 *
 */

#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE

/*
 *  Does the stack grow up (toward higher addresses) or down
 *  (toward lower addresses)?
 *
 *  If TRUE, then the stack grows upward.
 *  If FALSE, then the stack grows toward smaller addresses.
 *
 */

#define CPU_STACK_GROWS_UP               FALSE

/* FIXME: Is this the right value? */
#define CPU_CACHE_LINE_BYTES 32

#define CPU_STRUCTURE_ALIGNMENT RTEMS_ALIGNED( CPU_CACHE_LINE_BYTES )

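/*
 *  Illustrative use of CPU_STRUCTURE_ALIGNMENT (an editorial example, not
 *  part of the original port): the macro is attached to a structure
 *  definition so that instances start on a cache line boundary.  The type
 *  name below is hypothetical.
 *
 *    typedef struct {
 *      uint32_t counters[ 8 ];
 *    } CPU_STRUCTURE_ALIGNMENT example_cache_aligned_counters;
 */
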
/*
 *  Define what is required to specify how the network to host conversion
 *  routines are handled.
 *
 *  Or1k Specific Information:
 *
 *  This version of RTEMS is designed specifically to run with
 *  big endian architectures. If you want little endian, you'll
 *  have to make the appropriate adjustments here and write
 *  efficient routines for byte swapping. The Or1k architecture
 *  doesn't do this very well.
 */

#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES     FALSE
#define CPU_BIG_ENDIAN                           TRUE
#define CPU_LITTLE_ENDIAN                        FALSE

/*
 *  The following defines the number of bits actually used in the
 *  interrupt field of the task mode.  How those bits map to the
 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
 *
 */

#define CPU_MODES_INTERRUPT_MASK   0x00000001

/*
 *  Processor defined structures required for cpukit/score.
 */


/*
 * Contexts
 *
 *  Generally there are 2 types of context to save.
 *     1. Interrupt registers to save
 *     2. Task level registers to save
 *
 *  This means we have the following 3 context items:
 *     1. task level context stuff::  Context_Control
 *     2. floating point task stuff:: Context_Control_fp
 *     3. special interrupt level context :: Context_Control_interrupt
 *
 *  On some processors, it is cost-effective to save only the callee
 *  preserved registers during a task context switch.  This means
 *  that the ISR code needs to save those registers which do not
 *  persist across function calls.  It is not mandatory to make this
 *  distinction between the caller/callee saved registers for the
 *  purpose of minimizing context saved during task switch and on interrupts.
 *  If the cost of saving extra registers is minimal, simplicity is the
 *  choice.  Save the same context on interrupt entry as for tasks in
 *  this case.
 *
 *  Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
 *  care should be used in designing the context area.
 *
 *  On some CPUs with hardware floating point support, the Context_Control_fp
 *  structure will not be used or it will simply consist of an array of a
 *  fixed number of bytes.   This is done when the floating point context
 *  is dumped by a "FP save context" type instruction and the format
 *  is not really defined by the CPU.  In this case, there is no need
 *  to figure out the exact format -- only the size.  Of course, although
 *  this is enough information for RTEMS, it is probably not enough for
 *  a debugger such as gdb.  But that is another problem.
 *
 *
 */
#ifndef ASM
#ifdef OR1K_64BIT_ARCH
#define or1kreg uint64_t
#else
#define or1kreg uint32_t
#endif

typedef struct {
  uint32_t  r1;     /* Stack pointer */
  uint32_t  r2;     /* Frame pointer */
  uint32_t  r3;
  uint32_t  r4;
  uint32_t  r5;
  uint32_t  r6;
  uint32_t  r7;
  uint32_t  r8;
  uint32_t  r9;
  uint32_t  r10;
  uint32_t  r11;
  uint32_t  r12;
  uint32_t  r13;
  uint32_t  r14;
  uint32_t  r15;
  uint32_t  r16;
  uint32_t  r17;
  uint32_t  r18;
  uint32_t  r19;
  uint32_t  r20;
  uint32_t  r21;
  uint32_t  r22;
  uint32_t  r23;
  uint32_t  r24;
  uint32_t  r25;
  uint32_t  r26;
  uint32_t  r27;
  uint32_t  r28;
  uint32_t  r29;
  uint32_t  r30;
  uint32_t  r31;

  uint32_t  sr;   /* Current supervision register (non-persistent values) */
  uint32_t  epcr;
  uint32_t  eear;
  uint32_t  esr;
} Context_Control;

#define _CPU_Context_Get_SP( _context ) \
  (_context)->r1

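/*
 *  The member layout above is typically also relied upon by the context
 *  switch assembly, which addresses the fields by fixed byte offsets.  A
 *  sketch of how such offsets could be pinned down at compile time
 *  (editorial example only; it assumes RTEMS_STATIC_ASSERT from
 *  <rtems/score/basedefs.h> and is not part of the original port):
 *
 *    #include <stddef.h>
 *    #include <rtems/score/basedefs.h>
 *
 *    RTEMS_STATIC_ASSERT(
 *      offsetof( Context_Control, r1 ) == 0,
 *      Context_Control_r1_offset
 *    );
 *    RTEMS_STATIC_ASSERT(
 *      offsetof( Context_Control, sr ) == 31 * 4,
 *      Context_Control_sr_offset
 *    );
 */
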
typedef struct {
  /** FPU registers are listed here */
  double      some_float_register;
} Context_Control_fp;

typedef Context_Control CPU_Interrupt_frame;

/*
 *  The size of the floating point context area.  On some CPUs this
 *  will not be a "sizeof" because the format of the floating point
 *  area is not defined -- only the size is.  This is usually on
 *  CPUs with a "floating point save context" instruction.
 *
 *  Or1k Specific Information:
 *
 */

#define CPU_CONTEXT_FP_SIZE  0

/*
 *  Amount of extra stack (above minimum stack size) required by
 *  MPCI receive server thread.  Remember that in a multiprocessor
 *  system this thread must exist and be able to process all directives.
 *
 */

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

/*
 *  Should be large enough to run all RTEMS tests.  This ensures
 *  that a "reasonable" small application should not have any problems.
 *
 */

#define CPU_STACK_MINIMUM_SIZE  4096

/*
 *  CPU's worst alignment requirement for data types on a byte boundary.  This
 *  alignment does not take into account the requirements for the stack.
 *
 */

#define CPU_ALIGNMENT  8

/*
 *  This is defined if the port has a special way to report the ISR nesting
 *  level.  Most ports maintain the variable _ISR_Nest_level.
 */
#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/**
 * Size of a pointer.
 *
 * This must be an integer literal that can be used by the assembler.  This
 * value will be used to calculate offsets of structure members.  These
 * offsets will be used in assembler code.
 */
#define CPU_SIZEOF_POINTER         4

/*
 *  This number corresponds to the byte alignment requirement for the
 *  heap handler.  This alignment requirement may be stricter than that
 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
 *  common for the heap to follow the same alignment requirement as
 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
 *  then this should be set to CPU_ALIGNMENT.
 *
 *  NOTE:  This does not have to be a power of 2 although it should be
 *         a multiple of 2 greater than or equal to 2.  The requirement
 *         to be a multiple of 2 is because the heap uses the least
 *         significant field of the front and back flags to indicate
 *         that a block is in use or free.  So you do not want any
 *         odd-length blocks putting length data in that bit.
 *
 *         On byte oriented architectures, CPU_HEAP_ALIGNMENT normally will
 *         have to be greater than or equal to CPU_ALIGNMENT to ensure that
 *         elements allocated from the heap meet all restrictions.
 *
 */

#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT

/*
 *  This number corresponds to the byte alignment requirement for memory
 *  buffers allocated by the partition manager.  This alignment requirement
 *  may be stricter than that for the data types alignment specified by
 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
 *
 *  NOTE:  This does not have to be a power of 2.  It does have to
 *         be greater than or equal to CPU_ALIGNMENT.
 *
 */

#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT

/*
 *  This number corresponds to the byte alignment requirement for the
 *  stack.  This alignment requirement may be stricter than that for the
 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
 *  is strict enough for the stack, then this should be set to 0.
 *
 *  NOTE:  This must be a power of 2, either 0 or greater than CPU_ALIGNMENT.
 *
 */

#define CPU_STACK_ALIGNMENT        0

/* ISR handler macros */

/*
 *  Support routine to initialize the RTEMS vector table after it is allocated.
 *
 *  NO_CPU Specific Information:
 *
 *  XXX document implementation including references if appropriate
 */

#define _CPU_Initialize_vectors()

/*
 *  Disable all interrupts for an RTEMS critical section.  The previous
 *  level is returned in _level.
 *
 */

static inline uint32_t or1k_interrupt_disable( void )
{
  uint32_t sr;
  sr = _OR1K_mfspr(CPU_OR1K_SPR_SR);

  _OR1K_mtspr(CPU_OR1K_SPR_SR, (sr & ~CPU_OR1K_SPR_SR_IEE));

  return sr;
}

static inline void or1k_interrupt_enable(uint32_t level)
{
  uint32_t sr;

  /* Enable interrupts and restore the SR */
  sr = level | CPU_OR1K_SPR_SR_IEE | CPU_OR1K_SPR_SR_TEE;
  _OR1K_mtspr(CPU_OR1K_SPR_SR, sr);
}

#define _CPU_ISR_Disable( _level ) \
    _level = or1k_interrupt_disable()


/*
 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
 *  This indicates the end of an RTEMS critical section.  The parameter
 *  _level is not modified.
 *
 */

#define _CPU_ISR_Enable( _level )  \
  or1k_interrupt_enable( _level )

/*
 *  This temporarily restores interrupts to _level before immediately
 *  disabling them again.  This is used to divide long RTEMS critical
 *  sections into two or more parts.  The parameter _level is not
 *  modified.
 *
 */

#define _CPU_ISR_Flash( _level ) \
  do{ \
      _CPU_ISR_Enable( _level ); \
      _OR1K_mtspr(CPU_OR1K_SPR_SR, (_level & ~CPU_OR1K_SPR_SR_IEE)); \
    } while(0)

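/*
 *  Taken together, the three macros above form the usual pattern for a
 *  longer CPU-level critical section (illustrative sketch only; the
 *  variable name is arbitrary):
 *
 *    uint32_t level;
 *
 *    _CPU_ISR_Disable( level );
 *    // ... first half of the critical section ...
 *    _CPU_ISR_Flash( level );   // briefly let pending interrupts run
 *    // ... second half of the critical section ...
 *    _CPU_ISR_Enable( level );
 */
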
/*
 *  Map interrupt level in task mode onto the hardware that the CPU
 *  actually provides.  Currently, interrupt levels which do not
 *  map onto the CPU in a generic fashion are undefined.  Someday,
 *  it would be nice if these were "mapped" by the application
 *  via a callout.  For example, m68k has 8 levels 0 - 7, levels
 *  8 - 255 would be available for bsp/application specific meaning.
 *  This could be used to manage a programmable interrupt controller
 *  via the rtems_task_mode directive.
 *
 *  The get routine usually must be implemented as a subroutine.
 *
 */

void _CPU_ISR_Set_level( uint32_t level );

uint32_t _CPU_ISR_Get_level( void );

/* end of ISR handler macros */

/* Context handler macros */

#define OR1K_FAST_CONTEXT_SWITCH_ENABLED FALSE
/*
 *  Initialize the context to a state suitable for starting a
 *  task after a context restore operation.  Generally, this
 *  involves:
 *
 *     - setting a starting address
 *     - preparing the stack
 *     - preparing the stack and frame pointers
 *     - setting the proper interrupt level in the context
 *     - initializing the floating point context
 *
 *  This routine generally does not set any unnecessary register
 *  in the context.  The state of the "general data" registers is
 *  undefined at task start time.
 *
 *  NOTE: The is_fp parameter is TRUE if the thread is to be a floating
 *        point thread.  This is typically only used on CPUs where the
 *        FPU may be easily disabled by software such as on the SPARC
 *        where the PSR contains an enable FPU bit.
 *
 */

/**
 * @brief Initializes the CPU context.
 *
 * The following steps are performed:
 *  - setting a starting address
 *  - preparing the stack
 *  - preparing the stack and frame pointers
 *  - setting the proper interrupt level in the context
 *
 * @param[in] context points to the context area
 * @param[in] stack_area_begin is the low address of the allocated stack area
 * @param[in] stack_area_size is the size of the stack area in bytes
 * @param[in] new_level is the interrupt level for the task
 * @param[in] entry_point is the task's entry point
 * @param[in] is_fp is set to @c true if the task is a floating point task
 * @param[in] tls_area is the thread-local storage (TLS) area
 */
void _CPU_Context_Initialize(
  Context_Control *context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint32_t new_level,
  void (*entry_point)( void ),
  bool is_fp,
  void *tls_area
);

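/*
 *  A rough sketch of the kind of work the implementation (in cpu.c) has to
 *  perform.  This is an editorial illustration only and rests on
 *  assumptions not stated by this header: that the context restore code
 *  enters the task through the link register r9 and that the interrupt
 *  level is expressed via the IEE/TEE bits of the saved SR.
 *
 *    #include <string.h>
 *
 *    void _CPU_Context_Initialize_sketch(
 *      Context_Control *context,
 *      void *stack_area_begin,
 *      size_t stack_area_size,
 *      uint32_t new_level,
 *      void (*entry_point)( void ),
 *      bool is_fp,
 *      void *tls_area
 *    )
 *    {
 *      uint32_t stack_top =
 *        (uint32_t) stack_area_begin + (uint32_t) stack_area_size;
 *
 *      memset( context, 0, sizeof( *context ) );
 *
 *      context->r1 = stack_top;               // initial stack pointer
 *      context->r2 = stack_top;               // initial frame pointer
 *      context->r9 = (uint32_t) entry_point;  // assumed: entered via link register
 *      context->sr = _OR1K_mfspr( CPU_OR1K_SPR_SR );
 *
 *      if ( new_level == 0 ) {
 *        context->sr |= CPU_OR1K_SPR_SR_IEE | CPU_OR1K_SPR_SR_TEE;
 *      }
 *
 *      (void) is_fp;     // no hardware floating point context on this port
 *      (void) tls_area;  // TLS is not handled in this sketch
 *    }
 */
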
/*
 *  This routine is responsible for somehow restarting the currently
 *  executing task.  If you are lucky, then all that is necessary
 *  is restoring the context.  Otherwise, there will need to be
 *  a special assembly routine which does something special in this
 *  case.  Context_Restore should work most of the time.  It will
 *  not work if restarting self conflicts with the stack frame
 *  assumptions of restoring a context.
 *
 */

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

/*
 *  The purpose of this macro is to allow the initial pointer into
 *  a floating point context area (used to save the floating point
 *  context) to be at an arbitrary place in the floating point
 *  context area.
 *
 *  This is necessary because some FP units are designed to have
 *  their context saved as a stack which grows into lower addresses.
 *  Other FP units can be saved by simply moving registers into offsets
 *  from the base of the context area.  Finally some FP units provide
 *  a "dump context" instruction which could fill in from high to low
 *  or low to high based on the whim of the CPU designers.
 *
 */

#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

#define _CPU_Context_Initialize_fp( _destination ) \
  memset( *( _destination ), 0, CPU_CONTEXT_FP_SIZE );

/* end of Context handler macros */

/* Fatal Error manager macros */

/*
 *  This routine copies _error into a known place -- typically a stack
 *  location or a register, optionally disables interrupts, and
 *  halts/stops the CPU.
 *
 */

#define _CPU_Fatal_halt(_source, _error ) \
        printk("Fatal Error %d.%d Halted\n",_source, _error); \
        _OR1KSIM_CPU_Halt(); \
        for(;;)

/* end of Fatal Error manager macros */

/* Bitfield handler macros */

/*
 *  This routine sets _output to the bit number of the first bit
 *  set in _value.  _value is of CPU dependent type Priority_Bit_map_control.
 *  This type may be either 16 or 32 bits wide although only the 16
 *  least significant bits will be used.
 *
 *  There are a number of variations in using a "find first bit" type
 *  instruction.
 *
 *    (1) What happens when run on a value of zero?
 *    (2) Bits may be numbered from MSB to LSB or vice-versa.
 *    (3) The numbering may be zero or one based.
 *    (4) The "find first bit" instruction may search from MSB or LSB.
 *
 *  RTEMS guarantees that (1) will never happen so it is not a concern.
 *  (2), (3), (4) are handled by the macros _CPU_Priority_mask() and
 *  _CPU_Priority_bits_index().  These three form a set of routines
 *  which must logically operate together.  Bits in the _value are
 *  set and cleared based on masks built by _CPU_Priority_mask().
 *  The basic major and minor values calculated by _Priority_Major()
 *  and _Priority_Minor() are "massaged" by _CPU_Priority_bits_index()
 *  to properly range between the values returned by the "find first bit"
 *  instruction.  This makes it possible for _Priority_Get_highest() to
 *  calculate the major and directly index into the minor table.
 *  This mapping is necessary to ensure that 0 (a high priority major/minor)
 *  is the first bit found.
 *
 *  This entire "find first bit" and mapping process depends heavily
 *  on the manner in which a priority is broken into major and minor
 *  components with the major being the 4 MSB of a priority and minor
 *  the 4 LSB.  Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
 *  priority.  And (15 << 4) + 14 corresponds to priority 254 -- the next
 *  to the lowest priority.
 *
 *  If your CPU does not have a "find first bit" instruction, then
 *  there are ways to make do without it.  Here are a handful of ways
 *  to implement this in software:
 *
 *    - a series of 16 bit test instructions
 *    - a "binary search using if's"
 *    - _number = 0
 *      if _value > 0x00ff
 *        _value >>= 8
 *        _number = 8;
 *
 *      if _value > 0x000f
 *        _value >>= 4
 *        _number += 4
 *
 *      _number += bit_set_table[ _value ]
 *
 *    where bit_set_table[ 16 ] has values which indicate the first
 *      bit set
 *
 */

  /* #define CPU_USE_GENERIC_BITFIELD_CODE FALSE */
#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
#define CPU_USE_GENERIC_BITFIELD_DATA TRUE

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

  /* Get a value between 0 and N where N is the bit size */
  /* This routine makes use of the fact that CPUCFGR defines
     OB32S to have value 32, and OB64S to have value 64. If
     this ever changes then this routine will fail. */
#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
     asm volatile ("l.mfspr %0,r0,0x2   \n\t"\
                   "l.andi  %0,%0,0x60  \n\t"\
                   "l.ff1   %1,%1,r0    \n\t"\
                   "l.sub   %0,%0,%1    \n\t" : "=&r" (_output), "+r" (_value));

#endif

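/*
 *  For reference, a portable C version of the "binary search using if's"
 *  fallback outlined in the comment above (editorial example only; this
 *  port keeps CPU_USE_GENERIC_BITFIELD_CODE set to TRUE, so nothing like
 *  this is actually compiled, and the function name is hypothetical):
 *
 *    static inline unsigned int example_find_first_bit( uint16_t value )
 *    {
 *      // index of the most significant set bit within a nibble
 *      static const unsigned char bit_set_table[ 16 ] = {
 *        0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
 *      };
 *      unsigned int number = 0;
 *
 *      if ( value > 0x00ff ) {
 *        value >>= 8;
 *        number = 8;
 *      }
 *
 *      if ( value > 0x000f ) {
 *        value >>= 4;
 *        number += 4;
 *      }
 *
 *      return number + bit_set_table[ value ];
 *    }
 */
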
/* end of Bitfield handler macros */

/*
 *  This routine builds the mask which corresponds to the bit fields
 *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
 *  for that routine.
 *
 */

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Priority_Mask( _bit_number ) \
    (1 << _bit_number)

#endif

/*
 *  This routine translates the bit numbers returned by
 *  _CPU_Bitfield_Find_first_bit() into something suitable for use as
 *  a major or minor component of a priority.  See the discussion
 *  for that routine.
 *
 */

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Priority_bits_index( _priority ) \
  (_priority)

#endif

typedef struct {
/* There is no CPU specific per-CPU state */
} CPU_Per_CPU_control;
#endif /* ASM */

#define CPU_SIZEOF_POINTER 4
#define CPU_PER_CPU_CONTROL_SIZE 0

#ifndef ASM
typedef uint32_t CPU_Counter_ticks;
typedef uint16_t Priority_bit_map_Word;

typedef struct {
  uint32_t r[32];

  /* The following registers must be saved if we have
     fast context switch disabled and nested interrupt
     levels are enabled.
  */
#if !OR1K_FAST_CONTEXT_SWITCH_ENABLED
  uint32_t epcr; /* exception PC register */
  uint32_t eear; /* exception effective address register */
  uint32_t esr; /* exception supervision register */
#endif

} CPU_Exception_frame;

/**
 * @brief Prints the exception frame via printk().
 *
 * @see rtems_fatal() and RTEMS_FATAL_SOURCE_EXCEPTION.
 */
void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );


/* end of Priority handler macros */

/* functions */

/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 *
 */

void _CPU_Initialize(
  void
);

/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs a "raw" interrupt handler directly into the
 *  processor's vector table.
 *
 */

void _CPU_ISR_install_raw_handler(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.
 *
 *  NO_CPU Specific Information:
 *
 *  XXX document implementation including references if appropriate
 */

void _CPU_ISR_install_vector(
  uint32_t    vector,
  proc_ptr   new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_Install_interrupt_stack
 *
 *  This routine installs the hardware interrupt stack pointer.
 *
 *  NOTE:  It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
 *         is TRUE.
 *
 */

void _CPU_Install_interrupt_stack( void );

/*
 *  _CPU_Thread_Idle_body
 *
 *  This routine is the CPU dependent IDLE thread body.
 *
 *  NOTE:  It need only be provided if CPU_PROVIDES_IDLE_THREAD_BODY
 *         is TRUE.
 *
 */

void *_CPU_Thread_Idle_body( uintptr_t ignored );

/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 *
 *  Or1k Specific Information:
 *
 *  Please see the comments in the .c file for a description of how
 *  this function works. There are several things to be aware of.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 *
 */

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_NO_RETURN;

/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 *
 */

void _CPU_Context_save_fp(
  void **fp_context_ptr
);

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 *
 */

void _CPU_Context_restore_fp(
  void **fp_context_ptr
);

/*  The following routine swaps the endian format of an unsigned int.
 *  It must be static because it is referenced indirectly.
 *
 *  This version will work on any processor, but if there is a better
 *  way for your CPU PLEASE use it.  The most common way to do this is to:
 *
 *     swap least significant two bytes with 16-bit rotate
 *     swap upper and lower 16-bits
 *     swap most significant two bytes with 16-bit rotate
 *
 *  Some CPUs have special instructions which swap a 32-bit quantity in
 *  a single instruction (e.g. i486).  It is probably best to avoid
 *  an "endian swapping control bit" in the CPU.  One good reason is
 *  that interrupts would probably have to be disabled to ensure that
 *  an interrupt does not try to access the same "chunk" with the wrong
 *  endian.  Another good reason is that on some CPUs, the endian bit
 *  controls the endianness for ALL fetches -- both code and data -- so
 *  the code will be fetched incorrectly.
 *
 */

void _CPU_Context_volatile_clobber( uintptr_t pattern );

void _CPU_Context_validate( uintptr_t pattern );

static inline unsigned int CPU_swap_u32(
  unsigned int value
)
{
  uint32_t   byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value        & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return( swapped );
}

#define CPU_swap_u16( value ) \
  (((value&0xff) << 8) | ((value >> 8)&0xff))

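/*
 *  Example use of the byte swapping helpers (illustrative only):
 *
 *    uint32_t word = CPU_swap_u32( 0x12345678 );  // yields 0x78563412
 *    uint16_t half = CPU_swap_u16( 0x1234 );      // yields 0x3412
 */
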
typedef uint32_t CPU_Counter_ticks;

CPU_Counter_ticks _CPU_Counter_read( void );

CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
);

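/*
 *  The counter interface is typically used to measure short time
 *  intervals (illustrative sketch only):
 *
 *    CPU_Counter_ticks begin = _CPU_Counter_read();
 *    // ... code to be measured ...
 *    CPU_Counter_ticks delta =
 *      _CPU_Counter_difference( _CPU_Counter_read(), begin );
 */
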
#endif /* ASM */

#ifdef __cplusplus
}
#endif

#endif