source: rtems/cpukit/score/cpu/or1k/rtems/score/cpu.h @ 1c846616

Last change on this file was 1c846616, checked in by Hesham ALMatary <heshamelmatary@…> on 04/02/15 at 13:11:26

or1k: Send halt signal to or1k simulators when rtems terminates

1/**
2 * @file rtems/score/cpu.h
3 */
4
5/*
6 *  This include file contains macros pertaining to the Opencores
7 *  or1k processor family.
8 *
9 *  COPYRIGHT (c) 2014 Hesham ALMatary <heshamelmatary@gmail.com>
10 *  COPYRIGHT (c) 1989-1999.
11 *  On-Line Applications Research Corporation (OAR).
12 *
13 *  The license and distribution terms for this file may be
14 *  found in the file LICENSE in this distribution or at
15 *  http://www.rtems.org/license/LICENSE.
16 *
17 *  This file adapted from no_cpu example of the RTEMS distribution.
18 *  The body has been modified for the Opencores OR1k implementation by
19 *  Chris Ziomkowski. <chris@asics.ws>
20 *
21 */
22
23#ifndef _OR1K_CPU_H
24#define _OR1K_CPU_H
25
26#ifdef __cplusplus
27extern "C" {
28#endif
29
30
31#include <rtems/score/or1k.h>            /* pick up machine definitions */
32#include <rtems/score/or1k-utility.h>
33#include <rtems/score/types.h>
34#ifndef ASM
35#include <rtems/bspIo.h>
36#include <stdint.h>
37#include <stdio.h> /* for printk */
38#endif
39
40/* conditional compilation parameters */
41
42/*
43 *  Should the calls to _Thread_Enable_dispatch be inlined?
44 *
45 *  If TRUE, then they are inlined.
46 *  If FALSE, then a subroutine call is made.
47 *
48 *  Basically this is an example of the classic trade-off of size
49 *  versus speed.  Inlining the call (TRUE) typically increases the
50 *  size of RTEMS while speeding up the enabling of dispatching.
51 *  [NOTE: In general, the _Thread_Dispatch_disable_level will
52 *  only be 0 or 1 unless you are in an interrupt handler and that
53 *  interrupt handler invokes the executive.]  When not inlined
54 *  something calls _Thread_Enable_dispatch which in turn calls
55 *  _Thread_Dispatch.  If the enable dispatch is inlined, then
56 *  one subroutine call is avoided entirely.
57 *
58 */
59
60#define CPU_INLINE_ENABLE_DISPATCH       FALSE
61
62/*
63 *  Does RTEMS manage a dedicated interrupt stack in software?
64 *
65 *  If TRUE, then a stack is allocated in _ISR_Handler_initialization.
66 *  If FALSE, nothing is done.
67 *
68 *  If the CPU supports a dedicated interrupt stack in hardware,
69 *  then it is generally the responsibility of the BSP to allocate it
70 *  and set it up.
71 *
72 *  If the CPU does not support a dedicated interrupt stack, then
73 *  the porter has two options: (1) execute interrupts on the
74 *  stack of the interrupted task, and (2) have RTEMS manage a dedicated
75 *  interrupt stack.
76 *
77 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
78 *
79 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
80 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
81 *  possible that both are FALSE for a particular CPU.  Although it
82 *  is unclear what that would imply about the interrupt processing
83 *  procedure on that CPU.
84 *
85 *  Currently, for the or1k port, _ISR_Handler is responsible for switching
86 *  to the RTEMS dedicated interrupt stack.
87 *
88 */
89
90#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
91
92/*
93 *  Does this CPU have hardware support for a dedicated interrupt stack?
94 *
95 *  If TRUE, then it must be installed during initialization.
96 *  If FALSE, then no installation is performed.
97 *
98 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
99 *
100 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
101 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
102 *  possible that both are FALSE for a particular CPU.  Although it
103 *  is unclear what that would imply about the interrupt processing
104 *  procedure on that CPU.
105 *
106 */
107
108#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
109
110/*
111 *  Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
112 *
113 *  If TRUE, then the memory is allocated during initialization.
114 *  If FALSE, then no dedicated interrupt stack memory is allocated by RTEMS.
115 *
116 *  This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE
117 *  or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE.
118 *
119 */
120
121#define CPU_ALLOCATE_INTERRUPT_STACK TRUE
122
123/*
124 *  Does RTEMS invoke the user's ISR with the vector number and
125 *  a pointer to the saved interrupt frame (1) or just the vector
126 *  number (0)?
127 *
128 */
129
130#define CPU_ISR_PASSES_FRAME_POINTER 1
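/*
 *  As a hedged illustration only (the handler name below is hypothetical and
 *  the exact user ISR prototype is defined by the generic interrupt support,
 *  not by this file), the two settings correspond to handler signatures
 *  along the lines of:
 *
 *      void my_isr( uint32_t vector, CPU_Interrupt_frame *frame );  // 1
 *      void my_isr( uint32_t vector );                              // 0
 */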
131
132/*
133 *  Does the CPU have hardware floating point?
134 *
135 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
136 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
137 *
138 *  If there is a FP coprocessor such as the i387 or mc68881, then
139 *  the answer is TRUE.
140 *
141 *  The macro name "OR1K_HAS_FPU" should be made CPU specific.
142 *  It indicates whether or not this CPU model has FP support.  For
143 *  example, it would be possible to have an i386_nofp CPU model
144 *  which set this to false to indicate that you have an i386 without
145 *  an i387 and wish to leave floating point support out of RTEMS.
146 *
147 *  The CPU_SOFTWARE_FP is used to indicate whether or not there
148 *  is software implemented floating point that must be context
149 *  switched.  The determination of whether or not this applies
150 *  is very tool specific and the state saved/restored is also
151 *  compiler specific.
152 *
153 *  Or1k Specific Information:
154 *
155 *  At this time there are no implementations of Or1k that are
156 *  expected to implement floating point. More importantly, the
157 *  floating point architecture is expected to change significantly
158 *  before such chips are fabricated.
159 */
160
161#define CPU_HARDWARE_FP     FALSE
162#define CPU_SOFTWARE_FP     FALSE
163
164/*
165 *  Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
166 *
167 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
168 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
169 *
170 *  If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
171 *
172 */
173
174#define CPU_ALL_TASKS_ARE_FP     FALSE
175
176/*
177 *  Should the IDLE task have a floating point context?
178 *
179 *  If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
180 *  and it has a floating point context which is switched in and out.
181 *  If FALSE, then the IDLE task does not have a floating point context.
182 *
183 *  Setting this to TRUE negatively impacts the time required to preempt
184 *  the IDLE task from an interrupt because the floating point context
185 *  must be saved as part of the preemption.
186 *
187 */
188
189#define CPU_IDLE_TASK_IS_FP      FALSE
190
191/*
192 *  Should the saving of the floating point registers be deferred
193 *  until a context switch is made to another different floating point
194 *  task?
195 *
196 *  If TRUE, then the floating point context will not be stored until
197 *  necessary.  It will remain in the floating point registers and not
198 *  disturbed until another floating point task is switched to.
199 *
200 *  If FALSE, then the floating point context is saved when a floating
201 *  point task is switched out and restored when the next floating point
202 *  task is restored.  The state of the floating point registers between
203 *  those two operations is not specified.
204 *
205 *  If the floating point context does NOT have to be saved as part of
206 *  interrupt dispatching, then it should be safe to set this to TRUE.
207 *
208 *  Setting this flag to TRUE results in using a different algorithm
209 *  for deciding when to save and restore the floating point context.
210 *  The deferred FP switch algorithm minimizes the number of times
211 *  the FP context is saved and restored.  The FP context is not saved
212 *  until a context switch is made to another, different FP task.
213 *  Thus in a system with only one FP task, the FP context will never
214 *  be saved or restored.
215 *
216 */
217
218#define CPU_USE_DEFERRED_FP_SWITCH       TRUE
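/*
 *  Conceptually, the deferred algorithm tracks which thread currently owns
 *  the floating point unit and only saves/restores when ownership changes.
 *  A much simplified sketch of the decision made at context switch time
 *  (fp_owner and heir_uses_fp are illustrative names, not the actual score
 *  variables):
 *
 *      if ( heir_uses_fp && heir != fp_owner ) {
 *        if ( fp_owner != NULL )
 *          _CPU_Context_save_fp( &fp_owner->fp_context );
 *        _CPU_Context_restore_fp( &heir->fp_context );
 *        fp_owner = heir;
 *      }
 */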
219
220/*
221 *  Does this port provide a CPU dependent IDLE task implementation?
222 *
223 *  If TRUE, then the routine _CPU_Thread_Idle_body
224 *  must be provided and is the default IDLE thread body instead of
225 *  _Thread_Idle_body.
226 *
227 *  If FALSE, then use the generic IDLE thread body if the BSP does
228 *  not provide one.
229 *
230 *  This is intended to allow for supporting processors which have
231 *  a low power or idle mode.  When the IDLE thread is executed, then
232 *  the CPU can be powered down.
233 *
234 *  The order of precedence for selecting the IDLE thread body is:
235 *
236 *    1.  BSP provided
237 *    2.  CPU dependent (if provided)
238 *    3.  generic (if no BSP and no CPU dependent)
239 *
240 */
241
242#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE
243
244/*
245 *  Does the stack grow up (toward higher addresses) or down
246 *  (toward lower addresses)?
247 *
248 *  If TRUE, then the stack grows upward.
249 *  If FALSE, then the stack grows toward smaller addresses.
250 *
251 */
252
253#define CPU_STACK_GROWS_UP               FALSE
254
255/*
256 *  The following is the variable attribute used to force alignment
257 *  of critical RTEMS structures.  On some processors it may make
258 *  sense to have these aligned on tighter boundaries than
259 *  the minimum requirements of the compiler in order to have as
260 *  much of the critical data area as possible in a cache line.
261 *
262 *  The placement of this macro in the declaration of the variables
263 *  is based on the syntactic requirements of the GNU C
264 *  "__attribute__" extension.  For example, with GNU C, use
265 *  the following to force a structure to a 32 byte boundary.
266 *
267 *      __attribute__ ((aligned (32)))
268 *
269 *  NOTE:  Currently only the Priority Bit Map table uses this feature.
270 *         To benefit from using this, the data must be heavily
271 *         used so it will stay in the cache and used frequently enough
272 *         in the executive to justify turning this on.
273 *
274 */
275
276#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (32)))
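/*
 *  For illustration only (Example_bit_map is a hypothetical type, not part
 *  of this port), the macro is placed after the declarator like this:
 *
 *      typedef struct {
 *        uint32_t bits[ 8 ];
 *      } Example_bit_map;
 *
 *      static Example_bit_map example_map CPU_STRUCTURE_ALIGNMENT;
 *
 *  With the definition above, example_map starts on a 32 byte boundary so
 *  the whole 32 byte table can sit in a single cache line.
 */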
277
278/*
279 *  Define what is required to specify how the network to host conversion
280 *  routines are handled.
281 *
282 *  Or1k Specific Information:
283 *
284 *  This version of RTEMS is designed specifically to run with
285 *  big endian architectures. If you want little endian, you'll
286 *  have to make the appropriate adjustments here and write
287 *  efficient routines for byte swapping. The Or1k architecture
288 *  doesn't do this very well.
289 */
290
291#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES     FALSE
292#define CPU_BIG_ENDIAN                           TRUE
293#define CPU_LITTLE_ENDIAN                        FALSE
294
295/*
296 *  The following defines the number of bits actually used in the
297 *  interrupt field of the task mode.  How those bits map to the
298 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
299 *
300 */
301
302#define CPU_MODES_INTERRUPT_MASK   0x00000001
303
304/*
305 *  Processor defined structures required for cpukit/score.
306 */
307
308
309/*
310 * Contexts
311 *
312 *  Generally there are 2 types of context to save.
313 *     1. Interrupt registers to save
314 *     2. Task level registers to save
315 *
316 *  This means we have the following 3 context items:
317 *     1. task level context stuff::  Context_Control
318 *     2. floating point task stuff:: Context_Control_fp
319 *     3. special interrupt level context :: Context_Control_interrupt
320 *
321 *  On some processors, it is cost-effective to save only the callee
322 *  preserved registers during a task context switch.  This means
323 *  that the ISR code needs to save those registers which do not
324 *  persist across function calls.  It is not mandatory to make this
325 *  distinction between the caller/callee saved registers for the
326 *  purpose of minimizing context saved during task switch and on interrupts.
327 *  If the cost of saving extra registers is minimal, simplicity is the
328 *  choice.  Save the same context on interrupt entry as for tasks in
329 *  this case.
330 *
331 *  Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
332 *  care should be used in designing the context area.
333 *
334 *  On some CPUs with hardware floating point support, the Context_Control_fp
335 *  structure will not be used or it simply consists of an array of a
336 *  fixed number of bytes.   This is done when the floating point context
337 *  is dumped by a "FP save context" type instruction and the format
338 *  is not really defined by the CPU.  In this case, there is no need
339 *  to figure out the exact format -- only the size.  Of course, although
340 *  this is enough information for RTEMS, it is probably not enough for
341 *  a debugger such as gdb.  But that is another problem.
342 *
343 *
344 */
345#ifndef ASM
346#ifdef OR1K_64BIT_ARCH
347#define or1kreg uint64_t
348#else
349#define or1kreg uint32_t
350#endif
351
352typedef struct {
353  uint32_t  r1;     /* Stack pointer */
354  uint32_t  r2;     /* Frame pointer */
355  uint32_t  r3;
356  uint32_t  r4;
357  uint32_t  r5;
358  uint32_t  r6;
359  uint32_t  r7;
360  uint32_t  r8;
361  uint32_t  r9;
362  uint32_t  r10;
363  uint32_t  r11;
364  uint32_t  r12;
365  uint32_t  r13;
366  uint32_t  r14;
367  uint32_t  r15;
368  uint32_t  r16;
369  uint32_t  r17;
370  uint32_t  r18;
371  uint32_t  r19;
372  uint32_t  r20;
373  uint32_t  r21;
374  uint32_t  r22;
375  uint32_t  r23;
376  uint32_t  r24;
377  uint32_t  r25;
378  uint32_t  r26;
379  uint32_t  r27;
380  uint32_t  r28;
381  uint32_t  r29;
382  uint32_t  r30;
383  uint32_t  r31;
384
385  uint32_t  sr;  /* Current supervision register (non-persistent values) */
386  uint32_t  epcr;
387  uint32_t  eear;
388  uint32_t  esr;
389} Context_Control;
390
391#define _CPU_Context_Get_SP( _context ) \
392  (_context)->r1
393
394typedef struct {
395  /** FPU registers are listed here */
396  double      some_float_register;
397} Context_Control_fp;
398
399typedef Context_Control CPU_Interrupt_frame;
400
401/*
402 *  The size of the floating point context area.  On some CPUs this
403 *  will not be a "sizeof" because the format of the floating point
404 *  area is not defined -- only the size is.  This is usually on
405 *  CPUs with a "floating point save context" instruction.
406 *
407 *  Or1k Specific Information:
408 *
409 */
410
411#define CPU_CONTEXT_FP_SIZE  0
412SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context;
413
414/*
415 *  Amount of extra stack (above minimum stack size) required by
416 *  MPCI receive server thread.  Remember that in a multiprocessor
417 *  system this thread must exist and be able to process all directives.
418 *
419 */
420
421#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
422
423/*
424 *  Should be large enough to run all RTEMS tests.  This ensures
425 *  that a "reasonable" small application should not have any problems.
426 *
427 */
428
429#define CPU_STACK_MINIMUM_SIZE  4096
430
431/*
432 *  CPU's worst alignment requirement for data types on a byte boundary.  This
433 *  alignment does not take into account the requirements for the stack.
434 *
435 */
436
437#define CPU_ALIGNMENT  8
438
439/*
440 *  This is defined if the port has a special way to report the ISR nesting
441 *  level.  Most ports maintain the variable _ISR_Nest_level.
442 */
443#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
444
445/**
446 * Size of a pointer.
447 *
448 * This must be an integer literal that can be used by the assembler.  This
449 * value will be used to calculate offsets of structure members.  These
450 * offsets will be used in assembler code.
451 */
452#define CPU_SIZEOF_POINTER         4
453
454/*
455 *  This number corresponds to the byte alignment requirement for the
456 *  heap handler.  This alignment requirement may be stricter than that
457 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
458 *  common for the heap to follow the same alignment requirement as
459 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
460 *  then this should be set to CPU_ALIGNMENT.
461 *
462 *  NOTE:  This does not have to be a power of 2 although it should be
463 *         a multiple of 2 greater than or equal to 2.  The requirement
464 *         to be a multiple of 2 is because the heap uses the least
465 *         significant bit of the front and back flags to indicate
466 *         whether a block is in use or free.  So you do not want odd
467 *         length blocks, which would put length data in that bit.
468 *
469 *         On byte oriented architectures, CPU_HEAP_ALIGNMENT normally will
470 *         have to be greater than or equal to CPU_ALIGNMENT to ensure that
471 *         elements allocated from the heap meet all restrictions.
472 *
473 */
474
475#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT
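/*
 *  To make the note above concrete (the names below are illustrative, not
 *  the actual heap implementation), an in-use flag can be packed into the
 *  low bit of a block size precisely because every size is a multiple of 2:
 *
 *      size_and_flag = block_size | 0x1;                  // mark block used
 *      block_size    = size_and_flag & ~(uintptr_t) 0x1;  // recover the size
 */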
476
477/*
478 *  This number corresponds to the byte alignment requirement for memory
479 *  buffers allocated by the partition manager.  This alignment requirement
480 *  may be stricter than that for the data types alignment specified by
481 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
482 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
483 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
484 *
485 *  NOTE:  This does not have to be a power of 2.  It does have to
486 *         be greater than or equal to CPU_ALIGNMENT.
487 *
488 */
489
490#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT
491
492/*
493 *  This number corresponds to the byte alignment requirement for the
494 *  stack.  This alignment requirement may be stricter than that for the
495 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
496 *  is strict enough for the stack, then this should be set to 0.
497 *
498 *  NOTE:  This must be either 0 or a power of 2 greater than CPU_ALIGNMENT.
499 *
500 */
501
502#define CPU_STACK_ALIGNMENT        0
503
504/* ISR handler macros */
505
506/*
507 *  Support routine to initialize the RTEMS vector table after it is allocated.
508 *
509 *  NO_CPU Specific Information:
510 *
511 *  XXX document implementation including references if appropriate
512 */
513
514#define _CPU_Initialize_vectors()
515
516/*
517 *  Disable all interrupts for an RTEMS critical section.  The previous
518 *  level is returned in _level.
519 *
520 */
521
522static inline uint32_t or1k_interrupt_disable( void )
523{
524  uint32_t sr;
525  sr = _OR1K_mfspr(CPU_OR1K_SPR_SR);
526
527  _OR1K_mtspr(CPU_OR1K_SPR_SR, (sr & ~CPU_OR1K_SPR_SR_IEE));
528
529  return sr;
530}
531
532static inline void or1k_interrupt_enable(uint32_t level)
533{
534  uint32_t sr;
535
536  /* Enable interrupts and restore the SR */
537  sr = level | CPU_OR1K_SPR_SR_IEE | CPU_OR1K_SPR_SR_TEE;
538  _OR1K_mtspr(CPU_OR1K_SPR_SR, sr);
539
540}
541
542#define _CPU_ISR_Disable( _level ) \
543    _level = or1k_interrupt_disable()
544
545
546/*
547 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
548 *  This indicates the end of an RTEMS critical section.  The parameter
549 *  _level is not modified.
550 *
551 */
552
553#define _CPU_ISR_Enable( _level )  \
554  or1k_interrupt_enable( _level )
555
556/*
557 *  This temporarily restores interrupts to _level before immediately
558 *  disabling them again.  This is used to divide long RTEMS critical
559 *  sections into two or more parts.  The parameter _level is not
560 *  modified.
561 *
562 */
563
564#define _CPU_ISR_Flash( _level ) \
565  do{ \
566      _CPU_ISR_Enable( _level ); \
567      _OR1K_mtspr(CPU_OR1K_SPR_SR, (_level & ~CPU_OR1K_SPR_SR_IEE)); \
568    } while(0)
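/*
 *  A typical critical section in port or BSP code looks like the sketch
 *  below; the level variable simply holds the SR value returned by
 *  or1k_interrupt_disable() so that the previous state can be restored:
 *
 *      ISR_Level level;
 *
 *      _CPU_ISR_Disable( level );
 *        ... touch data shared with interrupt handlers ...
 *      _CPU_ISR_Enable( level );
 */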
569
570/*
571 *  Map interrupt level in task mode onto the hardware that the CPU
572 *  actually provides.  Currently, interrupt levels which do not
573 *  map onto the CPU in a generic fashion are undefined.  Someday,
574 *  it would be nice if these were "mapped" by the application
575 *  via a callout.  For example, m68k has 8 levels 0 - 7, levels
576 *  8 - 255 would be available for bsp/application specific meaning.
577 *  This could be used to manage a programmable interrupt controller
578 *  via the rtems_task_mode directive.
579 *
580 *  The get routine usually must be implemented as a subroutine.
581 *
582 */
583
584void _CPU_ISR_Set_level( uint32_t level );
585
586uint32_t _CPU_ISR_Get_level( void );
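/*
 *  A minimal sketch of one plausible mapping (an assumption for
 *  illustration, not necessarily what this port implements): a nonzero
 *  level masks external interrupts by clearing SR[IEE], while level 0
 *  enables them again.
 *
 *      void _CPU_ISR_Set_level( uint32_t level )
 *      {
 *        uint32_t sr = _OR1K_mfspr( CPU_OR1K_SPR_SR );
 *
 *        if ( level == 0 )
 *          _OR1K_mtspr( CPU_OR1K_SPR_SR, sr | CPU_OR1K_SPR_SR_IEE );
 *        else
 *          _OR1K_mtspr( CPU_OR1K_SPR_SR, sr & ~CPU_OR1K_SPR_SR_IEE );
 *      }
 */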
587
588/* end of ISR handler macros */
589
590/* Context handler macros */
591
592#define OR1K_FAST_CONTEXT_SWITCH_ENABLED FALSE
593/*
594 *  Initialize the context to a state suitable for starting a
595 *  task after a context restore operation.  Generally, this
596 *  involves:
597 *
598 *     - setting a starting address
599 *     - preparing the stack
600 *     - preparing the stack and frame pointers
601 *     - setting the proper interrupt level in the context
602 *     - initializing the floating point context
603 *
604 *  This routine generally does not set any unnecessary register
605 *  in the context.  The state of the "general data" registers is
606 *  undefined at task start time.
607 *
608 *  NOTE: The is_fp parameter is TRUE if the thread is to be a floating
609 *        point thread.  This is typically only used on CPUs where the
610 *        FPU may be easily disabled by software such as on the SPARC
611 *        where the PSR contains an enable FPU bit.
612 *
613 */
614
615/**
616 * @brief Initializes the CPU context.
617 *
618 * The following steps are performed:
619 *  - setting a starting address
620 *  - preparing the stack
621 *  - preparing the stack and frame pointers
622 *  - setting the proper interrupt level in the context
623 *
624 * @param[in] context points to the context area
625 * @param[in] stack_area_begin is the low address of the allocated stack area
626 * @param[in] stack_area_size is the size of the stack area in bytes
627 * @param[in] new_level is the interrupt level for the task
628 * @param[in] entry_point is the task's entry point
629 * @param[in] is_fp is set to @c true if the task is a floating point task
630 * @param[in] tls_area is the thread-local storage (TLS) area
631 */
632void _CPU_Context_Initialize(
633  Context_Control *context,
634  void *stack_area_begin,
635  size_t stack_area_size,
636  uint32_t new_level,
637  void (*entry_point)( void ),
638  bool is_fp,
639  void *tls_area
640);
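/*
 *  A minimal sketch of the usual approach (an illustration, not the exact
 *  or1k implementation): the stack pointer starts at the aligned top of the
 *  allocated stack area, and the entry point and initial SR are recorded in
 *  the context as well so that the first dispatch can start the task:
 *
 *      uintptr_t stack_top = (uintptr_t) stack_area_begin + stack_area_size;
 *
 *      stack_top &= ~(uintptr_t) ( CPU_ALIGNMENT - 1 );
 *      context->r1 = (uint32_t) stack_top;   // initial stack pointer
 *      context->r2 = (uint32_t) stack_top;   // initial frame pointer
 */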
641
642/*
643 *  This routine is responsible for somehow restarting the currently
644 *  executing task.  If you are lucky, then all that is necessary
645 *  is restoring the context.  Otherwise, there will need to be
646 *  a special assembly routine which does something special in this
647 *  case.  Context_Restore should work most of the time.  It will
648 *  not work if restarting self conflicts with the stack frame
649 *  assumptions of restoring a context.
650 *
651 */
652
653#define _CPU_Context_Restart_self( _the_context ) \
654   _CPU_Context_restore( (_the_context) );
655
656/*
657 *  The purpose of this macro is to allow the initial pointer into
658 *  a floating point context area (used to save the floating point
659 *  context) to be at an arbitrary place in the floating point
660 *  context area.
661 *
662 *  This is necessary because some FP units are designed to have
663 *  their context saved as a stack which grows into lower addresses.
664 *  Other FP units can be saved by simply moving registers into offsets
665 *  from the base of the context area.  Finally some FP units provide
666 *  a "dump context" instruction which could fill in from high to low
667 *  or low to high based on the whim of the CPU designers.
668 *
669 */
670
671#define _CPU_Context_Fp_start( _base, _offset ) \
672   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
673
674/*
675 *  This routine initializes the FP context area passed to it.
676 *  There are a few standard ways in which to initialize the
677 *  floating point context.  The code included for this macro assumes
678 *  that this is a CPU in which an "initial" FP context was saved into
679 *  _CPU_Null_fp_context and it simply copies it to the destination
680 *  context passed to it.
681 *
682 *  Other models include (1) not doing anything, and (2) putting
683 *  a "null FP status word" in the correct place in the FP context.
684 *
685 */
686
687#define _CPU_Context_Initialize_fp( _destination ) \
688  { \
689   *(*(_destination)) = _CPU_Null_fp_context; \
690  }
691
692/* end of Context handler macros */
693
694/* Fatal Error manager macros */
695
696/*
697 *  This routine copies _error into a known place -- typically a stack
698 *  location or a register, optionally disables interrupts, and
699 *  halts/stops the CPU.
700 *
701 */
702
703#define _CPU_Fatal_halt(_source, _error ) \
704        printk("Fatal Error %d.%d Halted\n",_source, _error); \
705        _OR1KSIM_CPU_Halt(); \
706        for(;;)
707
708/* end of Fatal Error manager macros */
709
710/* Bitfield handler macros */
711
712/*
713 *  This routine sets _output to the bit number of the first bit
714 *  set in _value.  _value is of CPU dependent type Priority_Bit_map_control.
715 *  This type may be either 16 or 32 bits wide although only the 16
716 *  least significant bits will be used.
717 *
718 *  There are a number of variables to consider when using a "find first bit" type
719 *  instruction.
720 *
721 *    (1) What happens when run on a value of zero?
722 *    (2) Bits may be numbered from MSB to LSB or vice-versa.
723 *    (3) The numbering may be zero or one based.
724 *    (4) The "find first bit" instruction may search from MSB or LSB.
725 *
726 *  RTEMS guarantees that (1) will never happen so it is not a concern.
727 *  (2), (3), and (4) are handled by the macros _CPU_Priority_Mask() and
728 *  _CPU_Priority_bits_index().  These three form a set of routines
729 *  which must logically operate together.  Bits in the _value are
730 *  set and cleared based on masks built by _CPU_Priority_mask().
731 *  The basic major and minor values calculated by _Priority_Major()
732 *  and _Priority_Minor() are "massaged" by _CPU_Priority_bits_index()
733 *  to properly range between the values returned by the "find first bit"
734 *  instruction.  This makes it possible for _Priority_Get_highest() to
735 *  calculate the major and directly index into the minor table.
736 *  This mapping is necessary to ensure that 0 (a high priority major/minor)
737 *  is the first bit found.
738 *
739 *  This entire "find first bit" and mapping process depends heavily
740 *  on the manner in which a priority is broken into a major and minor
741 *  components with the major being the 4 MSB of a priority and minor
742 *  the 4 LSB.  Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
743 *  priority.  And (15 << 4) + 14 corresponds to priority 254 -- the next
744 *  to the lowest priority.
745 *
746 *  If your CPU does not have a "find first bit" instruction, then
747 *  there are ways to make do without it.  Here are a handful of ways
748 *  to implement this in software:
749 *
750 *    - a series of 16 bit test instructions
751 *    - a "binary search using if's"
752 *    - _number = 0
753 *      if _value > 0x00ff
754 *        _value >>=8
755 *        _number = 8;
756 *
757 *      if _value > 0x000f
758 *        _value >>= 4
759 *        _number += 4
760 *
761 *      _number += bit_set_table[ _value ]
762 *
763 *    where bit_set_table[ 16 ] has values which indicate the first
764 *      bit set
765 *
766 */
767
768  /* #define CPU_USE_GENERIC_BITFIELD_CODE FALSE */
769#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
770#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
771
772#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
773
774  /* Get a value between 0 and N where N is the bit size */
775  /* This routine makes use of the fact that CPUCFGR defines
776     OB32S to have value 32, and OB64S to have value 64. If
777     this ever changes then this routine will fail. */
778#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
779     asm volatile ("l.mfspr %0,r0,0x2   \n\t"\
780                   "l.andi  %0,%0,0x60  \n\t"\
781                   "l.ff1   %1,%1,r0    \n\t"\
782                   "l.sub   %0,%0,%1    \n\t" : "=&r" (_output), "+r" (_value));
783
784#endif
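/*
 *  For reference, a portable C version of the software fallback sketched in
 *  the comment above (bit_set_table and the function name are illustrative
 *  only); it returns the index of the most significant set bit and relies
 *  on _value never being zero:
 *
 *      static const unsigned char bit_set_table[ 16 ] = {
 *        0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
 *      };
 *
 *      static unsigned int example_find_first_bit( unsigned int value )
 *      {
 *        unsigned int number = 0;
 *
 *        if ( value > 0x00ff ) { value >>= 8; number = 8; }
 *        if ( value > 0x000f ) { value >>= 4; number += 4; }
 *        return number + bit_set_table[ value ];
 *      }
 */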
785
786/* end of Bitfield handler macros */
787
788/*
789 *  This routine builds the mask which corresponds to the bit fields
790 *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
791 *  for that routine.
792 *
793 */
794
795#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
796
797#define _CPU_Priority_Mask( _bit_number ) \
798    (1 << _bit_number)
799
800#endif
801
802/*
803 *  This routine translates the bit numbers returned by
804 *  _CPU_Bitfield_Find_first_bit() into something suitable for use as
805 *  a major or minor component of a priority.  See the discussion
806 *  for that routine.
807 *
808 */
809
810#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
811
812#define _CPU_Priority_bits_index( _priority ) \
813  (_priority)
814
815#endif
816
817#define CPU_TIMESTAMP_USE_STRUCT_TIMESPEC FALSE
818#define CPU_TIMESTAMP_USE_INT64 TRUE
819#define CPU_TIMESTAMP_USE_INT64_INLINE FALSE
820
821typedef struct {
822/* There is no CPU specific per-CPU state */
823} CPU_Per_CPU_control;
824#endif /* ASM */
825
826#define CPU_SIZEOF_POINTER 4
827#define CPU_PER_CPU_CONTROL_SIZE 0
828
829#ifndef ASM
830typedef uint32_t CPU_Counter_ticks;
831typedef uint16_t Priority_bit_map_Word;
832
833typedef struct {
834  uint32_t r[32];
835
836  /* The following registers must be saved if we have
837  fast context switch disabled and nested interrupt
838  levels are enabled.
839  */
840#if !OR1K_FAST_CONTEXT_SWITCH_ENABLED
841  uint32_t epcr; /* exception PC register */
842  uint32_t eear; /* exception effective address register */
843  uint32_t esr; /* exception supervision register */
844#endif
845
846} CPU_Exception_frame;
847
848/**
849 * @brief Prints the exception frame via printk().
850 *
851 * @see rtems_fatal() and RTEMS_FATAL_SOURCE_EXCEPTION.
852 */
853void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
854
855
856/* end of Priority handler macros */
857
858/* functions */
859
860/*
861 *  _CPU_Initialize
862 *
863 *  This routine performs CPU dependent initialization.
864 *
865 */
866
867void _CPU_Initialize(
868  void
869);
870
871/*
872 *  _CPU_ISR_install_raw_handler
873 *
874 *  This routine installs a "raw" interrupt handler directly into the
875 *  processor's vector table.
876 *
877 */
878
879void _CPU_ISR_install_raw_handler(
880  uint32_t    vector,
881  proc_ptr    new_handler,
882  proc_ptr   *old_handler
883);
884
885/*
886 *  _CPU_ISR_install_vector
887 *
888 *  This routine installs an interrupt vector.
889 *
890 *  NO_CPU Specific Information:
891 *
892 *  XXX document implementation including references if appropriate
893 */
894
895void _CPU_ISR_install_vector(
896  uint32_t    vector,
897  proc_ptr   new_handler,
898  proc_ptr   *old_handler
899);
900
901/*
902 *  _CPU_Install_interrupt_stack
903 *
904 *  This routine installs the hardware interrupt stack pointer.
905 *
906 *  NOTE:  It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
907 *         is TRUE.
908 *
909 */
910
911void _CPU_Install_interrupt_stack( void );
912
913/*
914 *  _CPU_Thread_Idle_body
915 *
916 *  This routine is the CPU dependent IDLE thread body.
917 *
918 *  NOTE:  It need only be provided if CPU_PROVIDES_IDLE_THREAD_BODY
919 *         is TRUE.
920 *
921 */
922
923void *_CPU_Thread_Idle_body( uintptr_t ignored );
924
925/*
926 *  _CPU_Context_switch
927 *
928 *  This routine switches from the run context to the heir context.
929 *
930 *  Or1k Specific Information:
931 *
932 *  Please see the comments in the .c file for a description of how
933 *  this function works. There are several things to be aware of.
934 */
935
936void _CPU_Context_switch(
937  Context_Control  *run,
938  Context_Control  *heir
939);
940
941/*
942 *  _CPU_Context_restore
943 *
944 *  This routine is generally used only to restart self in an
945 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
946 *
947 *  NOTE: May be unnecessary to reload some registers.
948 *
949 */
950
951void _CPU_Context_restore(
952  Context_Control *new_context
953) RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;
954
955/*
956 *  _CPU_Context_save_fp
957 *
958 *  This routine saves the floating point context passed to it.
959 *
960 */
961
962void _CPU_Context_save_fp(
963  void **fp_context_ptr
964);
965
966/*
967 *  _CPU_Context_restore_fp
968 *
969 *  This routine restores the floating point context passed to it.
970 *
971 */
972
973void _CPU_Context_restore_fp(
974  void **fp_context_ptr
975);
976
977/*  The following routine swaps the endian format of an unsigned int.
978 *  It must be static because it is referenced indirectly.
979 *
980 *  This version will work on any processor, but if there is a better
981 *  way for your CPU PLEASE use it.  The most common way to do this is to:
982 *
983 *     swap least significant two bytes with 16-bit rotate
984 *     swap upper and lower 16-bits
985 *     swap most significant two bytes with 16-bit rotate
986 *
987 *  Some CPUs have special instructions which swap a 32-bit quantity in
988 *  a single instruction (e.g. i486).  It is probably best to avoid
989 *  an "endian swapping control bit" in the CPU.  One good reason is
990 *  that interrupts would probably have to be disabled to ensure that
991 *  an interrupt does not try to access the same "chunk" with the wrong
992 *  endian.  Another good reason is that on some CPUs, the endian bit
993 *  changes the endianness of ALL fetches -- both code and data -- so the code
994 *  will be fetched incorrectly.
995 *
996 */
997
998void _CPU_Context_volatile_clobber( uintptr_t pattern );
999
1000void _CPU_Context_validate( uintptr_t pattern );
1001
1002static inline unsigned int CPU_swap_u32(
1003  unsigned int value
1004)
1005{
1006  uint32_t   byte1, byte2, byte3, byte4, swapped;
1007
1008  byte4 = (value >> 24) & 0xff;
1009  byte3 = (value >> 16) & 0xff;
1010  byte2 = (value >> 8)  & 0xff;
1011  byte1 =  value        & 0xff;
1012
1013  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
1014  return( swapped );
1015}
1016
1017#define CPU_swap_u16( value ) \
1018  (((value&0xff) << 8) | ((value >> 8)&0xff))
1019
1020typedef uint32_t CPU_Counter_ticks;
1021
1022CPU_Counter_ticks _CPU_Counter_read( void );
1023
1024CPU_Counter_ticks _CPU_Counter_difference(
1025  CPU_Counter_ticks second,
1026  CPU_Counter_ticks first
1027);
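/*
 *  For a free-running 32-bit up-counter the difference is normally plain
 *  unsigned subtraction, which stays correct across a single counter wrap
 *  (a sketch of the usual pattern, not necessarily this port's code):
 *
 *      return second - first;
 */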
1028
1029#endif /* ASM */
1030
1031#ifdef __cplusplus
1032}
1033#endif
1034
1035#endif