source: rtems/cpukit/score/cpu/epiphany/rtems/score/cpu.h @ fdfbb0a8

Last change on this file was fdfbb0a8, checked in by Joel Sherrill <joel@…> on 04/25/17 at 18:39:50

epiphany/rtems/score/cpu.h: Fix printf() format warning

[66a5000d]1/**
2 * @file rtems/score/cpu.h
3 */
4
5/*
6 *
7 * Copyright (c) 2015 University of York.
8 * Hesham ALMatary <hmka501@york.ac.uk>
9 *
10 * COPYRIGHT (c) 1989-1999.
11 * On-Line Applications Research Corporation (OAR).
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 *    notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 *    notice, this list of conditions and the following disclaimer in the
20 *    documentation and/or other materials provided with the distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35#ifndef _EPIPHANY_CPU_H
36#define _EPIPHANY_CPU_H
37
38#ifdef __cplusplus
39extern "C" {
40#endif
41
42#include <rtems/score/epiphany.h> /* pick up machine definitions */
43#include <rtems/score/types.h>
44#ifndef ASM
45#include <rtems/bspIo.h>
46#include <stdint.h>
47#include <stdio.h> /* for printk */
48#endif
49
50/* conditional compilation parameters */
51
52/*
53 *  Does RTEMS manage a dedicated interrupt stack in software?
54 *
55 *  If TRUE, then a stack is allocated in _ISR_Handler_initialization.
56 *  If FALSE, nothing is done.
57 *
58 *  If the CPU supports a dedicated interrupt stack in hardware,
59 *  then it is generally the responsibility of the BSP to allocate it
60 *  and set it up.
61 *
62 *  If the CPU does not support a dedicated interrupt stack, then
63 *  the porter has two options: (1) execute interrupts on the
64 *  stack of the interrupted task, and (2) have RTEMS manage a dedicated
65 *  interrupt stack.
66 *
67 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
68 *
69 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
70 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
71 *  possible that both are FALSE for a particular CPU, although it
72 *  is unclear what that would imply about the interrupt processing
73 *  procedure on that CPU.
74 *
75 *  Currently, for the epiphany port, _ISR_Handler is responsible for switching
76 *  to the RTEMS dedicated interrupt stack.
77 *
78 */
79
80#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
81
82/*
83 *  Does this CPU have hardware support for a dedicated interrupt stack?
84 *
85 *  If TRUE, then it must be installed during initialization.
86 *  If FALSE, then no installation is performed.
87 *
88 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
89 *
90 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
91 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
92 *  possible that both are FALSE for a particular CPU, although it
93 *  is unclear what that would imply about the interrupt processing
94 *  procedure on that CPU.
95 *
96 */
97
98#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
99
100/*
101 *  Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
102 *
103 *  If TRUE, then the memory is allocated during initialization.
104 *  If FALSE, then the memory is not allocated during initialization.
105 *
106 *  This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE
107 *  or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE.
108 *
109 */
110
111#define CPU_ALLOCATE_INTERRUPT_STACK TRUE
112
113/*
114 *  Does RTEMS invoke the user's ISR with the vector number and
115 *  a pointer to the saved interrupt frame (TRUE) or just the vector
116 *  number (FALSE)?
117 *
118 */
119
[141e16d]120#define CPU_ISR_PASSES_FRAME_POINTER TRUE
[66a5000d]121
122/*
123 *  Does the CPU have hardware floating point?
124 *
125 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
126 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
127 *
128 *  If there is a FP coprocessor such as the i387 or mc68881, then
129 *  the answer is TRUE.
130 *
131 *  The macro name "epiphany_HAS_FPU" should be made CPU specific.
132 *  It indicates whether or not this CPU model has FP support.  For
133 *  example, it would be possible to have an i386_nofp CPU model
134 *  which set this to false to indicate that you have an i386 without
135 *  an i387 and wish to leave floating point support out of RTEMS.
136 *
137 *  The CPU_SOFTWARE_FP is used to indicate whether or not there
138 *  is software implemented floating point that must be context
139 *  switched.  The determination of whether or not this applies
140 *  is very tool specific and the state saved/restored is also
141 *  compiler specific.
142 *
143 *  epiphany Specific Information:
144 *
145 *  At this time there are no implementations of Epiphany that are
146 *  expected to implement floating point.
147 */
148
149#define CPU_HARDWARE_FP     FALSE
150#define CPU_SOFTWARE_FP     FALSE
151
152/*
153 *  Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
154 *
155 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
156 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
157 *
158 *  If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
159 *
160 */
161
162#define CPU_ALL_TASKS_ARE_FP     FALSE
163
164/*
165 *  Should the IDLE task have a floating point context?
166 *
167 *  If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
168 *  and it has a floating point context which is switched in and out.
169 *  If FALSE, then the IDLE task does not have a floating point context.
170 *
171 *  Setting this to TRUE negatively impacts the time required to preempt
172 *  the IDLE task from an interrupt because the floating point context
173 *  must be saved as part of the preemption.
174 *
175 */
176
177#define CPU_IDLE_TASK_IS_FP      FALSE
178
179/*
180 *  Should the saving of the floating point registers be deferred
181 *  until a context switch is made to another different floating point
182 *  task?
183 *
184 *  If TRUE, then the floating point context will not be stored until
185 *  necessary.  It will remain in the floating point registers and not
186 *  disturbed until another floating point task is switched to.
187 *
188 *  If FALSE, then the floating point context is saved when a floating
189 *  point task is switched out and restored when the next floating point
190 *  task is switched in.  The state of the floating point registers between
191 *  those two operations is not specified.
192 *
193 *  If the floating point context does NOT have to be saved as part of
194 *  interrupt dispatching, then it should be safe to set this to TRUE.
195 *
196 *  Setting this flag to TRUE results in using a different algorithm
197 *  for deciding when to save and restore the floating point context.
198 *  The deferred FP switch algorithm minimizes the number of times
199 *  the FP context is saved and restored.  The FP context is not saved
200 *  until a context switch is made to another, different FP task.
201 *  Thus in a system with only one FP task, the FP context will never
202 *  be saved or restored.
203 *
204 */
205
206#define CPU_USE_DEFERRED_FP_SWITCH       FALSE
207
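/*
 *  To illustrate what the deferred algorithm avoids, here is a conceptual
 *  sketch of the dispatch-time decision (not the scheduler's literal code;
 *  _Thread_Allocated_fp, _Context_Save_fp() and _Context_Restore_fp() are
 *  the generic score facilities involved):
 *
 *  @code
 *  // The FP registers still hold the context of _Thread_Allocated_fp.
 *  // Nothing is saved or restored until a different FP task becomes heir.
 *  if ( heir->fp_context != NULL && heir != _Thread_Allocated_fp ) {
 *    if ( _Thread_Allocated_fp != NULL )
 *      _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
 *    _Context_Restore_fp( &heir->fp_context );
 *    _Thread_Allocated_fp = heir;
 *  }
 *  @endcode
 */
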
[84e6f15]208#define CPU_ENABLE_ROBUST_THREAD_DISPATCH FALSE
209
[66a5000d]210/*
211 *  Does this port provide a CPU dependent IDLE task implementation?
212 *
213 *  If TRUE, then the routine _CPU_Thread_Idle_body
214 *  must be provided and is the default IDLE thread body instead of
215 *  the generic _Thread_Idle_body.
216 *
217 *  If FALSE, then use the generic IDLE thread body if the BSP does
218 *  not provide one.
219 *
220 *  This is intended to allow for supporting processors which have
221 *  a low power or idle mode.  When the IDLE thread is executed, then
222 *  the CPU can be powered down.
223 *
224 *  The order of precedence for selecting the IDLE thread body is:
225 *
226 *    1.  BSP provided
227 *    2.  CPU dependent (if provided)
228 *    3.  generic (if no BSP and no CPU dependent)
229 *
230 */
231
232#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE
233
234/*
235 *  Does the stack grow up (toward higher addresses) or down
236 *  (toward lower addresses)?
237 *
238 *  If TRUE, then the stack grows upward.
239 *  If FALSE, then the stack grows toward smaller addresses.
240 *
241 */
242
243#define CPU_STACK_GROWS_UP               FALSE
244
[a8865f8]245/* FIXME: Is this the right value? */
246#define CPU_CACHE_LINE_BYTES 64
[66a5000d]247
[a8865f8]248#define CPU_STRUCTURE_ALIGNMENT RTEMS_ALIGNED( CPU_CACHE_LINE_BYTES )
[66a5000d]249
250/*
251 *  Define what is required to specify how the network to host conversion
252 *  routines are handled.
253 *
254 *  epiphany Specific Information:
255 *
256 *  This version of RTEMS is designed specifically to run with
257 *  big endian architectures. If you want little endian, you'll
258 *  have to make the appropriate adjustments here and write
259 *  efficient routines for byte swapping. The epiphany architecture
260 *  doesn't do this very well.
261 */
262
263#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES     FALSE
264
265/*
266 *  The following defines the number of bits actually used in the
267 *  interrupt field of the task mode.  How those bits map to the
268 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
269 *
270 */
271
272#define CPU_MODES_INTERRUPT_MASK   0x00000001
273
274/*
275 *  Processor defined structures required for cpukit/score.
276 */
277
278/*
279 * Contexts
280 *
281 *  Generally there are 2 types of context to save.
282 *     1. Interrupt registers to save
283 *     2. Task level registers to save
284 *
285 *  This means we have the following 3 context items:
286 *     1. task level context stuff::  Context_Control
287 *     2. floating point task stuff:: Context_Control_fp
288 *     3. special interrupt level context :: Context_Control_interrupt
289 *
290 *  On some processors, it is cost-effective to save only the callee
291 *  preserved registers during a task context switch.  This means
292 *  that the ISR code needs to save those registers which do not
293 *  persist across function calls.  It is not mandatory to make this
294 *  distinctions between the caller/callee saves registers for the
295 *  purpose of minimizing context saved during task switch and on interrupts.
296 *  If the cost of saving extra registers is minimal, simplicity is the
297 *  choice.  Save the same context on interrupt entry as for tasks in
298 *  this case.
299 *
300 *  Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
301 *  care should be used in designing the context area.
302 *
303 *  On some CPUs with hardware floating point support, the Context_Control_fp
304 *  structure will not be used or it will simply consist of an array of a
305 *  fixed number of bytes.   This is done when the floating point context
306 *  is dumped by a "FP save context" type instruction and the format
307 *  is not really defined by the CPU.  In this case, there is no need
308 *  to figure out the exact format -- only the size.  Of course, although
309 *  this is enough information for RTEMS, it is probably not enough for
310 *  a debugger such as gdb.  But that is another problem.
311 *
312 *
313 */
314#ifndef ASM
315
316typedef struct {
317  uint32_t  r[64];
318
319  uint32_t status;
320  uint32_t config;
321  uint32_t iret;
322
323#ifdef RTEMS_SMP
324    /**
325     * @brief On SMP configurations the thread context must contain a boolean
326     * indicator to signal if this context is executing on a processor.
327     *
328     * This field must be updated during a context switch.  The context switch
329     * to the heir must wait until the heir context indicates that it is no
330     * longer executing on a processor.  The context switch must also check if
331     * a thread dispatch is necessary to honor updates of the heir thread for
332     * this processor.  This indicator must be updated using an atomic test and
333     * set operation to ensure that at most one processor uses the heir
334     * context at the same time.
335     *
336     * @code
337     * void _CPU_Context_switch(
338     *   Context_Control *executing,
339     *   Context_Control *heir
340     * )
341     * {
342     *   save( executing );
343     *
344     *   executing->is_executing = false;
345     *   memory_barrier();
346     *
347     *   if ( test_and_set( &heir->is_executing ) ) {
348     *     do {
349     *       Per_CPU_Control *cpu_self = _Per_CPU_Get_snapshot();
350     *
351     *       if ( cpu_self->dispatch_necessary ) {
352     *         heir = _Thread_Get_heir_and_make_it_executing( cpu_self );
353     *       }
354     *     } while ( test_and_set( &heir->is_executing ) );
355     *   }
356     *
357     *   restore( heir );
358     * }
359     * @endcode
360     */
361    volatile bool is_executing;
362#endif
363} Context_Control;
364
365#define _CPU_Context_Get_SP( _context ) \
366  (_context)->r[13]
367
368typedef struct {
369  /** FPU registers are listed here */
370  double  some_float_register;
371} Context_Control_fp;
372
373typedef Context_Control CPU_Interrupt_frame;
374
375/*
376 *  The size of the floating point context area.  On some CPUs this
377 *  will not be a "sizeof" because the format of the floating point
378 *  area is not defined -- only the size is.  This is usually on
379 *  CPUs with a "floating point save context" instruction.
380 *
381 *  epiphany Specific Information:
382 *
383 */
384
385#define CPU_CONTEXT_FP_SIZE  0
386
387/*
388 *  Amount of extra stack (above minimum stack size) required by
389 *  MPCI receive server thread.  Remember that in a multiprocessor
390 *  system this thread must exist and be able to process all directives.
391 *
392 */
393
394#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
395
396/*
397 *  Should be large enough to run all RTEMS tests.  This ensures
398 *  that a "reasonable" small application should not have any problems.
399 *
400 */
401
402#define CPU_STACK_MINIMUM_SIZE  4096
403
404/*
405 *  CPU's worst alignment requirement for data types on a byte boundary.  This
406 *  alignment does not take into account the requirements for the stack.
407 *
408 */
409
410#define CPU_ALIGNMENT 8
411
412/*
413 *  This is defined if the port has a special way to report the ISR nesting
414 *  level.  Most ports maintain the variable _ISR_Nest_level.
415 */
416#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
417
418/*
419 *  This number corresponds to the byte alignment requirement for the
420 *  heap handler.  This alignment requirement may be stricter than that
421 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
422 *  common for the heap to follow the same alignment requirement as
423 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
424 *  then this should be set to CPU_ALIGNMENT.
425 *
426 *  NOTE:  This does not have to be a power of 2 although it should be
427 *         a multiple of 2 greater than or equal to 2.  The requirement
428 *         to be a multiple of 2 is because the heap uses the least
429 *         significant bit of the front and back flags to indicate
430 *         whether a block is in use or free, so an odd block length
431 *         would put length data in that bit.
432 *
433 *         On byte oriented architectures, CPU_HEAP_ALIGNMENT normally will
434 *         have to be greater than or equal to CPU_ALIGNMENT to ensure that
435 *         elements allocated from the heap meet all restrictions.
436 *
437 */
438
439#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT
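
/*
 *  Why a multiple of 2: the heap keeps the block size and the "previous
 *  block is used" flag in the same word.  A sketch using the heap handler's
 *  HEAP_PREV_BLOCK_USED flag (the local names are made up for illustration):
 *
 *  @code
 *  uintptr_t size_and_flag = block_size | HEAP_PREV_BLOCK_USED;    // bit 0 is the flag
 *  uintptr_t pure_size     = size_and_flag & ~HEAP_PREV_BLOCK_USED;
 *  @endcode
 */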
440
441/*
442 *  This number corresponds to the byte alignment requirement for memory
443 *  buffers allocated by the partition manager.  This alignment requirement
444 *  may be stricter than that for the data types alignment specified by
445 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
446 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
447 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
448 *
449 *  NOTE:  This does not have to be a power of 2.  It does have to
450 *         be greater than or equal to CPU_ALIGNMENT.
451 *
452 */
453
454#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT
455
456/*
457 *  This number corresponds to the byte alignment requirement for the
458 *  stack.  This alignment requirement may be stricter than that for the
459 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
460 *  is strict enough for the stack, then this should be set to 0.
461 *
462 *  NOTE:  This must be either 0 or a power of 2 greater than CPU_ALIGNMENT.
463 *
464 */
465
466#define CPU_STACK_ALIGNMENT        8
467
468/* ISR handler macros */
469
470/*
471 *  Support routine to initialize the RTEMS vector table after it is allocated.
472 *
473 *  epiphany Specific Information:
474 *
475 *  XXX document implementation including references if appropriate
476 */
477
478#define _CPU_Initialize_vectors()
479
480/*
481 *  Disable all interrupts for an RTEMS critical section.  The previous
482 *  level is returned in _level.
483 *
484 */
485
486static inline uint32_t epiphany_interrupt_disable( void )
487{
488  uint32_t sr;
489  __asm__ __volatile__ ("movfs %[sr], status \n" : [sr] "=r" (sr):);
490  __asm__ __volatile__("gid \n");
491  return sr;
492}
493
494static inline void epiphany_interrupt_enable(uint32_t level)
495{
496  __asm__ __volatile__("gie \n");
497  __asm__ __volatile__ ("movts status, %[level] \n" :: [level] "r" (level):);
498}
499
500#define _CPU_ISR_Disable( _level ) \
501    _level = epiphany_interrupt_disable()
502
503/*
504 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
505 *  This indicates the end of an RTEMS critical section.  The parameter
506 *  _level is not modified.
507 *
508 */
509
510#define _CPU_ISR_Enable( _level )  \
511  epiphany_interrupt_enable( _level )
512
513/*
514 *  This temporarily restores interrupts to _level before immediately
515 *  disabling them again.  This is used to divide long RTEMS critical
516 *  sections into two or more parts.  The parameter _level is not
517 *  modified.
518 *
519 */
520
521#define _CPU_ISR_Flash( _level ) \
522  do{ \
523      if ( (_level & 0x2) != 0 ) \
524        _CPU_ISR_Enable( _level ); \
525      epiphany_interrupt_disable(); \
526    } while(0)
527
[408609f6]528RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
529{
530  return ( level & 0x2 ) != 0;
531}
532
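/*
 *  A minimal usage sketch of the macros above (the protected operations are
 *  hypothetical):
 *
 *  @code
 *  uint32_t level;
 *
 *  _CPU_ISR_Disable( level );   // enter the critical section, save old state
 *  // ... first part touching data shared with interrupt handlers ...
 *  _CPU_ISR_Flash( level );     // briefly allow interrupts if they were enabled
 *  // ... second part of the critical section ...
 *  _CPU_ISR_Enable( level );    // restore the state saved by _CPU_ISR_Disable()
 *  @endcode
 */
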
[66a5000d]533/*
534 *  Map interrupt level in task mode onto the hardware that the CPU
535 *  actually provides.  Currently, interrupt levels which do not
536 *  map onto the CPU in a generic fashion are undefined.  Someday,
537 *  it would be nice if these were "mapped" by the application
538 *  via a callout.  For example, m68k has 8 levels 0 - 7, levels
539 *  8 - 255 would be available for bsp/application specific meaning.
540 *  This could be used to manage a programmable interrupt controller
541 *  via the rtems_task_mode directive.
542 *
543 *  The get routine usually must be implemented as a subroutine.
544 *
545 */
546
547void _CPU_ISR_Set_level( uint32_t level );
548
549uint32_t _CPU_ISR_Get_level( void );
550
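/*
 *  A possible shape for these routines, assuming the simplest mapping in
 *  which level 0 means "interrupts enabled" and any non-zero level means
 *  "interrupts disabled" (a sketch only; the port's actual implementation
 *  lives in cpu.c):
 *
 *  @code
 *  void _CPU_ISR_Set_level( uint32_t level )
 *  {
 *    if ( level == 0 )
 *      __asm__ __volatile__ ( "gie" );   // globally enable interrupts
 *    else
 *      __asm__ __volatile__ ( "gid" );   // globally disable interrupts
 *  }
 *
 *  uint32_t _CPU_ISR_Get_level( void )
 *  {
 *    uint32_t sr;
 *    __asm__ __volatile__ ( "movfs %[sr], status" : [sr] "=r" (sr) );
 *    return _CPU_ISR_Is_enabled( sr ) ? 0 : 1;
 *  }
 *  @endcode
 */
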
551/* end of ISR handler macros */
552
553/* Context handler macros */
554
555/*
556 *  Initialize the context to a state suitable for starting a
557 *  task after a context restore operation.  Generally, this
558 *  involves:
559 *
560 *     - setting a starting address
561 *     - preparing the stack
562 *     - preparing the stack and frame pointers
563 *     - setting the proper interrupt level in the context
564 *     - initializing the floating point context
565 *
566 *  This routine generally does not set any unnecessary register
567 *  in the context.  The state of the "general data" registers is
568 *  undefined at task start time.
569 *
570 *  NOTE: The is_fp parameter is TRUE if the thread is to be a floating
571 *        point thread.  This is typically only used on CPUs where the
572 *        FPU may be easily disabled by software such as on the SPARC
573 *        where the PSR contains an enable FPU bit.
574 *
575 */
576
577/**
578 * @brief Account for GCC red-zone
579 *
580 * The following macro is used when initializing a task's stack
581 * to account for the GCC red-zone.
582 */
583
584#define EPIPHANY_GCC_RED_ZONE_SIZE 128
585
586/**
587 * @brief Initializes the CPU context.
588 *
589 * The following steps are performed:
590 *  - setting a starting address
591 *  - preparing the stack
592 *  - preparing the stack and frame pointers
593 *  - setting the proper interrupt level in the context
594 *
595 * @param[in] context points to the context area
596 * @param[in] stack_area_begin is the low address of the allocated stack area
597 * @param[in] stack_area_size is the size of the stack area in bytes
598 * @param[in] new_level is the interrupt level for the task
599 * @param[in] entry_point is the task's entry point
600 * @param[in] is_fp is set to @c true if the task is a floating point task
601 * @param[in] tls_area is the thread-local storage (TLS) area
602 */
603void _CPU_Context_Initialize(
604  Context_Control *context,
605  void *stack_area_begin,
606  size_t stack_area_size,
607  uint32_t new_level,
608  void (*entry_point)( void ),
609  bool is_fp,
610  void *tls_area
611);
612
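/*
 *  A sketch of how the initial stack pointer is typically derived from the
 *  arguments above, keeping the GCC red-zone free and respecting
 *  CPU_STACK_ALIGNMENT (illustrative only; the real code is in cpu.c):
 *
 *  @code
 *  uintptr_t stack_top = (uintptr_t) stack_area_begin + stack_area_size;
 *
 *  stack_top -= EPIPHANY_GCC_RED_ZONE_SIZE;                 // reserve the red-zone
 *  stack_top &= ~( (uintptr_t) CPU_STACK_ALIGNMENT - 1 );   // align downwards
 *
 *  context->r[13] = (uint32_t) stack_top;                   // r13 is the stack pointer
 *  @endcode
 */
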
613/*
614 *  This routine is responsible for somehow restarting the currently
615 *  executing task.  If you are lucky, then all that is necessary
616 *  is restoring the context.  Otherwise, there will need to be
617 *  a special assembly routine which does something special in this
618 *  case.  Context_Restore should work most of the time.  It will
619 *  not work if restarting self conflicts with the stack frame
620 *  assumptions of restoring a context.
621 *
622 */
623
624#define _CPU_Context_Restart_self( _the_context ) \
625   _CPU_Context_restore( (_the_context) )
626
627#define _CPU_Context_Initialize_fp( _destination ) \
[af3847a]628  memset( *( _destination ), 0, CPU_CONTEXT_FP_SIZE );
[66a5000d]629
630/* end of Context handler macros */
631
632/* Fatal Error manager macros */
633
634/*
635 *  This routine copies _error into a known place -- typically a stack
636 *  location or a register, optionally disables interrupts, and
637 *  halts/stops the CPU.
638 *
639 */
640
[fdfbb0a8]641#include <inttypes.h>
642
[66a5000d]643#define _CPU_Fatal_halt(_source, _error ) \
[fdfbb0a8]644          printk("Fatal Error %d.%" PRIu32 " Halted\n",_source, _error); \
[66a5000d]645          asm("trap 3" :: "r" (_error)); \
646          for(;;)
647
648/* end of Fatal Error manager macros */
649
650#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
651
652#endif /* ASM */
653
654/**
655 * Size of a pointer.
656 *
657 * This must be an integer literal that can be used by the assembler.  This
658 * value will be used to calculate offsets of structure members.  These
659 * offsets will be used in assembler code.
660 */
661#define CPU_SIZEOF_POINTER 4
662#define CPU_EXCEPTION_FRAME_SIZE 260
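
/*
 *  CPU_EXCEPTION_FRAME_SIZE matches the CPU_Exception_frame structure defined
 *  below: 62 general registers plus status, config and iret, i.e.
 *  ( 62 + 3 ) * 4 = 260 bytes.
 */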
663
[decff899]664#define CPU_MAXIMUM_PROCESSORS 32
665
[66a5000d]666#ifndef ASM
667
668typedef struct {
669  uint32_t r[62];
670  uint32_t status;
671  uint32_t config;
672  uint32_t iret;
673} CPU_Exception_frame;
674
675/**
676 * @brief Prints the exception frame via printk().
677 *
678 * @see rtems_fatal() and RTEMS_FATAL_SOURCE_EXCEPTION.
679 */
680void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
681
682
683/* end of Priority handler macros */
684
685/* functions */
686
687/*
688 *  _CPU_Initialize
689 *
690 *  This routine performs CPU dependent initialization.
691 *
692 */
693
694void _CPU_Initialize(
695  void
696);
697
698/*
699 *  _CPU_ISR_install_raw_handler
700 *
701 *  This routine installs a "raw" interrupt handler directly into the
702 *  processor's vector table.
703 *
704 */
705
706void _CPU_ISR_install_raw_handler(
707  uint32_t    vector,
708  proc_ptr    new_handler,
709  proc_ptr   *old_handler
710);
711
712/*
713 *  _CPU_ISR_install_vector
714 *
715 *  This routine installs an interrupt vector.
716 *
717 *  epiphany Specific Information:
718 *
719 *  XXX document implementation including references if appropriate
720 */
721
722void _CPU_ISR_install_vector(
723  uint32_t    vector,
724  proc_ptr   new_handler,
725  proc_ptr   *old_handler
726);
727
728/*
729 *  _CPU_Install_interrupt_stack
730 *
731 *  This routine installs the hardware interrupt stack pointer.
732 *
733 *  NOTE:  It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
734 *         is TRUE.
735 *
736 */
737
738void _CPU_Install_interrupt_stack( void );
739
740/*
741 *  _CPU_Thread_Idle_body
742 *
743 *  This routine is the CPU dependent IDLE thread body.
744 *
745 *  NOTE:  It need only be provided if CPU_PROVIDES_IDLE_THREAD_BODY
746 *         is TRUE.
747 *
748 */
749
750void *_CPU_Thread_Idle_body( uintptr_t ignored );
751
752/*
753 *  _CPU_Context_switch
754 *
755 *  This routine switches from the run context to the heir context.
756 *
757 *  epiphany Specific Information:
758 *
759 *  Please see the comments in the .c file for a description of how
760 *  this function works. There are several things to be aware of.
761 */
762
763void _CPU_Context_switch(
764  Context_Control  *run,
765  Context_Control  *heir
766);
767
768/*
769 *  _CPU_Context_restore
770 *
771 *  This routine is generally used only to restart self in an
772 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
773 *
774 *  NOTE: May be unnecessary to reload some registers.
775 *
776 */
777
778void _CPU_Context_restore(
779  Context_Control *new_context
[143696a]780) RTEMS_NO_RETURN;
[66a5000d]781
782/*
783 *  _CPU_Context_save_fp
784 *
785 *  This routine saves the floating point context passed to it.
786 *
787 */
788
789void _CPU_Context_save_fp(
790  void **fp_context_ptr
791);
792
793/*
794 *  _CPU_Context_restore_fp
795 *
796 *  This routine restores the floating point context passed to it.
797 *
798 */
799
800void _CPU_Context_restore_fp(
801  void **fp_context_ptr
802);
803
804/*  The following routine swaps the endian format of an unsigned int.
805 *  It must be static because it is referenced indirectly.
806 *
807 *  This version will work on any processor, but if there is a better
808 *  way for your CPU PLEASE use it.  The most common way to do this is to:
809 *
810 *     swap least significant two bytes with 16-bit rotate
811 *     swap upper and lower 16-bits
812 *     swap most significant two bytes with 16-bit rotate
813 *
814 *  Some CPUs have special instructions which swap a 32-bit quantity in
815 *  a single instruction (e.g. i486).  It is probably best to avoid
816 *  an "endian swapping control bit" in the CPU.  One good reason is
817 *  that interrupts would probably have to be disabled to ensure that
818 *  an interrupt does not try to access the same "chunk" with the wrong
819 *  endian.  Another good reason is that on some CPUs, the endian bit
820 *  changes the endianness for ALL fetches -- both code and data -- so the code
821 *  will be fetched incorrectly.
822 *
823 */
824
825static inline unsigned int CPU_swap_u32(
826  unsigned int value
827)
828{
829  uint32_t   byte1, byte2, byte3, byte4, swapped;
830
831  byte4 = (value >> 24) & 0xff;
832  byte3 = (value >> 16) & 0xff;
833  byte2 = (value >> 8)  & 0xff;
834  byte1 =  value        & 0xff;
835
836  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
837  return( swapped );
838}
839
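/*
 *  The sequence described above can also be written as two mask-and-shift
 *  steps, which compilers usually map onto rotate or byte-swap instructions
 *  where they exist (an alternative sketch, not used by this port):
 *
 *  @code
 *  static inline uint32_t swap_u32_by_halves( uint32_t value )
 *  {
 *    value = ( value << 16 ) | ( value >> 16 );        // swap the 16-bit halves
 *    return ( ( value & 0x00ff00ffU ) << 8 ) |         // swap the bytes within
 *           ( ( value >> 8 ) & 0x00ff00ffU );          // each half
 *  }
 *  @endcode
 */
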
840#define CPU_swap_u16( value ) \
841  ((((value) & 0xff) << 8) | (((value) >> 8) & 0xff))
842
843static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
844{
845  /* TODO */
846}
847
848static inline void _CPU_Context_validate( uintptr_t pattern )
849{
850  while (1) {
851    /* TODO */
852  }
853}
854
855typedef uint32_t CPU_Counter_ticks;
856
857CPU_Counter_ticks _CPU_Counter_read( void );
858
859static inline CPU_Counter_ticks _CPU_Counter_difference(
860  CPU_Counter_ticks second,
861  CPU_Counter_ticks first
862)
863{
864  return second - first;
865}
866
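/*
 *  Typical use of the counter interface to measure a short interval
 *  (a usage sketch; the operation being timed is hypothetical):
 *
 *  @code
 *  CPU_Counter_ticks start = _CPU_Counter_read();
 *
 *  // ... short operation to be measured ...
 *
 *  CPU_Counter_ticks delta =
 *    _CPU_Counter_difference( _CPU_Counter_read(), start );
 *  @endcode
 */
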
867#endif /* ASM */
868
869#ifdef __cplusplus
870}
871#endif
872
873#endif