source: rtems/cpukit/score/cpu/epiphany/include/rtems/score/cpu.h @ bf39a9e

/**
 * @file
 */

/*
 *
 * Copyright (c) 2015 University of York.
 * Hesham ALMatary <hmka501@york.ac.uk>
 *
 * COPYRIGHT (c) 1989-1999.
 * On-Line Applications Research Corporation (OAR).
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _EPIPHANY_CPU_H
#define _EPIPHANY_CPU_H

#ifdef __cplusplus
extern "C" {
#endif

#include <rtems/score/basedefs.h>
#include <rtems/score/epiphany.h> /* pick up machine definitions */
#ifndef ASM
#include <rtems/bspIo.h>
#include <stdint.h>
#include <stdio.h> /* for printk */
#endif

/**
 * @addtogroup RTEMSScoreCPUEpiphany
 */
/**@{**/

/* conditional compilation parameters */

/*
 *  Does RTEMS invoke the user's ISR with the vector number and
 *  a pointer to the saved interrupt frame (1) or just the vector
 *  number (0)?
 *
 */

#define CPU_ISR_PASSES_FRAME_POINTER TRUE
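
/*
 *  Because CPU_ISR_PASSES_FRAME_POINTER is TRUE, a user ISR for this port
 *  receives both the vector number and a pointer to the saved frame.  A
 *  minimal sketch (the handler name is illustrative only, and plain types
 *  are used instead of the score typedefs):
 *
 *  @code
 *  void my_handler( uint32_t vector, CPU_Interrupt_frame *frame )
 *  {
 *    (void) vector;
 *    (void) frame;
 *  }
 *  @endcode
 */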

#define CPU_HARDWARE_FP FALSE

#define CPU_SOFTWARE_FP FALSE

#define CPU_ALL_TASKS_ARE_FP FALSE

#define CPU_IDLE_TASK_IS_FP FALSE

#define CPU_USE_DEFERRED_FP_SWITCH FALSE

#define CPU_ENABLE_ROBUST_THREAD_DISPATCH FALSE

/*
 *  Does the stack grow up (toward higher addresses) or down
 *  (toward lower addresses)?
 *
 *  If TRUE, then the stack grows upward.
 *  If FALSE, then the stack grows toward smaller addresses.
 *
 */

#define CPU_STACK_GROWS_UP               FALSE

/* FIXME: Is this the right value? */
#define CPU_CACHE_LINE_BYTES 64

#define CPU_STRUCTURE_ALIGNMENT RTEMS_ALIGNED( CPU_CACHE_LINE_BYTES )
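
/*
 *  Usage sketch for CPU_STRUCTURE_ALIGNMENT (the buffer name is illustrative
 *  only): the macro expands to an alignment attribute, so it is attached to
 *  a declaration to place the object on a cache line boundary.
 *
 *  @code
 *  static uint8_t message_buffer[ 256 ] CPU_STRUCTURE_ALIGNMENT;
 *  @endcode
 */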

/*
 *  The following defines the number of bits actually used in the
 *  interrupt field of the task mode.  How those bits map to the
 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
 *
 */

#define CPU_MODES_INTERRUPT_MASK   0x00000001

/*
 *  Processor defined structures required for cpukit/score.
 */

/*
 * Contexts
 *
 *  Generally there are 2 types of context to save.
 *     1. Interrupt registers to save
 *     2. Task level registers to save
 *
 *  This means we have the following 3 context items:
 *     1. task level context stuff::  Context_Control
 *     2. floating point task stuff:: Context_Control_fp
 *     3. special interrupt level context :: Context_Control_interrupt
 *
 *  On some processors, it is cost-effective to save only the callee
 *  preserved registers during a task context switch.  This means
 *  that the ISR code needs to save those registers which do not
 *  persist across function calls.  It is not mandatory to make this
 *  distinction between the caller/callee saved registers for the
 *  purpose of minimizing context saved during task switch and on interrupts.
 *  If the cost of saving extra registers is minimal, simplicity is the
 *  choice.  Save the same context on interrupt entry as for tasks in
 *  this case.
 *
 *  Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
 *  care should be used in designing the context area.
 *
 *  On some CPUs with hardware floating point support, the Context_Control_fp
 *  structure will not be used or it simply consists of an array of a
 *  fixed number of bytes.  This is done when the floating point context
 *  is dumped by a "FP save context" type instruction and the format
 *  is not really defined by the CPU.  In this case, there is no need
 *  to figure out the exact format -- only the size.  Of course, although
 *  this is enough information for RTEMS, it is probably not enough for
 *  a debugger such as gdb.  But that is another problem.
 *
 */
#ifndef ASM

typedef struct {
  uint32_t  r[64];

  uint32_t status;
  uint32_t config;
  uint32_t iret;

#ifdef RTEMS_SMP
    /**
     * @brief On SMP configurations the thread context must contain a boolean
     * indicator to signal if this context is executing on a processor.
     *
     * This field must be updated during a context switch.  The context switch
     * to the heir must wait until the heir context indicates that it is no
     * longer executing on a processor.  The context switch must also check if
     * a thread dispatch is necessary to honor updates of the heir thread for
     * this processor.  This indicator must be updated using an atomic test and
     * set operation to ensure that at most one processor uses the heir
     * context at the same time.
     *
     * @code
     * void _CPU_Context_switch(
     *   Context_Control *executing,
     *   Context_Control *heir
     * )
     * {
     *   save( executing );
     *
     *   executing->is_executing = false;
     *   memory_barrier();
     *
     *   if ( test_and_set( &heir->is_executing ) ) {
     *     do {
     *       Per_CPU_Control *cpu_self = _Per_CPU_Get_snapshot();
     *
     *       if ( cpu_self->dispatch_necessary ) {
     *         heir = _Thread_Get_heir_and_make_it_executing( cpu_self );
     *       }
     *     } while ( test_and_set( &heir->is_executing ) );
     *   }
     *
     *   restore( heir );
     * }
     * @endcode
     */
    volatile bool is_executing;
#endif
} Context_Control;

#define _CPU_Context_Get_SP( _context ) \
  (_context)->r[13]

typedef Context_Control CPU_Interrupt_frame;

/*
 *  Amount of extra stack (above minimum stack size) required by
 *  MPCI receive server thread.  Remember that in a multiprocessor
 *  system this thread must exist and be able to process all directives.
 *
 */

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

/*
 *  Should be large enough to run all RTEMS tests.  This ensures
 *  that a reasonably small application should not have any problems.
 *
 */

#define CPU_STACK_MINIMUM_SIZE  4096

/*
 *  CPU's worst alignment requirement for data types on a byte boundary.  This
 *  alignment does not take into account the requirements for the stack.
 *
 */

#define CPU_ALIGNMENT 8

/*
 *  This is defined if the port has a special way to report the ISR nesting
 *  level.  Most ports maintain the variable _ISR_Nest_level.
 */
#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/*
 *  This number corresponds to the byte alignment requirement for the
 *  heap handler.  This alignment requirement may be stricter than that
 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
 *  common for the heap to follow the same alignment requirement as
 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
 *  then this should be set to CPU_ALIGNMENT.
 *
 *  NOTE:  This does not have to be a power of 2 although it should be
 *         a multiple of 2 greater than or equal to 2.  The requirement
 *         to be a multiple of 2 is because the heap uses the least
 *         significant bit of the front and back flags to indicate
 *         that a block is in use or free.  So you do not want any
 *         odd-length blocks placing real length data in that bit.
 *
 *         On byte oriented architectures, CPU_HEAP_ALIGNMENT normally will
 *         have to be greater than or equal to CPU_ALIGNMENT to ensure that
 *         elements allocated from the heap meet all restrictions.
 *
 */

#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT
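
/*
 *  For example, with CPU_HEAP_ALIGNMENT equal to 8, every block size is a
 *  multiple of 8, so the least significant bit of a block's size field is
 *  always free to carry the in-use/free flag: a 24-byte block marked as in
 *  use can be stored as 24 | 0x1 == 25, and the size recovered as 25 & ~0x1.
 */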

/*
 *  This number corresponds to the byte alignment requirement for the
 *  stack.  This alignment requirement may be stricter than that for the
 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
 *  is strict enough for the stack, then this should be set to 0.
 *
 *  NOTE:  This must be either 0 or a power of 2 greater than CPU_ALIGNMENT.
 *
 */

#define CPU_STACK_ALIGNMENT        8

#define CPU_INTERRUPT_STACK_ALIGNMENT CPU_CACHE_LINE_BYTES

/* ISR handler macros */

/*
 *  Support routine to initialize the RTEMS vector table after it is allocated.
 *
 *  Epiphany Specific Information:
 *
 *  XXX document implementation including references if appropriate
 */

#define _CPU_Initialize_vectors()

/*
 *  Disable all interrupts for an RTEMS critical section.  The previous
 *  level is returned in _level.
 *
 */

static inline uint32_t epiphany_interrupt_disable( void )
{
  uint32_t sr;
  __asm__ __volatile__ ("movfs %[sr], status \n" : [sr] "=r" (sr):);
  __asm__ __volatile__("gid \n");
  return sr;
}

static inline void epiphany_interrupt_enable(uint32_t level)
{
  __asm__ __volatile__("gie \n");
  __asm__ __volatile__ ("movts status, %[level] \n" :: [level] "r" (level):);
}

#define _CPU_ISR_Disable( _level ) \
    _level = epiphany_interrupt_disable()

/*
 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
 *  This indicates the end of an RTEMS critical section.  The parameter
 *  _level is not modified.
 *
 */

#define _CPU_ISR_Enable( _level )  \
  epiphany_interrupt_enable( _level )

/*
 *  This temporarily restores interrupts to _level before immediately
 *  disabling them again.  This is used to divide long RTEMS critical
 *  sections into two or more parts.  The parameter _level is not
 *  modified.
 *
 */

#define _CPU_ISR_Flash( _level ) \
  do{ \
      if ( (_level & 0x2) != 0 ) \
        _CPU_ISR_Enable( _level ); \
      epiphany_interrupt_disable(); \
    } while(0)

RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
{
  return ( level & 0x2 ) != 0;
}
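
/*
 *  A usage sketch of the interrupt level macros above (the worker functions
 *  are hypothetical placeholders): a long critical section is opened with
 *  _CPU_ISR_Disable(), briefly reopened for pending interrupts with
 *  _CPU_ISR_Flash(), and closed with _CPU_ISR_Enable().
 *
 *  @code
 *  uint32_t level;
 *
 *  _CPU_ISR_Disable( level );
 *  do_first_part_of_critical_section();
 *  _CPU_ISR_Flash( level );
 *  do_second_part_of_critical_section();
 *  _CPU_ISR_Enable( level );
 *  @endcode
 */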

/*
 *  Map interrupt level in task mode onto the hardware that the CPU
 *  actually provides.  Currently, interrupt levels which do not
 *  map onto the CPU in a generic fashion are undefined.  Someday,
 *  it would be nice if these were "mapped" by the application
 *  via a callout.  For example, m68k has 8 levels 0 - 7, levels
 *  8 - 255 would be available for bsp/application specific meaning.
 *  This could be used to manage a programmable interrupt controller
 *  via the rtems_task_mode directive.
 *
 *  The get routine usually must be implemented as a subroutine.
 *
 */

void _CPU_ISR_Set_level( uint32_t level );

uint32_t _CPU_ISR_Get_level( void );

/* end of ISR handler macros */

/* Context handler macros */

/*
 *  Initialize the context to a state suitable for starting a
 *  task after a context restore operation.  Generally, this
 *  involves:
 *
 *     - setting a starting address
 *     - preparing the stack
 *     - preparing the stack and frame pointers
 *     - setting the proper interrupt level in the context
 *     - initializing the floating point context
 *
 *  This routine generally does not set any unnecessary register
 *  in the context.  The state of the "general data" registers is
 *  undefined at task start time.
 *
 *  NOTE: The is_fp parameter is TRUE if the thread is to be a floating
 *        point thread.  This is typically only used on CPUs where the
 *        FPU may be easily disabled by software such as on the SPARC
 *        where the PSR contains an enable FPU bit.
 *
 */

/**
 * @brief Account for the GCC red zone
 *
 * The following macro is used when initializing a task's stack
 * to account for the GCC red zone.
 */

#define EPIPHANY_GCC_RED_ZONE_SIZE 128

/**
 * @brief Initializes the CPU context.
 *
 * The following steps are performed:
 *  - setting a starting address
 *  - preparing the stack
 *  - preparing the stack and frame pointers
 *  - setting the proper interrupt level in the context
 *
 * @param[in] context points to the context area
 * @param[in] stack_area_begin is the low address of the allocated stack area
 * @param[in] stack_area_size is the size of the stack area in bytes
 * @param[in] new_level is the interrupt level for the task
 * @param[in] entry_point is the task's entry point
 * @param[in] is_fp is set to @c true if the task is a floating point task
 * @param[in] tls_area is the thread-local storage (TLS) area
 */
void _CPU_Context_Initialize(
  Context_Control *context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint32_t new_level,
  void (*entry_point)( void ),
  bool is_fp,
  void *tls_area
);
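
/*
 *  A minimal sketch of how an implementation of this routine typically
 *  derives the initial stack pointer, assuming the stack grows downwards
 *  (CPU_STACK_GROWS_UP is FALSE), leaving room for the GCC red zone and
 *  aligning down to CPU_STACK_ALIGNMENT; the local variable names are
 *  illustrative only.  The result would be stored in the r[13] slot of the
 *  context, which _CPU_Context_Get_SP() reads back.
 *
 *  @code
 *  uintptr_t stack_high = (uintptr_t) stack_area_begin + stack_area_size;
 *  uintptr_t sp = ( stack_high - EPIPHANY_GCC_RED_ZONE_SIZE ) &
 *    ~( (uintptr_t) CPU_STACK_ALIGNMENT - 1 );
 *
 *  context->r[13] = (uint32_t) sp;
 *  @endcode
 */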

/*
 *  This routine is responsible for somehow restarting the currently
 *  executing task.  If you are lucky, then all that is necessary
 *  is restoring the context.  Otherwise, there will need to be
 *  a special assembly routine which does something special in this
 *  case.  Context_Restore should work most of the time.  It will
 *  not work if restarting self conflicts with the stack frame
 *  assumptions of restoring a context.
 *
 */

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) )

/* end of Context handler macros */

/* Fatal Error manager macros */

/*
 *  This routine copies _error into a known place -- typically a stack
 *  location or a register, optionally disables interrupts, and
 *  halts/stops the CPU.
 *
 */

#include <inttypes.h>

#define _CPU_Fatal_halt(_source, _error ) \
        do { \
          printk("Fatal Error %d.%" PRIu32 " Halted\n",_source, _error); \
          asm("trap 3" :: "r" (_error)); \
          for(;;); \
        } while(0)

/* end of Fatal Error manager macros */

#define CPU_USE_GENERIC_BITFIELD_CODE TRUE

#endif /* ASM */

/**
 * Size of a pointer.
 *
 * This must be an integer literal that can be used by the assembler.  This
 * value will be used to calculate offsets of structure members.  These
 * offsets will be used in assembler code.
 */
#define CPU_SIZEOF_POINTER 4
#define CPU_EXCEPTION_FRAME_SIZE 260

#define CPU_MAXIMUM_PROCESSORS 32

#ifndef ASM

typedef struct {
  uint32_t r[62];
  uint32_t status;
  uint32_t config;
  uint32_t iret;
} CPU_Exception_frame;

/**
 * @brief Prints the exception frame via printk().
 *
 * @see rtems_fatal() and RTEMS_FATAL_SOURCE_EXCEPTION.
 */
void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );


/* end of Priority handler macros */

/* functions */

/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 *
 */

void _CPU_Initialize(
  void
);

void *_CPU_Thread_Idle_body( uintptr_t ignored );

/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 *
 *  Epiphany Specific Information:
 *
 *  Please see the comments in the .c file for a description of how
 *  this function works. There are several things to be aware of.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 *
 */

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_NO_RETURN;

/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 *
 */

void _CPU_Context_save_fp(
  void **fp_context_ptr
);

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 *
 */

void _CPU_Context_restore_fp(
  void **fp_context_ptr
);

/*  The following routine swaps the endian format of an unsigned int.
 *  It must be static because it is referenced indirectly.
 *
 *  This version will work on any processor, but if there is a better
 *  way for your CPU PLEASE use it.  The most common way to do this is to:
 *
 *     swap least significant two bytes with 16-bit rotate
 *     swap upper and lower 16-bits
 *     swap most significant two bytes with 16-bit rotate
 *
 *  Some CPUs have special instructions which swap a 32-bit quantity in
 *  a single instruction (e.g. i486).  It is probably best to avoid
 *  an "endian swapping control bit" in the CPU.  One good reason is
 *  that interrupts would probably have to be disabled to ensure that
 *  an interrupt does not try to access the same "chunk" with the wrong
 *  endianness.  Another good reason is that on some CPUs, the endian bit
 *  changes the endianness for ALL fetches -- both code and data -- so the
 *  code will be fetched incorrectly.
 *
 */

static inline unsigned int CPU_swap_u32(
  unsigned int value
)
{
  uint32_t   byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value        & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return( swapped );
}

#define CPU_swap_u16( value ) \
  (((value&0xff) << 8) | ((value >> 8)&0xff))
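
/*
 *  Usage example for the byte swap helpers above; the values follow directly
 *  from the shift and mask operations:
 *
 *  @code
 *  uint32_t big32 = CPU_swap_u32( 0x12345678 );
 *  uint16_t big16 = CPU_swap_u16( 0xAABB );
 *  @endcode
 *
 *  Afterwards big32 holds 0x78563412 and big16 holds 0xBBAA.
 */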

typedef uint32_t CPU_Counter_ticks;

uint32_t _CPU_Counter_frequency( void );

CPU_Counter_ticks _CPU_Counter_read( void );

static inline CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
)
{
  return second - first;
}
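
/*
 *  A sketch of timing a short code sequence with the counter API above,
 *  assuming _CPU_Counter_frequency() reports the counter frequency in Hz;
 *  the do_work() call is a hypothetical placeholder:
 *
 *  @code
 *  CPU_Counter_ticks start = _CPU_Counter_read();
 *  do_work();
 *  CPU_Counter_ticks delta =
 *    _CPU_Counter_difference( _CPU_Counter_read(), start );
 *  uint64_t nanoseconds =
 *    ( (uint64_t) delta * 1000000000 ) / _CPU_Counter_frequency();
 *  @endcode
 */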

/** Type that can store a 32-bit integer or a pointer. */
typedef uintptr_t CPU_Uint32ptr;

#endif /* ASM */

#ifdef __cplusplus
}
#endif

#endif

/**@}*/