source: rtems/cpukit/score/cpu/riscv/include/rtems/score/cpu.h @ 98f051e

Last change on this file since 98f051e was 98f051e, checked in by Sebastian Huber <sebastian.huber@…>, on Jun 27, 2018 at 6:08:10 AM

riscv: Remove RISCV_GCC_RED_ZONE_SIZE

The current ABI says that there is no stack red zone:

https://github.com/riscv/riscv-elf-psabi-doc/blob/master/riscv-elf.md

"Procedures must not rely upon the persistence of stack-allocated data
whose addresses lie below the stack pointer."

Update #3433.

  • Property mode set to 100644
File size: 11.7 KB
Line 
1/**
2 * @file rtems/score/cpu.h
3 */
4
5/*
6 * Copyright (c) 2018 embedded brains GmbH
7 *
8 * Copyright (c) 2015 University of York.
9 * Hesham Almatary <hesham@alumni.york.ac.uk>
10 *
11 * COPYRIGHT (c) 1989-1999.
12 * On-Line Applications Research Corporation (OAR).
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
16 * are met:
17 * 1. Redistributions of source code must retain the above copyright
18 *    notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 *    notice, this list of conditions and the following disclaimer in the
21 *    documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 */
35
36#ifndef _RISCV_CPU_H
37#define _RISCV_CPU_H
38
39#ifdef __cplusplus
40extern "C" {
41#endif
42
43#include <rtems/score/basedefs.h>
44#include <rtems/score/riscv.h>
45
/* MIE (machine interrupt enable) bit in the mstatus CSR */
#define RISCV_MSTATUS_MIE 0x8

/* Port feature options evaluated by cpukit/score; see the RTEMS CPU
   Architecture Supplement for the meaning of each flag. */
#define CPU_INLINE_ENABLE_DISPATCH       FALSE
#define CPU_UNROLL_ENQUEUE_PRIORITY      TRUE
#define CPU_ISR_PASSES_FRAME_POINTER 1
/* No hardware or software floating point support is configured yet */
#define CPU_HARDWARE_FP                  FALSE
#define CPU_SOFTWARE_FP                  FALSE
#define CPU_ALL_TASKS_ARE_FP             FALSE
#define CPU_IDLE_TASK_IS_FP              FALSE
#define CPU_USE_DEFERRED_FP_SWITCH       FALSE
/* This port supplies _CPU_Thread_Idle_body() (declared below) */
#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE
#define CPU_STACK_GROWS_UP               FALSE

#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (64)))
#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES     FALSE
/* RISC-V is little-endian */
#define CPU_BIG_ENDIAN                           FALSE
#define CPU_LITTLE_ENDIAN                        TRUE
/* Only bit 0 of a mode argument selects the interrupt disable state */
#define CPU_MODES_INTERRUPT_MASK   0x0000000000000001
65/*
66 *  Processor defined structures required for cpukit/score.
67 */
68
69#ifndef ASM
70
/*
 * Per-thread register context saved and restored by
 * _CPU_Context_switch() / _CPU_Context_restore() (declared below).
 */
typedef struct {
  /* riscv has 32 xlen-bit (where xlen can be 32 or 64) general purpose registers (x0-x31)*/
  unsigned long x[32];

  /* Special purpose registers */
  unsigned long mstatus;
  unsigned long mcause;
  unsigned long mepc;
  /* Saved per-thread ISR dispatch disable indicator */
  uint32_t isr_dispatch_disable;
#ifdef RTEMS_SMP
  /* True while this context executes on a processor; accessed via
     _CPU_Context_Get/Set_is_executing() below. */
  volatile bool is_executing;
#endif
} Context_Control;

/* x2 is the stack pointer register (sp) in the standard RISC-V ABI */
#define _CPU_Context_Get_SP( _context ) \
  (_context)->x[2]
87
88typedef struct {
89  /** TODO FPU registers are listed here */
90  double  some_float_register;
91} Context_Control_fp;
92
93typedef Context_Control CPU_Interrupt_frame;
94
95#define CPU_CONTEXT_FP_SIZE  0
96Context_Control_fp  _CPU_Null_fp_context;
97
98#define CPU_CACHE_LINE_BYTES 64
99
#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

/*
 * Minimum task stack size.  The RV64 value is parenthesized so the
 * macro behaves as a single value in expressions: the original
 * unparenthesized 4096 * 2 made `x / CPU_STACK_MINIMUM_SIZE` expand
 * to `x / 4096 * 2`, which evaluates as `(x / 4096) * 2`.
 */
#if __riscv_xlen == 32
#define CPU_STACK_MINIMUM_SIZE  4096
#else
#define CPU_STACK_MINIMUM_SIZE  ( 4096 * 2 )
#endif

/* Fundamental data type and allocator alignments (bytes) */
#define CPU_ALIGNMENT 8
#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT
#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT
#define CPU_STACK_ALIGNMENT        8

/* Interrupt stacks are aligned to a cache line */
#define CPU_INTERRUPT_STACK_ALIGNMENT CPU_CACHE_LINE_BYTES

/* Nothing to do: vector setup happens elsewhere on this port */
#define _CPU_Initialize_vectors()
115
/*
 * Disable maskable machine interrupts.
 *
 * csrrc atomically clears the MIE bit in the mstatus CSR and returns
 * the previous CSR value; only the prior MIE state is returned (0x8
 * when interrupts were enabled, 0 otherwise), suitable for a later
 * riscv_interrupt_enable().
 */
static inline uint32_t riscv_interrupt_disable( void )
{
  unsigned long mstatus;

  __asm__ volatile (
    "csrrc %0, mstatus, " RTEMS_XSTRING( RISCV_MSTATUS_MIE ) :
      "=&r" ( mstatus )
  );

  return mstatus & RISCV_MSTATUS_MIE;
}
127
/*
 * Restore the interrupt state returned by riscv_interrupt_disable().
 *
 * csrrs sets every bit of level in mstatus, so RISCV_MSTATUS_MIE
 * re-enables interrupts and 0 leaves them disabled.  level must not
 * contain any other bits, or they would be set in mstatus too.
 */
static inline void riscv_interrupt_enable( uint32_t level )
{
  __asm__ volatile ( "csrrs zero, mstatus, %0" : : "r" ( level ) );
}
132
/* Disable interrupts and store the previous MIE state in _level */
#define _CPU_ISR_Disable( _level ) \
    _level = riscv_interrupt_disable()

/* Restore the interrupt state captured by _CPU_ISR_Disable() */
#define _CPU_ISR_Enable( _level )  \
  riscv_interrupt_enable( _level )

/* Briefly re-enable interrupts inside a disabled section; _level is
   left unchanged so a subsequent _CPU_ISR_Enable() still works. */
#define _CPU_ISR_Flash( _level ) \
  do{ \
      _CPU_ISR_Enable( _level ); \
      riscv_interrupt_disable(); \
    } while(0)
144
145RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( unsigned long level )
146{
147  return ( level & RISCV_MSTATUS_MIE ) != 0;
148}
149
/*
 * Map an RTEMS interrupt level onto the hardware: level 0 enables
 * machine interrupts (csrrs sets mstatus.MIE), any non-zero level
 * disables them (csrrc clears mstatus.MIE).
 */
RTEMS_INLINE_ROUTINE void _CPU_ISR_Set_level( uint32_t level )
{
  if ( ( level & CPU_MODES_INTERRUPT_MASK) == 0 ) {
    __asm__ volatile (
      "csrrs zero, mstatus, " RTEMS_XSTRING( RISCV_MSTATUS_MIE )
    );
  } else {
    __asm__ volatile (
      "csrrc zero, mstatus, " RTEMS_XSTRING( RISCV_MSTATUS_MIE )
    );
  }
}
162
163uint32_t _CPU_ISR_Get_level( void );
164
165/* end of ISR handler macros */
166
/*
 * Initialize @a context so the thread begins execution at
 * @a entry_point with the given stack area, interrupt level and
 * thread-local storage area.
 */
void _CPU_Context_Initialize(
  Context_Control *context,
  void *stack_area_begin,
  size_t stack_area_size,
  unsigned long new_level,
  void (*entry_point)( void ),
  bool is_fp,
  void *tls_area
);

/* Restarting self is a plain restore of the saved context */
#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) )


/* Address of the FP context area inside a larger memory block */
#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

/* Initialize an FP context by copying the null FP context into it.
   NOTE(review): the double dereference implies _destination is a
   Context_Control_fp ** -- confirm against the callers. */
#define _CPU_Context_Initialize_fp( _destination ) \
  { \
   *(*(_destination)) = _CPU_Null_fp_context; \
  }

/* Halt the system on a fatal error; never returns. */
extern void _CPU_Fatal_halt(uint32_t source, uint32_t error) RTEMS_NO_RETURN;
190
191/* end of Fatal Error manager macros */
192
/* Use the generic score bitfield/priority search implementation */
#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
#define CPU_USE_GENERIC_BITFIELD_DATA TRUE

/* The blocks below compile only if a CPU specific bitfield
   implementation were selected, which it is not (see above). */
#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
  { \
    (_output) = 0;   /* do something to prevent warnings */ \
  }
#endif

/* end of Bitfield handler macros */

/*
 *  This routine builds the mask which corresponds to the bit fields
 *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
 *  for that routine.
 */

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Priority_Mask( _bit_number ) \
    (1 << _bit_number)

#endif

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Priority_bits_index( _priority ) \
  (_priority)

#endif

#define CPU_MAXIMUM_PROCESSORS 32

/* Timestamps are kept as 64-bit integers, not struct timespec */
#define CPU_TIMESTAMP_USE_STRUCT_TIMESPEC FALSE
#define CPU_TIMESTAMP_USE_INT64 TRUE
#define CPU_TIMESTAMP_USE_INT64_INLINE FALSE
232
/* NOTE(review): an empty struct is a GNU C extension, not ISO C --
   fine for a GCC-built RTEMS, but worth keeping in mind. */
typedef struct {
  /* There is no CPU specific per-CPU state */
} CPU_Per_CPU_control;
#endif /* ASM */
237
/* XLEN-dependent definitions.  LREG/SREG are the natural-width
   load/store mnemonics -- presumably for the assembly sources, since
   they are not valid C (TODO confirm). */
#if __riscv_xlen == 32
#define CPU_SIZEOF_POINTER 4

/* 32-bit load/store instructions */
#define LREG lw
#define SREG sw

#define CPU_EXCEPTION_FRAME_SIZE 128
#else /* xlen = 64 */
#define CPU_SIZEOF_POINTER 8

/* 64-bit load/store instructions */
#define LREG ld
#define SREG sd

#define CPU_EXCEPTION_FRAME_SIZE 256
#endif

/* Matches the empty CPU_Per_CPU_control above */
#define CPU_PER_CPU_CONTROL_SIZE 0
257
258#ifndef ASM
typedef uint16_t Priority_bit_map_Word;

/*
 * CPU specific exception frame: the 32 general purpose registers.
 * (The stray second semicolon after the member was removed; an empty
 * declaration inside a struct is not valid ISO C.)
 */
typedef struct {
  unsigned long x[32];
} CPU_Exception_frame;

/**
 * @brief Prints the exception frame via printk().
 *
 * @see rtems_fatal() and RTEMS_FATAL_SOURCE_EXCEPTION.
 */
void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
271
272
273/* end of Priority handler macros */
274
275/* functions */
276
/*
 *  _CPU_Initialize
 *
 *  Performs CPU dependent initialization.
 */

void _CPU_Initialize(
  void
);

/*
 *  _CPU_ISR_install_raw_handler
 *
 *  Installs a "raw" interrupt handler directly into the processor's
 *  vector table; the previous handler is returned via old_handler.
 */

void _CPU_ISR_install_raw_handler(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_ISR_install_vector
 *
 *  Installs an interrupt vector; the previous handler is returned
 *  via old_handler.
 */

void _CPU_ISR_install_vector(
  unsigned long    vector,
  proc_ptr   new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_Thread_Idle_body
 *
 *  CPU dependent IDLE thread body.  Provided because
 *  CPU_PROVIDES_IDLE_THREAD_BODY is TRUE above.
 */

void *_CPU_Thread_Idle_body( uintptr_t ignored );
329
/*
 *  _CPU_Context_switch
 *
 *  Saves the current register set into *run and loads the register
 *  set from *heir.  See the assembly implementation for details.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  Restores the given context without saving the current one;
 *  generally used only to restart self efficiently.  Never returns.
 */

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_NO_RETURN;

/*
 *  _CPU_Context_save_fp
 *
 *  Saves the floating point context passed to it.
 */

void _CPU_Context_save_fp(
  void **fp_context_ptr
);

/*
 *  _CPU_Context_restore_fp
 *
 *  Restores the floating point context passed to it.
 */

void _CPU_Context_restore_fp(
  void **fp_context_ptr
);
381
/*
 *  CPU_swap_u32 - swap the byte order of a 32-bit value.
 *
 *  It must be static because it is referenced indirectly.
 *
 *  Generic C version that works on any processor; CPUs with a
 *  single-instruction byte swap may use it instead.  An "endian
 *  swapping control bit" in the CPU should be avoided: besides
 *  requiring interrupts to be disabled around its use, on some CPUs
 *  such a bit changes the endianness of ALL fetches -- both code and
 *  data -- so code would be fetched incorrectly.
 */

static inline uint32_t CPU_swap_u32(
  uint32_t value
)
{
  uint32_t swapped;

  swapped  = ( value & 0x000000ffU ) << 24;  /* byte 0 -> byte 3 */
  swapped |= ( value & 0x0000ff00U ) << 8;   /* byte 1 -> byte 2 */
  swapped |= ( value & 0x00ff0000U ) >> 8;   /* byte 2 -> byte 1 */
  swapped |= ( value >> 24 ) & 0x000000ffU;  /* byte 3 -> byte 0 */

  return swapped;
}
417
/* Swap the byte order of a 16-bit value.  Every use of the argument
   is parenthesized so expression arguments expand safely. */
#define CPU_swap_u16( value ) \
  ( ( ( (value) & 0xff ) << 8 ) | ( ( (value) >> 8 ) & 0xff ) )
420
/*
 * Clobber the volatile registers with the given pattern -- not yet
 * implemented for this port, so this is a no-op.
 */
static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
{
  (void) pattern; /* TODO: write the pattern into the volatile registers */
}
425
/*
 * Validate register contents against the given pattern.  TODO: not
 * implemented for this port; this placeholder spins forever and never
 * returns (the pattern argument is unused).
 */
static inline void _CPU_Context_validate( uintptr_t pattern )
{
  while (1) {
    /* TODO */
  }
}
432
433typedef uint32_t CPU_Counter_ticks;
434
435uint32_t _CPU_Counter_frequency( void );
436
437CPU_Counter_ticks _CPU_Counter_read( void );
438
#ifdef RTEMS_SMP

/* Performs port-specific SMP initialization; returns a processor count */
uint32_t _CPU_SMP_Initialize( void );

/* Starts the processor with the given index; returns true on success */
bool _CPU_SMP_Start_processor( uint32_t cpu_index );

/* Completes SMP initialization once cpu_count processors are known */
void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

/* Hook invoked just before multitasking starts on this processor */
void _CPU_SMP_Prepare_start_multitasking( void );
448
/*
 * Reads the mhartid CSR to obtain the current processor index.
 * NOTE(review): this assumes hart IDs are zero-based and contiguous so
 * mhartid can serve directly as the processor index -- confirm with
 * the BSP/platform configuration.
 */
static inline uint32_t _CPU_SMP_Get_current_processor( void )
{
  unsigned long mhartid;

  __asm__ volatile ( "csrr %0, mhartid" : "=&r" ( mhartid ) );

  return (uint32_t) mhartid;
}
457
/* Sends an inter-processor interrupt to the given processor */
void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

/*
 * Processor event broadcast/receive.  Only compiler memory barriers
 * are issued (empty asm with a "memory" clobber); no inter-processor
 * signalling or wait instruction is emitted here.
 */
static inline void _CPU_SMP_Processor_event_broadcast( void )
{
  __asm__ volatile ( "" : : : "memory" );
}

static inline void _CPU_SMP_Processor_event_receive( void )
{
  __asm__ volatile ( "" : : : "memory" );
}
469
/* Reads the is_executing flag of the given context */
static inline bool _CPU_Context_Get_is_executing(
  const Context_Control *context
)
{
  return context->is_executing;
}

/* Writes the is_executing flag of the given context */
static inline void _CPU_Context_Set_is_executing(
  Context_Control *context,
  bool is_executing
)
{
  context->is_executing = is_executing;
}
484
485#endif /* RTEMS_SMP */
486
/** Type that can store a 32-bit integer or a pointer
    (uintptr_t is at least 32 bits wide on both RV32 and RV64). */
typedef uintptr_t CPU_Uint32ptr;
489
490#endif /* ASM */
491
492#ifdef __cplusplus
493}
494#endif
495
496#endif
Note: See TracBrowser for help on using the repository browser.