source: rtems/cpukit/score/cpu/riscv/include/rtems/score/cpu.h @ a8188730

5
Last change on this file since a8188730 was a8188730, checked in by Sebastian Huber <sebastian.huber@…>, on Jun 27, 2018 at 7:43:39 AM

riscv: Fix _CPU_Context_Initialize() prototype

Update #3433.

  • Property mode set to 100644
File size: 11.3 KB
Line 
1/**
2 * @file rtems/score/cpu.h
3 */
4
5/*
6 * Copyright (c) 2018 embedded brains GmbH
7 *
8 * Copyright (c) 2015 University of York.
9 * Hesham Almatary <hesham@alumni.york.ac.uk>
10 *
11 * COPYRIGHT (c) 1989-1999.
12 * On-Line Applications Research Corporation (OAR).
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
16 * are met:
17 * 1. Redistributions of source code must retain the above copyright
18 *    notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 *    notice, this list of conditions and the following disclaimer in the
21 *    documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 */
35
#ifndef _RISCV_CPU_H
#define _RISCV_CPU_H

#ifdef __cplusplus
extern "C" {
#endif

#include <rtems/score/basedefs.h>
#include <rtems/score/riscv.h>

/* Machine Interrupt Enable (MIE) bit in the RISC-V mstatus CSR */
#define RISCV_MSTATUS_MIE 0x8

/* RTEMS score configuration options for this CPU port */
#define CPU_INLINE_ENABLE_DISPATCH       FALSE
#define CPU_UNROLL_ENQUEUE_PRIORITY      TRUE

#define CPU_ISR_PASSES_FRAME_POINTER FALSE

/* No hardware or software floating point support in this port */
#define CPU_HARDWARE_FP                  FALSE
#define CPU_SOFTWARE_FP                  FALSE
#define CPU_ALL_TASKS_ARE_FP             FALSE
#define CPU_IDLE_TASK_IS_FP              FALSE
#define CPU_USE_DEFERRED_FP_SWITCH       FALSE
/* The port supplies _CPU_Thread_Idle_body() below */
#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE
#define CPU_STACK_GROWS_UP               FALSE

#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (64)))
#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES     FALSE
#define CPU_BIG_ENDIAN                           FALSE
#define CPU_LITTLE_ENDIAN                        TRUE
/* Only bit 0 of the mode carries interrupt state, see _CPU_ISR_Set_level() */
#define CPU_MODES_INTERRUPT_MASK   0x0000000000000001

/* No FP context is saved (FP support disabled above) */
#define CPU_CONTEXT_FP_SIZE  0

#define CPU_PER_CPU_CONTROL_SIZE 0

#define CPU_CACHE_LINE_BYTES 64

/* Pointer size, minimum stack, and exception frame size depend on xlen */
#if __riscv_xlen == 32

#define CPU_SIZEOF_POINTER 4

#define CPU_STACK_MINIMUM_SIZE 4096

/* 32 registers * 4 bytes, matches CPU_Exception_frame below */
#define CPU_EXCEPTION_FRAME_SIZE 128

#elif __riscv_xlen == 64

#define CPU_SIZEOF_POINTER 8

#define CPU_STACK_MINIMUM_SIZE 8192

/* 32 registers * 8 bytes, matches CPU_Exception_frame below */
#define CPU_EXCEPTION_FRAME_SIZE 256

#endif /* __riscv_xlen */

#define CPU_ALIGNMENT 8

#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT

#define CPU_PARTITION_ALIGNMENT CPU_ALIGNMENT

#define CPU_STACK_ALIGNMENT 16

#define CPU_INTERRUPT_STACK_ALIGNMENT CPU_CACHE_LINE_BYTES
100
101/*
102 *  Processor defined structures required for cpukit/score.
103 */
104
#ifndef ASM

/*
 * Thread register context saved and restored by _CPU_Context_switch().
 * NOTE(review): the member offsets are presumably hard-coded in the
 * assembly context switch code — do not reorder without updating it.
 */
typedef struct {
  /* riscv has 32 xlen-bit (where xlen can be 32 or 64) general purpose registers (x0-x31)*/
  unsigned long x[32];

  /* Per-thread copy of the ISR dispatch disable level */
  uint32_t isr_dispatch_disable;
#ifdef RTEMS_SMP
  /* True while this context runs on a processor (SMP handoff flag) */
  volatile bool is_executing;
#endif
} Context_Control;

/* x2 is the RISC-V ABI stack pointer register (sp) */
#define _CPU_Context_Get_SP( _context ) \
  (_context)->x[2]
119
/*
 * Floating point context area.  The FPU is not supported by this port
 * (CPU_HARDWARE_FP is FALSE), so this is only a placeholder.
 */
typedef struct {
  /** TODO FPU registers are listed here */
  double  some_float_register;
} Context_Control_fp;

/*
 * Declaration only: defining the object here would create one
 * (common) symbol in every translation unit that includes this
 * header, which fails to link with -fno-common.  The single
 * definition must live in a .c file of the port.
 */
extern Context_Control_fp _CPU_Null_fp_context;
126
#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/* Nothing to do here; vector setup is handled elsewhere in the port */
#define _CPU_Initialize_vectors()

/*
 * Disable machine interrupts and return the previous MIE state.
 *
 * csrrc atomically reads mstatus and clears the bits given by the
 * immediate (the MIE bit).  Only the MIE bit of the old value is
 * returned; riscv_interrupt_enable() takes exactly this value to
 * restore the previous state.
 */
static inline uint32_t riscv_interrupt_disable( void )
{
  unsigned long mstatus;

  __asm__ volatile (
    "csrrc %0, mstatus, " RTEMS_XSTRING( RISCV_MSTATUS_MIE ) :
      "=&r" ( mstatus )
  );

  return mstatus & RISCV_MSTATUS_MIE;
}
144
/*
 * Restore the MIE state previously returned by
 * riscv_interrupt_disable().  csrrs sets the bits given in level, so
 * a level of 0 (interrupts were already disabled) is a no-op and
 * leaves interrupts disabled.
 */
static inline void riscv_interrupt_enable( uint32_t level )
{
  __asm__ volatile ( "csrrs zero, mstatus, %0" : : "r" ( level ) );
}
149
/* Save the current MIE state into _level and disable interrupts */
#define _CPU_ISR_Disable( _level ) \
    _level = riscv_interrupt_disable()

/* Restore the MIE state previously saved by _CPU_ISR_Disable() */
#define _CPU_ISR_Enable( _level )  \
  riscv_interrupt_enable( _level )

/*
 * Briefly open an interrupt window: enable with the saved level, then
 * disable again.  The return value of riscv_interrupt_disable() is
 * discarded on purpose — _level already holds the state to restore.
 */
#define _CPU_ISR_Flash( _level ) \
  do{ \
      _CPU_ISR_Enable( _level ); \
      riscv_interrupt_disable(); \
    } while(0)
161
162RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( unsigned long level )
163{
164  return ( level & RISCV_MSTATUS_MIE ) != 0;
165}
166
/*
 * Set the interrupt level.  Levels on this port are binary: level 0
 * enables machine interrupts (set MIE via csrrs), any non-zero level
 * disables them (clear MIE via csrrc).
 */
RTEMS_INLINE_ROUTINE void _CPU_ISR_Set_level( uint32_t level )
{
  if ( ( level & CPU_MODES_INTERRUPT_MASK) == 0 ) {
    __asm__ volatile (
      "csrrs zero, mstatus, " RTEMS_XSTRING( RISCV_MSTATUS_MIE )
    );
  } else {
    __asm__ volatile (
      "csrrc zero, mstatus, " RTEMS_XSTRING( RISCV_MSTATUS_MIE )
    );
  }
}

/* Current interrupt level; implemented in the port's .c code */
uint32_t _CPU_ISR_Get_level( void );

/* end of ISR handler macros */

/*
 * Initialize the register context of a new thread.
 *
 * @param context the context to initialize
 * @param stack_area_begin begin of the stack area
 * @param stack_area_size size of the stack area in bytes
 * @param new_level initial interrupt level of the thread
 * @param entry_point thread entry point
 * @param is_fp FP thread flag — presumably ignored, no FP support in
 *   this port; confirm against the implementation
 * @param tls_area begin of the thread-local storage area, if any
 */
void _CPU_Context_Initialize(
  Context_Control *context,
  void            *stack_area_begin,
  size_t           stack_area_size,
  uint32_t         new_level,
  void          ( *entry_point )( void ),
  bool             is_fp,
  void            *tls_area
);
193
/* Restarting the calling thread is simply a context restore to itself */
#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) )


#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

/*
 * Copy the null FP context into the destination.  NOTE(review): the
 * double dereference implies _destination is a Context_Control_fp ** —
 * confirm against the score callers.
 */
#define _CPU_Context_Initialize_fp( _destination ) \
  { \
   *(*(_destination)) = _CPU_Null_fp_context; \
  }

/* Halt the system on a fatal error; does not return */
extern void _CPU_Fatal_halt(uint32_t source, uint32_t error) RTEMS_NO_RETURN;

/* end of Fatal Error manager macros */

/* The generic (portable) bitfield code is used by this port */
#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
#define CPU_USE_GENERIC_BITFIELD_DATA TRUE

/* Dead branch: generic bitfield code is enabled above */
#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
  { \
    (_output) = 0;   /* do something to prevent warnings */ \
  }
#endif
220
/* end of Bitfield handler macros */

/*
 *  This routine builds the mask which corresponds to the bit fields
 *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
 *  for that routine.
 */

/* Dead branch: CPU_USE_GENERIC_BITFIELD_CODE is TRUE above */
#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Priority_Mask( _bit_number ) \
    (1 << _bit_number)

#endif

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Priority_bits_index( _priority ) \
  (_priority)

#endif

#define CPU_MAXIMUM_PROCESSORS 32

/* Timestamps are kept as 64-bit integers, via out-of-line helpers */
#define CPU_TIMESTAMP_USE_STRUCT_TIMESPEC FALSE
#define CPU_TIMESTAMP_USE_INT64 TRUE
#define CPU_TIMESTAMP_USE_INT64_INLINE FALSE

/*
 * NOTE(review): an empty struct is a GNU extension, not ISO C; this
 * follows existing RTEMS port practice.
 */
typedef struct {
  /* There is no CPU specific per-CPU state */
} CPU_Per_CPU_control;

typedef uint16_t Priority_bit_map_Word;
255
/*
 * CPU exception frame: the 32 general purpose registers x0..x31.
 * Its size must stay consistent with CPU_EXCEPTION_FRAME_SIZE
 * (32 registers * xlen/8 bytes).
 */
typedef struct {
  unsigned long x[32];
} CPU_Exception_frame;
259
/**
 * @brief Prints the exception frame via printk().
 *
 * @param frame the exception frame to print
 *
 * @see rtems_fatal() and RTEMS_FATAL_SOURCE_EXCEPTION.
 */
void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
266
267
/* end of Priority handler macros */

/* functions */

/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 */

void _CPU_Initialize(
  void
);

/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs a "raw" interrupt handler directly into the
 *  processor's vector table.  The previous handler, if any, is
 *  returned through old_handler.
 */

void _CPU_ISR_install_raw_handler(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.
 *
 *  NOTE(review): the vector parameter is unsigned long here but
 *  uint32_t in _CPU_ISR_install_raw_handler() above — consider
 *  unifying the two signatures (requires touching the definitions).
 */

void _CPU_ISR_install_vector(
  unsigned long    vector,
  proc_ptr   new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_Thread_Idle_body
 *
 *  This routine is the CPU dependent IDLE thread body.
 *
 *  NOTE:  It need only be provided if CPU_PROVIDES_IDLE_THREAD_BODY
 *         is TRUE (it is, see above).
 */

void *_CPU_Thread_Idle_body( uintptr_t ignored );
324
/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 *
 *  RISCV Specific Information:
 *
 *  Please see the comments in the .c file for a description of how
 *  this function works. There are several things to be aware of.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *  It does not return (RTEMS_NO_RETURN).
 *
 *  NOTE: May be unnecessary to reload some registers.
 */

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_NO_RETURN;

/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 *  Placeholder: this port has no FP support (CPU_HARDWARE_FP FALSE).
 */

void _CPU_Context_save_fp(
  void **fp_context_ptr
);

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 *  Placeholder: this port has no FP support (CPU_HARDWARE_FP FALSE).
 */

void _CPU_Context_restore_fp(
  void **fp_context_ptr
);
376
377/*  The following routine swaps the endian format of an unsigned int.
378 *  It must be static because it is referenced indirectly.
379 *
380 *  This version will work on any processor, but if there is a better
381 *  way for your CPU PLEASE use it.  The most common way to do this is to:
382 *
383 *     swap least significant two bytes with 16-bit rotate
384 *     swap upper and lower 16-bits
385 *     swap most significant two bytes with 16-bit rotate
386 *
387 *  Some CPUs have special instructions which swap a 32-bit quantity in
388 *  a single instruction (e.g. i486).  It is probably best to avoid
389 *  an "endian swapping control bit" in the CPU.  One good reason is
390 *  that interrupts would probably have to be disabled to insure that
391 *  an interrupt does not try to access the same "chunk" with the wrong
 *  endian.  Another good reason is that on some CPUs, the endian bit
 *  changes the endianness for ALL fetches -- both code and data -- so
 *  the code will be fetched incorrectly.
395 *
396 */
397
/*
 *  Swap the byte order of a 32-bit value using shifts and masks.
 *  Static because it is referenced indirectly (see the note above).
 */
static inline uint32_t CPU_swap_u32(
  uint32_t value
)
{
  uint32_t upper_half;
  uint32_t lower_half;

  /* bytes 1 and 2 of the input become bytes 4 and 3 of the result */
  upper_half = ( value << 24 ) | ( ( value & 0x0000ff00 ) << 8 );
  /* bytes 3 and 4 of the input become bytes 2 and 1 of the result */
  lower_half = ( ( value >> 8 ) & 0x0000ff00 ) | ( value >> 24 );

  return upper_half | lower_half;
}
412
/*
 * Swap the byte order of a 16-bit value.  The macro argument is fully
 * parenthesized so that expression arguments (e.g. a ternary) expand
 * with the intended operator precedence; note the argument is
 * evaluated twice, so avoid side effects in it.
 */
#define CPU_swap_u16( value ) \
  ((((value) & 0xff) << 8) | (((value) >> 8) & 0xff))
415
/* Fill volatile registers with the given pattern (context test support) */
void _CPU_Context_volatile_clobber( uintptr_t pattern );

/* Check register contents against the given pattern (context test support) */
void _CPU_Context_validate( uintptr_t pattern );

/* Tick type of the free-running CPU counter */
typedef uint32_t CPU_Counter_ticks;

/* Frequency of the CPU counter in Hz */
uint32_t _CPU_Counter_frequency( void );

/* Current value of the free-running CPU counter */
CPU_Counter_ticks _CPU_Counter_read( void );

#ifdef RTEMS_SMP

/* SMP initialization; see the RTEMS CPU port interface for the contract */
uint32_t _CPU_SMP_Initialize( void );

/* Start the processor with the given index; true on success */
bool _CPU_SMP_Start_processor( uint32_t cpu_index );

/* Final SMP initialization step once cpu_count is known */
void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

/* Prepare this processor to start multitasking */
void _CPU_SMP_Prepare_start_multitasking( void );
435
/*
 * The current processor index is the hart ID read from the mhartid
 * CSR.  NOTE(review): this assumes hart IDs are contiguous starting
 * at zero — confirm for the target platform.
 */
static inline uint32_t _CPU_SMP_Get_current_processor( void )
{
  unsigned long mhartid;

  __asm__ volatile ( "csrr %0, mhartid" : "=&r" ( mhartid ) );

  return (uint32_t) mhartid;
}
444
/* Raise an inter-processor interrupt on the given processor */
void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

/*
 * Inter-processor event notification.  Both operations are only a
 * compiler memory barrier here; no fence or wait-for-event
 * instruction is emitted.
 */
static inline void _CPU_SMP_Processor_event_broadcast( void )
{
  __asm__ volatile ( "" : : : "memory" );
}

static inline void _CPU_SMP_Processor_event_receive( void )
{
  __asm__ volatile ( "" : : : "memory" );
}
456
457static inline bool _CPU_Context_Get_is_executing(
458  const Context_Control *context
459)
460{
461  return context->is_executing;
462}
463
464static inline void _CPU_Context_Set_is_executing(
465  Context_Control *context,
466  bool is_executing
467)
468{
469  context->is_executing = is_executing;
470}
471
#endif /* RTEMS_SMP */

/** Type that can store a 32-bit integer or a pointer. */
typedef uintptr_t CPU_Uint32ptr;

#endif /* ASM */

#ifdef __cplusplus
}
#endif

#endif /* _RISCV_CPU_H */
Note: See TracBrowser for help on using the repository browser.