source: rtems/cpukit/score/cpu/riscv/include/rtems/score/cpu.h @ e43994d

Last change on this file since e43994d was e43994d, checked in by Sebastian Huber <sebastian.huber@…>, on Jun 27, 2018 at 8:05:50 AM

riscv: Optimize context switch and interrupts

Save/restore non-volatile registers in _CPU_Context_switch().

Save/restore volatile registers in _ISR_Handler().

Update #3433.

  • Property mode set to 100644
File size: 11.5 KB
Line 
1/**
2 * @file rtems/score/cpu.h
3 */
4
5/*
6 * Copyright (c) 2018 embedded brains GmbH
7 *
8 * Copyright (c) 2015 University of York.
9 * Hesham Almatary <hesham@alumni.york.ac.uk>
10 *
11 * COPYRIGHT (c) 1989-1999.
12 * On-Line Applications Research Corporation (OAR).
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
16 * are met:
17 * 1. Redistributions of source code must retain the above copyright
18 *    notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 *    notice, this list of conditions and the following disclaimer in the
21 *    documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 */
35
36#ifndef _RISCV_CPU_H
37#define _RISCV_CPU_H
38
39#ifdef __cplusplus
40extern "C" {
41#endif
42
43#include <rtems/score/basedefs.h>
44#include <rtems/score/riscv.h>
45
46#define RISCV_MSTATUS_MIE 0x8
47
48#define CPU_INLINE_ENABLE_DISPATCH       FALSE
49#define CPU_UNROLL_ENQUEUE_PRIORITY      TRUE
50
51#define CPU_ISR_PASSES_FRAME_POINTER FALSE
52
53#define CPU_HARDWARE_FP                  FALSE
54#define CPU_SOFTWARE_FP                  FALSE
55#define CPU_ALL_TASKS_ARE_FP             FALSE
56#define CPU_IDLE_TASK_IS_FP              FALSE
57#define CPU_USE_DEFERRED_FP_SWITCH       FALSE
58#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE
59#define CPU_STACK_GROWS_UP               FALSE
60
61#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (64)))
62#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES     FALSE
63#define CPU_BIG_ENDIAN                           FALSE
64#define CPU_LITTLE_ENDIAN                        TRUE
65#define CPU_MODES_INTERRUPT_MASK   0x0000000000000001
66
67#define CPU_CONTEXT_FP_SIZE  0
68
69#define CPU_PER_CPU_CONTROL_SIZE 0
70
71#define CPU_CACHE_LINE_BYTES 64
72
73#if __riscv_xlen == 32
74
75#define CPU_SIZEOF_POINTER 4
76
77#define CPU_STACK_MINIMUM_SIZE 4096
78
79#define CPU_EXCEPTION_FRAME_SIZE 128
80
81#elif __riscv_xlen == 64
82
83#define CPU_SIZEOF_POINTER 8
84
85#define CPU_STACK_MINIMUM_SIZE 8192
86
87#define CPU_EXCEPTION_FRAME_SIZE 256
88
89#endif /* __riscv_xlen */
90
91#define CPU_ALIGNMENT 8
92
93#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT
94
95#define CPU_PARTITION_ALIGNMENT CPU_ALIGNMENT
96
97#define CPU_STACK_ALIGNMENT 16
98
99#define CPU_INTERRUPT_STACK_ALIGNMENT CPU_CACHE_LINE_BYTES
100
101/*
102 *  Processor defined structures required for cpukit/score.
103 */
104
105#ifndef ASM
106
typedef struct {
#ifdef RTEMS_SMP
  /* Non-zero while this context executes on a processor; updated via
     _CPU_Context_Set_is_executing().  Volatile because it is written by
     context-switch code running on another processor. */
  volatile uint32_t is_executing;
#else
  /* Padding that keeps the layout identical to the RTEMS_SMP variant. */
  uint32_t reserved;
#endif
  /* Per-context copy of the ISR dispatch disable level. */
  uint32_t isr_dispatch_disable;
  /* Return address register (ra). */
  uintptr_t ra;
  /* Stack pointer (sp). */
  uintptr_t sp;
  /* Thread pointer (tp). */
  uintptr_t tp;
  /* Callee-saved (non-volatile) registers s0..s11, saved and restored by
     _CPU_Context_switch().  NOTE: field order is relied upon by the
     assembly context-switch code -- do not reorder. */
  uintptr_t s0;
  uintptr_t s1;
  uintptr_t s2;
  uintptr_t s3;
  uintptr_t s4;
  uintptr_t s5;
  uintptr_t s6;
  uintptr_t s7;
  uintptr_t s8;
  uintptr_t s9;
  uintptr_t s10;
  uintptr_t s11;
} Context_Control;
130
131#define _CPU_Context_Get_SP( _context ) \
132  (_context)->sp
133
/*
 * Floating point context area.  CPU_HARDWARE_FP and CPU_SOFTWARE_FP are
 * both FALSE for this port, so this structure is a placeholder only.
 */
typedef struct {
  /** TODO FPU registers are listed here */
  double  some_float_register;
} Context_Control_fp;

/*
 * Declared extern: the header must only declare this object.  Without
 * extern every translation unit that includes this file emits its own
 * definition, which fails to link under -fno-common (the GCC >= 10
 * default).  NOTE(review): the single definition must be provided by the
 * port's cpu.c -- confirm it exists there.
 */
extern Context_Control_fp _CPU_Null_fp_context;
140
141#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
142
143#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
144
145#define _CPU_Initialize_vectors()
146
/*
 * Disables machine-mode interrupts and returns the previous state of the
 * mstatus.MIE bit (non-zero if interrupts were enabled before the call).
 *
 * The csrrc instruction atomically reads mstatus into the output operand
 * and clears the MIE bit, so no interrupt can occur between the read and
 * the clear.
 */
static inline uint32_t riscv_interrupt_disable( void )
{
  unsigned long mstatus;

  __asm__ volatile (
    "csrrc %0, mstatus, " RTEMS_XSTRING( RISCV_MSTATUS_MIE ) :
      "=&r" ( mstatus )
  );

  return mstatus & RISCV_MSTATUS_MIE;
}
158
/*
 * Restores the interrupt state saved by riscv_interrupt_disable().  The
 * csrrs instruction sets only the bits present in level, so a level of
 * zero (interrupts were already disabled) leaves mstatus.MIE cleared.
 */
static inline void riscv_interrupt_enable( uint32_t level )
{
  __asm__ volatile ( "csrrs zero, mstatus, %0" : : "r" ( level ) );
}
163
/* Disable interrupts and store the previous MIE state in _level. */
#define _CPU_ISR_Disable( _level ) \
    _level = riscv_interrupt_disable()

/* Restore the interrupt state previously saved in _level. */
#define _CPU_ISR_Enable( _level )  \
  riscv_interrupt_enable( _level )

/* Briefly open an interrupt window: re-enable interrupts according to
   _level, then disable them again (the return value of the second
   disable is intentionally discarded -- _level is still valid). */
#define _CPU_ISR_Flash( _level ) \
  do{ \
      _CPU_ISR_Enable( _level ); \
      riscv_interrupt_disable(); \
    } while(0)
175
176RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( unsigned long level )
177{
178  return ( level & RISCV_MSTATUS_MIE ) != 0;
179}
180
/*
 * Maps the RTEMS interrupt level onto the hardware: level 0 means
 * interrupts enabled (set mstatus.MIE), any non-zero level means
 * interrupts disabled (clear mstatus.MIE).
 */
RTEMS_INLINE_ROUTINE void _CPU_ISR_Set_level( uint32_t level )
{
  if ( ( level & CPU_MODES_INTERRUPT_MASK) == 0 ) {
    /* Enable machine-mode interrupts. */
    __asm__ volatile (
      "csrrs zero, mstatus, " RTEMS_XSTRING( RISCV_MSTATUS_MIE )
    );
  } else {
    /* Disable machine-mode interrupts. */
    __asm__ volatile (
      "csrrc zero, mstatus, " RTEMS_XSTRING( RISCV_MSTATUS_MIE )
    );
  }
}
193
194uint32_t _CPU_ISR_Get_level( void );
195
196/* end of ISR handler macros */
197
198void _CPU_Context_Initialize(
199  Context_Control *context,
200  void            *stack_area_begin,
201  size_t           stack_area_size,
202  uint32_t         new_level,
203  void          ( *entry_point )( void ),
204  bool             is_fp,
205  void            *tls_area
206);
207
208#define _CPU_Context_Restart_self( _the_context ) \
209   _CPU_Context_restore( (_the_context) )
210
211
212#define _CPU_Context_Fp_start( _base, _offset ) \
213   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
214
/*
 * Initialize the FP context area indirectly referenced by _destination
 * with the null FP context.  Wrapped in do { } while (0) instead of a
 * bare brace block so the macro expands to a single statement and is
 * safe in an unbraced if/else (a trailing semicolon after a brace block
 * would otherwise terminate the if).
 */
#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
    *(*(_destination)) = _CPU_Null_fp_context; \
  } while (0)
219
220extern void _CPU_Fatal_halt(uint32_t source, uint32_t error) RTEMS_NO_RETURN;
221
222/* end of Fatal Error manager macros */
223
224#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
225#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
226
227#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
228
229#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
230  { \
231    (_output) = 0;   /* do something to prevent warnings */ \
232  }
233#endif
234
235/* end of Bitfield handler macros */
236
237/*
238 *  This routine builds the mask which corresponds to the bit fields
239 *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
240 *  for that routine.
241 *
242 */
243
244#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
245
246#define _CPU_Priority_Mask( _bit_number ) \
247    (1 << _bit_number)
248
249#endif
250
251#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
252
253#define _CPU_Priority_bits_index( _priority ) \
254  (_priority)
255
256#endif
257
258#define CPU_MAXIMUM_PROCESSORS 32
259
260#define CPU_TIMESTAMP_USE_STRUCT_TIMESPEC FALSE
261#define CPU_TIMESTAMP_USE_INT64 TRUE
262#define CPU_TIMESTAMP_USE_INT64_INLINE FALSE
263
/*
 * Port-specific per-CPU state.  The RISC-V port keeps none, so the
 * structure is empty (an empty struct is a GNU C extension; this matches
 * CPU_PER_CPU_CONTROL_SIZE being 0 above).
 */
typedef struct {
  /* There is no CPU specific per-CPU state */
} CPU_Per_CPU_control;
267
268typedef uint16_t Priority_bit_map_Word;
269
/*
 * CPU exception frame: the integer register file x0..x31 as saved by the
 * exception handler; printed by _CPU_Exception_frame_print().  The stray
 * duplicate semicolon after the member was removed -- an empty
 * declaration inside a struct is a constraint violation in strict ISO C.
 */
typedef struct {
  unsigned long x[32];
} CPU_Exception_frame;
273
274/**
275 * @brief Prints the exception frame via printk().
276 *
277 * @see rtems_fatal() and RTEMS_FATAL_SOURCE_EXCEPTION.
278 */
279void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
280
281
282/* end of Priority handler macros */
283
284/* functions */
285
286/*
287 *  _CPU_Initialize
288 *
289 *  This routine performs CPU dependent initialization.
290 *
291 */
292
293void _CPU_Initialize(
294  void
295);
296
297/*
298 *  _CPU_ISR_install_raw_handler
299 *
300 *  This routine installs a "raw" interrupt handler directly into the
301 *  processor's vector table.
302 *
303 */
304
305void _CPU_ISR_install_raw_handler(
306  uint32_t    vector,
307  proc_ptr    new_handler,
308  proc_ptr   *old_handler
309);
310
311/*
312 *  _CPU_ISR_install_vector
313 *
314 *  This routine installs an interrupt vector.
315 *
316 *  NO_CPU Specific Information:
317 *
318 *  XXX document implementation including references if appropriate
319 */
320
321void _CPU_ISR_install_vector(
322  unsigned long    vector,
323  proc_ptr   new_handler,
324  proc_ptr   *old_handler
325);
326
327/*
328 *  _CPU_Thread_Idle_body
329 *
330 *  This routine is the CPU dependent IDLE thread body.
331 *
332 *  NOTE:  It need only be provided if CPU_PROVIDES_IDLE_THREAD_BODY
333 *         is TRUE.
334 *
335 */
336
337void *_CPU_Thread_Idle_body( uintptr_t ignored );
338
339/*
340 *  _CPU_Context_switch
341 *
342 *  This routine switches from the run context to the heir context.
343 *
344 *  RISCV Specific Information:
345 *
346 *  Please see the comments in the .c file for a description of how
347 *  this function works. There are several things to be aware of.
348 */
349
350void _CPU_Context_switch(
351  Context_Control  *run,
352  Context_Control  *heir
353);
354
355/*
356 *  _CPU_Context_restore
357 *
358 *  This routine is generally used only to restart self in an
359 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
360 *
361 *  NOTE: May be unnecessary to reload some registers.
362 *
363 */
364
365void _CPU_Context_restore(
366  Context_Control *new_context
367) RTEMS_NO_RETURN;
368
369/*
370 *  _CPU_Context_save_fp
371 *
372 *  This routine saves the floating point context passed to it.
373 *
374 */
375
376void _CPU_Context_save_fp(
377  void **fp_context_ptr
378);
379
380/*
381 *  _CPU_Context_restore_fp
382 *
383 *  This routine restores the floating point context passed to it.
384 *
385 */
386
387void _CPU_Context_restore_fp(
388  void **fp_context_ptr
389);
390
391/*  The following routine swaps the endian format of an unsigned int.
392 *  It must be static because it is referenced indirectly.
393 *
394 *  This version will work on any processor, but if there is a better
395 *  way for your CPU PLEASE use it.  The most common way to do this is to:
396 *
397 *     swap least significant two bytes with 16-bit rotate
398 *     swap upper and lower 16-bits
399 *     swap most significant two bytes with 16-bit rotate
400 *
401 *  Some CPUs have special instructions which swap a 32-bit quantity in
402 *  a single instruction (e.g. i486).  It is probably best to avoid
403 *  an "endian swapping control bit" in the CPU.  One good reason is
404 *  that interrupts would probably have to be disabled to insure that
405 *  an interrupt does not try to access the same "chunk" with the wrong
406 *  endian.  Another good reason is that on some CPUs, the endian bit
407 *  endianness for ALL fetches -- both code and data -- so the code
408 *  will be fetched incorrectly.
409 *
410 */
411
/*
 * Swap the endian format of a 32-bit unsigned integer.  Portable
 * mask-and-shift implementation; each byte of the input is moved to the
 * mirrored position in the result.
 */
static inline uint32_t CPU_swap_u32(
  uint32_t value
)
{
  uint32_t swapped;

  swapped  = ( value & 0x000000ffU ) << 24;
  swapped |= ( value & 0x0000ff00U ) << 8;
  swapped |= ( value & 0x00ff0000U ) >> 8;
  swapped |= ( value & 0xff000000U ) >> 24;

  return swapped;
}
426
/*
 * Swap the byte order of a 16-bit value.  Every use of the macro
 * argument is parenthesized so that expression arguments (for example
 * "a + b" or "flag ? x : y") expand with the intended precedence; the
 * original expansion applied & and >> to the raw argument text.
 */
#define CPU_swap_u16( value ) \
  (((( value ) & 0xff) << 8) | ((( value ) >> 8) & 0xff))
429
430void _CPU_Context_volatile_clobber( uintptr_t pattern );
431
432void _CPU_Context_validate( uintptr_t pattern );
433
434typedef uint32_t CPU_Counter_ticks;
435
436uint32_t _CPU_Counter_frequency( void );
437
438CPU_Counter_ticks _CPU_Counter_read( void );
439
440#ifdef RTEMS_SMP
441
442uint32_t _CPU_SMP_Initialize( void );
443
444bool _CPU_SMP_Start_processor( uint32_t cpu_index );
445
446void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );
447
448void _CPU_SMP_Prepare_start_multitasking( void );
449
/*
 * Returns the index of the current processor by reading the mhartid
 * (hardware thread ID) CSR.  NOTE(review): using mhartid directly as
 * the processor index assumes hart IDs are contiguous and start at 0 --
 * confirm against the target platform.
 */
static inline uint32_t _CPU_SMP_Get_current_processor( void )
{
  unsigned long mhartid;

  __asm__ volatile ( "csrr %0, mhartid" : "=&r" ( mhartid ) );

  return (uint32_t) mhartid;
}
458
459void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );
460
/*
 * Signals processors polling in _CPU_SMP_Processor_event_receive().  No
 * architecture-specific event instruction is issued; the empty asm with
 * a "memory" clobber acts as a compiler barrier only, forcing preceding
 * stores to be emitted before the event is considered sent.
 */
static inline void _CPU_SMP_Processor_event_broadcast( void )
{
  __asm__ volatile ( "" : : : "memory" );
}
465
/*
 * Waits for a processor event.  No architecture-specific wait
 * instruction is used; the empty asm with a "memory" clobber is a
 * compiler barrier that forces the caller's polling loop to re-read
 * shared state on each iteration.
 */
static inline void _CPU_SMP_Processor_event_receive( void )
{
  __asm__ volatile ( "" : : : "memory" );
}
470
471static inline bool _CPU_Context_Get_is_executing(
472  const Context_Control *context
473)
474{
475  return context->is_executing;
476}
477
478static inline void _CPU_Context_Set_is_executing(
479  Context_Control *context,
480  bool is_executing
481)
482{
483  context->is_executing = is_executing;
484}
485
486#endif /* RTEMS_SMP */
487
488/** Type that can store a 32-bit integer or a pointer. */
489typedef uintptr_t CPU_Uint32ptr;
490
491#endif /* ASM */
492
493#ifdef __cplusplus
494}
495#endif
496
497#endif
Note: See TracBrowser for help on using the repository browser.