source: rtems/cpukit/score/cpu/riscv/include/rtems/score/cpu.h @ bc3bdf2

5
Last change on this file since bc3bdf2 was bc3bdf2, checked in by Sebastian Huber <sebastian.huber@…>, on 06/28/18 at 12:59:38

riscv: Optimize and fix interrupt disable/enable

Use the atomic read and clear operation to disable interrupts.

Do not write the complete mstatus. Instead, set only the MIE bit
depending on the level parameter.

Update #3433.

  • Property mode set to 100644
File size: 11.8 KB
Line 
1/**
2 * @file rtems/score/cpu.h
3 */
4
5/*
6 *
7 * Copyright (c) 2015 University of York.
8 * Hesham Almatary <hesham@alumni.york.ac.uk>
9 *
10 * COPYRIGHT (c) 1989-1999.
11 * On-Line Applications Research Corporation (OAR).
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 *    notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 *    notice, this list of conditions and the following disclaimer in the
20 *    documentation and/or other materials provided with the distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35#ifndef _RISCV_CPU_H
36#define _RISCV_CPU_H
37
38#ifdef __cplusplus
39extern "C" {
40#endif
41
42#include <rtems/score/basedefs.h>
43#include <rtems/score/riscv.h> /* pick up machine definitions */
44#include <rtems/score/riscv-utility.h>
45#ifndef ASM
46#include <rtems/bspIo.h>
47#include <stdint.h>
48#include <stdio.h> /* for printk */
49#endif
50
/* Machine Interrupt Enable (MIE) bit of the RISC-V mstatus CSR */
#define RISCV_MSTATUS_MIE 0x8

/* Port configuration options consumed by the generic score code */
#define CPU_INLINE_ENABLE_DISPATCH       FALSE
#define CPU_UNROLL_ENQUEUE_PRIORITY      TRUE
/* Interrupt handlers receive a pointer to the interrupt frame */
#define CPU_ISR_PASSES_FRAME_POINTER 1
/* No hardware or software floating point support is configured */
#define CPU_HARDWARE_FP                  FALSE
#define CPU_SOFTWARE_FP                  FALSE
#define CPU_ALL_TASKS_ARE_FP             FALSE
#define CPU_IDLE_TASK_IS_FP              FALSE
#define CPU_USE_DEFERRED_FP_SWITCH       FALSE
/* This port supplies _CPU_Thread_Idle_body() */
#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE
/* The stack grows from high to low addresses */
#define CPU_STACK_GROWS_UP               FALSE

/* Align performance-critical structures to a 64-byte boundary */
#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (64)))
#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES     FALSE
/* RISC-V runs little-endian here */
#define CPU_BIG_ENDIAN                           FALSE
#define CPU_LITTLE_ENDIAN                        TRUE
/* Only the interrupt enable/disable bit is tracked in task modes */
#define CPU_MODES_INTERRUPT_MASK   0x0000000000000001
69
70/*
71 *  Processor defined structures required for cpukit/score.
72 */
73
74#ifndef ASM
75
/*
 * Per-thread register context saved and restored by _CPU_Context_switch().
 * The layout must match the offsets used by the assembly context-switch
 * code, so do not reorder members.
 */
typedef struct {
  /* riscv has 32 xlen-bit (where xlen can be 32 or 64) general purpose registers (x0-x31)*/
  unsigned long x[32];

  /* Special purpose registers */
  unsigned long mstatus;
  unsigned long mcause;
  unsigned long mepc;
#ifdef RTEMS_SMP
  /* True while a processor executes this context; read/written via
     _CPU_Context_Get_is_executing()/_CPU_Context_Set_is_executing() */
  volatile bool is_executing;
#endif
} Context_Control;

/* x2 is the RISC-V ABI stack pointer register (sp) */
#define _CPU_Context_Get_SP( _context ) \
  (_context)->x[2]

/* Floating point context; the FPU register set is not yet implemented */
typedef struct {
  /** TODO FPU registers are listed here */
  double  some_float_register;
} Context_Control_fp;

/* The interrupt frame has the same layout as a full thread context */
typedef Context_Control CPU_Interrupt_frame;
98
99#define CPU_CONTEXT_FP_SIZE  0
100Context_Control_fp  _CPU_Null_fp_context;
101
/* Cache line size in bytes */
#define CPU_CACHE_LINE_BYTES 64

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
/* RV64 gets a larger minimum stack since registers are twice as wide */
#if __riscv_xlen == 32
#define CPU_STACK_MINIMUM_SIZE  4096
#else
#define CPU_STACK_MINIMUM_SIZE  4096 * 2
#endif
#define CPU_ALIGNMENT 8
#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT
#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT
#define CPU_STACK_ALIGNMENT        8

/* Interrupt stacks are aligned to a cache line boundary */
#define CPU_INTERRUPT_STACK_ALIGNMENT CPU_CACHE_LINE_BYTES

/* No interrupt vector table setup is necessary for this port */
#define _CPU_Initialize_vectors()
119
/*
 * Disable maskable interrupts and return the previous interrupt state.
 *
 * The csrrc instruction reads mstatus and clears the MIE bit in one atomic
 * operation, so no interrupt can occur between the read and the clear.
 * Only the MIE bit of the old mstatus value is returned; pass the result
 * to riscv_interrupt_enable() to restore the previous state.
 */
static inline uint32_t riscv_interrupt_disable( void )
{
  unsigned long mstatus;

  __asm__ volatile (
    "csrrc %0, mstatus, " RTEMS_XSTRING( RISCV_MSTATUS_MIE ) :
      "=&r" ( mstatus )
  );

  return mstatus & RISCV_MSTATUS_MIE;
}
131
/*
 * Restore the interrupt state previously returned by
 * riscv_interrupt_disable().
 *
 * csrrs atomically sets the mstatus bits given in the operand.  Since
 * level is either 0 or RISCV_MSTATUS_MIE, this re-enables interrupts only
 * if they were enabled before; a level of 0 leaves mstatus unchanged.
 */
static inline void riscv_interrupt_enable( uint32_t level )
{
  __asm__ volatile ( "csrrs zero, mstatus, %0" : : "r" ( level ) );
}
136
/* Disable interrupts and save the previous state in _level */
#define _CPU_ISR_Disable( _level ) \
    _level = riscv_interrupt_disable()

/* Restore the interrupt state saved in _level */
#define _CPU_ISR_Enable( _level )  \
  riscv_interrupt_enable( _level )

/* Briefly open an interrupt window: restore the state saved in _level,
   then disable interrupts again (the saved _level remains valid). */
#define _CPU_ISR_Flash( _level ) \
  do{ \
      _CPU_ISR_Enable( _level ); \
      riscv_interrupt_disable(); \
    } while(0)
148
149RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( unsigned long level )
150{
151  return ( level & RISCV_MSTATUS_MIE ) != 0;
152}
153
/*
 * Set the current interrupt level.  A level with the
 * CPU_MODES_INTERRUPT_MASK bit clear enables maskable interrupts; any
 * other level disables them.  Only the mstatus MIE bit is touched.
 */
RTEMS_INLINE_ROUTINE void _CPU_ISR_Set_level( uint32_t level )
{
  if ( ( level & CPU_MODES_INTERRUPT_MASK) == 0 ) {
    /* Enable: atomically set the MIE bit */
    __asm__ volatile (
      "csrrs zero, mstatus, " RTEMS_XSTRING( RISCV_MSTATUS_MIE )
    );
  } else {
    /* Disable: atomically clear the MIE bit */
    __asm__ volatile (
      "csrrc zero, mstatus, " RTEMS_XSTRING( RISCV_MSTATUS_MIE )
    );
  }
}

/* Return the current interrupt level (see _CPU_ISR_Set_level() for the
   encoding); implemented in the port's C source. */
uint32_t _CPU_ISR_Get_level( void );
168
169/* end of ISR handler macros */
170
171/* Context handler macros */
/* Bytes below the stack pointer reserved as a red zone for
   compiler-generated code; the initial stack pointer must stay above it. */
#define RISCV_GCC_RED_ZONE_SIZE 128

/*
 * Build the initial context of a new thread.
 *
 * context          - context to initialize
 * stack_area_begin - lowest address of the thread's stack area
 * stack_area_size  - size of the stack area in bytes
 * new_level        - initial interrupt level of the thread
 * entry_point      - function the thread starts executing
 * is_fp            - true if the thread uses the floating point unit
 * tls_area         - thread-local storage area, or NULL
 */
void _CPU_Context_Initialize(
  Context_Control *context,
  void *stack_area_begin,
  size_t stack_area_size,
  unsigned long new_level,
  void (*entry_point)( void ),
  bool is_fp,
  void *tls_area
);

/* Restart the calling thread by restoring its context (does not return) */
#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) )


/* Address of the FP context area located _offset bytes into _base */
#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

/* Initialize the FP context at *(_destination) from the null FP context;
   _destination is a Context_Control_fp ** */
#define _CPU_Context_Initialize_fp( _destination ) \
  { \
   *(*(_destination)) = _CPU_Null_fp_context; \
  }
195
/* Halt the system after a fatal error with the given source and code;
   never returns. */
extern void _CPU_Fatal_halt(uint32_t source, uint32_t error) RTEMS_NO_RETURN;

/* end of Fatal Error manager macros */

/* Use the portable bit field search implementation from the score */
#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
#define CPU_USE_GENERIC_BITFIELD_DATA TRUE

/* The following is compiled only if the generic implementation is
   disabled above; it is currently dead code for this port. */
#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
  { \
    (_output) = 0;   /* do something to prevent warnings */ \
  }
#endif

/* end of Bitfield handler macros */

/*
 *  This routine builds the mask which corresponds to the bit fields
 *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
 *  for that routine.
 *
 */

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Priority_Mask( _bit_number ) \
    (1 << _bit_number)

#endif

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Priority_bits_index( _priority ) \
  (_priority)

#endif
233
/* Maximum number of processors supported in SMP configurations */
#define CPU_MAXIMUM_PROCESSORS 32

/* Timestamps are kept as 64-bit integers, not struct timespec */
#define CPU_TIMESTAMP_USE_STRUCT_TIMESPEC FALSE
#define CPU_TIMESTAMP_USE_INT64 TRUE
#define CPU_TIMESTAMP_USE_INT64_INLINE FALSE

/* NOTE: an empty struct is a GNU C extension; this header targets GCC */
typedef struct {
  /* There is no CPU specific per-CPU state */
} CPU_Per_CPU_control;
243#endif /* ASM */
244
/* Pointer size, register load/store mnemonics for the assembly sources,
   and exception frame size all depend on the XLEN of the target. */
#if __riscv_xlen == 32
#define CPU_SIZEOF_POINTER 4

/* 32-bit load/store instructions */
#define LREG lw
#define SREG sw

#define CPU_EXCEPTION_FRAME_SIZE 128
#else /* xlen = 64 */
#define CPU_SIZEOF_POINTER 8

/* 64-bit load/store instructions */
#define LREG ld
#define SREG sd

#define CPU_EXCEPTION_FRAME_SIZE 256
#endif

/* CPU_Per_CPU_control is empty, so its size is zero */
#define CPU_PER_CPU_CONTROL_SIZE 0

#ifndef ASM
/* Word type used by the generic priority bit map implementation */
typedef uint16_t Priority_bit_map_Word;
267
/*
 * Registers saved when an exception is taken.  A stray second semicolon
 * (empty member declaration, invalid ISO C) has been removed.
 */
typedef struct {
  /* General purpose registers x0..x31 at the time of the exception */
  unsigned long x[32];
} CPU_Exception_frame;
271
272/**
273 * @brief Prints the exception frame via printk().
274 *
275 * @see rtems_fatal() and RTEMS_FATAL_SOURCE_EXCEPTION.
276 */
277void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
278
279
280/* end of Priority handler macros */
281
282/* functions */
283
284/*
285 *  _CPU_Initialize
286 *
287 *  This routine performs CPU dependent initialization.
288 *
289 */
290
291void _CPU_Initialize(
292  void
293);
294
295/*
296 *  _CPU_ISR_install_raw_handler
297 *
298 *  This routine installs a "raw" interrupt handler directly into the
299 *  processor's vector table.
300 *
301 */
302
303void _CPU_ISR_install_raw_handler(
304  uint32_t    vector,
305  proc_ptr    new_handler,
306  proc_ptr   *old_handler
307);
308
309/*
310 *  _CPU_ISR_install_vector
311 *
312 *  This routine installs an interrupt vector.
313 *
314 *  NO_CPU Specific Information:
315 *
316 *  XXX document implementation including references if appropriate
317 */
318
319void _CPU_ISR_install_vector(
320  unsigned long    vector,
321  proc_ptr   new_handler,
322  proc_ptr   *old_handler
323);
324
325/*
326 *  _CPU_Thread_Idle_body
327 *
328 *  This routine is the CPU dependent IDLE thread body.
329 *
330 *  NOTE:  It need only be provided if CPU_PROVIDES_IDLE_THREAD_BODY
331 *         is TRUE.
332 *
333 */
334
335void *_CPU_Thread_Idle_body( uintptr_t ignored );
336
337/*
338 *  _CPU_Context_switch
339 *
340 *  This routine switches from the run context to the heir context.
341 *
342 *  RISCV Specific Information:
343 *
344 *  Please see the comments in the .c file for a description of how
345 *  this function works. There are several things to be aware of.
346 */
347
348void _CPU_Context_switch(
349  Context_Control  *run,
350  Context_Control  *heir
351);
352
353/*
354 *  _CPU_Context_restore
355 *
356 *  This routine is generally used only to restart self in an
357 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
358 *
359 *  NOTE: May be unnecessary to reload some registers.
360 *
361 */
362
363void _CPU_Context_restore(
364  Context_Control *new_context
365) RTEMS_NO_RETURN;
366
367/*
368 *  _CPU_Context_save_fp
369 *
370 *  This routine saves the floating point context passed to it.
371 *
372 */
373
374void _CPU_Context_save_fp(
375  void **fp_context_ptr
376);
377
378/*
379 *  _CPU_Context_restore_fp
380 *
381 *  This routine restores the floating point context passed to it.
382 *
383 */
384
385void _CPU_Context_restore_fp(
386  void **fp_context_ptr
387);
388
389/*  The following routine swaps the endian format of an unsigned int.
390 *  It must be static because it is referenced indirectly.
391 *
392 *  This version will work on any processor, but if there is a better
393 *  way for your CPU PLEASE use it.  The most common way to do this is to:
394 *
395 *     swap least significant two bytes with 16-bit rotate
396 *     swap upper and lower 16-bits
397 *     swap most significant two bytes with 16-bit rotate
398 *
399 *  Some CPUs have special instructions which swap a 32-bit quantity in
400 *  a single instruction (e.g. i486).  It is probably best to avoid
401 *  an "endian swapping control bit" in the CPU.  One good reason is
402 *  that interrupts would probably have to be disabled to insure that
403 *  an interrupt does not try to access the same "chunk" with the wrong
 *  endian.  Another good reason is that on some CPUs, the endian bit
 *  changes the endianness for ALL fetches -- both code and data -- so
 *  the code will be fetched incorrectly.
407 *
408 */
409
/*
 * Reverse the byte order of a 32-bit value.  Each byte is isolated with a
 * mask and moved to its mirrored position; the four pieces are then ORed
 * together.
 */
static inline uint32_t CPU_swap_u32(
  uint32_t value
)
{
  uint32_t result;

  result  = (value & 0x000000ffU) << 24;
  result |= (value & 0x0000ff00U) << 8;
  result |= (value & 0x00ff0000U) >> 8;
  result |= (value & 0xff000000U) >> 24;

  return result;
}
424
/*
 * Swap the byte order of a 16-bit value.
 *
 * Fix: the macro argument is now fully parenthesized.  The previous
 * expansion used bare `value`, so an expression argument such as
 * `a | b` grouped incorrectly with the `&`/`>>` operators.  The argument
 * is still evaluated twice, so avoid side effects in it.
 */
#define CPU_swap_u16( value ) \
  ((((value) & 0xff) << 8) | (((value) >> 8) & 0xff))
427
/*
 * Clobber the volatile (caller-saved) registers with the given pattern for
 * context validation test support.  Not yet implemented for this port; the
 * pattern is currently ignored.
 */
static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
{
  /* TODO */
}
432
/*
 * Continuously validate the register contents against the given pattern
 * for context switch test support.  The register checking itself is not
 * yet implemented; the function only spins forever, which matches the
 * expected never-return behavior of this hook.
 */
static inline void _CPU_Context_validate( uintptr_t pattern )
{
  while (1) {
    /* TODO */
  }
}
439
/* Type of the values returned by _CPU_Counter_read() */
typedef uint32_t CPU_Counter_ticks;

/* Frequency of the CPU counter in Hz; implemented by the port/BSP */
uint32_t _CPU_Counter_frequency( void );

/* Read the current free-running CPU counter value */
CPU_Counter_ticks _CPU_Counter_read( void );
445
#ifdef RTEMS_SMP

/* Perform port-specific SMP initialization; implemented in the port */
uint32_t _CPU_SMP_Initialize( void );

/* Start the processor with the given index; implemented in the port */
bool _CPU_SMP_Start_processor( uint32_t cpu_index );

/* Complete SMP initialization for cpu_count processors */
void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

/* Hook invoked before multitasking starts on this processor */
void _CPU_SMP_Prepare_start_multitasking( void );

/*
 * Return the index of the current processor, read from the mhartid CSR
 * (hardware thread ID).  NOTE(review): this assumes hart IDs are
 * contiguous and zero-based -- confirm for the target platform.
 */
static inline uint32_t _CPU_SMP_Get_current_processor( void )
{
  unsigned long mhartid;

  __asm__ volatile ( "csrr %0, mhartid" : "=&r" ( mhartid ) );

  return (uint32_t) mhartid;
}

/* Send an inter-processor interrupt to the target processor */
void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );
466
/* Broadcast a processor event; implemented as a compiler-only memory
   barrier (no fence instruction is emitted). */
static inline void _CPU_SMP_Processor_event_broadcast( void )
{
  __asm__ volatile ( "" : : : "memory" );
}

/* Receive side of a processor event; compiler-only memory barrier */
static inline void _CPU_SMP_Processor_event_receive( void )
{
  __asm__ volatile ( "" : : : "memory" );
}

/* True while the context is executing on some processor */
static inline bool _CPU_Context_Get_is_executing(
  const Context_Control *context
)
{
  return context->is_executing;
}

/* Record whether the context is currently executing on a processor */
static inline void _CPU_Context_Set_is_executing(
  Context_Control *context,
  bool is_executing
)
{
  context->is_executing = is_executing;
}
491
492#endif /* RTEMS_SMP */
493
494/** Type that can store a 32-bit integer or a pointer. */
495typedef uintptr_t CPU_Uint32ptr;
496
497#endif /* ASM */
498
499#ifdef __cplusplus
500}
501#endif
502
503#endif
Note: See TracBrowser for help on using the repository browser.