source: rtems/cpukit/score/cpu/riscv/include/rtems/score/cpu.h @ 2086948a

Last change on this file since 2086948a was 2086948a, checked in by Sebastian Huber <sebastian.huber@…>, on May 11, 2018 at 4:54:59 AM

riscv: Add dummy SMP support

Update #3433.

  • Property mode set to 100644
File size: 11.8 KB
Line 
1/**
2 * @file rtems/score/cpu.h
3 */
4
5/*
6 *
7 * Copyright (c) 2015 University of York.
8 * Hesham Almatary <hesham@alumni.york.ac.uk>
9 *
10 * COPYRIGHT (c) 1989-1999.
11 * On-Line Applications Research Corporation (OAR).
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 *    notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 *    notice, this list of conditions and the following disclaimer in the
20 *    documentation and/or other materials provided with the distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35#ifndef _RISCV_CPU_H
36#define _RISCV_CPU_H
37
38#ifdef __cplusplus
39extern "C" {
40#endif
41
42#include <rtems/score/basedefs.h>
43#include <rtems/score/riscv.h> /* pick up machine definitions */
44#include <rtems/score/riscv-utility.h>
45#ifndef ASM
46#include <rtems/bspIo.h>
47#include <stdint.h>
48#include <stdio.h> /* for printk */
49#endif
50
51#define CPU_INLINE_ENABLE_DISPATCH       FALSE
52#define CPU_UNROLL_ENQUEUE_PRIORITY      TRUE
53#define CPU_ISR_PASSES_FRAME_POINTER 1
54#define CPU_HARDWARE_FP                  FALSE
55#define CPU_SOFTWARE_FP                  FALSE
56#define CPU_ALL_TASKS_ARE_FP             FALSE
57#define CPU_IDLE_TASK_IS_FP              FALSE
58#define CPU_USE_DEFERRED_FP_SWITCH       FALSE
59#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE
60#define CPU_STACK_GROWS_UP               FALSE
61
62#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (64)))
63#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES     FALSE
64#define CPU_BIG_ENDIAN                           FALSE
65#define CPU_LITTLE_ENDIAN                        TRUE
66#define CPU_MODES_INTERRUPT_MASK   0x0000000000000001
67
68/*
69 *  Processor defined structures required for cpukit/score.
70 */
71
72#ifndef ASM
73
/*
 * Thread register context saved/restored by _CPU_Context_switch().
 * NOTE(review): the field order and offsets are almost certainly mirrored by
 * assembly code elsewhere in the port — do not reorder fields.
 */
typedef struct {
  /* riscv has 32 xlen-bit (where xlen can be 32 or 64) general purpose registers (x0-x31)*/
  unsigned long x[32];

  /* Special purpose registers */
  unsigned long mstatus;
  unsigned long mcause;
  unsigned long mepc;
#ifdef RTEMS_SMP
  /* Context-switch handshake flag: true while a processor is still
   * executing on this context (read via _CPU_Context_Get_is_executing()). */
  volatile bool is_executing;
#endif
} Context_Control;
86
87#define _CPU_Context_Get_SP( _context ) \
88  (_context)->x[2]
89
/*
 * Floating point context.  Placeholder only: the port sets
 * CPU_HARDWARE_FP/CPU_SOFTWARE_FP to FALSE, so this is never really used.
 */
typedef struct {
  /** TODO FPU registers are listed here */
  double  some_float_register;
} Context_Control_fp;
94
95typedef Context_Control CPU_Interrupt_frame;
96
97#define CPU_CONTEXT_FP_SIZE  0
98Context_Control_fp  _CPU_Null_fp_context;
99
100#define CPU_CACHE_LINE_BYTES 64
101
102#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
/*
 * Minimum task stack size.  Doubled for RV64 since the saved context and
 * stack frames are larger.  The RV64 value is parenthesized so the macro
 * expands safely inside larger expressions; the original unparenthesized
 * "4096 * 2" made e.g. "x / CPU_STACK_MINIMUM_SIZE" parse as
 * "(x / 4096) * 2".
 */
#if __riscv_xlen == 32
#define CPU_STACK_MINIMUM_SIZE  4096
#else
#define CPU_STACK_MINIMUM_SIZE  ( 4096 * 2 )
#endif
108#define CPU_ALIGNMENT 8
109#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
110#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT
111#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT
112#define CPU_STACK_ALIGNMENT        8
113
114#define CPU_INTERRUPT_STACK_ALIGNMENT CPU_CACHE_LINE_BYTES
115
116#define _CPU_Initialize_vectors()
117
118/*
119 *  Disable all interrupts for an RTEMS critical section.  The previous
120 *  level is returned in _level.
121 *
122 */
123
/*
 * Disable machine-mode interrupts and return the previous mstatus value so
 * the caller can later restore it with riscv_interrupt_enable().
 */
static inline unsigned long riscv_interrupt_disable( void )
{
  /* Read the full mstatus CSR first so the returned value still carries the
   * pre-disable MIE bit state, then clear MIE to mask interrupts. */
  unsigned long status = read_csr(mstatus);
  clear_csr(mstatus, MSTATUS_MIE);
  return status;
}
130
/*
 * Restore a previously saved interrupt level.  Note this writes back the
 * ENTIRE mstatus CSR (not just the MIE bit), so "level" must be a value
 * obtained from riscv_interrupt_disable().
 */
static inline void riscv_interrupt_enable(unsigned long level)
{
  write_csr(mstatus, level);
}
135
/* Enter an RTEMS critical section; the previous level is stored in _level. */
#define _CPU_ISR_Disable( _level ) \
    _level = riscv_interrupt_disable()

/* Leave a critical section by restoring the level saved at entry. */
#define _CPU_ISR_Enable( _level )  \
  riscv_interrupt_enable( _level )

/* Briefly open an interrupt window inside a critical section: restore the
 * saved level, then disable again.  The value returned by the second
 * disable is deliberately discarded — _level already holds the state to
 * restore at section exit. */
#define _CPU_ISR_Flash( _level ) \
  do{ \
      _CPU_ISR_Enable( _level ); \
      riscv_interrupt_disable(); \
    } while(0)
147
148RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( unsigned long level )
149{
150  return ( level & MSTATUS_MIE ) != 0;
151}
152
/*
 * Map an RTEMS interrupt level to the hardware: level 0 means interrupts
 * enabled, any nonzero masked level means disabled.  Only the MIE bit of
 * mstatus is touched (csrrs sets it, csrrc clears it; rd = zero discards
 * the old CSR value).
 */
RTEMS_INLINE_ROUTINE void _CPU_ISR_Set_level( uint32_t level )
{
  if ( ( level & CPU_MODES_INTERRUPT_MASK) == 0 ) {
    /* Requested level 0: enable machine interrupts (set MIE). */
    __asm__ volatile (
      "csrrs zero, mstatus, " RTEMS_XSTRING( MSTATUS_MIE )
    );
  } else {
    /* Nonzero level: disable machine interrupts (clear MIE). */
    __asm__ volatile (
      "csrrc zero, mstatus, " RTEMS_XSTRING( MSTATUS_MIE )
    );
  }
}
165
166uint32_t _CPU_ISR_Get_level( void );
167
168/* end of ISR handler macros */
169
170/* Context handler macros */
171#define RISCV_GCC_RED_ZONE_SIZE 128
172
173void _CPU_Context_Initialize(
174  Context_Control *context,
175  void *stack_area_begin,
176  size_t stack_area_size,
177  unsigned long new_level,
178  void (*entry_point)( void ),
179  bool is_fp,
180  void *tls_area
181);
182
183#define _CPU_Context_Restart_self( _the_context ) \
184   _CPU_Context_restore( (_the_context) )
185
186
187#define _CPU_Context_Fp_start( _base, _offset ) \
188   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
189
190#define _CPU_Context_Initialize_fp( _destination ) \
191  { \
192   *(*(_destination)) = _CPU_Null_fp_context; \
193  }
194
195extern void _CPU_Fatal_halt(uint32_t source, uint32_t error) RTEMS_NO_RETURN;
196
197/* end of Fatal Error manager macros */
198
199#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
200#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
201
202#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
203
204#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
205  { \
206    (_output) = 0;   /* do something to prevent warnings */ \
207  }
208#endif
209
210/* end of Bitfield handler macros */
211
212/*
213 *  This routine builds the mask which corresponds to the bit fields
214 *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
215 *  for that routine.
216 *
217 */
218
219#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
220
221#define _CPU_Priority_Mask( _bit_number ) \
222    (1 << _bit_number)
223
224#endif
225
226#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
227
228#define _CPU_Priority_bits_index( _priority ) \
229  (_priority)
230
231#endif
232
233#define CPU_MAXIMUM_PROCESSORS 32
234
235#define CPU_TIMESTAMP_USE_STRUCT_TIMESPEC FALSE
236#define CPU_TIMESTAMP_USE_INT64 TRUE
237#define CPU_TIMESTAMP_USE_INT64_INLINE FALSE
238
typedef struct {
  /* There is no CPU specific per-CPU state */
  /* NOTE(review): an empty struct is a GNU C extension (sizeof == 0);
   * this header relies on GCC behavior here. */
} CPU_Per_CPU_control;
242#endif /* ASM */
243
244#if __riscv_xlen == 32
245#define CPU_SIZEOF_POINTER 4
246
247/* 32-bit load/store instructions */
248#define LREG lw
249#define SREG sw
250
251#define CPU_EXCEPTION_FRAME_SIZE 128
252#else /* xlen = 64 */
253#define CPU_SIZEOF_POINTER 8
254
255/* 64-bit load/store instructions */
256#define LREG ld
257#define SREG sd
258
259#define CPU_EXCEPTION_FRAME_SIZE 256
260#endif
261
262#define CPU_PER_CPU_CONTROL_SIZE 0
263
264#ifndef ASM
265typedef uint16_t Priority_bit_map_Word;
266
/*
 * CPU exception frame: the 32 general purpose registers (x0-x31) captured
 * at exception entry.  (Fixed a stray double semicolon after the array
 * member, which is a constraint violation inside a struct declaration list.)
 */
typedef struct {
  unsigned long x[32];
} CPU_Exception_frame;
270
271/**
272 * @brief Prints the exception frame via printk().
273 *
274 * @see rtems_fatal() and RTEMS_FATAL_SOURCE_EXCEPTION.
275 */
276void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
277
278
279/* end of Priority handler macros */
280
281/* functions */
282
283/*
284 *  _CPU_Initialize
285 *
286 *  This routine performs CPU dependent initialization.
287 *
288 */
289
290void _CPU_Initialize(
291  void
292);
293
294/*
295 *  _CPU_ISR_install_raw_handler
296 *
297 *  This routine installs a "raw" interrupt handler directly into the
298 *  processor's vector table.
299 *
300 */
301
302void _CPU_ISR_install_raw_handler(
303  uint32_t    vector,
304  proc_ptr    new_handler,
305  proc_ptr   *old_handler
306);
307
308/*
309 *  _CPU_ISR_install_vector
310 *
311 *  This routine installs an interrupt vector.
312 *
313 *  NO_CPU Specific Information:
314 *
315 *  XXX document implementation including references if appropriate
316 */
317
318void _CPU_ISR_install_vector(
319  unsigned long    vector,
320  proc_ptr   new_handler,
321  proc_ptr   *old_handler
322);
323
324/*
325 *  _CPU_Thread_Idle_body
326 *
327 *  This routine is the CPU dependent IDLE thread body.
328 *
329 *  NOTE:  It need only be provided if CPU_PROVIDES_IDLE_THREAD_BODY
330 *         is TRUE.
331 *
332 */
333
334void *_CPU_Thread_Idle_body( uintptr_t ignored );
335
336/*
337 *  _CPU_Context_switch
338 *
339 *  This routine switches from the run context to the heir context.
340 *
341 *  RISCV Specific Information:
342 *
343 *  Please see the comments in the .c file for a description of how
344 *  this function works. There are several things to be aware of.
345 */
346
347void _CPU_Context_switch(
348  Context_Control  *run,
349  Context_Control  *heir
350);
351
352/*
353 *  _CPU_Context_restore
354 *
355 *  This routine is generally used only to restart self in an
356 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
357 *
358 *  NOTE: May be unnecessary to reload some registers.
359 *
360 */
361
362void _CPU_Context_restore(
363  Context_Control *new_context
364) RTEMS_NO_RETURN;
365
366/*
367 *  _CPU_Context_save_fp
368 *
369 *  This routine saves the floating point context passed to it.
370 *
371 */
372
373void _CPU_Context_save_fp(
374  void **fp_context_ptr
375);
376
377/*
378 *  _CPU_Context_restore_fp
379 *
380 *  This routine restores the floating point context passed to it.
381 *
382 */
383
384void _CPU_Context_restore_fp(
385  void **fp_context_ptr
386);
387
388/*  The following routine swaps the endian format of an unsigned int.
389 *  It must be static because it is referenced indirectly.
390 *
391 *  This version will work on any processor, but if there is a better
392 *  way for your CPU PLEASE use it.  The most common way to do this is to:
393 *
394 *     swap least significant two bytes with 16-bit rotate
395 *     swap upper and lower 16-bits
396 *     swap most significant two bytes with 16-bit rotate
397 *
398 *  Some CPUs have special instructions which swap a 32-bit quantity in
399 *  a single instruction (e.g. i486).  It is probably best to avoid
400 *  an "endian swapping control bit" in the CPU.  One good reason is
401 *  that interrupts would probably have to be disabled to insure that
402 *  an interrupt does not try to access the same "chunk" with the wrong
403 *  endian.  Another good reason is that on some CPUs, the endian bit
 *  changes the endianness for ALL fetches -- both code and data -- so the code
405 *  will be fetched incorrectly.
406 *
407 */
408
/*
 * Swap the endian format of a 32-bit value, portable mask-and-shift form.
 * Referenced indirectly, hence a concrete (static inline) function rather
 * than a macro.
 */
static inline uint32_t CPU_swap_u32(
  uint32_t value
)
{
  uint32_t swapped;

  swapped  = ( value & 0x000000ffU ) << 24;
  swapped |= ( value & 0x0000ff00U ) << 8;
  swapped |= ( value & 0x00ff0000U ) >> 8;
  swapped |= ( value & 0xff000000U ) >> 24;

  return swapped;
}
423
/*
 * Swap the two bytes of a 16-bit value.  The argument is fully
 * parenthesized so compound expressions expand correctly, e.g.
 * CPU_swap_u16( a | b ) — the original unparenthesized form bound the
 * "& 0xff" / ">> 8" operators into the caller's expression.  Note the
 * argument is evaluated twice; avoid arguments with side effects.
 */
#define CPU_swap_u16( value ) \
  ( ( ( (value) & 0xff ) << 8 ) | ( ( (value) >> 8 ) & 0xff ) )
426
/*
 * Clobber the volatile registers with the given bit pattern (used by the
 * context validation tests).  Not yet implemented for RISC-V.
 */
static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
{
  (void) pattern; /* silence -Wunused-parameter until implemented */
  /* TODO */
}
431
/*
 * Exercise and verify the CPU context against the given bit pattern.
 * TODO: not implemented for RISC-V; the body just spins forever.
 * NOTE(review): the infinite loop looks intentional — the validate routine
 * is apparently not expected to return — confirm against the score
 * port documentation before changing.
 */
static inline void _CPU_Context_validate( uintptr_t pattern )
{
  while (1) {
    /* TODO */
  }
}
438
439typedef uint32_t CPU_Counter_ticks;
440
441uint32_t _CPU_Counter_frequency( void );
442
443CPU_Counter_ticks _CPU_Counter_read( void );
444
445#ifdef RTEMS_SMP
446
447uint32_t _CPU_SMP_Initialize( void );
448
449bool _CPU_SMP_Start_processor( uint32_t cpu_index );
450
451void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );
452
453void _CPU_SMP_Prepare_start_multitasking( void );
454
/*
 * Return the index of the current processor by reading the mhartid
 * (hardware thread ID) CSR.
 * NOTE(review): RISC-V does not require hart IDs to be contiguous or to
 * start at 0; this assumes a 1:1 hart-ID-to-processor-index mapping —
 * confirm for the target platform.
 */
static inline uint32_t _CPU_SMP_Get_current_processor( void )
{
  unsigned long mhartid;

  __asm__ volatile ( "csrr %0, mhartid" : "=&r" ( mhartid ) );

  return (uint32_t) mhartid;
}
463
464void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );
465
/*
 * Dummy SMP support: no inter-processor event is actually signalled; the
 * empty asm statement is only a compiler memory barrier so stores before
 * the "broadcast" are not reordered past it.
 */
static inline void _CPU_SMP_Processor_event_broadcast( void )
{
  __asm__ volatile ( "" : : : "memory" );
}
470
/*
 * Dummy SMP support: no wait-for-event instruction is issued; the empty
 * asm statement is only a compiler memory barrier (callers poll).
 */
static inline void _CPU_SMP_Processor_event_receive( void )
{
  __asm__ volatile ( "" : : : "memory" );
}
475
476static inline bool _CPU_Context_Get_is_executing(
477  const Context_Control *context
478)
479{
480  return context->is_executing;
481}
482
483static inline void _CPU_Context_Set_is_executing(
484  Context_Control *context,
485  bool is_executing
486)
487{
488  context->is_executing = is_executing;
489}
490
491#endif /* RTEMS_SMP */
492
493/** Type that can store a 32-bit integer or a pointer. */
494typedef uintptr_t CPU_Uint32ptr;
495
496#endif /* ASM */
497
498#ifdef __cplusplus
499}
500#endif
501
502#endif
Note: See TracBrowser for help on using the repository browser.