source: rtems/cpukit/score/cpu/riscv32/rtems/score/cpu.h @ 660db8c8

Last change on this file since 660db8c8 was 660db8c8, checked in by Hesham Almatary <hesham@…>, on Oct 26, 2017 at 11:12:41 PM

cpukit: Add basic riscv32 architecture port v3

Limitations:

  • NO FPU support [TODO]

Update #3109

  • Property mode set to 100644
File size: 15.1 KB
Line 
1/**
2 * @file rtems/score/cpu.h
3 */
4
5/*
6 *
7 * Copyright (c) 2015 University of York.
8 * Hesham Almatary <hesham@alumni.york.ac.uk>
9 *
10 * COPYRIGHT (c) 1989-1999.
11 * On-Line Applications Research Corporation (OAR).
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 *    notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 *    notice, this list of conditions and the following disclaimer in the
20 *    documentation and/or other materials provided with the distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35#ifndef _RISCV_CPU_H
36#define _RISCV_CPU_H
37
38#ifdef __cplusplus
39extern "C" {
40#endif
41
42#include <rtems/score/riscv.h> /* pick up machine definitions */
43#include <rtems/score/types.h>
44#include <rtems/score/riscv-utility.h>
45#ifndef ASM
46#include <rtems/bspIo.h>
47#include <stdint.h>
#include <stdio.h> /* NOTE(review): printk comes from <rtems/bspIo.h> above; verify stdio.h is still needed */
49#endif
50
/* Dispatch/enqueue tuning options selected for this port. */
#define CPU_INLINE_ENABLE_DISPATCH       FALSE
#define CPU_UNROLL_ENQUEUE_PRIORITY      TRUE

/* The executive allocates a software-managed interrupt stack; there is
 * no dedicated hardware interrupt stack on this port. */
#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
#define CPU_ALLOCATE_INTERRUPT_STACK TRUE

/* ISR handlers receive a pointer to the interrupt frame. */
#define CPU_ISR_PASSES_FRAME_POINTER 1

/* No FPU support yet (see the port TODO), so all FP-related options
 * are disabled. */
#define CPU_HARDWARE_FP                  FALSE
#define CPU_SOFTWARE_FP                  FALSE
#define CPU_ALL_TASKS_ARE_FP             FALSE
#define CPU_IDLE_TASK_IS_FP              FALSE
#define CPU_USE_DEFERRED_FP_SWITCH       FALSE

/* The port supplies its own _CPU_Thread_Idle_body() (declared below). */
#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE

/* Stacks grow towards lower addresses. */
#define CPU_STACK_GROWS_UP               FALSE

/* Alignment attribute applied to performance-critical structures. */
#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (64)))
#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES     FALSE

/* riscv32 runs little-endian here. */
#define CPU_BIG_ENDIAN                           FALSE
#define CPU_LITTLE_ENDIAN                        TRUE

/* Bit 0 of a task mode word selects the interrupt enable level. */
#define CPU_MODES_INTERRUPT_MASK   0x00000001
70
71/*
72 *  Processor defined structures required for cpukit/score.
73 */
74
75#ifndef ASM
76
/**
 * @brief Per-thread register context saved and restored by
 * _CPU_Context_switch() / _CPU_Context_restore().
 */
typedef struct {
  /* riscv32 has 32 32-bit general purpose registers (x0-x31). */
  uint32_t  x[32];

  /* Special purpose registers: machine status, trap cause, and the
   * exception program counter needed to resume the thread. */
  uint32_t  mstatus;
  uint32_t  mcause;
  uint32_t  mepc;
#ifdef RTEMS_SMP
  /**
   * @brief On SMP configurations the thread context must contain a boolean
   * indicator to signal if this context is executing on a processor.
   *
   * This field must be updated during a context switch.  The context switch
   * to the heir must wait until the heir context indicates that it is no
   * longer executing on a processor.  The context switch must also check if
   * a thread dispatch is necessary to honor updates of the heir thread for
   * this processor.  This indicator must be updated using an atomic test and
   * set operation to ensure that at most one processor uses the heir
   * context at the same time.
   *
   * @code
   * void _CPU_Context_switch(
   *   Context_Control *executing,
   *   Context_Control *heir
   * )
   * {
   *   save( executing );
   *
   *   executing->is_executing = false;
   *   memory_barrier();
   *
   *   if ( test_and_set( &heir->is_executing ) ) {
   *     do {
   *       Per_CPU_Control *cpu_self = _Per_CPU_Get_snapshot();
   *
   *       if ( cpu_self->dispatch_necessary ) {
   *         heir = _Thread_Get_heir_and_make_it_executing( cpu_self );
   *       }
   *     } while ( test_and_set( &heir->is_executing ) );
   *   }
   *
   *   restore( heir );
   * }
   * @endcode
   */
  volatile bool is_executing;
#endif
} Context_Control;
126
/* The stack pointer is x2 (sp) in the RISC-V calling convention. */
#define _CPU_Context_Get_SP( _context ) \
  (_context)->x[2]
129
130typedef struct {
131  /** TODO FPU registers are listed here */
132  double  some_float_register;
133} Context_Control_fp;
134
135typedef Context_Control CPU_Interrupt_frame;
136
137#define CPU_CONTEXT_FP_SIZE  0
138Context_Control_fp  _CPU_Null_fp_context;
139
/* No extra stack is needed by the MPCI receive server on this port. */
#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

/* Minimum task stack size and alignment requirements. */
#define CPU_STACK_MINIMUM_SIZE  4096
#define CPU_ALIGNMENT 8
#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT
#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT
#define CPU_STACK_ALIGNMENT        8

/* Expands to nothing: this hook performs no vector initialization. */
#define _CPU_Initialize_vectors()
148
149/*
150 *  Disable all interrupts for an RTEMS critical section.  The previous
151 *  level is returned in _level.
152 *
153 */
154
155static inline uint32_t riscv_interrupt_disable( void )
156{
157  register uint32_t status = read_csr(mstatus);
158  clear_csr(mstatus, MSTATUS_MIE);
159  return status;
160}
161
162static inline void riscv_interrupt_enable(uint32_t level)
163{
164  write_csr(mstatus, level);
165}
166
/* Save the current interrupt level in _level and disable interrupts. */
#define _CPU_ISR_Disable( _level ) \
    _level = riscv_interrupt_disable()

/* Restore the interrupt level previously saved in _level. */
#define _CPU_ISR_Enable( _level )  \
  riscv_interrupt_enable( _level )

/* Momentarily open a critical section: restore _level so pending
 * interrupts may run, then disable interrupts again. */
#define _CPU_ISR_Flash( _level ) \
  do{ \
      _CPU_ISR_Enable( _level ); \
      riscv_interrupt_disable(); \
    } while(0)
178
179RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
180{
181  return ( level & MSTATUS_MIE ) != 0;
182}
183
/* Set the current interrupt level (see CPU_MODES_INTERRUPT_MASK). */
void _CPU_ISR_Set_level( uint32_t level );

/* Return the current interrupt level. */
uint32_t _CPU_ISR_Get_level( void );

/* end of ISR handler macros */

/* Context handler macros */

/* Bytes assumed usable below the stack pointer by leaf functions.
 * NOTE(review): the macro name says "GCC red zone"; confirm against
 * the RISC-V psABI, which does not mandate a red zone. */
#define RISCV_GCC_RED_ZONE_SIZE 128

/**
 * @brief Initializes @a context so the thread starts at
 * @a entry_point on the stack described by @a stack_area_begin and
 * @a stack_area_size, with initial interrupt level @a new_level.
 *
 * @a is_fp marks a floating point task (presumably unused while the
 * port has no FPU support -- confirm in the implementation file);
 * @a tls_area points to the thread-local storage area.
 */
void _CPU_Context_Initialize(
  Context_Control *context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint32_t new_level,
  void (*entry_point)( void ),
  bool is_fp,
  void *tls_area
);
202
/* Restart the currently executing thread by restoring its context. */
#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) )

/* Address of the FP context located _offset bytes into _base. */
#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

/* Initialize an FP context area from the null FP context.
 * NOTE(review): the double dereference implies _destination is a
 * Context_Control_fp ** -- verify against the callers. */
#define _CPU_Context_Initialize_fp( _destination ) \
  { \
   *(*(_destination)) = _CPU_Null_fp_context; \
  }

/* Halt the system on a fatal error; never returns. */
extern void _CPU_Fatal_halt(uint32_t source, uint32_t error)
RTEMS_NO_RETURN;
217
/* end of Fatal Error manager macros */

/* Use the score's generic bitfield search code and data tables. */
#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
#define CPU_USE_GENERIC_BITFIELD_DATA TRUE

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

/* Port-specific find-first-bit; dead code while the generic
 * implementation is selected above. */
#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
  { \
    (_output) = 0;   /* do something to prevent warnings */ \
  }
#endif

/* end of Bitfield handler macros */

/*
 *  This routine builds the mask which corresponds to the bit fields
 *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
 *  for that routine.
 */

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Priority_Mask( _bit_number ) \
    (1 << _bit_number)

#endif

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Priority_bits_index( _priority ) \
  (_priority)

#endif

/* Upper bound on processors in an SMP configuration. */
#define CPU_MAXIMUM_PROCESSORS 32

/* Timestamps are 64-bit integers handled out of line. */
#define CPU_TIMESTAMP_USE_STRUCT_TIMESPEC FALSE
#define CPU_TIMESTAMP_USE_INT64 TRUE
#define CPU_TIMESTAMP_USE_INT64_INLINE FALSE

/* NOTE(review): an empty struct is a GCC extension, not ISO C; it is
 * intentional and matches CPU_PER_CPU_CONTROL_SIZE == 0 below. */
typedef struct {
  /* There is no CPU specific per-CPU state */
} CPU_Per_CPU_control;
#endif /* ASM */

/* 32-bit pointers; the frame size must match CPU_Exception_frame. */
#define CPU_SIZEOF_POINTER 4
#define CPU_EXCEPTION_FRAME_SIZE 128
#define CPU_PER_CPU_CONTROL_SIZE 0
268
269#ifndef ASM
270typedef uint16_t Priority_bit_map_Word;
271
/**
 * @brief CPU-specific exception frame: the 32 general purpose
 * registers x0-x31 (32 * 4 = 128 bytes, matching
 * CPU_EXCEPTION_FRAME_SIZE).
 */
typedef struct {
  uint32_t x[32];  /* stray ";;" removed: not valid in strict ISO C */
} CPU_Exception_frame;
275
276/**
277 * @brief Prints the exception frame via printk().
278 *
279 * @see rtems_fatal() and RTEMS_FATAL_SOURCE_EXCEPTION.
280 */
281void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
282
283
284/* end of Priority handler macros */
285
286/* functions */
287
/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 */

void _CPU_Initialize(
  void
);

/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs a "raw" interrupt handler directly into the
 *  processor's vector table.  The previous handler is presumably
 *  returned through old_handler -- confirm in the implementation.
 */

void _CPU_ISR_install_raw_handler(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.
 */

void _CPU_ISR_install_vector(
  uint32_t    vector,
  proc_ptr   new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_Install_interrupt_stack
 *
 *  This routine installs the hardware interrupt stack pointer.
 *
 *  NOTE:  It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
 *         is TRUE (it is FALSE for this port).
 */

void _CPU_Install_interrupt_stack( void );

/*
 *  _CPU_Thread_Idle_body
 *
 *  This routine is the CPU dependent IDLE thread body.
 *
 *  NOTE:  It need only be provided if CPU_PROVIDES_IDLE_THREAD_BODY
 *         is TRUE (it is TRUE for this port).
 */

void *_CPU_Thread_Idle_body( uintptr_t ignored );

/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 *
 *  RISCV Specific Information:
 *
 *  Please see the comments in the implementation file for a
 *  description of how this function works.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 */

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;

/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 */

void _CPU_Context_save_fp(
  void **fp_context_ptr
);

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 */

void _CPU_Context_restore_fp(
  void **fp_context_ptr
);
404
/*
 *  CPU_swap_u32
 *
 *  Swap the byte order of a 32-bit quantity (endian conversion).
 *  It must be static because it is referenced indirectly.
 *
 *  This is a portable mask-and-shift implementation that works on any
 *  processor.  Targets with a single-instruction byte swap may provide
 *  a faster version, but avoid any "endian swapping control bit"
 *  approach: it would require disabling interrupts around accesses and
 *  on some CPUs affects code fetches as well.
 */

static inline unsigned int CPU_swap_u32(
  unsigned int value
)
{
  uint32_t swapped;

  swapped  = (value & 0x000000ffU) << 24;  /* byte 1 -> byte 4 */
  swapped |= (value & 0x0000ff00U) << 8;   /* byte 2 -> byte 3 */
  swapped |= (value & 0x00ff0000U) >> 8;   /* byte 3 -> byte 2 */
  swapped |= (value & 0xff000000U) >> 24;  /* byte 4 -> byte 1 */

  return swapped;
}
440
441#define CPU_swap_u16( value ) \
442  (((value&0xff) << 8) | ((value >> 8)&0xff))
443
/* Clobber the volatile registers with the given pattern (used by the
 * context validation tests).  Not implemented for this port yet; the
 * pattern is currently ignored. */
static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
{
  /* TODO */
}
448
/* Context validation helper: presumably meant to fill registers from
 * the pattern and loop forever checking them (TODO confirm against
 * the score test suite).  This port only spins without validating
 * anything yet; the intentional infinite loop matches the
 * never-returns contract. */
static inline void _CPU_Context_validate( uintptr_t pattern )
{
  while (1) {
    /* TODO */
  }
}
455
/* CPU counter ticks are 32-bit on this port. */
typedef uint32_t CPU_Counter_ticks;

/* Read the current value of the free-running CPU counter. */
CPU_Counter_ticks _CPU_Counter_read( void );
459
460#ifdef RTEMS_SMP
461/**
462 * @brief Performs CPU specific SMP initialization in the context of the boot
463 * processor.
464 *
465 * This function is invoked on the boot processor during system
466 * initialization.  All interrupt stacks are allocated at this point in case
467 * the CPU port allocates the interrupt stacks.  This function is called
468 * before _CPU_SMP_Start_processor() or _CPU_SMP_Finalize_initialization() is
469 * used.
470 *
471 * @return The count of physically or virtually available processors.
472 * Depending on the configuration the application may use not all processors.
473 */
474uint32_t _CPU_SMP_Initialize( void );
475
476/**
477 * @brief Starts a processor specified by its index.
478 *
479 * This function is invoked on the boot processor during system
480 * initialization.
481 *
482 * This function will be called after _CPU_SMP_Initialize().
483 *
484 * @param[in] cpu_index The processor index.
485 *
486 * @retval true Successful operation.
487 * @retval false Unable to start this processor.
488 */
489bool _CPU_SMP_Start_processor( uint32_t cpu_index );
490
491/**
492 * @brief Performs final steps of CPU specific SMP initialization in the
493 * context of the boot processor.
494 *
495 * This function is invoked on the boot processor during system
496 * initialization.
497 *
498 * This function will be called after all processors requested by the
499 * application have been started.
500 *
501 * @param[in] cpu_count The minimum value of the count of processors
502 * requested by the application configuration and the count of physically or
503 * virtually available processors.
504 */
505void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );
506
507/**
508 * @brief Returns the index of the current processor.
509 *
510 * An architecture specific method must be used to obtain the index of the
511 * current processor in the system.  The set of processor indices is the
512 * range of integers starting with zero up to the processor count minus one.
513 */
514uint32_t _CPU_SMP_Get_current_processor( void );
515
516/**
517 * @brief Sends an inter-processor interrupt to the specified target
518 * processor.
519 *
520 * This operation is undefined for target processor indices out of range.
521 *
522 * @param[in] target_processor_index The target processor index.
523 */
524void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );
525
526/**
527 * @brief Broadcasts a processor event.
528 *
529 * Some architectures provide a low-level synchronization primitive for
530 * processors in a multi-processor environment.  Processors waiting for this
531 * event may go into a low-power state and stop generating system bus
532 * transactions.  This function must ensure that preceding store operations
533 * can be observed by other processors.
534 *
535 * @see _CPU_SMP_Processor_event_receive().
536 */
537void _CPU_SMP_Processor_event_broadcast( void );
538
/**
 * @brief Receives a processor event.
 *
 * This function will wait for the processor event and may wait forever if no
 * such event arrives.
 *
 * NOTE(review): the body is only a compiler memory barrier -- it does
 * not actually wait (no WFI or similar); confirm busy-polling by the
 * caller is the intended usage.
 *
 * @see _CPU_SMP_Processor_event_broadcast().
 */
static inline void _CPU_SMP_Processor_event_receive( void )
{
  __asm__ volatile ( "" : : : "memory" );
}
551
552/**
553 * @brief Gets the is executing indicator of the thread context.
554 *
555 * @param[in] context The context.
556 */
557static inline bool _CPU_Context_Get_is_executing(
558  const Context_Control *context
559)
560{
561  return context->is_executing;
562}
563
564/**
565 * @brief Sets the is executing indicator of the thread context.
566 *
567 * @param[in] context The context.
568 * @param[in] is_executing The new value for the is executing indicator.
569 */
570static inline void _CPU_Context_Set_is_executing(
571  Context_Control *context,
572  bool is_executing
573)
574{
575  context->is_executing = is_executing;
576}
577#endif /* RTEMS_SMP */
578
579#endif /* ASM */
580
581#ifdef __cplusplus
582}
583#endif
584
585#endif
Note: See TracBrowser for help on using the repository browser.