source: rtems/cpukit/score/cpu/riscv/include/rtems/score/cpu.h @ 7c3b0df1

Last change on this file was 7c3b0df1, checked in by Sebastian Huber <sebastian.huber@…> on Jun 22, 2018 at 11:30:49 AM

riscv: Implement ISR set/get level

Fix prototypes.

Update #3433.

/**
 * @file rtems/score/cpu.h
 */

/*
 *
 * Copyright (c) 2015 University of York.
 * Hesham Almatary <hesham@alumni.york.ac.uk>
 *
 * COPYRIGHT (c) 1989-1999.
 * On-Line Applications Research Corporation (OAR).
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _RISCV_CPU_H
#define _RISCV_CPU_H

#ifdef __cplusplus
extern "C" {
#endif

#include <rtems/score/basedefs.h>
#include <rtems/score/riscv.h> /* pick up machine definitions */
#include <rtems/score/riscv-utility.h>
#ifndef ASM
#include <rtems/bspIo.h> /* for printk() */
#include <stdint.h>
#include <stdio.h>
#endif

#define CPU_INLINE_ENABLE_DISPATCH       FALSE
#define CPU_UNROLL_ENQUEUE_PRIORITY      TRUE
#define CPU_ISR_PASSES_FRAME_POINTER     1
#define CPU_HARDWARE_FP                  FALSE
#define CPU_SOFTWARE_FP                  FALSE
#define CPU_ALL_TASKS_ARE_FP             FALSE
#define CPU_IDLE_TASK_IS_FP              FALSE
#define CPU_USE_DEFERRED_FP_SWITCH       FALSE
#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE
#define CPU_STACK_GROWS_UP               FALSE

#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (64)))
#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES     FALSE
#define CPU_BIG_ENDIAN                           FALSE
#define CPU_LITTLE_ENDIAN                        TRUE
#define CPU_MODES_INTERRUPT_MASK   0x0000000000000001

/*
 *  Processor defined structures required for cpukit/score.
 */

#ifndef ASM

typedef struct {
  /* RISC-V has 32 xlen-bit (where xlen can be 32 or 64) general-purpose registers (x0-x31) */
  unsigned long x[32];

  /* Special purpose registers */
  unsigned long mstatus;
  unsigned long mcause;
  unsigned long mepc;
#ifdef RTEMS_SMP
  /**
   * @brief On SMP configurations the thread context must contain a boolean
   * indicator to signal if this context is executing on a processor.
   *
   * This field must be updated during a context switch.  The context switch
   * to the heir must wait until the heir context indicates that it is no
   * longer executing on a processor.  The context switch must also check if
   * a thread dispatch is necessary to honor updates of the heir thread for
   * this processor.  This indicator must be updated using an atomic test and
   * set operation to ensure that at most one processor uses the heir
   * context at the same time.
   *
   * @code
   * void _CPU_Context_switch(
   *   Context_Control *executing,
   *   Context_Control *heir
   * )
   * {
   *   save( executing );
   *
   *   executing->is_executing = false;
   *   memory_barrier();
   *
   *   if ( test_and_set( &heir->is_executing ) ) {
   *     do {
   *       Per_CPU_Control *cpu_self = _Per_CPU_Get_snapshot();
   *
   *       if ( cpu_self->dispatch_necessary ) {
   *         heir = _Thread_Get_heir_and_make_it_executing( cpu_self );
   *       }
   *     } while ( test_and_set( &heir->is_executing ) );
   *   }
   *
   *   restore( heir );
   * }
   * @endcode
   */
  volatile bool is_executing;
#endif
} Context_Control;

/* x2 is the stack pointer (sp) in the standard RISC-V calling convention */
#define _CPU_Context_Get_SP( _context ) \
  (_context)->x[2]

typedef struct {
  /** TODO FPU registers are listed here */
  double  some_float_register;
} Context_Control_fp;

typedef Context_Control CPU_Interrupt_frame;

#define CPU_CONTEXT_FP_SIZE  0
extern Context_Control_fp _CPU_Null_fp_context;

#define CPU_CACHE_LINE_BYTES 64

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
#if __riscv_xlen == 32
#define CPU_STACK_MINIMUM_SIZE  4096
#else
#define CPU_STACK_MINIMUM_SIZE  ( 4096 * 2 )
#endif
#define CPU_ALIGNMENT 8
#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT
#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT
#define CPU_STACK_ALIGNMENT        8

#define CPU_INTERRUPT_STACK_ALIGNMENT CPU_CACHE_LINE_BYTES

#define _CPU_Initialize_vectors()

/*
 *  Disable all interrupts for an RTEMS critical section.  The previous
 *  level is returned in _level.
 */

static inline unsigned long riscv_interrupt_disable( void )
{
  unsigned long status = read_csr(mstatus);
  clear_csr(mstatus, MSTATUS_MIE);
  return status;
}

static inline void riscv_interrupt_enable(unsigned long level)
{
  write_csr(mstatus, level);
}

#define _CPU_ISR_Disable( _level ) \
  _level = riscv_interrupt_disable()

#define _CPU_ISR_Enable( _level )  \
  riscv_interrupt_enable( _level )

#define _CPU_ISR_Flash( _level ) \
  do { \
    _CPU_ISR_Enable( _level ); \
    riscv_interrupt_disable(); \
  } while (0)

RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( unsigned long level )
{
  return ( level & MSTATUS_MIE ) != 0;
}
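
/*
 * Illustrative use of the macros above (this example is not part of the
 * port): a critical section saves the current mstatus value, masks
 * interrupts, and later restores the saved level.  _CPU_ISR_Flash opens a
 * brief window in a long critical section so pending interrupts can be
 * serviced.
 *
 * @code
 * unsigned long level;
 *
 * _CPU_ISR_Disable( level );   // level = old mstatus, MSTATUS_MIE cleared
 * // ... short critical section ...
 * _CPU_ISR_Flash( level );     // re-enable, then disable again
 * // ... remainder of the critical section ...
 * _CPU_ISR_Enable( level );    // write the saved mstatus back
 * @endcode
 */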

RTEMS_INLINE_ROUTINE void _CPU_ISR_Set_level( uint32_t level )
{
  if ( ( level & CPU_MODES_INTERRUPT_MASK ) == 0 ) {
    __asm__ volatile (
      "csrrs zero, mstatus, " RTEMS_XSTRING( MSTATUS_MIE )
    );
  } else {
    __asm__ volatile (
      "csrrc zero, mstatus, " RTEMS_XSTRING( MSTATUS_MIE )
    );
  }
}

uint32_t _CPU_ISR_Get_level( void );
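
/*
 * Sketch of the level convention used by the two routines above
 * (illustrative only): level 0 means machine interrupts are enabled, a
 * non-zero level (bit 0, per CPU_MODES_INTERRUPT_MASK) means MSTATUS_MIE
 * is cleared.
 *
 * @code
 * _CPU_ISR_Set_level( 1 );   // csrrc clears MSTATUS_MIE, interrupts masked
 * // _CPU_ISR_Get_level() now returns 1
 * _CPU_ISR_Set_level( 0 );   // csrrs sets MSTATUS_MIE, interrupts enabled
 * @endcode
 */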

/* end of ISR handler macros */

/* Context handler macros */
#define RISCV_GCC_RED_ZONE_SIZE 128

void _CPU_Context_Initialize(
  Context_Control *context,
  void *stack_area_begin,
  size_t stack_area_size,
  unsigned long new_level,
  void (*entry_point)( void ),
  bool is_fp,
  void *tls_area
);
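
/*
 * A minimal sketch of what an implementation of the routine above typically
 * does (the real code lives in the port's cpu.c; the details below are
 * assumptions, not the actual implementation): place the initial stack
 * pointer at the aligned top of the stack area minus the GCC red zone, and
 * seed the saved registers so that the first context restore starts the
 * thread at entry_point.
 *
 * @code
 * uintptr_t sp = (uintptr_t) stack_area_begin + stack_area_size;
 *
 * sp &= ~( (uintptr_t) CPU_STACK_ALIGNMENT - 1 );  // align downwards
 * sp -= RISCV_GCC_RED_ZONE_SIZE;                   // leave the red zone
 *
 * context->x[ 2 ] = sp;                            // x2 is sp
 * context->x[ 1 ] = (unsigned long) entry_point;   // x1 is ra
 * @endcode
 */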

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) )

#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

#define _CPU_Context_Initialize_fp( _destination ) \
  { \
   *(*(_destination)) = _CPU_Null_fp_context; \
  }

extern void _CPU_Fatal_halt(uint32_t source, uint32_t error) RTEMS_NO_RETURN;

/* end of Fatal Error manager macros */

#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
#define CPU_USE_GENERIC_BITFIELD_DATA TRUE

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
  { \
    (_output) = 0;   /* do something to prevent warnings */ \
  }
#endif

/* end of Bitfield handler macros */

/*
 *  This routine builds the mask which corresponds to the bit fields
 *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
 *  for that routine.
 */

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Priority_Mask( _bit_number ) \
    ( 1 << (_bit_number) )

#endif

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Priority_bits_index( _priority ) \
  (_priority)

#endif

#define CPU_MAXIMUM_PROCESSORS 32

#define CPU_TIMESTAMP_USE_STRUCT_TIMESPEC FALSE
#define CPU_TIMESTAMP_USE_INT64 TRUE
#define CPU_TIMESTAMP_USE_INT64_INLINE FALSE

typedef struct {
  /* There is no CPU specific per-CPU state */
} CPU_Per_CPU_control;
#endif /* ASM */

#if __riscv_xlen == 32
#define CPU_SIZEOF_POINTER 4

/* 32-bit load/store instructions */
#define LREG lw
#define SREG sw

#define CPU_EXCEPTION_FRAME_SIZE 128
#else /* __riscv_xlen == 64 */
#define CPU_SIZEOF_POINTER 8

/* 64-bit load/store instructions */
#define LREG ld
#define SREG sd

#define CPU_EXCEPTION_FRAME_SIZE 256
#endif
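
/*
 * LREG and SREG let the assembly sources load and store a full xlen-wide
 * register without separate RV32/RV64 code paths.  For example
 * (illustrative):
 *
 * @code
 * SREG ra, 0(sp)   // expands to "sw ra, 0(sp)" on RV32, "sd ra, 0(sp)" on RV64
 * LREG ra, 0(sp)   // expands to "lw ra, 0(sp)" on RV32, "ld ra, 0(sp)" on RV64
 * @endcode
 */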

#define CPU_PER_CPU_CONTROL_SIZE 0

#ifndef ASM
typedef uint16_t Priority_bit_map_Word;

typedef struct {
  unsigned long x[32];
} CPU_Exception_frame;

/**
 * @brief Prints the exception frame via printk().
 *
 * @see rtems_fatal() and RTEMS_FATAL_SOURCE_EXCEPTION.
 */
void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

/* end of Priority handler macros */

/* functions */

/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 */

void _CPU_Initialize( void );

/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs a "raw" interrupt handler directly into the
 *  processor's vector table.
 */

void _CPU_ISR_install_raw_handler(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.
 *
 *  RISCV Specific Information:
 *
 *  XXX document implementation including references if appropriate
 */

void _CPU_ISR_install_vector(
  unsigned long    vector,
  proc_ptr   new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_Thread_Idle_body
 *
 *  This routine is the CPU dependent IDLE thread body.
 *
 *  NOTE:  It need only be provided if CPU_PROVIDES_IDLE_THREAD_BODY
 *         is TRUE.
 */

void *_CPU_Thread_Idle_body( uintptr_t ignored );

/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 *
 *  RISCV Specific Information:
 *
 *  Please see the comments in the .c file for a description of how
 *  this function works. There are several things to be aware of.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 */

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_NO_RETURN;

/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 */

void _CPU_Context_save_fp(
  void **fp_context_ptr
);

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 */

void _CPU_Context_restore_fp(
  void **fp_context_ptr
);

/*  The following routine swaps the endian format of an unsigned int.
 *  It must be static because it is referenced indirectly.
 *
 *  This version will work on any processor, but if there is a better
 *  way for your CPU PLEASE use it.  The most common way to do this is to:
 *
 *     swap least significant two bytes with 16-bit rotate
 *     swap upper and lower 16-bits
 *     swap most significant two bytes with 16-bit rotate
 *
 *  Some CPUs have special instructions which swap a 32-bit quantity in
 *  a single instruction (e.g. i486).  It is probably best to avoid
 *  an "endian swapping control bit" in the CPU.  One good reason is
 *  that interrupts would probably have to be disabled to ensure that
 *  an interrupt does not try to access the same "chunk" with the wrong
 *  endian.  Another good reason is that on some CPUs, the endian bit
 *  changes the endianness for ALL fetches -- both code and data -- so
 *  the code will be fetched incorrectly.
 */

static inline uint32_t CPU_swap_u32(
  uint32_t value
)
{
  uint32_t   byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value        & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return ( swapped );
}

#define CPU_swap_u16( value ) \
  (((( value ) & 0xff) << 8) | ((( value ) >> 8) & 0xff))
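
/*
 * Example values (illustrative): both helpers reverse the byte order of
 * their argument.
 *
 * @code
 * uint32_t u32 = CPU_swap_u32( 0x12345678 );  // yields 0x78563412
 * uint16_t u16 = CPU_swap_u16( 0x1234 );      // yields 0x3412
 * @endcode
 */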

static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
{
  /* TODO */
}

static inline void _CPU_Context_validate( uintptr_t pattern )
{
  while (1) {
    /* TODO */
  }
}

typedef uint32_t CPU_Counter_ticks;

uint32_t _CPU_Counter_frequency( void );

CPU_Counter_ticks _CPU_Counter_read( void );
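
/*
 * Illustrative use of the CPU counter (not part of the port): the ticks
 * are free-running and may wrap, so intervals are taken with unsigned
 * subtraction.
 *
 * @code
 * CPU_Counter_ticks start = _CPU_Counter_read();
 * // ... work to be measured ...
 * CPU_Counter_ticks delta = _CPU_Counter_read() - start;
 * // elapsed seconds is roughly (double) delta / _CPU_Counter_frequency()
 * @endcode
 */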

#ifdef RTEMS_SMP
/**
 * @brief Performs CPU specific SMP initialization in the context of the boot
 * processor.
 *
 * This function is invoked on the boot processor during system
 * initialization.  All interrupt stacks are allocated at this point in case
 * the CPU port allocates the interrupt stacks.  This function is called
 * before _CPU_SMP_Start_processor() or _CPU_SMP_Finalize_initialization() is
 * used.
 *
 * @return The count of physically or virtually available processors.
 * Depending on the configuration the application may not use all processors.
 */
uint32_t _CPU_SMP_Initialize( void );

/**
 * @brief Starts a processor specified by its index.
 *
 * This function is invoked on the boot processor during system
 * initialization.
 *
 * This function will be called after _CPU_SMP_Initialize().
 *
 * @param[in] cpu_index The processor index.
 *
 * @retval true Successful operation.
 * @retval false Unable to start this processor.
 */
bool _CPU_SMP_Start_processor( uint32_t cpu_index );

/**
 * @brief Performs final steps of CPU specific SMP initialization in the
 * context of the boot processor.
 *
 * This function is invoked on the boot processor during system
 * initialization.
 *
 * This function will be called after all processors requested by the
 * application have been started.
 *
 * @param[in] cpu_count The minimum value of the count of processors
 * requested by the application configuration and the count of physically or
 * virtually available processors.
 */
void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

/**
 * @brief Returns the index of the current processor.
 *
 * An architecture specific method must be used to obtain the index of the
 * current processor in the system.  The set of processor indices is the
 * range of integers starting with zero up to the processor count minus one.
 */
uint32_t _CPU_SMP_Get_current_processor( void );

/**
 * @brief Sends an inter-processor interrupt to the specified target
 * processor.
 *
 * This operation is undefined for target processor indices out of range.
 *
 * @param[in] target_processor_index The target processor index.
 */
void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

/**
 * @brief Broadcasts a processor event.
 *
 * Some architectures provide a low-level synchronization primitive for
 * processors in a multi-processor environment.  Processors waiting for this
 * event may go into a low-power state and stop generating system bus
 * transactions.  This function must ensure that preceding store operations
 * can be observed by other processors.
 *
 * @see _CPU_SMP_Processor_event_receive().
 */
void _CPU_SMP_Processor_event_broadcast( void );

/**
 * @brief Receives a processor event.
 *
 * This function will wait for the processor event and may wait forever if no
 * such event arrives.
 *
 * @see _CPU_SMP_Processor_event_broadcast().
 */
static inline void _CPU_SMP_Processor_event_receive( void )
{
  __asm__ volatile ( "" : : : "memory" );
}
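
/*
 * Illustrative pairing of the two event routines above (not part of the
 * port): one processor polls a flag published by another processor and
 * sleeps on the processor event between checks, while the writer stores the
 * flag and then broadcasts the event.
 *
 * @code
 * volatile bool flag;   // assumed shared state, for illustration only
 *
 * // waiter
 * while ( !flag ) {
 *   _CPU_SMP_Processor_event_receive();
 * }
 *
 * // producer
 * flag = true;
 * _CPU_SMP_Processor_event_broadcast();
 * @endcode
 */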

/**
 * @brief Gets the is executing indicator of the thread context.
 *
 * @param[in] context The context.
 */
static inline bool _CPU_Context_Get_is_executing(
  const Context_Control *context
)
{
  return context->is_executing;
}

/**
 * @brief Sets the is executing indicator of the thread context.
 *
 * @param[in] context The context.
 * @param[in] is_executing The new value for the is executing indicator.
 */
static inline void _CPU_Context_Set_is_executing(
  Context_Control *context,
  bool is_executing
)
{
  context->is_executing = is_executing;
}
#endif /* RTEMS_SMP */

/** Type that can store a 32-bit integer or a pointer. */
typedef uintptr_t CPU_Uint32ptr;

#endif /* ASM */

#ifdef __cplusplus
}
#endif

#endif /* _RISCV_CPU_H */