source: rtems/cpukit/score/cpu/riscv/include/rtems/score/cpu.h @ afb60eb

Last change on this file: afb60eb, checked in by Sebastian Huber <sebastian.huber@…> on 06/27/18 at 12:46:06

riscv: Remove dead code

Update #3433.

/**
 * @file rtems/score/cpu.h
 */

/*
 * Copyright (c) 2018 embedded brains GmbH
 *
 * Copyright (c) 2015 University of York.
 * Hesham Almatary <hesham@alumni.york.ac.uk>
 *
 * COPYRIGHT (c) 1989-1999.
 * On-Line Applications Research Corporation (OAR).
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _RISCV_CPU_H
#define _RISCV_CPU_H

#ifdef __cplusplus
extern "C" {
#endif

#include <rtems/score/basedefs.h>
#include <rtems/score/riscv.h>

#define RISCV_MSTATUS_MIE 0x8

#define CPU_INLINE_ENABLE_DISPATCH       FALSE
#define CPU_UNROLL_ENQUEUE_PRIORITY      TRUE

#define CPU_ISR_PASSES_FRAME_POINTER FALSE

#define CPU_HARDWARE_FP                  FALSE
#define CPU_SOFTWARE_FP                  FALSE
#define CPU_ALL_TASKS_ARE_FP             FALSE
#define CPU_IDLE_TASK_IS_FP              FALSE
#define CPU_USE_DEFERRED_FP_SWITCH       FALSE
#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE
#define CPU_STACK_GROWS_UP               FALSE

#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (64)))
#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES     FALSE
#define CPU_BIG_ENDIAN                           FALSE
#define CPU_LITTLE_ENDIAN                        TRUE
#define CPU_MODES_INTERRUPT_MASK   0x0000000000000001

#define CPU_CONTEXT_FP_SIZE  0

#define CPU_PER_CPU_CONTROL_SIZE 0

#define CPU_CACHE_LINE_BYTES 64

#if __riscv_xlen == 32

#define CPU_SIZEOF_POINTER 4

#define CPU_STACK_MINIMUM_SIZE 4096

#elif __riscv_xlen == 64

#define CPU_SIZEOF_POINTER 8

#define CPU_STACK_MINIMUM_SIZE 8192

#endif /* __riscv_xlen */

#define CPU_ALIGNMENT 8

#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT

#define CPU_PARTITION_ALIGNMENT CPU_ALIGNMENT

#define CPU_STACK_ALIGNMENT 16

#define CPU_INTERRUPT_STACK_ALIGNMENT CPU_CACHE_LINE_BYTES

/*
 *  Processor defined structures required for cpukit/score.
 */

#ifndef ASM

typedef struct {
#ifdef RTEMS_SMP
  volatile uint32_t is_executing;
#else
  uint32_t reserved;
#endif
  uint32_t isr_dispatch_disable;
  uintptr_t ra;
  uintptr_t sp;
  uintptr_t tp;
  uintptr_t s0;
  uintptr_t s1;
  uintptr_t s2;
  uintptr_t s3;
  uintptr_t s4;
  uintptr_t s5;
  uintptr_t s6;
  uintptr_t s7;
  uintptr_t s8;
  uintptr_t s9;
  uintptr_t s10;
  uintptr_t s11;
} Context_Control;

#define _CPU_Context_Get_SP( _context ) \
  (_context)->sp

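/*
 * Illustrative sketch only, not part of this port: one way the saved stack
 * pointer of a suspended context could be inspected, e.g. by a stack
 * checker.  The function name and its parameters are hypothetical.
 */
static inline bool example_sp_is_within(
  const Context_Control *context,
  uintptr_t              stack_begin,
  uintptr_t              stack_end
)
{
  uintptr_t sp = _CPU_Context_Get_SP( context );

  return sp >= stack_begin && sp < stack_end;
}
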
typedef struct {
  /** TODO FPU registers are listed here */
  double  some_float_register;
} Context_Control_fp;

Context_Control_fp  _CPU_Null_fp_context;

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

#define _CPU_Initialize_vectors()

static inline uint32_t riscv_interrupt_disable( void )
{
  unsigned long mstatus;

  __asm__ volatile (
    "csrrc %0, mstatus, " RTEMS_XSTRING( RISCV_MSTATUS_MIE ) :
      "=&r" ( mstatus )
  );

  return mstatus & RISCV_MSTATUS_MIE;
}

static inline void riscv_interrupt_enable( uint32_t level )
{
  __asm__ volatile ( "csrrs zero, mstatus, %0" : : "r" ( level ) );
}

#define _CPU_ISR_Disable( _level ) \
    _level = riscv_interrupt_disable()

#define _CPU_ISR_Enable( _level )  \
  riscv_interrupt_enable( _level )

#define _CPU_ISR_Flash( _level ) \
  do{ \
      _CPU_ISR_Enable( _level ); \
      riscv_interrupt_disable(); \
    } while(0)

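/*
 * Illustrative sketch only, not part of this port: a critical section built
 * from the macros above.  _CPU_ISR_Disable() masks machine interrupts and
 * remembers the previous MIE state, _CPU_ISR_Flash() briefly restores that
 * state and masks again, and _CPU_ISR_Enable() restores it for good.  The
 * function name and the counter parameter are hypothetical.
 */
static inline void example_critical_section( volatile uint32_t *counter )
{
  uint32_t level;

  _CPU_ISR_Disable( level );
  ++*counter;
  _CPU_ISR_Flash( level );
  ++*counter;
  _CPU_ISR_Enable( level );
}
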
RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( unsigned long level )
{
  return ( level & RISCV_MSTATUS_MIE ) != 0;
}

RTEMS_INLINE_ROUTINE void _CPU_ISR_Set_level( uint32_t level )
{
  if ( ( level & CPU_MODES_INTERRUPT_MASK) == 0 ) {
    __asm__ volatile (
      "csrrs zero, mstatus, " RTEMS_XSTRING( RISCV_MSTATUS_MIE )
    );
  } else {
    __asm__ volatile (
      "csrrc zero, mstatus, " RTEMS_XSTRING( RISCV_MSTATUS_MIE )
    );
  }
}

uint32_t _CPU_ISR_Get_level( void );

/* end of ISR handler macros */

void _CPU_Context_Initialize(
  Context_Control *context,
  void            *stack_area_begin,
  size_t           stack_area_size,
  uint32_t         new_level,
  void          ( *entry_point )( void ),
  bool             is_fp,
  void            *tls_area
);

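/*
 * Illustrative call sketch only, not part of this port: how a context might
 * be prepared for a new thread of execution.  The wrapper, its parameters
 * and the interrupt level of 0 (interrupts enabled) are assumptions made
 * for the example.
 */
static inline void example_context_setup(
  Context_Control *context,
  void            *stack_area,
  size_t           stack_size,
  void           ( *entry )( void )
)
{
  _CPU_Context_Initialize(
    context,     /* context to fill in */
    stack_area,  /* base of the stack area */
    stack_size,  /* size of the stack area in bytes */
    0,           /* initial interrupt level, 0 means interrupts enabled */
    entry,       /* entry point of the new execution context */
    false,       /* no floating point context */
    NULL         /* no thread-local storage area */
  );
}
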
#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) )


#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

#define _CPU_Context_Initialize_fp( _destination ) \
  { \
   *(*(_destination)) = _CPU_Null_fp_context; \
  }

extern void _CPU_Fatal_halt(uint32_t source, uint32_t error) RTEMS_NO_RETURN;

#define CPU_USE_GENERIC_BITFIELD_CODE TRUE

#define CPU_USE_GENERIC_BITFIELD_DATA TRUE

#define CPU_MAXIMUM_PROCESSORS 32

typedef struct {
  /* There is no CPU specific per-CPU state */
} CPU_Per_CPU_control;

typedef uint16_t Priority_bit_map_Word;

typedef struct {
  unsigned long x[32];
} CPU_Exception_frame;

/**
 * @brief Prints the exception frame via printk().
 *
 * @see rtems_fatal() and RTEMS_FATAL_SOURCE_EXCEPTION.
 */
void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );


/* end of Priority handler macros */

/* functions */

/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 *
 */

void _CPU_Initialize(
  void
);

/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs a "raw" interrupt handler directly into the
 *  processor's vector table.
 *
 */

void _CPU_ISR_install_raw_handler(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.
 *
 *  RISCV Specific Information:
 *
 *  XXX document implementation including references if appropriate
 */

void _CPU_ISR_install_vector(
  unsigned long    vector,
  proc_ptr   new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_Thread_Idle_body
 *
 *  This routine is the CPU dependent IDLE thread body.
 *
 *  NOTE:  It need only be provided if CPU_PROVIDES_IDLE_THREAD_BODY
 *         is TRUE.
 *
 */

void *_CPU_Thread_Idle_body( uintptr_t ignored );

/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 *
 *  RISCV Specific Information:
 *
 *  Please see the comments in the .c file for a description of how
 *  this function works. There are several things to be aware of.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 *
 */

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_NO_RETURN;

/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 *
 */

void _CPU_Context_save_fp(
  void **fp_context_ptr
);

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 *
 */

void _CPU_Context_restore_fp(
  void **fp_context_ptr
);

/*  The following routine swaps the endian format of an unsigned int.
 *  It must be static because it is referenced indirectly.
 *
 *  This version will work on any processor, but if there is a better
 *  way for your CPU PLEASE use it.  The most common way to do this is to:
 *
 *     swap least significant two bytes with 16-bit rotate
 *     swap upper and lower 16-bits
 *     swap most significant two bytes with 16-bit rotate
 *
 *  Some CPUs have special instructions which swap a 32-bit quantity in
 *  a single instruction (e.g. i486).  It is probably best to avoid
 *  an "endian swapping control bit" in the CPU.  One good reason is
 *  that interrupts would probably have to be disabled to ensure that
 *  an interrupt does not try to access the same "chunk" with the wrong
 *  endian.  Another good reason is that on some CPUs, the endian bit
 *  changes the endianness for ALL fetches -- both code and data -- so the
 *  code will be fetched incorrectly.
 *
 */

static inline uint32_t CPU_swap_u32(
  uint32_t value
)
{
  uint32_t   byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value        & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return ( swapped );
}

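/*
 * Possible alternative, shown for illustration only and not used by this
 * file: GCC and clang provide __builtin_bswap32(), which the compiler turns
 * into a single byte-swap instruction where the target has one and into a
 * shift/mask sequence such as the one above otherwise.  The function name
 * is hypothetical.
 */
static inline uint32_t example_swap_u32_builtin( uint32_t value )
{
  return __builtin_bswap32( value );
}
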
#define CPU_swap_u16( value ) \
  ((((value) & 0xff) << 8) | (((value) >> 8) & 0xff))

void _CPU_Context_volatile_clobber( uintptr_t pattern );

void _CPU_Context_validate( uintptr_t pattern );

typedef uint32_t CPU_Counter_ticks;

uint32_t _CPU_Counter_frequency( void );

CPU_Counter_ticks _CPU_Counter_read( void );

#ifdef RTEMS_SMP

uint32_t _CPU_SMP_Initialize( void );

bool _CPU_SMP_Start_processor( uint32_t cpu_index );

void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

void _CPU_SMP_Prepare_start_multitasking( void );

static inline uint32_t _CPU_SMP_Get_current_processor( void )
{
  unsigned long mhartid;

  __asm__ volatile ( "csrr %0, mhartid" : "=&r" ( mhartid ) );

  return (uint32_t) mhartid;
}

void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

static inline void _CPU_SMP_Processor_event_broadcast( void )
{
  __asm__ volatile ( "" : : : "memory" );
}

static inline void _CPU_SMP_Processor_event_receive( void )
{
  __asm__ volatile ( "" : : : "memory" );
}

static inline bool _CPU_Context_Get_is_executing(
  const Context_Control *context
)
{
  return context->is_executing;
}

static inline void _CPU_Context_Set_is_executing(
  Context_Control *context,
  bool is_executing
)
{
  context->is_executing = is_executing;
}

#endif /* RTEMS_SMP */

/** Type that can store a 32-bit integer or a pointer. */
typedef uintptr_t CPU_Uint32ptr;

#endif /* ASM */

#ifdef __cplusplus
}
#endif

#endif