source: rtems/cpukit/score/cpu/x86_64/include/rtems/score/cpu.h @ bf39a9e

Last change on this file since bf39a9e was bf39a9e, checked in by Sebastian Huber <sebastian.huber@…>, on 12/06/19 at 19:24:37

score: Remove superfluous FP types/defines

Update #3835.

  • Property mode set to 100644
File size: 9.2 KB
/**
 * @file
 *
 * @brief x86_64 Dependent Source
 *
 * This include file contains information pertaining to the x86_64 processor.
 */

/*
 * Copyright (c) 2018.
 * Amaan Cheval <amaan.cheval@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

#ifdef __cplusplus
extern "C" {
#endif

#include <rtems/score/basedefs.h>
#include <rtems/score/cpu_asm.h>
#include <rtems/score/x86_64.h>

#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE
#define CPU_ISR_PASSES_FRAME_POINTER FALSE
#define CPU_HARDWARE_FP FALSE
#define CPU_SOFTWARE_FP FALSE
#define CPU_ALL_TASKS_ARE_FP FALSE
#define CPU_IDLE_TASK_IS_FP FALSE
#define CPU_USE_DEFERRED_FP_SWITCH FALSE
#define CPU_ENABLE_ROBUST_THREAD_DISPATCH FALSE
#define CPU_STACK_GROWS_UP               FALSE

#define CPU_STRUCTURE_ALIGNMENT RTEMS_ALIGNED(64)
#define CPU_CACHE_LINE_BYTES 64
#define CPU_MODES_INTERRUPT_MASK   0x00000001
#define CPU_MAXIMUM_PROCESSORS 32

#define CPU_EFLAGS_INTERRUPTS_ON  0x00003202
#define CPU_EFLAGS_INTERRUPTS_OFF 0x00003002

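/*
 * Note: the two CPU_EFLAGS_* values above differ only in bit 9 (IF, 0x200),
 * the interrupt enable flag; both keep IOPL at 3 and the reserved always-one
 * bit 1 set.  An illustrative compile-time check (not part of the port):
 *
 *   RTEMS_STATIC_ASSERT(
 *     (CPU_EFLAGS_INTERRUPTS_ON ^ CPU_EFLAGS_INTERRUPTS_OFF) == 0x200,
 *     cpu_eflags_if_bit
 *   );
 */
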
#ifndef ASM

typedef struct {
  uint64_t rflags;

  /**
   * Callee-saved registers as listed in the SysV ABI document:
   * https://github.com/hjl-tools/x86-psABI/wiki/X86-psABI
   */
  uint64_t rbx;
  void    *rsp;
  void    *rbp;
  uint64_t r12;
  uint64_t r13;
  uint64_t r14;
  uint64_t r15;

  // XXX: FS segment descriptor for TLS

#ifdef RTEMS_SMP
    volatile bool is_executing;
#endif
} Context_Control;

#define _CPU_Context_Get_SP( _context ) \
  (_context)->rsp
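
/*
 * Illustrative sketch (an assumption, not taken from the port sources): the
 * context-switch assembly addresses these members by fixed byte offsets,
 * which for the layout above (all 8-byte members, non-SMP case) would be:
 *
 *   rflags  0    rbx  8    rsp 16    rbp 24
 *   r12    32    r13 40    r14 48    r15 56
 *
 * Compile-time checks along these lines could confirm that assumption:
 *
 *   RTEMS_STATIC_ASSERT( offsetof( Context_Control, rbx ) ==  8, ctx_rbx );
 *   RTEMS_STATIC_ASSERT( offsetof( Context_Control, rsp ) == 16, ctx_rsp );
 */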

/*
 * Caller-saved registers for interrupt frames
 */
typedef struct {
  /**
   * @note: rdi is a caller-saved register too, but it is used in function
   * calls and is hence saved separately on the stack.
   *
   * @see DISTINCT_INTERRUPT_ENTRY
   * @see _ISR_Handler
   */

  uint64_t rax;
  uint64_t rcx;
  uint64_t rdx;
  uint64_t rsi;
  uint64_t r8;
  uint64_t r9;
  uint64_t r10;
  uint64_t r11;

  /*
   * This holds rsp just before _ISR_Handler is called.  It is needed because
   * the handler aligns the stack before making further calls, and that
   * alignment can move the stack pointer by an unknown amount, leaving no
   * other way to get back to the stack and therefore to the interrupt frame.
   */
  uint64_t saved_rsp;

  /* XXX:
   * - FS segment selector for TLS
   * - x87 status word?
   * - MMX?
   * - XMM?
   */
} CPU_Interrupt_frame;

#endif /* !ASM */

#define CPU_INTERRUPT_FRAME_SIZE 72

/*
 * When SMP is enabled, percpuasm.c has a similar assert, but since we use the
 * interrupt frame regardless of SMP, we'll confirm it here.
 */
#ifndef ASM
  RTEMS_STATIC_ASSERT(
    sizeof(CPU_Interrupt_frame) == CPU_INTERRUPT_FRAME_SIZE,
    CPU_INTERRUPT_FRAME_SIZE
  );
#endif
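
/*
 * The 72 bytes correspond to the nine 8-byte members of CPU_Interrupt_frame:
 * rax, rcx, rdx, rsi, r8, r9, r10, r11 and saved_rsp (9 * 8 == 72).  Any
 * change to the frame layout would presumably require updating this constant
 * and the assembly that builds the frame.
 */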

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
#define CPU_STACK_MINIMUM_SIZE          (1024*4)
#define CPU_SIZEOF_POINTER         8
#define CPU_ALIGNMENT              8
#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT
#define CPU_STACK_ALIGNMENT        16
#define CPU_INTERRUPT_STACK_ALIGNMENT CPU_CACHE_LINE_BYTES

/*
 *  ISR handler macros
 */

#ifndef ASM

#define _CPU_Initialize_vectors()

#define _CPU_ISR_Enable(_level)                             \
{                                                           \
  amd64_enable_interrupts();                                \
  _level = 0;                                               \
  (void) _level; /* Prevent -Wunused-but-set-variable */    \
}

#define _CPU_ISR_Disable(_level)                            \
{                                                           \
  amd64_disable_interrupts();                               \
  _level = 1;                                               \
  (void) _level; /* Prevent -Wunused-but-set-variable */    \
}

#define _CPU_ISR_Flash(_level)                              \
{                                                           \
  amd64_enable_interrupts();                                \
  amd64_disable_interrupts();                               \
  _level = 1;                                               \
  (void) _level; /* Prevent -Wunused-but-set-variable */    \
}
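
/*
 * Minimal usage sketch (illustrative only, not part of the port): the score
 * uses these macros to bracket short critical sections, roughly as follows.
 *
 *   uint32_t level;
 *
 *   _CPU_ISR_Disable( level );
 *     ... touch data shared with interrupt handlers ...
 *   _CPU_ISR_Flash( level );   briefly re-enable to bound interrupt latency
 *     ... continue the critical section ...
 *   _CPU_ISR_Enable( level );
 */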

RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled(uint32_t level)
{
  return (level & EFLAGS_INTR_ENABLE) != 0;
}

RTEMS_INLINE_ROUTINE void _CPU_ISR_Set_level(uint32_t new_level)
{
  if ( new_level ) {
    amd64_disable_interrupts();
  }
  else {
    amd64_enable_interrupts();
  }
}

RTEMS_INLINE_ROUTINE uint32_t _CPU_ISR_Get_level(void)
{
  uint64_t rflags;

  __asm__ volatile ( "pushf; \
                      popq %0"
                     : "=rm" (rflags)
  );

  uint32_t level = (rflags & EFLAGS_INTR_ENABLE) ? 0 : 1;
  return level;
}

/* end of ISR handler macros */

/* Context handler macros */
#define _CPU_Context_Destroy( _the_thread, _the_context ) \
  { \
  }

void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint32_t new_level,
  void (*entry_point)( void ),
  bool is_fp,
  void *tls_area
);
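
/*
 * The implementation lives in cpu.c.  As a rough, hedged sketch of what such
 * an initialization typically does on a port like this (assumptions, not a
 * description of the actual code): place the stack pointer near the top of
 * the stack area, respect CPU_STACK_ALIGNMENT, arrange for entry_point to be
 * reached on the first context restore, and derive rflags from new_level:
 *
 *   uintptr_t stack = ( (uintptr_t) stack_area_begin + stack_area_size )
 *                     & ~( (uintptr_t) CPU_STACK_ALIGNMENT - 1 );
 *   the_context->rsp    = (void *) stack;
 *   the_context->rbp    = (void *) stack;
 *   the_context->rflags = new_level ? CPU_EFLAGS_INTERRUPTS_OFF
 *                                   : CPU_EFLAGS_INTERRUPTS_ON;
 */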

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

#define _CPU_Context_Initialize_fp( _destination )  \
  { \
   *(*(_destination)) = _CPU_Null_fp_context; \
  }

/* end of Context handler macros */

/* Fatal Error manager macros */

#define _CPU_Fatal_halt( _source, _error ) \
  { \
  }

/* end of Fatal Error manager macros */

/* Bitfield handler macros */

#define CPU_USE_GENERIC_BITFIELD_CODE TRUE

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
  { \
    (_output) = 0;   /* do something to prevent warnings */ \
  }
#endif

/* end of Bitfield handler macros */

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
#define _CPU_Priority_Mask( _bit_number ) \
  ( 1 << (_bit_number) )
#endif

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
#define _CPU_Priority_bits_index( _priority ) \
  (_priority)
#endif

/* end of Priority handler macros */

/* functions */

void _CPU_Initialize(void);

void *_CPU_Thread_Idle_body( uintptr_t ignored );

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_NO_RETURN;

typedef struct {
  uint32_t processor_state_register;
  uint32_t integer_registers [1];
  double float_registers [1];
} CPU_Exception_frame;

void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

static inline uint32_t CPU_swap_u32(
  uint32_t value
)
{
  uint32_t byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value        & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return swapped;
}

#define CPU_swap_u16( value ) \
  (((value&0xff) << 8) | ((value >> 8)&0xff))
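
/*
 * Example results (for reference, not part of the port):
 *
 *   CPU_swap_u32( 0x12345678 ) == 0x78563412
 *   CPU_swap_u16( 0x1234 )     == 0x3412
 */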

typedef uint32_t CPU_Counter_ticks;

uint32_t _CPU_Counter_frequency( void );

CPU_Counter_ticks _CPU_Counter_read( void );

static inline CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
)
{
  return second - first;
}
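
/*
 * Because CPU_Counter_ticks is an unsigned 32-bit type, the subtraction above
 * is performed modulo 2^32 and therefore remains correct across a single
 * counter wraparound.  For example (illustrative only):
 *
 *   first  == 0xFFFFFFF0
 *   second == 0x00000010
 *   second - first == 0x00000020
 */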

#ifdef RTEMS_SMP
  uint32_t _CPU_SMP_Initialize( void );

  bool _CPU_SMP_Start_processor( uint32_t cpu_index );

  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

  void _CPU_SMP_Prepare_start_multitasking( void );

  static inline uint32_t _CPU_SMP_Get_current_processor( void )
  {
    return 123;
  }

  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

  static inline void _CPU_SMP_Processor_event_broadcast( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }

  static inline void _CPU_SMP_Processor_event_receive( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }

  static inline bool _CPU_Context_Get_is_executing(
    const Context_Control *context
  )
  {
    return context->is_executing;
  }

  static inline void _CPU_Context_Set_is_executing(
    Context_Control *context,
    bool is_executing
  )
  {
    context->is_executing = is_executing;
  }

#endif /* RTEMS_SMP */

typedef uintptr_t CPU_Uint32ptr;

#ifdef __cplusplus
}
#endif

#endif /* ASM */

#endif /* _RTEMS_SCORE_CPU_H */