source: rtems/cpukit/score/cpu/i386/include/rtems/score/cpu.h @ 8776bb9

Last change on this file was 8776bb9, checked in by Sebastian Huber <sebastian.huber@…>, on 09/26/18 at 04:34:54

score: Remove CPU_PROVIDES_IDLE_THREAD_BODY

Remove the CPU_PROVIDES_IDLE_THREAD_BODY option to avoid unnecessary
conditional compilation.

Close #3539.

/**
 * @file
 *
 * @brief Intel I386 CPU Dependent Source
 *
 * This include file contains information pertaining to the Intel
 * i386 processor.
 */

/*
 *  COPYRIGHT (c) 1989-2011.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

#ifndef ASM
#include <string.h> /* for memcpy */
#endif

#ifdef __cplusplus
extern "C" {
#endif

#include <rtems/score/basedefs.h>
#if defined(RTEMS_PARAVIRT)
#include <rtems/score/paravirt.h>
#endif
#include <rtems/score/i386.h>

/* conditional compilation parameters */

/*
 *  Does the CPU follow the simple vectored interrupt model?
 *
 *  If TRUE, then RTEMS allocates and internally manages the vector table.
 *  If FALSE, then the BSP is assumed to allocate and manage the vector
 *  table.
 *
 *  i386 Specific Information:
 *
 *  The PowerPC and x86 were the first to use the PIC interrupt model.
 *  They do not use the simple vectored interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

/*
 *  Does RTEMS invoke the user's ISR with the vector number and
 *  a pointer to the saved interrupt frame (1) or just the vector
 *  number (0)?
 */

#define CPU_ISR_PASSES_FRAME_POINTER FALSE

/*
 *  Some family members have no FP, some have an FPU such as the i387
 *  for the i386, others have it built in (i486DX, Pentium).
 */

#ifdef __SSE__
#define CPU_HARDWARE_FP                  TRUE
#define CPU_SOFTWARE_FP                  FALSE

#define CPU_ALL_TASKS_ARE_FP             TRUE
#define CPU_IDLE_TASK_IS_FP              TRUE
#define CPU_USE_DEFERRED_FP_SWITCH       FALSE
#else /* __SSE__ */

#if ( I386_HAS_FPU == 1 )
#define CPU_HARDWARE_FP     TRUE    /* i387 for i386 */
#else
#define CPU_HARDWARE_FP     FALSE
#endif
#define CPU_SOFTWARE_FP     FALSE

#define CPU_ALL_TASKS_ARE_FP             FALSE
#define CPU_IDLE_TASK_IS_FP              FALSE
#if defined(RTEMS_SMP)
  #define CPU_USE_DEFERRED_FP_SWITCH     FALSE
#else
  #define CPU_USE_DEFERRED_FP_SWITCH     TRUE
#endif
#endif /* __SSE__ */

#define CPU_ENABLE_ROBUST_THREAD_DISPATCH FALSE

#define CPU_STACK_GROWS_UP               FALSE

/* FIXME: The Pentium 4 used 128 bytes, is this processor still relevant? */
#define CPU_CACHE_LINE_BYTES 64

#define CPU_STRUCTURE_ALIGNMENT

#define CPU_MAXIMUM_PROCESSORS 32

#define I386_CONTEXT_CONTROL_EFLAGS_OFFSET 0
#define I386_CONTEXT_CONTROL_ESP_OFFSET 4
#define I386_CONTEXT_CONTROL_EBP_OFFSET 8
#define I386_CONTEXT_CONTROL_EBX_OFFSET 12
#define I386_CONTEXT_CONTROL_ESI_OFFSET 16
#define I386_CONTEXT_CONTROL_EDI_OFFSET 20
#define I386_CONTEXT_CONTROL_GS_0_OFFSET 24
#define I386_CONTEXT_CONTROL_GS_1_OFFSET 28

#ifdef RTEMS_SMP
  #define I386_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 32
#endif

/* structures */

#ifndef ASM

/*
 *  Basic integer context for the i386 family.
 */

typedef struct {
  uint32_t    eflags;     /* extended flags register                   */
  void       *esp;        /* extended stack pointer register           */
  void       *ebp;        /* extended base pointer register            */
  uint32_t    ebx;        /* extended bx register                      */
  uint32_t    esi;        /* extended source index register            */
  uint32_t    edi;        /* extended destination index register       */
  segment_descriptors gs; /* gs segment descriptor                     */
#ifdef RTEMS_SMP
  volatile bool is_executing;
#endif
}   Context_Control;

#define _CPU_Context_Get_SP( _context ) \
  (_context)->esp
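
/*
 * The I386_CONTEXT_CONTROL_*_OFFSET constants defined above are intended to
 * mirror the member layout of Context_Control so that assembly language code
 * can address the members by offset.  A minimal consistency sketch, assuming
 * offsetof() from <stddef.h> and RTEMS_STATIC_ASSERT() from
 * <rtems/score/basedefs.h>, could look like:
 *
 *   RTEMS_STATIC_ASSERT(
 *     offsetof( Context_Control, esp ) == I386_CONTEXT_CONTROL_ESP_OFFSET,
 *     I386_CONTEXT_CONTROL_ESP_OFFSET
 *   );
 */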

#ifdef RTEMS_SMP
  static inline bool _CPU_Context_Get_is_executing(
    const Context_Control *context
  )
  {
    return context->is_executing;
  }

  static inline void _CPU_Context_Set_is_executing(
    Context_Control *context,
    bool is_executing
  )
  {
    context->is_executing = is_executing;
  }
#endif

/*
 *  FP context save area for the i387 numeric coprocessors.
 */
#ifdef __SSE__
/* All FPU and SSE registers are volatile; hence, as long
 * as we are within normally executing C code (including
 * a task switch) there is no need for saving/restoring
 * any of those registers.
 * We must save/restore the full FPU/SSE context across
 * interrupts and exceptions, however:
 *   -  after ISR execution a _Thread_Dispatch() may happen
 *      and it is therefore necessary to save the FPU/SSE
 *      registers to be restored when control is returned
 *      to the interrupted task.
 *   -  gcc may implicitly use FPU/SSE instructions in
 *      an ISR.
 *
 * Even though the SYSV ABI (i386) does not explicitly declare
 * the FPU control word to be non-volatile, we maintain MXCSR
 * and the FPU control word for each task.
 */
typedef struct {
        uint32_t  mxcsr;
        uint16_t  fpucw;
} Context_Control_fp;

#else

typedef struct {
  uint8_t     fp_save_area[108];    /* context size area for I80387 */
                                    /*  28 bytes for environment    */
} Context_Control_fp;

#endif


/*
 *  The following structure defines the set of information saved
 *  on the current stack by RTEMS upon receipt of exceptions.
 *
 * idtIndex is either the interrupt number or the trap/exception number.
 * faultCode is the code pushed by the processor on some exceptions.
 *
 * Since the first registers are directly pushed by the CPU they
 * may not respect 16-byte stack alignment, which is, however,
 * mandatory for the SSE register area.
 * Therefore, these registers are stored at an aligned address
 * and a pointer is stored in the CPU_Exception_frame.
 * If the executive was compiled without SSE support then
 * this pointer is NULL.
 */

struct Context_Control_sse;

typedef struct {
  struct Context_Control_sse *fp_ctxt;
  uint32_t    edi;
  uint32_t    esi;
  uint32_t    ebp;
  uint32_t    esp0;
  uint32_t    ebx;
  uint32_t    edx;
  uint32_t    ecx;
  uint32_t    eax;
  uint32_t    idtIndex;
  uint32_t    faultCode;
  uint32_t    eip;
  uint32_t    cs;
  uint32_t    eflags;
} CPU_Exception_frame;

#ifdef __SSE__
typedef struct Context_Control_sse {
  uint16_t  fcw;
  uint16_t  fsw;
  uint8_t   ftw;
  uint8_t   res_1;
  uint16_t  fop;
  uint32_t  fpu_ip;
  uint16_t  cs;
  uint16_t  res_2;
  uint32_t  fpu_dp;
  uint16_t  ds;
  uint16_t  res_3;
  uint32_t  mxcsr;
  uint32_t  mxcsr_mask;
  struct {
        uint8_t fpreg[10];
        uint8_t res_4[ 6];
  } fp_mmregs[8];
  uint8_t   xmmregs[8][16];
  uint8_t   res_5[224];
} Context_Control_sse
__attribute__((aligned(16)))
;
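
/*
 * Layout note: the 512-byte structure above appears to match the memory
 * image saved and restored by the FXSAVE/FXRSTOR instructions, which
 * require a 16-byte aligned operand; hence the aligned(16) attribute.
 */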
#endif

typedef void (*cpuExcHandlerType) (CPU_Exception_frame*);
extern cpuExcHandlerType _currentExcHandler;
extern void rtems_exception_init_mngt(void);

#ifdef RTEMS_SMP
  /* Throw a compile-time error to indicate incomplete support */
  #error "i386 targets do not support SMP.\
 See: https://devel.rtems.org/ticket/3335"

  /*
   * This size must match the size of CPU_Interrupt_frame, which would have
   * to be used by the SMP context switch code; that code is incomplete at
   * the moment.
   */
  #define CPU_INTERRUPT_FRAME_SIZE 4
#endif

/*
 * This port does not pass any frame info to the
 * interrupt handler.
 */

typedef struct {
  uint32_t todo_replace_with_apt_registers;
} CPU_Interrupt_frame;

typedef enum {
  I386_EXCEPTION_DIVIDE_BY_ZERO      = 0,
  I386_EXCEPTION_DEBUG               = 1,
  I386_EXCEPTION_NMI                 = 2,
  I386_EXCEPTION_BREAKPOINT          = 3,
  I386_EXCEPTION_OVERFLOW            = 4,
  I386_EXCEPTION_BOUND               = 5,
  I386_EXCEPTION_ILLEGAL_INSTR       = 6,
  I386_EXCEPTION_MATH_COPROC_UNAVAIL = 7,
  I386_EXCEPTION_DOUBLE_FAULT        = 8,
  I386_EXCEPTION_I386_COPROC_SEG_ERR = 9,
  I386_EXCEPTION_INVALID_TSS         = 10,
  I386_EXCEPTION_SEGMENT_NOT_PRESENT = 11,
  I386_EXCEPTION_STACK_SEGMENT_FAULT = 12,
  I386_EXCEPTION_GENERAL_PROT_ERR    = 13,
  I386_EXCEPTION_PAGE_FAULT          = 14,
  I386_EXCEPTION_INTEL_RES15         = 15,
  I386_EXCEPTION_FLOAT_ERROR         = 16,
  I386_EXCEPTION_ALIGN_CHECK         = 17,
  I386_EXCEPTION_MACHINE_CHECK       = 18,
  I386_EXCEPTION_ENTER_RDBG          = 50     /* to enter RDBG manually */

} Intel_symbolic_exception_name;


/*
 *  context size area for floating point
 *
 *  NOTE:  This is out of place on the i386 to avoid a forward reference.
 */

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

/* variables */

extern Context_Control_fp _CPU_Null_fp_context;

#endif /* ASM */

/* constants */

/*
 *  This defines the number of levels and the mask used to pick those
 *  bits out of a thread mode.
 */

#define CPU_MODES_INTERRUPT_MASK   0x00000001 /* interrupt level in mode */

/*
 *  extra stack required by the MPCI receive server thread
 */

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 1024

/*
 *  This is defined if the port has a special way to report the ISR nesting
 *  level.  Most ports maintain the variable _ISR_Nest_level.
 */

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/*
 *  Minimum size of a thread's stack.
 */

#define CPU_STACK_MINIMUM_SIZE          4096

#define CPU_SIZEOF_POINTER 4

/*
 *  i386 is pretty tolerant of alignment.  Just put things on 4 byte boundaries.
 */

#define CPU_ALIGNMENT                    4
#define CPU_HEAP_ALIGNMENT               CPU_ALIGNMENT

/*
 *  On the i386, thread stacks require no further alignment after allocation
 *  from the Workspace.  However, since gcc maintains 16-byte stack alignment,
 *  we try to respect that.  Even if you find an option that lets gcc pack the
 *  stack more tightly, setting CPU_STACK_ALIGNMENT to 16 does not waste much
 *  space, since it only determines the *initial* alignment.
 */

#define CPU_STACK_ALIGNMENT             16

#define CPU_INTERRUPT_STACK_ALIGNMENT CPU_CACHE_LINE_BYTES

/* macros */

#ifndef ASM
/*
 *  ISR handler macros
 *
 *  These macros perform the following functions:
 *     + initialize the RTEMS vector table
 *     + disable all maskable CPU interrupts
 *     + restore previous interrupt level (enable)
 *     + temporarily restore interrupts (flash)
 *     + set a particular level
 */

#if !defined(I386_DISABLE_INLINE_ISR_DISABLE_ENABLE)
#define _CPU_ISR_Disable( _level ) i386_disable_interrupts( _level )

#define _CPU_ISR_Enable( _level )  i386_enable_interrupts( _level )

#define _CPU_ISR_Flash( _level )   i386_flash_interrupts( _level )

#define _CPU_ISR_Set_level( _new_level ) \
  { \
    if ( _new_level ) __asm__ volatile ( "cli" ); \
    else              __asm__ volatile ( "sti" ); \
  }
#else
#define _CPU_ISR_Disable( _level ) _level = i386_disable_interrupts()
#define _CPU_ISR_Enable( _level ) i386_enable_interrupts( _level )
#define _CPU_ISR_Flash( _level ) i386_flash_interrupts( _level )
#define _CPU_ISR_Set_level( _new_level ) i386_set_interrupt_level(_new_level)
#endif

RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
{
  return ( level & EFLAGS_INTR_ENABLE ) != 0;
}
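
/*
 * A minimal usage sketch of the ISR level macros above: a typical
 * interrupt-protected critical section.
 *
 *   uint32_t level;
 *
 *   _CPU_ISR_Disable( level );   // disable and remember the previous level
 *     ...                        // code that must not be interrupted
 *   _CPU_ISR_Flash( level );     // briefly allow pending interrupts
 *     ...                        // more protected code
 *   _CPU_ISR_Enable( level );    // restore the previous interrupt level
 */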

uint32_t   _CPU_ISR_Get_level( void );

/*  Make sure interrupt stack has space for ISR
 *  'vector' arg at the top and that it is aligned
 *  properly.
 */

#define _CPU_Interrupt_stack_setup( _lo, _hi )  \
        do {                                        \
                _hi = (void*)(((uintptr_t)(_hi) - 4) & ~ (CPU_STACK_ALIGNMENT - 1)); \
        } while (0)
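
/*
 * Worked example of the adjustment above, with CPU_STACK_ALIGNMENT being 16:
 * for an initial _hi of 0x00200000, subtracting 4 gives 0x001FFFFC and
 * masking with ~15 gives 0x001FFFF0, so the returned stack top is 16-byte
 * aligned and at least 4 bytes remain above it for the ISR 'vector' argument.
 */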

#endif /* ASM */

/* end of ISR handler macros */

/*
 *  Context handler macros
 *
 *  These macros perform the following functions:
 *     + initialize a context area
 *     + restart the current thread
 *     + calculate the initial pointer into a FP context area
 *     + initialize an FP context area
 */

#define CPU_EFLAGS_INTERRUPTS_ON  0x00003202
#define CPU_EFLAGS_INTERRUPTS_OFF 0x00003002

#ifndef ASM

void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint32_t new_level,
  void (*entry_point)( void ),
  bool is_fp,
  void *tls_area
);

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

#if defined(RTEMS_SMP)
  uint32_t _CPU_SMP_Initialize( void );

  bool _CPU_SMP_Start_processor( uint32_t cpu_index );

  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

  void _CPU_SMP_Prepare_start_multitasking( void );

  uint32_t _CPU_SMP_Get_current_processor( void );

  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

  static inline void _CPU_SMP_Processor_event_broadcast( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }

  static inline void _CPU_SMP_Processor_event_receive( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }
#endif

#define _CPU_Context_Initialize_fp( _fp_area ) \
  { \
    memcpy( *_fp_area, &_CPU_Null_fp_context, CPU_CONTEXT_FP_SIZE ); \
  }
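
/*
 * The macro above primes a task's floating point save area by copying the
 * pristine _CPU_Null_fp_context image into it, so a newly created FP task
 * starts from a known coprocessor state; _fp_area is a pointer to the
 * pointer to that save area.
 */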

/* end of Context handler macros */

/*
 *  Fatal Error manager macros
 *
 *  These macros perform the following functions:
 *    + disable interrupts and halt the CPU
 */

extern void _CPU_Fatal_halt(uint32_t source, uint32_t error)
  RTEMS_NO_RETURN;

#endif /* ASM */

/* end of Fatal Error manager macros */

/*
 *  Bitfield handler macros
 *
 *  These macros perform the following functions:
 *     + scan for the highest numbered (MSB) bit set in a 16 bit bitfield
 */

#define CPU_USE_GENERIC_BITFIELD_CODE FALSE

#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
  { \
    uint16_t __value_in_register = ( _value ); \
    uint16_t __output = 0; \
    __asm__ volatile ( "bsfw    %0,%1 " \
                    : "=r" ( __value_in_register ), "=r" ( __output ) \
                    : "0"  ( __value_in_register ), "1"  ( __output ) \
    ); \
    ( _output ) = __output; \
  }

/* end of Bitfield handler macros */

/*
 *  Priority handler macros
 *
 *  These macros perform the following functions:
 *    + return a mask with the bit for this major/minor portion of
 *      thread priority set.
 *    + translate the bit number returned by "Bitfield_find_first_bit"
 *      into an index into the thread ready chain bit maps
 */

#define _CPU_Priority_Mask( _bit_number ) \
  ( 1 << (_bit_number) )

#define _CPU_Priority_bits_index( _priority ) \
  (_priority)
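
/*
 * A brief worked example of the macros above: for a _value of 0x8010,
 * _CPU_Bitfield_Find_first_bit() uses bsfw, which reports the index of the
 * least significant set bit, so _output becomes 4; _CPU_Priority_Mask( 4 )
 * is then 0x0010 and _CPU_Priority_bits_index( 4 ) is simply 4.
 */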

/* functions */

#ifndef ASM
/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 */

void _CPU_Initialize(void);

/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs a "raw" interrupt handler directly into the
 *  processor's vector table.
 */

void _CPU_ISR_install_raw_handler(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.
 */

void _CPU_ISR_install_vector(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

void *_CPU_Thread_Idle_body( uintptr_t ignored );

/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner and avoid stack conflicts.
 */

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_NO_RETURN;

/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 */

#ifdef __SSE__
#define _CPU_Context_save_fp(fp_context_pp) \
  do {                                      \
    __asm__ __volatile__(                   \
      "fstcw %0"                            \
      :"=m"((*(fp_context_pp))->fpucw)      \
    );                                      \
    __asm__ __volatile__(                   \
      "stmxcsr %0"                          \
      :"=m"((*(fp_context_pp))->mxcsr)      \
    );                                      \
  } while (0)
#else
void _CPU_Context_save_fp(
  Context_Control_fp **fp_context_ptr
);
#endif

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 */
#ifdef __SSE__
#define _CPU_Context_restore_fp(fp_context_pp) \
  do {                                         \
    __asm__ __volatile__(                      \
      "fldcw %0"                               \
      ::"m"((*(fp_context_pp))->fpucw)         \
      :"fpcr"                                  \
    );                                         \
    __builtin_ia32_ldmxcsr(_Thread_Executing->fp_context->mxcsr);  \
  } while (0)
#else
void _CPU_Context_restore_fp(
  Context_Control_fp **fp_context_ptr
);
#endif

#ifdef __SSE__
#define _CPU_Context_Initialization_at_thread_begin() \
  do {                                                \
    __asm__ __volatile__(                             \
      "finit"                                         \
      :                                               \
      :                                               \
      :"st","st(1)","st(2)","st(3)",                  \
       "st(4)","st(5)","st(6)","st(7)",               \
       "fpsr","fpcr"                                  \
    );                                                \
    if ( _Thread_Executing->fp_context ) {            \
      _CPU_Context_restore_fp(&_Thread_Executing->fp_context); \
    }                                                 \
  } while (0)
#endif

void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

typedef uint32_t CPU_Counter_ticks;

uint32_t _CPU_Counter_frequency( void );

CPU_Counter_ticks _CPU_Counter_read( void );

static inline CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
)
{
  return second - first;
}
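
/*
 * A minimal usage sketch of the counter interface above: measuring a short
 * interval in counter ticks.
 *
 *   CPU_Counter_ticks t0 = _CPU_Counter_read();
 *   ...                        // work being timed
 *   CPU_Counter_ticks dt =
 *     _CPU_Counter_difference( _CPU_Counter_read(), t0 );
 *   // dt is in ticks; _CPU_Counter_frequency() gives ticks per second
 */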

/** Type that can store a 32-bit integer or a pointer. */
typedef uintptr_t CPU_Uint32ptr;

#endif /* ASM */

#ifdef __cplusplus
}
#endif

#endif