source: rtems/cpukit/score/cpu/i386/include/rtems/score/cpu.h @ e9fb3133

Last change on this file was e9fb3133, checked in by Amaan Cheval <amaan.cheval@…> on 03/13/18 at 16:14:13

i386/smp: Define CPU_Interrupt_frame as non-void struct

This change, excluding the #error directive, lets us make progress towards
compiling i386 targets with --enable-smp.

The #error directive needs to be there since the CPU_Interrupt_frame is used by
the SMP context switching code, and this placeholder struct, if used, would only
lead to more subtle bugs and errors. With the directive, the SMP context
switching code can be improved separately.

Updates #3331

/**
 * @file
 *
 * @brief Intel I386 CPU Dependent Source
 *
 * This include file contains information pertaining to the Intel
 * i386 processor.
 */

/*
 *  COPYRIGHT (c) 1989-2011.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

#ifndef ASM
#include <string.h> /* for memcpy */
#endif

#ifdef __cplusplus
extern "C" {
#endif

#include <rtems/score/basedefs.h>
#if defined(RTEMS_PARAVIRT)
#include <rtems/score/paravirt.h>
#endif
#include <rtems/score/i386.h>

/* conditional compilation parameters */

/*
 *  Does the CPU follow the simple vectored interrupt model?
 *
 *  If TRUE, then RTEMS allocates and internally manages the vector table.
 *  If FALSE, then the BSP is assumed to allocate and manage the vector
 *  table.
 *
 *  i386 Specific Information:
 *
 *  The PowerPC and x86 were among the first ports to use the PIC interrupt
 *  model.  They do not use the simple vectored interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

/*
 *  i386 has an RTEMS allocated and managed interrupt stack.
 */

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
#define CPU_ALLOCATE_INTERRUPT_STACK     TRUE

/*
 *  Does RTEMS invoke the user's ISR with the vector number and
 *  a pointer to the saved interrupt frame (1) or just the vector
 *  number (0)?
 */

#define CPU_ISR_PASSES_FRAME_POINTER FALSE

/*
 *  Some family members have no FP, some have an FPU such as the i387
 *  for the i386, others have it built in (i486DX, Pentium).
 */

#ifdef __SSE__
#define CPU_HARDWARE_FP                  TRUE
#define CPU_SOFTWARE_FP                  FALSE

#define CPU_ALL_TASKS_ARE_FP             TRUE
#define CPU_IDLE_TASK_IS_FP              TRUE
#define CPU_USE_DEFERRED_FP_SWITCH       FALSE
#else /* __SSE__ */

#if ( I386_HAS_FPU == 1 )
#define CPU_HARDWARE_FP     TRUE    /* i387 for i386 */
#else
#define CPU_HARDWARE_FP     FALSE
#endif
#define CPU_SOFTWARE_FP     FALSE

#define CPU_ALL_TASKS_ARE_FP             FALSE
#define CPU_IDLE_TASK_IS_FP              FALSE
#if defined(RTEMS_SMP)
  #define CPU_USE_DEFERRED_FP_SWITCH     FALSE
#else
  #define CPU_USE_DEFERRED_FP_SWITCH     TRUE
#endif
#endif /* __SSE__ */

#define CPU_ENABLE_ROBUST_THREAD_DISPATCH FALSE

#define CPU_STACK_GROWS_UP               FALSE

/* FIXME: The Pentium 4 used 128 bytes, is this processor still relevant? */
#define CPU_CACHE_LINE_BYTES 64

#define CPU_STRUCTURE_ALIGNMENT

/*
 *  Does this port provide a CPU dependent IDLE task implementation?
 *
 *  If TRUE, then the routine _CPU_Thread_Idle_body
 *  must be provided and is used as the default IDLE thread body instead
 *  of the generic IDLE thread body.
 *
 *  If FALSE, then use the generic IDLE thread body if the BSP does
 *  not provide one.
 */

#define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE

#define CPU_MAXIMUM_PROCESSORS 32

#define I386_CONTEXT_CONTROL_EFLAGS_OFFSET 0
#define I386_CONTEXT_CONTROL_ESP_OFFSET 4
#define I386_CONTEXT_CONTROL_EBP_OFFSET 8
#define I386_CONTEXT_CONTROL_EBX_OFFSET 12
#define I386_CONTEXT_CONTROL_ESI_OFFSET 16
#define I386_CONTEXT_CONTROL_EDI_OFFSET 20
#define I386_CONTEXT_CONTROL_GS_0_OFFSET 24
#define I386_CONTEXT_CONTROL_GS_1_OFFSET 28

#ifdef RTEMS_SMP
  #define I386_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 32
#endif

/* structures */

#ifndef ASM

/*
 *  Basic integer context for the i386 family.
 */

typedef struct {
  uint32_t    eflags;     /* extended flags register                   */
  void       *esp;        /* extended stack pointer register           */
  void       *ebp;        /* extended base pointer register            */
  uint32_t    ebx;        /* extended bx register                      */
  uint32_t    esi;        /* extended source index register            */
  uint32_t    edi;        /* extended destination index register       */
  segment_descriptors gs; /* gs segment descriptor                     */
#ifdef RTEMS_SMP
  volatile bool is_executing;
#endif
}   Context_Control;
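
/*
 * Illustrative sketch (not part of the original header): the
 * I386_CONTEXT_CONTROL_*_OFFSET constants above are intended to mirror the
 * member layout of Context_Control so that assembly code can address the
 * members by fixed byte offsets.  Assuming RTEMS_STATIC_ASSERT() from
 * <rtems/score/basedefs.h> and offsetof() from <stddef.h> are available,
 * the correspondence could be checked at compile time roughly like this:
 *
 *   RTEMS_STATIC_ASSERT(
 *     offsetof( Context_Control, ebx ) == I386_CONTEXT_CONTROL_EBX_OFFSET,
 *     Context_Control_ebx_offset
 *   );
 *   RTEMS_STATIC_ASSERT(
 *     offsetof( Context_Control, gs ) == I386_CONTEXT_CONTROL_GS_0_OFFSET,
 *     Context_Control_gs_offset
 *   );
 */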

#define _CPU_Context_Get_SP( _context ) \
  (_context)->esp

#ifdef RTEMS_SMP
  static inline bool _CPU_Context_Get_is_executing(
    const Context_Control *context
  )
  {
    return context->is_executing;
  }

  static inline void _CPU_Context_Set_is_executing(
    Context_Control *context,
    bool is_executing
  )
  {
    context->is_executing = is_executing;
  }
#endif

/*
 *  FP context save area for the i387 numeric coprocessor.
 */
#ifdef __SSE__
/* All FPU and SSE registers are volatile; hence, as long
 * as we are within normally executing C code (including
 * a task switch) there is no need for saving/restoring
 * any of those registers.
 * We must save/restore the full FPU/SSE context across
 * interrupts and exceptions, however:
 *   -  after ISR execution a _Thread_Dispatch() may happen
 *      and it is therefore necessary to save the FPU/SSE
 *      registers to be restored when control is returned
 *      to the interrupted task.
 *   -  gcc may implicitly use FPU/SSE instructions in
 *      an ISR.
 *
 * Even though the SYSV ABI (i386) does not explicitly state
 * that the FPU control word is non-volatile, we maintain
 * MXCSR and the FPU control word for each task.
 */
typedef struct {
        uint32_t  mxcsr;
        uint16_t  fpucw;
} Context_Control_fp;

#else

typedef struct {
  uint8_t     fp_save_area[108];    /* context size area for I80387 */
                                    /*  28 bytes for environment    */
} Context_Control_fp;

#endif


/*
 *  The following structure defines the set of information saved
 *  on the current stack by RTEMS upon receipt of exceptions.
 *
 * idtIndex is either the interrupt number or the trap/exception number.
 * faultCode is the code pushed by the processor on some exceptions.
 *
 * Since the first registers are directly pushed by the CPU they
 * may not respect 16-byte stack alignment, which is, however,
 * mandatory for the SSE register area.
 * Therefore, these registers are stored at an aligned address
 * and a pointer is stored in the CPU_Exception_frame.
 * If the executive was compiled without SSE support then
 * this pointer is NULL.
 */

struct Context_Control_sse;

typedef struct {
  struct Context_Control_sse *fp_ctxt;
  uint32_t    edi;
  uint32_t    esi;
  uint32_t    ebp;
  uint32_t    esp0;
  uint32_t    ebx;
  uint32_t    edx;
  uint32_t    ecx;
  uint32_t    eax;
  uint32_t    idtIndex;
  uint32_t    faultCode;
  uint32_t    eip;
  uint32_t    cs;
  uint32_t    eflags;
} CPU_Exception_frame;

#ifdef __SSE__
typedef struct Context_Control_sse {
  uint16_t  fcw;
  uint16_t  fsw;
  uint8_t   ftw;
  uint8_t   res_1;
  uint16_t  fop;
  uint32_t  fpu_ip;
  uint16_t  cs;
  uint16_t  res_2;
  uint32_t  fpu_dp;
  uint16_t  ds;
  uint16_t  res_3;
  uint32_t  mxcsr;
  uint32_t  mxcsr_mask;
  struct {
        uint8_t fpreg[10];
        uint8_t res_4[ 6];
  } fp_mmregs[8];
  uint8_t   xmmregs[8][16];
  uint8_t   res_5[224];
} Context_Control_sse
__attribute__((aligned(16)))
;
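
/*
 * Illustrative note (not part of the original header): this layout mirrors
 * the 512-byte FXSAVE/FXRSTOR memory image (32 bytes of control/status
 * fields, 8 * 16 bytes of FP/MMX registers, 8 * 16 bytes of XMM registers
 * and 224 reserved bytes), so a compile-time check could look roughly like
 * this, assuming RTEMS_STATIC_ASSERT() from <rtems/score/basedefs.h>:
 *
 *   RTEMS_STATIC_ASSERT(
 *     sizeof( Context_Control_sse ) == 512,
 *     Context_Control_sse_matches_the_FXSAVE_area
 *   );
 */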
#endif

typedef void (*cpuExcHandlerType) (CPU_Exception_frame*);
extern cpuExcHandlerType _currentExcHandler;
extern void rtems_exception_init_mngt(void);

#ifdef RTEMS_SMP
  /* Throw compile-time error to indicate incomplete support */
  #error "i386 targets do not support SMP.\
 See: https://devel.rtems.org/ticket/3335"

  /*
   * This size must match the size of the CPU_Interrupt_frame, which must be
   * used in the SMP context switch code, which is incomplete at the moment.
   */
  #define CPU_INTERRUPT_FRAME_SIZE 4
#endif

/*
 * This port does not pass any frame info to the
 * interrupt handler.
 */

typedef struct {
  uint32_t todo_replace_with_apt_registers;
} CPU_Interrupt_frame;

typedef enum {
  I386_EXCEPTION_DIVIDE_BY_ZERO      = 0,
  I386_EXCEPTION_DEBUG               = 1,
  I386_EXCEPTION_NMI                 = 2,
  I386_EXCEPTION_BREAKPOINT          = 3,
  I386_EXCEPTION_OVERFLOW            = 4,
  I386_EXCEPTION_BOUND               = 5,
  I386_EXCEPTION_ILLEGAL_INSTR       = 6,
  I386_EXCEPTION_MATH_COPROC_UNAVAIL = 7,
  I386_EXCEPTION_DOUBLE_FAULT        = 8,
  I386_EXCEPTION_I386_COPROC_SEG_ERR = 9,
  I386_EXCEPTION_INVALID_TSS         = 10,
  I386_EXCEPTION_SEGMENT_NOT_PRESENT = 11,
  I386_EXCEPTION_STACK_SEGMENT_FAULT = 12,
  I386_EXCEPTION_GENERAL_PROT_ERR    = 13,
  I386_EXCEPTION_PAGE_FAULT          = 14,
  I386_EXCEPTION_INTEL_RES15         = 15,
  I386_EXCEPTION_FLOAT_ERROR         = 16,
  I386_EXCEPTION_ALIGN_CHECK         = 17,
  I386_EXCEPTION_MACHINE_CHECK       = 18,
  I386_EXCEPTION_ENTER_RDBG          = 50     /* to enter RDBG manually */

} Intel_symbolic_exception_name;


/*
 *  context size area for floating point
 *
 *  NOTE:  This is out of place on the i386 to avoid a forward reference.
 */

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

/* variables */

extern Context_Control_fp _CPU_Null_fp_context;

#endif /* ASM */

/* constants */

/*
 *  This defines the number of levels and the mask used to pick those
 *  bits out of a thread mode.
 */

#define CPU_MODES_INTERRUPT_LEVEL  0x00000001 /* interrupt level in mode */
#define CPU_MODES_INTERRUPT_MASK   0x00000001 /* interrupt level in mode */

/*
 *  extra stack required by the MPCI receive server thread
 */

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 1024

/*
 *  This is defined if the port has a special way to report the ISR nesting
 *  level.  Most ports maintain the variable _ISR_Nest_level.
 */

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/*
 *  Minimum size of a thread's stack.
 */

#define CPU_STACK_MINIMUM_SIZE          4096

#define CPU_SIZEOF_POINTER 4

/*
 *  i386 is pretty tolerant of alignment.  Just put things on 4 byte boundaries.
 */

#define CPU_ALIGNMENT                    4
#define CPU_HEAP_ALIGNMENT               CPU_ALIGNMENT
#define CPU_PARTITION_ALIGNMENT          CPU_ALIGNMENT

/*
 *  On i386 thread stacks require no further alignment after allocation
 *  from the Workspace. However, since gcc maintains 16-byte alignment
 *  we try to respect that. If you find an option to let gcc squeeze
 *  the stack more tightly then setting CPU_STACK_ALIGNMENT to 16 still
 *  doesn't waste much space since this only determines the *initial*
 *  alignment.
 */

#define CPU_STACK_ALIGNMENT             16

/* macros */

#ifndef ASM
/*
 *  ISR handler macros
 *
 *  These macros perform the following functions:
 *     + initialize the RTEMS vector table
 *     + disable all maskable CPU interrupts
 *     + restore previous interrupt level (enable)
 *     + temporarily restore interrupts (flash)
 *     + set a particular level
 */

#if !defined(I386_DISABLE_INLINE_ISR_DISABLE_ENABLE)
#define _CPU_ISR_Disable( _level ) i386_disable_interrupts( _level )

#define _CPU_ISR_Enable( _level )  i386_enable_interrupts( _level )

#define _CPU_ISR_Flash( _level )   i386_flash_interrupts( _level )

#define _CPU_ISR_Set_level( _new_level ) \
  { \
    if ( _new_level ) __asm__ volatile ( "cli" ); \
    else              __asm__ volatile ( "sti" ); \
  }
#else
#define _CPU_ISR_Disable( _level ) _level = i386_disable_interrupts()
#define _CPU_ISR_Enable( _level ) i386_enable_interrupts( _level )
#define _CPU_ISR_Flash( _level ) i386_flash_interrupts( _level )
#define _CPU_ISR_Set_level( _new_level ) i386_set_interrupt_level(_new_level)
#endif
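
/*
 * Usage sketch (illustrative only, not part of the original header): the
 * disable/flash/enable macros bracket a critical section, and
 * _CPU_ISR_Flash() briefly re-enables interrupts in the middle of a longer
 * one:
 *
 *   uint32_t level;
 *
 *   _CPU_ISR_Disable( level );
 *   ... first part of the critical section ...
 *   _CPU_ISR_Flash( level );
 *   ... second part of the critical section ...
 *   _CPU_ISR_Enable( level );
 */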

RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
{
  return ( level & EFLAGS_INTR_ENABLE ) != 0;
}

uint32_t   _CPU_ISR_Get_level( void );

/*  Make sure interrupt stack has space for ISR
 *  'vector' arg at the top and that it is aligned
 *  properly.
 */

#define _CPU_Interrupt_stack_setup( _lo, _hi )  \
        do {                                        \
                _hi = (void*)(((uintptr_t)(_hi) - 4) & ~ (CPU_STACK_ALIGNMENT - 1)); \
        } while (0)
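
/*
 * Worked example (added for illustration, not in the original header): with
 * CPU_STACK_ALIGNMENT == 16 and an initial _hi of 0x0040FFFC, the macro
 * computes (0x0040FFFC - 4) & ~0xF == 0x0040FFF0, i.e. the top of the
 * interrupt stack is lowered by at least 4 bytes for the ISR 'vector'
 * argument and then rounded down to a 16-byte boundary.
 */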

#endif /* ASM */

/* end of ISR handler macros */

/*
 *  Context handler macros
 *
 *  These macros perform the following functions:
 *     + initialize a context area
 *     + restart the current thread
 *     + calculate the initial pointer into a FP context area
 *     + initialize an FP context area
 */

#define CPU_EFLAGS_INTERRUPTS_ON  0x00003202
#define CPU_EFLAGS_INTERRUPTS_OFF 0x00003002
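
/*
 * Descriptive note (added for clarity, not in the original header): in both
 * values, bits 12-13 (0x3000) set IOPL to 3 and bit 1 (0x0002) is the
 * EFLAGS bit that always reads as one; the two constants differ only in
 * bit 9 (0x0200), the interrupt enable flag IF.
 */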

#ifndef ASM

void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint32_t new_level,
  void (*entry_point)( void ),
  bool is_fp,
  void *tls_area
);

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

#if defined(RTEMS_SMP)
  uint32_t _CPU_SMP_Initialize( void );

  bool _CPU_SMP_Start_processor( uint32_t cpu_index );

  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

  /* Nothing to do */
  #define _CPU_SMP_Prepare_start_multitasking() do { } while ( 0 )

  uint32_t _CPU_SMP_Get_current_processor( void );

  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

  static inline void _CPU_SMP_Processor_event_broadcast( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }

  static inline void _CPU_SMP_Processor_event_receive( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }
#endif

#define _CPU_Context_Initialize_fp( _fp_area ) \
  { \
    memcpy( *_fp_area, &_CPU_Null_fp_context, CPU_CONTEXT_FP_SIZE ); \
  }
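
/*
 * Usage sketch (illustrative only; the variable names are made up): given
 * the address of a task's FP context pointer, the macro copies the pristine
 * state in _CPU_Null_fp_context into the referenced save area:
 *
 *   Context_Control_fp  *fp_save_area = ...start of the FP save area...;
 *   Context_Control_fp **fp_area_p    = &fp_save_area;
 *
 *   _CPU_Context_Initialize_fp( fp_area_p );
 */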

/* end of Context handler macros */

/*
 *  Fatal Error manager macros
 *
 *  These macros perform the following functions:
 *    + disable interrupts and halt the CPU
 */

extern void _CPU_Fatal_halt(uint32_t source, uint32_t error)
  RTEMS_NO_RETURN;

#endif /* ASM */

/* end of Fatal Error manager macros */

/*
 *  Bitfield handler macros
 *
 *  These macros perform the following functions:
 *     + scan for the highest numbered (MSB) bit set in a 16 bit bitfield
 */

#define CPU_USE_GENERIC_BITFIELD_CODE FALSE

#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
  { \
    register uint16_t __value_in_register = ( _value ); \
    uint16_t          __output = 0; \
    __asm__ volatile ( "bsfw    %0,%1 " \
                    : "=r" ( __value_in_register ), "=r" ( __output ) \
                    : "0"  ( __value_in_register ), "1"  ( __output ) \
    ); \
    ( _output ) = __output; \
  }

/* end of Bitfield handler macros */

/*
 *  Priority handler macros
 *
 *  These macros perform the following functions:
 *    + return a mask with the bit for this major/minor portion of
 *      thread priority set.
 *    + translate the bit number returned by "Bitfield_find_first_bit"
 *      into an index into the thread ready chain bit maps
 */

#define _CPU_Priority_Mask( _bit_number ) \
  ( 1 << (_bit_number) )

#define _CPU_Priority_bits_index( _priority ) \
  (_priority)
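
/*
 * Usage sketch (illustrative only; the variable names are made up): the
 * priority bit map support is expected to combine these macros roughly as
 * follows to turn a 16-bit ready map into a ready-chain index:
 *
 *   uint16_t     ready_map = some_ready_bit_map;
 *   unsigned int bit_number;
 *   unsigned int index;
 *
 *   _CPU_Bitfield_Find_first_bit( ready_map, bit_number );
 *   index = _CPU_Priority_bits_index( bit_number );
 */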

/* functions */

#ifndef ASM
/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 */

void _CPU_Initialize(void);

/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs a "raw" interrupt handler directly into the
 *  processor's vector table.
 */

void _CPU_ISR_install_raw_handler(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.
 */

void _CPU_ISR_install_vector(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_Thread_Idle_body
 *
 *  Use the halt instruction or low power mode of a particular i386 model.
 */

#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)

void *_CPU_Thread_Idle_body( uintptr_t ignored );

#endif /* CPU_PROVIDES_IDLE_THREAD_BODY */

/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner and avoid stack conflicts.
 */

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_NO_RETURN;

/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 */

#ifdef __SSE__
#define _CPU_Context_save_fp(fp_context_pp) \
  do {                                      \
    __asm__ __volatile__(                   \
      "fstcw %0"                            \
      :"=m"((*(fp_context_pp))->fpucw)      \
    );                                      \
        __asm__ __volatile__(                   \
      "stmxcsr %0"                          \
      :"=m"((*(fp_context_pp))->mxcsr)      \
    );                                      \
  } while (0)
#else
void _CPU_Context_save_fp(
  Context_Control_fp **fp_context_ptr
);
#endif

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 */
#ifdef __SSE__
#define _CPU_Context_restore_fp(fp_context_pp) \
  do {                                         \
    __asm__ __volatile__(                      \
      "fldcw %0"                               \
      ::"m"((*(fp_context_pp))->fpucw)         \
      :"fpcr"                                  \
    );                                         \
    __builtin_ia32_ldmxcsr(_Thread_Executing->fp_context->mxcsr);  \
  } while (0)
#else
void _CPU_Context_restore_fp(
  Context_Control_fp **fp_context_ptr
);
#endif

#ifdef __SSE__
#define _CPU_Context_Initialization_at_thread_begin() \
  do {                                                \
    __asm__ __volatile__(                             \
      "finit"                                         \
      :                                               \
      :                                               \
      :"st","st(1)","st(2)","st(3)",                  \
       "st(4)","st(5)","st(6)","st(7)",               \
       "fpsr","fpcr"                                  \
    );                                                \
        if ( _Thread_Executing->fp_context ) {            \
          _CPU_Context_restore_fp(&_Thread_Executing->fp_context); \
   }                                                  \
  } while (0)
#endif

static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
{
  /* TODO */
}

static inline void _CPU_Context_validate( uintptr_t pattern )
{
  while (1) {
    /* TODO */
  }
}

void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

typedef uint32_t CPU_Counter_ticks;

CPU_Counter_ticks _CPU_Counter_read( void );

static inline CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
)
{
  return second - first;
}
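
/*
 * Usage sketch (illustrative only, not part of the original header):
 * measuring the duration of an operation in CPU counter ticks; the unsigned
 * subtraction in _CPU_Counter_difference() also handles a single counter
 * wrap-around:
 *
 *   CPU_Counter_ticks begin = _CPU_Counter_read();
 *   ... operation to be measured ...
 *   CPU_Counter_ticks delta =
 *     _CPU_Counter_difference( _CPU_Counter_read(), begin );
 */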

/** Type that can store a 32-bit integer or a pointer. */
typedef uintptr_t CPU_Uint32ptr;

#endif /* ASM */

#ifdef __cplusplus
}
#endif

#endif