source: rtems/cpukit/score/cpu/i386/rtems/score/cpu.h @ 7b0c74ff

Last change on this file was commit 7b0c74ff, checked in by Sebastian Huber <sebastian.huber@…> on 06/09/17 at 13:42:36

i386: Support thread-local storage (TLS)

Update #2468.

/**
 * @file
 *
 * @brief Intel I386 CPU Dependent Source
 *
 * This include file contains information pertaining to the Intel
 * i386 processor.
 */

/*
 *  COPYRIGHT (c) 1989-2011.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

#ifndef ASM
#include <string.h> /* for memcpy */
#endif

#ifdef __cplusplus
extern "C" {
#endif

#include <rtems/score/types.h>
#include <rtems/score/i386.h>

/* conditional compilation parameters */

/*
 *  Does the CPU follow the simple vectored interrupt model?
 *
 *  If TRUE, then RTEMS allocates and internally manages the vector table.
 *  If FALSE, then the BSP is assumed to allocate and manage the vector
 *  table.
 *
 *  i386 Specific Information:
 *
 *  The PowerPC and x86 were the first ports to use the PIC interrupt model.
 *  They do not use the simple vectored interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

/*
 *  i386 has an RTEMS allocated and managed interrupt stack.
 */

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
#define CPU_ALLOCATE_INTERRUPT_STACK     TRUE

/*
 *  Does RTEMS invoke the user's ISR with the vector number and
 *  a pointer to the saved interrupt frame (1) or just the vector
 *  number (0)?
 */

#define CPU_ISR_PASSES_FRAME_POINTER FALSE

/*
 *  Some family members have no FP, some have an FPU such as the i387
 *  for the i386, others have it built in (i486DX, Pentium).
 */

#ifdef __SSE__
#define CPU_HARDWARE_FP                  TRUE
#define CPU_SOFTWARE_FP                  FALSE

#define CPU_ALL_TASKS_ARE_FP             TRUE
#define CPU_IDLE_TASK_IS_FP              TRUE
#define CPU_USE_DEFERRED_FP_SWITCH       FALSE
#else /* __SSE__ */

#if ( I386_HAS_FPU == 1 )
#define CPU_HARDWARE_FP     TRUE    /* i387 for i386 */
#else
#define CPU_HARDWARE_FP     FALSE
#endif
#define CPU_SOFTWARE_FP     FALSE

#define CPU_ALL_TASKS_ARE_FP             FALSE
#define CPU_IDLE_TASK_IS_FP              FALSE
#if defined(RTEMS_SMP)
  #define CPU_USE_DEFERRED_FP_SWITCH     FALSE
#else
  #define CPU_USE_DEFERRED_FP_SWITCH     TRUE
#endif
#endif /* __SSE__ */

#define CPU_ENABLE_ROBUST_THREAD_DISPATCH FALSE

#define CPU_STACK_GROWS_UP               FALSE

/* FIXME: The Pentium 4 used 128 bytes, is this processor still relevant? */
#define CPU_CACHE_LINE_BYTES 64

#define CPU_STRUCTURE_ALIGNMENT

/*
 *  Does this port provide a CPU dependent IDLE task implementation?
 *
 *  If TRUE, then the routine _CPU_Thread_Idle_body
 *  must be provided and is the default IDLE thread body instead of
 *  _Thread_Idle_body.
 *
 *  If FALSE, then use the generic IDLE thread body if the BSP does
 *  not provide one.
 */

#define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE

#define CPU_MAXIMUM_PROCESSORS 32

#define I386_CONTEXT_CONTROL_EFLAGS_OFFSET 0
#define I386_CONTEXT_CONTROL_ESP_OFFSET 4
#define I386_CONTEXT_CONTROL_EBP_OFFSET 8
#define I386_CONTEXT_CONTROL_EBX_OFFSET 12
#define I386_CONTEXT_CONTROL_ESI_OFFSET 16
#define I386_CONTEXT_CONTROL_EDI_OFFSET 20
#define I386_CONTEXT_CONTROL_GS_0_OFFSET 24
#define I386_CONTEXT_CONTROL_GS_1_OFFSET 28

#ifdef RTEMS_SMP
  #define I386_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 32
#endif

/* structures */

#ifndef ASM

/*
 *  Basic integer context for the i386 family.
 */

typedef struct {
  uint32_t    eflags;     /* extended flags register                   */
  void       *esp;        /* extended stack pointer register           */
  void       *ebp;        /* extended base pointer register            */
  uint32_t    ebx;        /* extended bx register                      */
  uint32_t    esi;        /* extended source index register            */
  uint32_t    edi;        /* extended destination index register       */
  segment_descriptors gs; /* gs segment descriptor                     */
#ifdef RTEMS_SMP
  volatile bool is_executing;
#endif
}   Context_Control;
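
/*
 * The I386_CONTEXT_CONTROL_*_OFFSET constants defined above must match the
 * layout of Context_Control so that the assembly language context switch
 * code can address the individual members.  As an illustrative sketch only
 * (assuming RTEMS_STATIC_ASSERT() from <rtems/score/basedefs.h> and
 * offsetof() from <stddef.h> are available), the correspondence could be
 * checked with
 *
 *   RTEMS_STATIC_ASSERT(
 *     offsetof( Context_Control, ebx ) == I386_CONTEXT_CONTROL_EBX_OFFSET,
 *     I386_CONTEXT_CONTROL_EBX_OFFSET
 *   );
 */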

#define _CPU_Context_Get_SP( _context ) \
  (_context)->esp

#ifdef RTEMS_SMP
  static inline bool _CPU_Context_Get_is_executing(
    const Context_Control *context
  )
  {
    return context->is_executing;
  }

  static inline void _CPU_Context_Set_is_executing(
    Context_Control *context,
    bool is_executing
  )
  {
    context->is_executing = is_executing;
  }
#endif

/*
 *  FP context save area for the i387 numeric coprocessors.
 */
#ifdef __SSE__
/* All FPU and SSE registers are volatile; hence, as long
 * as we are within normally executing C code (including
 * a task switch) there is no need for saving/restoring
 * any of those registers.
 * We must save/restore the full FPU/SSE context across
 * interrupts and exceptions, however:
 *   -  after ISR execution a _Thread_Dispatch() may happen
 *      and it is therefore necessary to save the FPU/SSE
 *      registers to be restored when control is returned
 *      to the interrupted task.
 *   -  gcc may implicitly use FPU/SSE instructions in
 *      an ISR.
 *
 * Even though the SYSV ABI (i386) does not explicitly state
 * that the FPU control word is non-volatile, we maintain
 * MXCSR and the FPU control word for each task.
 */
typedef struct {
        uint32_t  mxcsr;
        uint16_t  fpucw;
} Context_Control_fp;

#else

typedef struct {
  uint8_t     fp_save_area[108];    /* context size area for I80387 */
                                    /*  28 bytes for environment    */
} Context_Control_fp;

#endif


/*
 *  The following structure defines the set of information saved
 *  on the current stack by RTEMS upon receipt of exceptions.
 *
 * idtIndex is either the interrupt number or the trap/exception number.
 * faultCode is the code pushed by the processor on some exceptions.
 *
 * Since the first registers are directly pushed by the CPU they
 * may not respect 16-byte stack alignment, which is, however,
 * mandatory for the SSE register area.
 * Therefore, these registers are stored at an aligned address
 * and a pointer is stored in the CPU_Exception_frame.
 * If the executive was compiled without SSE support then
 * this pointer is NULL.
 */

struct Context_Control_sse;

typedef struct {
  struct Context_Control_sse *fp_ctxt;
  uint32_t    edi;
  uint32_t    esi;
  uint32_t    ebp;
  uint32_t    esp0;
  uint32_t    ebx;
  uint32_t    edx;
  uint32_t    ecx;
  uint32_t    eax;
  uint32_t    idtIndex;
  uint32_t    faultCode;
  uint32_t    eip;
  uint32_t    cs;
  uint32_t    eflags;
} CPU_Exception_frame;

#ifdef __SSE__
typedef struct Context_Control_sse {
  uint16_t  fcw;
  uint16_t  fsw;
  uint8_t   ftw;
  uint8_t   res_1;
  uint16_t  fop;
  uint32_t  fpu_ip;
  uint16_t  cs;
  uint16_t  res_2;
  uint32_t  fpu_dp;
  uint16_t  ds;
  uint16_t  res_3;
  uint32_t  mxcsr;
  uint32_t  mxcsr_mask;
  struct {
        uint8_t fpreg[10];
        uint8_t res_4[ 6];
  } fp_mmregs[8];
  uint8_t   xmmregs[8][16];
  uint8_t   res_5[224];
} Context_Control_sse
__attribute__((aligned(16)))
;
#endif

typedef void (*cpuExcHandlerType) (CPU_Exception_frame*);
extern cpuExcHandlerType _currentExcHandler;
extern void rtems_exception_init_mngt(void);
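
/*
 * Illustrative sketch (not part of the original header): a handler reached
 * through _currentExcHandler receives a pointer to the frame defined above
 * and can inspect it, for example (handler name is hypothetical, SSE build
 * assumed for the mxcsr access):
 *
 *   void my_exception_handler( CPU_Exception_frame *frame )
 *   {
 *     _CPU_Exception_frame_print( frame );
 *     if ( frame->fp_ctxt != NULL ) {
 *       uint32_t mxcsr = frame->fp_ctxt->mxcsr;
 *       ...
 *     }
 *   }
 *
 * The fp_ctxt test follows from the comment above: the pointer is NULL when
 * the executive was built without SSE support.
 */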

/*
 * This port does not pass any frame info to the
 * interrupt handler.
 */

typedef void CPU_Interrupt_frame;

typedef enum {
  I386_EXCEPTION_DIVIDE_BY_ZERO      = 0,
  I386_EXCEPTION_DEBUG               = 1,
  I386_EXCEPTION_NMI                 = 2,
  I386_EXCEPTION_BREAKPOINT          = 3,
  I386_EXCEPTION_OVERFLOW            = 4,
  I386_EXCEPTION_BOUND               = 5,
  I386_EXCEPTION_ILLEGAL_INSTR       = 6,
  I386_EXCEPTION_MATH_COPROC_UNAVAIL = 7,
  I386_EXCEPTION_DOUBLE_FAULT        = 8,
  I386_EXCEPTION_I386_COPROC_SEG_ERR = 9,
  I386_EXCEPTION_INVALID_TSS         = 10,
  I386_EXCEPTION_SEGMENT_NOT_PRESENT = 11,
  I386_EXCEPTION_STACK_SEGMENT_FAULT = 12,
  I386_EXCEPTION_GENERAL_PROT_ERR    = 13,
  I386_EXCEPTION_PAGE_FAULT          = 14,
  I386_EXCEPTION_INTEL_RES15         = 15,
  I386_EXCEPTION_FLOAT_ERROR         = 16,
  I386_EXCEPTION_ALIGN_CHECK         = 17,
  I386_EXCEPTION_MACHINE_CHECK       = 18,
  I386_EXCEPTION_ENTER_RDBG          = 50     /* to manually enter RDBG */

} Intel_symbolic_exception_name;


/*
 *  context size area for floating point
 *
 *  NOTE:  This is out of place on the i386 to avoid a forward reference.
 */

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

/* variables */

extern Context_Control_fp _CPU_Null_fp_context;

#endif /* ASM */

/* constants */

/*
 *  This defines the number of levels and the mask used to pick those
 *  bits out of a thread mode.
 */

#define CPU_MODES_INTERRUPT_LEVEL  0x00000001 /* interrupt level in mode */
#define CPU_MODES_INTERRUPT_MASK   0x00000001 /* interrupt level in mode */

/*
 *  extra stack required by the MPCI receive server thread
 */

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 1024

/*
 *  This is defined if the port has a special way to report the ISR nesting
 *  level.  Most ports maintain the variable _ISR_Nest_level.
 */

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/*
 *  Minimum size of a thread's stack.
 */

#define CPU_STACK_MINIMUM_SIZE          4096

#define CPU_SIZEOF_POINTER 4

/*
 *  i386 is pretty tolerant of alignment.  Just put things on 4 byte boundaries.
 */

#define CPU_ALIGNMENT                    4
#define CPU_HEAP_ALIGNMENT               CPU_ALIGNMENT
#define CPU_PARTITION_ALIGNMENT          CPU_ALIGNMENT

/*
 *  On i386 thread stacks require no further alignment after allocation
 *  from the Workspace. However, since gcc maintains 16-byte alignment
 *  we try to respect that. If you find an option to let gcc squeeze
 *  the stack more tightly then setting CPU_STACK_ALIGNMENT to 16 still
 *  doesn't waste much space since this only determines the *initial*
 *  alignment.
 */

#define CPU_STACK_ALIGNMENT             16

/* macros */

#ifndef ASM
/*
 *  ISR handler macros
 *
 *  These macros perform the following functions:
 *     + initialize the RTEMS vector table
 *     + disable all maskable CPU interrupts
 *     + restore previous interrupt level (enable)
 *     + temporarily restore interrupts (flash)
 *     + set a particular level
 */

#if !defined(RTEMS_PARAVIRT)
#define _CPU_ISR_Disable( _level ) i386_disable_interrupts( _level )

#define _CPU_ISR_Enable( _level )  i386_enable_interrupts( _level )

#define _CPU_ISR_Flash( _level )   i386_flash_interrupts( _level )

#define _CPU_ISR_Set_level( _new_level ) \
  { \
    if ( _new_level ) __asm__ volatile ( "cli" ); \
    else              __asm__ volatile ( "sti" ); \
  }
#else
#define _CPU_ISR_Disable( _level ) _level = i386_disable_interrupts()
#define _CPU_ISR_Enable( _level ) i386_enable_interrupts( _level )
#define _CPU_ISR_Flash( _level ) i386_flash_interrupts( _level )
#define _CPU_ISR_Set_level( _new_level ) i386_set_interrupt_level(_new_level)
#endif

RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
{
  return ( level & EFLAGS_INTR_ENABLE ) != 0;
}
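
/*
 * Illustrative usage sketch (not part of the original header): the macros
 * above typically bracket a short critical section, e.g.
 *
 *   uint32_t level;
 *
 *   _CPU_ISR_Disable( level );
 *     ... short critical section, maskable interrupts disabled ...
 *   _CPU_ISR_Flash( level );
 *     ... interrupts were briefly restored to "level", then disabled again ...
 *   _CPU_ISR_Enable( level );
 *
 * On this port the saved level is an EFLAGS image, which is why
 * _CPU_ISR_Is_enabled() simply tests the EFLAGS_INTR_ENABLE (IF) bit.
 */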

uint32_t   _CPU_ISR_Get_level( void );

/*  Make sure interrupt stack has space for ISR
 *  'vector' arg at the top and that it is aligned
 *  properly.
 */

#define _CPU_Interrupt_stack_setup( _lo, _hi )  \
        do {                                        \
                _hi = (void*)(((uintptr_t)(_hi) - 4) & ~ (CPU_STACK_ALIGNMENT - 1)); \
        } while (0)
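
/*
 * Worked example (illustrative only): with CPU_STACK_ALIGNMENT == 16 an
 * initial _hi of 0x0040FFFC becomes
 *
 *   (0x0040FFFC - 4) & ~(16 - 1) = 0x0040FFF0
 *
 * i.e. four bytes are reserved for the vector argument and the resulting
 * top of the interrupt stack is 16-byte aligned.
 */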

#endif /* ASM */

/* end of ISR handler macros */

/*
 *  Context handler macros
 *
 *  These macros perform the following functions:
 *     + initialize a context area
 *     + restart the current thread
 *     + calculate the initial pointer into a FP context area
 *     + initialize an FP context area
 */

#define CPU_EFLAGS_INTERRUPTS_ON  0x00003202
#define CPU_EFLAGS_INTERRUPTS_OFF 0x00003002
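
/*
 * For reference, both EFLAGS images above have the always-one reserved bit 1
 * set and select IOPL 3 (bits 12-13); they differ only in bit 9, the
 * interrupt enable flag (IF), which is set in 0x3202 and clear in 0x3002.
 */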

#ifndef ASM

void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint32_t new_level,
  void (*entry_point)( void ),
  bool is_fp,
  void *tls_area
);

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

#if defined(RTEMS_SMP)
  uint32_t _CPU_SMP_Initialize( void );

  bool _CPU_SMP_Start_processor( uint32_t cpu_index );

  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

  /* Nothing to do */
  #define _CPU_SMP_Prepare_start_multitasking() do { } while ( 0 )

  uint32_t _CPU_SMP_Get_current_processor( void );

  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

  static inline void _CPU_SMP_Processor_event_broadcast( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }

  static inline void _CPU_SMP_Processor_event_receive( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }
#endif

#define _CPU_Context_Initialize_fp( _fp_area ) \
  { \
    memcpy( *_fp_area, &_CPU_Null_fp_context, CPU_CONTEXT_FP_SIZE ); \
  }

/* end of Context handler macros */

/*
 *  Fatal Error manager macros
 *
 *  These macros perform the following functions:
 *    + disable interrupts and halt the CPU
 */

extern void _CPU_Fatal_halt(uint32_t source, uint32_t error)
  RTEMS_NO_RETURN;

#endif /* ASM */

/* end of Fatal Error manager macros */

/*
 *  Bitfield handler macros
 *
 *  These macros perform the following functions:
 *     + scan for the lowest numbered (LSB) bit set in a 16 bit bitfield
 */

#define CPU_USE_GENERIC_BITFIELD_CODE FALSE

#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
  { \
    register uint16_t __value_in_register = ( _value ); \
    uint16_t          __output = 0; \
    __asm__ volatile ( "bsfw    %0,%1 " \
                    : "=r" ( __value_in_register ), "=r" ( __output ) \
                    : "0"  ( __value_in_register ), "1"  ( __output ) \
    ); \
    ( _output ) = __output; \
  }
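
/*
 * Illustrative example (not part of the original header): bsfw scans from
 * the least significant bit, so for _value == 0x8010 (bits 4 and 15 set)
 * the macro stores 4 in _output.
 */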

/* end of Bitfield handler macros */

/*
 *  Priority handler macros
 *
 *  These macros perform the following functions:
 *    + return a mask with the bit for this major/minor portion of
 *      thread priority set.
 *    + translate the bit number returned by "Bitfield_find_first_bit"
 *      into an index into the thread ready chain bit maps
 */

#define _CPU_Priority_Mask( _bit_number ) \
  ( 1 << (_bit_number) )

#define _CPU_Priority_bits_index( _priority ) \
  (_priority)
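
/*
 * Continuing the example above (illustrative only): for bit number 4,
 * _CPU_Priority_Mask( 4 ) yields 0x10 and _CPU_Priority_bits_index( 4 )
 * returns 4 unchanged; no translation is needed on this port.
 */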

/* functions */

#ifndef ASM
/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 */

void _CPU_Initialize(void);

/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs a "raw" interrupt handler directly into the
 *  processor's vector table.
 */

void _CPU_ISR_install_raw_handler(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.
 */

void _CPU_ISR_install_vector(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_Thread_Idle_body
 *
 *  Use the halt instruction or low power mode of a particular i386 model.
 */

#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)

void *_CPU_Thread_Idle_body( uintptr_t ignored );

#endif /* CPU_PROVIDES_IDLE_THREAD_BODY */

/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner and avoid stack conflicts.
 */

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_NO_RETURN;

/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 */

#ifdef __SSE__
#define _CPU_Context_save_fp(fp_context_pp) \
  do {                                      \
    __asm__ __volatile__(                   \
      "fstcw %0"                            \
      :"=m"((*(fp_context_pp))->fpucw)      \
    );                                      \
        __asm__ __volatile__(                   \
      "stmxcsr %0"                          \
      :"=m"((*(fp_context_pp))->mxcsr)      \
    );                                      \
  } while (0)
#else
void _CPU_Context_save_fp(
  Context_Control_fp **fp_context_ptr
);
#endif

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 */
#ifdef __SSE__
#define _CPU_Context_restore_fp(fp_context_pp) \
  do {                                         \
    __asm__ __volatile__(                      \
      "fldcw %0"                               \
      ::"m"((*(fp_context_pp))->fpucw)         \
      :"fpcr"                                  \
    );                                         \
    __builtin_ia32_ldmxcsr(_Thread_Executing->fp_context->mxcsr);  \
  } while (0)
#else
void _CPU_Context_restore_fp(
  Context_Control_fp **fp_context_ptr
);
#endif

#ifdef __SSE__
#define _CPU_Context_Initialization_at_thread_begin() \
  do {                                                \
    __asm__ __volatile__(                             \
      "finit"                                         \
      :                                               \
      :                                               \
      :"st","st(1)","st(2)","st(3)",                  \
       "st(4)","st(5)","st(6)","st(7)",               \
       "fpsr","fpcr"                                  \
    );                                                \
        if ( _Thread_Executing->fp_context ) {            \
          _CPU_Context_restore_fp(&_Thread_Executing->fp_context); \
   }                                                  \
  } while (0)
#endif

static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
{
  /* TODO */
}

static inline void _CPU_Context_validate( uintptr_t pattern )
{
  while (1) {
    /* TODO */
  }
}

void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

typedef uint32_t CPU_Counter_ticks;

CPU_Counter_ticks _CPU_Counter_read( void );

static inline CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
)
{
  return second - first;
}
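
/*
 * Illustrative usage sketch (not part of the original header): elapsed time
 * in counter ticks can be measured as
 *
 *   CPU_Counter_ticks t0 = _CPU_Counter_read();
 *   ... work to be measured ...
 *   CPU_Counter_ticks delta = _CPU_Counter_difference( _CPU_Counter_read(), t0 );
 *
 * The unsigned 32-bit subtraction yields the correct difference even if the
 * counter wrapped around once between the two reads.
 */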

#endif /* ASM */

#ifdef __cplusplus
}
#endif

#endif