source: rtems/cpukit/score/cpu/i386/include/rtems/score/cpu.h @ 48dd7b8c

Last change on this file since 48dd7b8c was 48dd7b8c, checked in by Kinsey Moore <kinsey.moore@…>, on 06/29/20 at 19:35:08

score: Add CPU_USE_LIBC_INIT_FINI_ARRAY

This introduces the CPU_USE_LIBC_INIT_FINI_ARRAY define for use by CPU
ports to determine which global constructor and destructor methods are
used instead of placing architecture defines where they shouldn't be.

Close #4018

/**
 * @file
 *
 * @brief Intel I386 CPU Dependent Source
 *
 * This include file contains information pertaining to the Intel
 * i386 processor.
 */

/*
 *  COPYRIGHT (c) 1989-2011.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

#ifndef ASM
#include <string.h> /* for memcpy */
#endif

#ifdef __cplusplus
extern "C" {
#endif

#include <rtems/score/basedefs.h>
#if defined(RTEMS_PARAVIRT)
#include <rtems/score/paravirt.h>
#endif
#include <rtems/score/i386.h>

/**
 * @defgroup RTEMSScoreCPUi386 i386 Specific Support
 *
 * @ingroup RTEMSScoreCPU
 *
 * @brief i386 specific support.
 */
/**@{**/

/* conditional compilation parameters */

/*
 *  Does the CPU follow the simple vectored interrupt model?
 *
 *  If TRUE, then RTEMS allocates and internally manages the vector table.
 *  If FALSE, then the BSP is assumed to allocate and manage the vector
 *  table.
 *
 *  i386 Specific Information:
 *
 *  The PowerPC and x86 were the first to use the PIC interrupt model.
 *  They do not use the simple vectored interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

/*
 *  Does RTEMS invoke the user's ISR with the vector number and
 *  a pointer to the saved interrupt frame (1) or just the vector
 *  number (0)?
 */

#define CPU_ISR_PASSES_FRAME_POINTER FALSE

/*
 *  Some family members have no FP, some have an FPU such as the i387
 *  for the i386, others have it built in (i486DX, Pentium).
 */

#ifdef __SSE__
#define CPU_HARDWARE_FP                  TRUE
#define CPU_SOFTWARE_FP                  FALSE

#define CPU_ALL_TASKS_ARE_FP             TRUE
#define CPU_IDLE_TASK_IS_FP              TRUE
#define CPU_USE_DEFERRED_FP_SWITCH       FALSE
#else /* __SSE__ */

#if ( I386_HAS_FPU == 1 )
#define CPU_HARDWARE_FP     TRUE    /* i387 for i386 */
#else
#define CPU_HARDWARE_FP     FALSE
#endif
#define CPU_SOFTWARE_FP     FALSE

#define CPU_ALL_TASKS_ARE_FP             FALSE
#define CPU_IDLE_TASK_IS_FP              FALSE
#if defined(RTEMS_SMP)
  #define CPU_USE_DEFERRED_FP_SWITCH     FALSE
#else
  #define CPU_USE_DEFERRED_FP_SWITCH     TRUE
#endif
#endif /* __SSE__ */

#define CPU_ENABLE_ROBUST_THREAD_DISPATCH FALSE

#define CPU_STACK_GROWS_UP               FALSE

/* FIXME: The Pentium 4 used 128 bytes; is this processor still relevant? */
#define CPU_CACHE_LINE_BYTES 64

#define CPU_STRUCTURE_ALIGNMENT

#define CPU_MAXIMUM_PROCESSORS 32

#define I386_CONTEXT_CONTROL_EFLAGS_OFFSET 0
#define I386_CONTEXT_CONTROL_ESP_OFFSET 4
#define I386_CONTEXT_CONTROL_EBP_OFFSET 8
#define I386_CONTEXT_CONTROL_EBX_OFFSET 12
#define I386_CONTEXT_CONTROL_ESI_OFFSET 16
#define I386_CONTEXT_CONTROL_EDI_OFFSET 20
#define I386_CONTEXT_CONTROL_GS_0_OFFSET 24
#define I386_CONTEXT_CONTROL_GS_1_OFFSET 28
#define I386_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE 32

#ifdef RTEMS_SMP
  #define I386_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 36
#endif

/* structures */

#ifndef ASM

/*
 *  Basic integer context for the i386 family.
 */

typedef struct {
  uint32_t    eflags;     /* extended flags register                   */
  void       *esp;        /* extended stack pointer register           */
  void       *ebp;        /* extended base pointer register            */
  uint32_t    ebx;        /* extended bx register                      */
  uint32_t    esi;        /* extended source index register            */
  uint32_t    edi;        /* extended destination index register       */
  segment_descriptors gs; /* gs segment descriptor                     */
  uint32_t isr_dispatch_disable;
#ifdef RTEMS_SMP
  volatile bool is_executing;
#endif
}   Context_Control;

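/*
 * The I386_CONTEXT_CONTROL_*_OFFSET constants above mirror this layout:
 * eflags lives at byte offset 0, esp at 4, ebp at 8, ebx at 12, esi at 16,
 * edi at 20, the two 32-bit halves of the gs descriptor at 24 and 28, and
 * isr_dispatch_disable at 32, so assembly code and this C structure agree.
 */
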
#define _CPU_Context_Get_SP( _context ) \
  (_context)->esp

#ifdef RTEMS_SMP
  static inline bool _CPU_Context_Get_is_executing(
    const Context_Control *context
  )
  {
    return context->is_executing;
  }

  static inline void _CPU_Context_Set_is_executing(
    Context_Control *context,
    bool is_executing
  )
  {
    context->is_executing = is_executing;
  }
#endif

/*
 *  FP context save area for the i387 numeric coprocessor.
 */
#ifdef __SSE__
/* All FPU and SSE registers are volatile; hence, as long
 * as we are within normally executing C code (including
 * a task switch) there is no need for saving/restoring
 * any of those registers.
 * We must save/restore the full FPU/SSE context across
 * interrupts and exceptions, however:
 *   -  after ISR execution a _Thread_Dispatch() may happen
 *      and it is therefore necessary to save the FPU/SSE
 *      registers to be restored when control is returned
 *      to the interrupted task.
 *   -  gcc may implicitly use FPU/SSE instructions in
 *      an ISR.
 *
 * Even though the SYSV ABI (i386) does not explicitly
 * declare the FPU control word to be non-volatile, we
 * maintain MXCSR and the FPU control word for each task.
 */
typedef struct {
        uint32_t  mxcsr;
        uint16_t  fpucw;
} Context_Control_fp;

#else

typedef struct {
  uint8_t     fp_save_area[108];    /* context size area for I80387 */
                                    /*  28 bytes for environment    */
} Context_Control_fp;

#endif


/*
 *  The following structure defines the set of information saved
 *  on the current stack by RTEMS upon receipt of exceptions.
 *
 * idtIndex is either the interrupt number or the trap/exception number.
 * faultCode is the code pushed by the processor on some exceptions.
 *
 * Since the first registers are directly pushed by the CPU they
 * may not respect 16-byte stack alignment, which is, however,
 * mandatory for the SSE register area.
 * Therefore, these registers are stored at an aligned address
 * and a pointer is stored in the CPU_Exception_frame.
 * If the executive was compiled without SSE support then
 * this pointer is NULL.
 */

struct Context_Control_sse;

typedef struct {
  struct Context_Control_sse *fp_ctxt;
  uint32_t    edi;
  uint32_t    esi;
  uint32_t    ebp;
  uint32_t    esp0;
  uint32_t    ebx;
  uint32_t    edx;
  uint32_t    ecx;
  uint32_t    eax;
  uint32_t    idtIndex;
  uint32_t    faultCode;
  uint32_t    eip;
  uint32_t    cs;
  uint32_t    eflags;
} CPU_Exception_frame;

#ifdef __SSE__
typedef struct Context_Control_sse {
  uint16_t  fcw;
  uint16_t  fsw;
  uint8_t   ftw;
  uint8_t   res_1;
  uint16_t  fop;
  uint32_t  fpu_ip;
  uint16_t  cs;
  uint16_t  res_2;
  uint32_t  fpu_dp;
  uint16_t  ds;
  uint16_t  res_3;
  uint32_t  mxcsr;
  uint32_t  mxcsr_mask;
  struct {
        uint8_t fpreg[10];
        uint8_t res_4[ 6];
  } fp_mmregs[8];
  uint8_t   xmmregs[8][16];
  uint8_t   res_5[224];
} Context_Control_sse
__attribute__((aligned(16)))
;
#endif

typedef void (*cpuExcHandlerType) (CPU_Exception_frame*);
extern cpuExcHandlerType _currentExcHandler;
extern void rtems_exception_init_mngt(void);

/*
 * This port does not pass any frame info to the
 * interrupt handler.
 */

typedef struct {
/* allow for 16-byte alignment (worst case 12 bytes more) and ISR right after pushfl */
  uint32_t reserved[3];
/* registers saved by _ISR_Handler */
  uint32_t isr_vector;
  uint32_t ebx;
  uint32_t ebp;
  uint32_t esp;
/* registers saved by rtems_irq_prologue_##_vector */
  uint32_t edx;
  uint32_t ecx;
  uint32_t eax;
/* registers saved by CPU */
  uint32_t eip;
  uint32_t cs;
  uint32_t eflags;
} CPU_Interrupt_frame;

typedef enum {
  I386_EXCEPTION_DIVIDE_BY_ZERO      = 0,
  I386_EXCEPTION_DEBUG               = 1,
  I386_EXCEPTION_NMI                 = 2,
  I386_EXCEPTION_BREAKPOINT          = 3,
  I386_EXCEPTION_OVERFLOW            = 4,
  I386_EXCEPTION_BOUND               = 5,
  I386_EXCEPTION_ILLEGAL_INSTR       = 6,
  I386_EXCEPTION_MATH_COPROC_UNAVAIL = 7,
  I386_EXCEPTION_DOUBLE_FAULT        = 8,
  I386_EXCEPTION_I386_COPROC_SEG_ERR = 9,
  I386_EXCEPTION_INVALID_TSS         = 10,
  I386_EXCEPTION_SEGMENT_NOT_PRESENT = 11,
  I386_EXCEPTION_STACK_SEGMENT_FAULT = 12,
  I386_EXCEPTION_GENERAL_PROT_ERR    = 13,
  I386_EXCEPTION_PAGE_FAULT          = 14,
  I386_EXCEPTION_INTEL_RES15         = 15,
  I386_EXCEPTION_FLOAT_ERROR         = 16,
  I386_EXCEPTION_ALIGN_CHECK         = 17,
  I386_EXCEPTION_MACHINE_CHECK       = 18,
  I386_EXCEPTION_ENTER_RDBG          = 50     /* to manually enter RDBG */

} Intel_symbolic_exception_name;
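
/*
 * A minimal sketch (not part of the port interface) of how the exception
 * frame and the symbolic names above fit together; the helper name is
 * hypothetical.
 */
static inline const char *example_exception_name(
  const CPU_Exception_frame *frame
)
{
  switch ( (Intel_symbolic_exception_name) frame->idtIndex ) {
    case I386_EXCEPTION_DIVIDE_BY_ZERO:   return "divide by zero";
    case I386_EXCEPTION_PAGE_FAULT:       return "page fault";  /* faultCode holds the error code */
    case I386_EXCEPTION_GENERAL_PROT_ERR: return "general protection";
    default:                              return "other exception";
  }
}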


/*
 *  context size area for floating point
 *
 *  NOTE:  This is out of place on the i386 to avoid a forward reference.
 */

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

/* variables */

extern Context_Control_fp _CPU_Null_fp_context;

#endif /* ASM */

/* constants */

/*
 *  This defines the number of levels and the mask used to pick those
 *  bits out of a thread mode.
 */

#define CPU_MODES_INTERRUPT_MASK   0x00000001 /* interrupt level in mode */

/*
 *  extra stack required by the MPCI receive server thread
 */

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 1024

/*
 *  This is defined if the port has a special way to report the ISR nesting
 *  level.  Most ports maintain the variable _ISR_Nest_level.
 */

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/*
 *  Minimum size of a thread's stack.
 */

#define CPU_STACK_MINIMUM_SIZE          4096

#define CPU_SIZEOF_POINTER 4

/*
 *  i386 is pretty tolerant of alignment.  Just put things on 4 byte boundaries.
 */

#define CPU_ALIGNMENT                    4
#define CPU_HEAP_ALIGNMENT               CPU_ALIGNMENT

/*
 *  On i386 thread stacks require no further alignment after allocation
 *  from the Workspace. However, since gcc maintains 16-byte alignment
 *  we try to respect that. If you find an option to let gcc squeeze
 *  the stack more tightly then setting CPU_STACK_ALIGNMENT to 16 still
 *  doesn't waste much space since this only determines the *initial*
 *  alignment.
 */

#define CPU_STACK_ALIGNMENT             16

#define CPU_INTERRUPT_STACK_ALIGNMENT CPU_CACHE_LINE_BYTES

/* macros */

#ifndef ASM
/*
 *  ISR handler macros
 *
 *  These macros perform the following functions:
 *     + initialize the RTEMS vector table
 *     + disable all maskable CPU interrupts
 *     + restore previous interrupt level (enable)
 *     + temporarily restore interrupts (flash)
 *     + set a particular level
 */

#if !defined(I386_DISABLE_INLINE_ISR_DISABLE_ENABLE)
#define _CPU_ISR_Disable( _level ) i386_disable_interrupts( _level )

#define _CPU_ISR_Enable( _level )  i386_enable_interrupts( _level )

#define _CPU_ISR_Flash( _level )   i386_flash_interrupts( _level )

#define _CPU_ISR_Set_level( _new_level ) \
  { \
    if ( _new_level ) __asm__ volatile ( "cli" ); \
    else              __asm__ volatile ( "sti" ); \
  }
#else
#define _CPU_ISR_Disable( _level ) _level = i386_disable_interrupts()
#define _CPU_ISR_Enable( _level ) i386_enable_interrupts( _level )
#define _CPU_ISR_Flash( _level ) i386_flash_interrupts( _level )
#define _CPU_ISR_Set_level( _new_level ) i386_set_interrupt_level(_new_level)
#endif

RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
{
  return ( level & EFLAGS_INTR_ENABLE ) != 0;
}
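
/*
 * A minimal usage sketch of the macros above; the function name is
 * hypothetical and only illustrates the intended disable/flash/enable
 * pairing around a critical section.
 */
static inline void example_isr_critical_section( void )
{
  uint32_t level;

  _CPU_ISR_Disable( level );   /* save the current eflags, then disable  */
  /* ... first part of the critical section ... */
  _CPU_ISR_Flash( level );     /* briefly re-enable at the saved level   */
  /* ... second part of the critical section ... */
  _CPU_ISR_Enable( level );    /* restore the caller's interrupt level   */
}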

uint32_t   _CPU_ISR_Get_level( void );

/*  Make sure interrupt stack has space for ISR
 *  'vector' arg at the top and that it is aligned
 *  properly.
 */

#define _CPU_Interrupt_stack_setup( _lo, _hi )  \
        do {                                        \
                _hi = (void*)(((uintptr_t)(_hi) - 4) & ~ (CPU_STACK_ALIGNMENT - 1)); \
        } while (0)
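
/*
 * Worked example with a hypothetical address: for CPU_STACK_ALIGNMENT == 16
 * and _hi == 0x00108000, the macro yields (0x00108000 - 4) & ~15 ==
 * 0x00107FF0, so the interrupt stack top is 16-byte aligned and 16 bytes
 * remain above it for the pushed 'vector' argument.
 */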

#endif /* ASM */

/* end of ISR handler macros */

/*
 *  Context handler macros
 *
 *  These macros perform the following functions:
 *     + initialize a context area
 *     + restart the current thread
 *     + calculate the initial pointer into a FP context area
 *     + initialize an FP context area
 */

#define CPU_EFLAGS_INTERRUPTS_ON  0x00003202
#define CPU_EFLAGS_INTERRUPTS_OFF 0x00003002
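
/*
 * In the two values above, 0x3000 selects I/O privilege level 3 (the IOPL
 * bits 12-13), 0x0200 is the interrupt enable flag (IF), and 0x0002 is the
 * always-set reserved bit 1; the constants differ only in whether IF is set.
 */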

#ifndef ASM

void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint32_t new_level,
  void (*entry_point)( void ),
  bool is_fp,
  void *tls_area
);

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

#if defined(RTEMS_SMP)
  uint32_t _CPU_SMP_Initialize( void );

  bool _CPU_SMP_Start_processor( uint32_t cpu_index );

  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

  void _CPU_SMP_Prepare_start_multitasking( void );

  uint32_t _CPU_SMP_Get_current_processor( void );

  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

  static inline void _CPU_SMP_Processor_event_broadcast( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }

  static inline void _CPU_SMP_Processor_event_receive( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }
#endif

#define _CPU_Context_Initialize_fp( _fp_area ) \
  { \
    memcpy( *_fp_area, &_CPU_Null_fp_context, CPU_CONTEXT_FP_SIZE ); \
  }

/* end of Context handler macros */

/*
 *  Fatal Error manager macros
 *
 *  These macros perform the following functions:
 *    + disable interrupts and halt the CPU
 */

extern void _CPU_Fatal_halt(uint32_t source, uint32_t error)
  RTEMS_NO_RETURN;

#endif /* ASM */

/* end of Fatal Error manager macros */

#define CPU_USE_LIBC_INIT_FINI_ARRAY FALSE

/*
 *  Bitfield handler macros
 *
 *  These macros perform the following functions:
 *     + scan for the highest numbered (MSB) bit set in a 16 bit bitfield
 */

#define CPU_USE_GENERIC_BITFIELD_CODE FALSE

#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
  { \
    uint16_t __value_in_register = ( _value ); \
    uint16_t __output = 0; \
    __asm__ volatile ( "bsfw    %0,%1 " \
                    : "=r" ( __value_in_register ), "=r" ( __output ) \
                    : "0"  ( __value_in_register ), "1"  ( __output ) \
    ); \
    ( _output ) = __output; \
  }

/* end of Bitfield handler macros */

/*
 *  Priority handler macros
 *
 *  These macros perform the following functions:
 *    + return a mask with the bit for this major/minor portion
 *      of thread priority set.
 *    + translate the bit number returned by "Bitfield_find_first_bit"
 *      into an index into the thread ready chain bit maps
 */

#define _CPU_Priority_Mask( _bit_number ) \
  ( 1 << (_bit_number) )

#define _CPU_Priority_bits_index( _priority ) \
  (_priority)
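
/*
 * A small sketch of how the bit field and priority macros compose; the
 * helper name and the sample value are hypothetical.  For a ready map of
 * 0x0030, "bsfw" locates bit 4, the corresponding mask is 0x0010, and the
 * index is the bit number itself.
 */
#ifndef ASM
static inline unsigned int example_find_ready_index( uint16_t ready_map )
{
  unsigned int bit;

  _CPU_Bitfield_Find_first_bit( ready_map, bit );  /* e.g. 0x0030 -> 4   */
  (void) _CPU_Priority_Mask( bit );                /* 1 << 4 == 0x0010   */
  return _CPU_Priority_bits_index( bit );          /* identity on i386   */
}
#endif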

/* functions */

#ifndef ASM
/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 */

void _CPU_Initialize(void);

typedef void ( *CPU_ISR_handler )( void );

void _CPU_ISR_install_vector(
  uint32_t         vector,
  CPU_ISR_handler  new_handler,
  CPU_ISR_handler *old_handler
);

void *_CPU_Thread_Idle_body( uintptr_t ignored );

/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner and avoid stack conflicts.
 */

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_NO_RETURN;

/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 */

#ifdef __SSE__
#define _CPU_Context_save_fp(fp_context_pp) \
  do {                                      \
    __asm__ __volatile__(                   \
      "fstcw %0"                            \
      :"=m"((*(fp_context_pp))->fpucw)      \
    );                                      \
    __asm__ __volatile__(                   \
      "stmxcsr %0"                          \
      :"=m"((*(fp_context_pp))->mxcsr)      \
    );                                      \
  } while (0)
#else
void _CPU_Context_save_fp(
  Context_Control_fp **fp_context_ptr
);
#endif

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 */
#ifdef __SSE__
#define _CPU_Context_restore_fp(fp_context_pp) \
  do {                                         \
    __asm__ __volatile__(                      \
      "fldcw %0"                               \
      ::"m"((*(fp_context_pp))->fpucw)         \
      :"fpcr"                                  \
    );                                         \
    __builtin_ia32_ldmxcsr(_Thread_Executing->fp_context->mxcsr);  \
  } while (0)
#else
void _CPU_Context_restore_fp(
  Context_Control_fp **fp_context_ptr
);
#endif

#ifdef __SSE__
#define _CPU_Context_Initialization_at_thread_begin() \
  do {                                                \
    __asm__ __volatile__(                             \
      "finit"                                         \
      :                                               \
      :                                               \
      :"st","st(1)","st(2)","st(3)",                  \
       "st(4)","st(5)","st(6)","st(7)",               \
       "fpsr","fpcr"                                  \
    );                                                \
    if ( _Thread_Executing->fp_context ) {            \
      _CPU_Context_restore_fp(&_Thread_Executing->fp_context); \
    }                                                 \
  } while (0)
#endif

void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

typedef uint32_t CPU_Counter_ticks;

uint32_t _CPU_Counter_frequency( void );

CPU_Counter_ticks _CPU_Counter_read( void );

static inline CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
)
{
  return second - first;
}
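
/*
 * A minimal sketch of timing a short code sequence with the counter
 * interface above; the function names are hypothetical.
 */
static inline CPU_Counter_ticks example_measure_interval( void ( *work )( void ) )
{
  CPU_Counter_ticks start = _CPU_Counter_read();

  ( *work )();

  /* the unsigned subtraction wraps correctly across a counter overflow */
  return _CPU_Counter_difference( _CPU_Counter_read(), start );
}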

/**@}**/

/** Type that can store a 32-bit integer or a pointer. */
typedef uintptr_t CPU_Uint32ptr;

#endif /* ASM */

#ifdef __cplusplus
}
#endif

#endif