source: rtems/cpukit/score/cpu/i386/include/rtems/score/cpu.h @ 65f868c

Last change on this file since 65f868c was 65f868c, checked in by Sebastian Huber <sebastian.huber@…>, on 05/23/18 at 12:17:25

Add _CPU_Counter_frequency()

Add rtems_counter_frequency() API function. Use it to initialize the
counter value converter via the new system initialization step
(RTEMS_SYSINIT_CPU_COUNTER). This decouples the counter implementation
and the counter converter. It avoids an unnecessary pull in of the
64-bit integer division from libgcc.

Update #3456.

  • Property mode set to 100644
File size: 18.3 KB
Line 
1/**
2 * @file
3 *
4 * @brief Intel I386 CPU Dependent Source
5 *
6 * This include file contains information pertaining to the Intel
7 * i386 processor.
8 */
9
10/*
11 *  COPYRIGHT (c) 1989-2011.
12 *  On-Line Applications Research Corporation (OAR).
13 *
14 *  The license and distribution terms for this file may be
15 *  found in the file LICENSE in this distribution or at
16 *  http://www.rtems.org/license/LICENSE.
17 */
18
19#ifndef _RTEMS_SCORE_CPU_H
20#define _RTEMS_SCORE_CPU_H
21
22#ifndef ASM
23#include <string.h> /* for memcpy */
24#endif
25
26#ifdef __cplusplus
27extern "C" {
28#endif
29
30#include <rtems/score/basedefs.h>
31#if defined(RTEMS_PARAVIRT)
32#include <rtems/score/paravirt.h>
33#endif
34#include <rtems/score/i386.h>
35
/* conditional compilation parameters */

/*
 *  Does the CPU follow the simple vectored interrupt model?
 *
 *  If TRUE, then RTEMS allocates the vector table it internally manages.
 *  If FALSE, then the BSP is assumed to allocate and manage the vector
 *  table.
 *
 *  i386 Specific Information:
 *
 *  The PowerPC and x86 were the first to use the PIC interrupt model.
 *  They do not use the simple vectored interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

/*
 *  i386 has an RTEMS allocated and managed interrupt stack.
 */

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
#define CPU_ALLOCATE_INTERRUPT_STACK     TRUE

/*
 *  Does the RTEMS invoke the user's ISR with the vector number and
 *  a pointer to the saved interrupt frame (1) or just the vector
 *  number (0)?
 */

#define CPU_ISR_PASSES_FRAME_POINTER FALSE
67
/*
 *  Some family members have no FP, some have an FPU such as the i387
 *  for the i386, others have it built in (i486DX, Pentium).
 */

#ifdef __SSE__
/*
 *  With SSE support every task is treated as a floating point task and
 *  the FP context switch is never deferred.
 */
#define CPU_HARDWARE_FP                  TRUE
#define CPU_SOFTWARE_FP                  FALSE

#define CPU_ALL_TASKS_ARE_FP             TRUE
#define CPU_IDLE_TASK_IS_FP              TRUE
#define CPU_USE_DEFERRED_FP_SWITCH       FALSE
#else /* __SSE__ */

#if ( I386_HAS_FPU == 1 )
#define CPU_HARDWARE_FP     TRUE    /* i387 for i386 */
#else
#define CPU_HARDWARE_FP     FALSE
#endif
#define CPU_SOFTWARE_FP     FALSE

#define CPU_ALL_TASKS_ARE_FP             FALSE
#define CPU_IDLE_TASK_IS_FP              FALSE
#if defined(RTEMS_SMP)
  /* Never defer the FP context switch in SMP configurations. */
  #define CPU_USE_DEFERRED_FP_SWITCH     FALSE
#else
  #define CPU_USE_DEFERRED_FP_SWITCH     TRUE
#endif
#endif /* __SSE__ */

#define CPU_ENABLE_ROBUST_THREAD_DISPATCH FALSE

#define CPU_STACK_GROWS_UP               FALSE

/* FIXME: The Pentium 4 used 128 bytes, is this processor still relevant? */
#define CPU_CACHE_LINE_BYTES 64

/* No extra alignment attribute is required for structures on i386. */
#define CPU_STRUCTURE_ALIGNMENT
106
/*
 *  Does this port provide a CPU dependent IDLE task implementation?
 *
 *  If TRUE, then the routine _CPU_Thread_Idle_body
 *  must be provided and is the default IDLE thread body instead of
 *  the generic IDLE thread body.
 *
 *  If FALSE, then use the generic IDLE thread body if the BSP does
 *  not provide one.
 */

#define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE

#define CPU_MAXIMUM_PROCESSORS 32

/*
 *  Byte offsets of the Context_Control members defined below.  They
 *  must be kept in sync with the structure layout (presumably they are
 *  consumed by the assembly language context switch code -- verify in
 *  the port's cpu_asm file before changing).
 */
#define I386_CONTEXT_CONTROL_EFLAGS_OFFSET 0
#define I386_CONTEXT_CONTROL_ESP_OFFSET 4
#define I386_CONTEXT_CONTROL_EBP_OFFSET 8
#define I386_CONTEXT_CONTROL_EBX_OFFSET 12
#define I386_CONTEXT_CONTROL_ESI_OFFSET 16
#define I386_CONTEXT_CONTROL_EDI_OFFSET 20
#define I386_CONTEXT_CONTROL_GS_0_OFFSET 24
#define I386_CONTEXT_CONTROL_GS_1_OFFSET 28

#ifdef RTEMS_SMP
  #define I386_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 32
#endif
134
/* structures */

#ifndef ASM

/*
 *  Basic integer context for the i386 family.
 *
 *  The member offsets must match the I386_CONTEXT_CONTROL_*_OFFSET
 *  constants defined above.
 */

typedef struct {
  uint32_t    eflags;     /* extended flags register                   */
  void       *esp;        /* extended stack pointer register           */
  void       *ebp;        /* extended base pointer register            */
  uint32_t    ebx;        /* extended bx register                      */
  uint32_t    esi;        /* extended source index register            */
  uint32_t    edi;        /* extended destination index register       */
  segment_descriptors gs; /* gs segment descriptor                     */
#ifdef RTEMS_SMP
  /* Accessed via _CPU_Context_Get/Set_is_executing() below. */
  volatile bool is_executing;
#endif
}   Context_Control;

/* Returns the stack pointer saved in the given context. */
#define _CPU_Context_Get_SP( _context ) \
  (_context)->esp
158
#ifdef RTEMS_SMP
  /**
   * @brief Reads the is_executing flag of the given context.
   */
  static inline bool _CPU_Context_Get_is_executing(
    const Context_Control *context
  )
  {
    bool executing;

    executing = context->is_executing;

    return executing;
  }

  /**
   * @brief Writes the is_executing flag of the given context.
   */
  static inline void _CPU_Context_Set_is_executing(
    Context_Control *context,
    bool is_executing
  )
  {
    Context_Control *the_context;

    the_context = context;
    the_context->is_executing = is_executing;
  }
#endif
175
/*
 *  FP context save area for the i387 numeric coprocessors.
 */
#ifdef __SSE__
/* All FPU and SSE registers are volatile; hence, as long
 * as we are within normally executing C code (including
 * a task switch) there is no need for saving/restoring
 * any of those registers.
 * We must save/restore the full FPU/SSE context across
 * interrupts and exceptions, however:
 *   -  after ISR execution a _Thread_Dispatch() may happen
 *      and it is therefore necessary to save the FPU/SSE
 *      registers to be restored when control is returned
 *      to the interrupted task.
 *   -  gcc may implicitly use FPU/SSE instructions in
 *      an ISR.
 *
 * Even though there is no explicit mentioning of the FPU
 * control word in the SYSV ABI (i386) being non-volatile
 * we maintain MXCSR and the FPU control-word for each task.
 */
typedef struct {
        uint32_t  mxcsr;   /* SSE control and status register */
        uint16_t  fpucw;   /* i387 FPU control word           */
} Context_Control_fp;

#else

typedef struct {
  uint8_t     fp_save_area[108];    /* context size area for I80387 */
                                    /*  28 bytes for environment    */
} Context_Control_fp;

#endif
210
211
/*
 *  The following structure defines the set of information saved
 *  on the current stack by RTEMS upon receipt of exceptions.
 *
 * idtIndex is either the interrupt number or the trap/exception number.
 * faultCode is the code pushed by the processor on some exceptions.
 *
 * Since the first registers are directly pushed by the CPU they
 * may not respect 16-byte stack alignment, which is, however,
 * mandatory for the SSE register area.
 * Therefore, these registers are stored at an aligned address
 * and a pointer is stored in the CPU_Exception_frame.
 * If the executive was compiled without SSE support then
 * this pointer is NULL.
 */

struct Context_Control_sse;

typedef struct {
  struct Context_Control_sse *fp_ctxt; /* NULL when built without SSE  */
  uint32_t    edi;
  uint32_t    esi;
  uint32_t    ebp;
  uint32_t    esp0;
  uint32_t    ebx;
  uint32_t    edx;
  uint32_t    ecx;
  uint32_t    eax;
  uint32_t    idtIndex;   /* interrupt or trap/exception number        */
  uint32_t    faultCode;  /* error code pushed by some exceptions      */
  uint32_t    eip;
  uint32_t    cs;
  uint32_t    eflags;
} CPU_Exception_frame;
246
#ifdef __SSE__
/*
 *  SSE extended FP context.  The field layout appears to follow the
 *  512-byte FXSAVE/FXRSTOR memory image (see the Intel SDM) -- verify
 *  before relying on individual field positions.  The 16-byte alignment
 *  is mandatory for FXSAVE/FXRSTOR operands.
 */
typedef struct Context_Control_sse {
  uint16_t  fcw;
  uint16_t  fsw;
  uint8_t   ftw;
  uint8_t   res_1;
  uint16_t  fop;
  uint32_t  fpu_ip;
  uint16_t  cs;
  uint16_t  res_2;
  uint32_t  fpu_dp;
  uint16_t  ds;
  uint16_t  res_3;
  uint32_t  mxcsr;
  uint32_t  mxcsr_mask;
  struct {
        uint8_t fpreg[10];
        uint8_t res_4[ 6];
  } fp_mmregs[8];
  uint8_t   xmmregs[8][16];
  uint8_t   res_5[224];
} Context_Control_sse
__attribute__((aligned(16)))
;
#endif
272
/* Signature of a low level exception handler. */
typedef void (*cpuExcHandlerType) (CPU_Exception_frame*);
/* Currently installed low level exception handler. */
extern cpuExcHandlerType _currentExcHandler;
/* Exception management initialization entry point (defined elsewhere). */
extern void rtems_exception_init_mngt(void);

#ifdef RTEMS_SMP
  /* Throw compile-time error to indicate incomplete support */
  #error "i386 targets do not support SMP.\
 See: https://devel.rtems.org/ticket/3335"

  /*
   * This size must match the size of the CPU_Interrupt_frame, which must be
   * used in the SMP context switch code, which is incomplete at the moment.
   */
  #define CPU_INTERRUPT_FRAME_SIZE 4
#endif

/*
 * This port does not pass any frame info to the
 * interrupt handler.
 */

typedef struct {
  uint32_t todo_replace_with_apt_registers;
} CPU_Interrupt_frame;
297
/* Symbolic names for the i386 exception/trap vector numbers. */
typedef enum {
  I386_EXCEPTION_DIVIDE_BY_ZERO      = 0,
  I386_EXCEPTION_DEBUG               = 1,
  I386_EXCEPTION_NMI                 = 2,
  I386_EXCEPTION_BREAKPOINT          = 3,
  I386_EXCEPTION_OVERFLOW            = 4,
  I386_EXCEPTION_BOUND               = 5,
  I386_EXCEPTION_ILLEGAL_INSTR       = 6,
  I386_EXCEPTION_MATH_COPROC_UNAVAIL = 7,
  I386_EXCEPTION_DOUBLE_FAULT        = 8,
  I386_EXCEPTION_I386_COPROC_SEG_ERR = 9,
  I386_EXCEPTION_INVALID_TSS         = 10,
  I386_EXCEPTION_SEGMENT_NOT_PRESENT = 11,
  I386_EXCEPTION_STACK_SEGMENT_FAULT = 12,
  I386_EXCEPTION_GENERAL_PROT_ERR    = 13,
  I386_EXCEPTION_PAGE_FAULT          = 14,
  I386_EXCEPTION_INTEL_RES15         = 15,
  I386_EXCEPTION_FLOAT_ERROR         = 16,
  I386_EXCEPTION_ALIGN_CHECK         = 17,
  I386_EXCEPTION_MACHINE_CHECK       = 18,
  I386_EXCEPTION_ENTER_RDBG          = 50     /* to enter manually RDBG */

} Intel_symbolic_exception_name;
321
322
/*
 *  context size area for floating point
 *
 *  NOTE:  This is out of place on the i386 to avoid a forward reference.
 */

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

/* variables */

/* Initial FP context; copied into a task's FP save area by
 * _CPU_Context_Initialize_fp() below.
 */
extern Context_Control_fp _CPU_Null_fp_context;
335#endif /* ASM */
336
/* constants */

/*
 *  This defines the number of levels and the mask used to pick those
 *  bits out of a thread mode.
 */

#define CPU_MODES_INTERRUPT_LEVEL  0x00000001 /* interrupt level in mode */
#define CPU_MODES_INTERRUPT_MASK   0x00000001 /* interrupt level in mode */

/*
 *  extra stack required by the MPCI receive server thread
 */

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 1024

/*
 *  This is defined if the port has a special way to report the ISR nesting
 *  level.  Most ports maintain the variable _ISR_Nest_level.
 */

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/*
 *  Minimum size of a thread's stack.
 */

#define CPU_STACK_MINIMUM_SIZE          4096

/* Size of a pointer on i386 (32-bit). */
#define CPU_SIZEOF_POINTER 4

/*
 *  i386 is pretty tolerant of alignment.  Just put things on 4 byte boundaries.
 */

#define CPU_ALIGNMENT                    4
#define CPU_HEAP_ALIGNMENT               CPU_ALIGNMENT
#define CPU_PARTITION_ALIGNMENT          CPU_ALIGNMENT

/*
 *  On i386 thread stacks require no further alignment after allocation
 *  from the Workspace. However, since gcc maintains 16-byte alignment
 *  we try to respect that. If you find an option to let gcc squeeze
 *  the stack more tightly then setting CPU_STACK_ALIGNMENT to 16 still
 *  doesn't waste much space since this only determines the *initial*
 *  alignment.
 */

#define CPU_STACK_ALIGNMENT             16
386
/* macros */

#ifndef ASM
/*
 *  ISR handler macros
 *
 *  These macros perform the following functions:
 *     + initialize the RTEMS vector table
 *     + disable all maskable CPU interrupts
 *     + restore previous interrupt level (enable)
 *     + temporarily restore interrupts (flash)
 *     + set a particular level
 */

#if !defined(I386_DISABLE_INLINE_ISR_DISABLE_ENABLE)
#define _CPU_ISR_Disable( _level ) i386_disable_interrupts( _level )

#define _CPU_ISR_Enable( _level )  i386_enable_interrupts( _level )

#define _CPU_ISR_Flash( _level )   i386_flash_interrupts( _level )

/* A non-zero level masks interrupts (cli); zero unmasks them (sti). */
#define _CPU_ISR_Set_level( _new_level ) \
  { \
    if ( _new_level ) __asm__ volatile ( "cli" ); \
    else              __asm__ volatile ( "sti" ); \
  }
#else
/* Variants implemented by the i386_* interrupt helpers declared elsewhere. */
#define _CPU_ISR_Disable( _level ) _level = i386_disable_interrupts()
#define _CPU_ISR_Enable( _level ) i386_enable_interrupts( _level )
#define _CPU_ISR_Flash( _level ) i386_flash_interrupts( _level )
#define _CPU_ISR_Set_level( _new_level ) i386_set_interrupt_level(_new_level)
#endif
419
420RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
421{
422  return ( level & EFLAGS_INTR_ENABLE ) != 0;
423}
424
/* Returns the current interrupt level. */
uint32_t   _CPU_ISR_Get_level( void );

/*  Make sure interrupt stack has space for ISR
 *  'vector' arg at the top and that it is aligned
 *  properly.
 */

#define _CPU_Interrupt_stack_setup( _lo, _hi )  \
        do {                                        \
                _hi = (void*)(((uintptr_t)(_hi) - 4) & ~ (CPU_STACK_ALIGNMENT - 1)); \
        } while (0)

#endif /* ASM */
438
439/* end of ISR handler macros */
440
/*
 *  Context handler macros
 *
 *  These macros perform the following functions:
 *     + initialize a context area
 *     + restart the current thread
 *     + calculate the initial pointer into a FP context area
 *     + initialize an FP context area
 */

/* Initial EFLAGS images: bit 1 is the always-one reserved bit, bits
 * 12-13 set IOPL to 3, and 0x200 (IF) selects interrupts on or off.
 */
#define CPU_EFLAGS_INTERRUPTS_ON  0x00003202
#define CPU_EFLAGS_INTERRUPTS_OFF 0x00003002

#ifndef ASM

/*
 *  Initializes the integer context of a new thread.
 *
 *  the_context      - context to initialize
 *  stack_area_begin - base of the thread's stack area
 *  stack_area_size  - size of the thread's stack area in bytes
 *  new_level        - initial interrupt level
 *  entry_point      - thread entry point
 *  is_fp            - TRUE if the thread is a floating point thread
 *  tls_area         - thread-local storage area (may be NULL)
 */
void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint32_t new_level,
  void (*entry_point)( void ),
  bool is_fp,
  void *tls_area
);
465
/* Restarts the currently executing thread by reloading its context. */
#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

#if defined(RTEMS_SMP)
  /* SMP support primitives.  NOTE: SMP is unsupported on i386, see the
   * #error above; these declarations are placeholders.
   */
  uint32_t _CPU_SMP_Initialize( void );

  bool _CPU_SMP_Start_processor( uint32_t cpu_index );

  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

  void _CPU_SMP_Prepare_start_multitasking( void );

  uint32_t _CPU_SMP_Get_current_processor( void );

  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

  /* Compiler barrier only; no instruction is emitted. */
  static inline void _CPU_SMP_Processor_event_broadcast( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }

  /* Compiler barrier only; no instruction is emitted. */
  static inline void _CPU_SMP_Processor_event_receive( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }
#endif

/* Copies the initial null FP context into the area referenced by _fp_area. */
#define _CPU_Context_Initialize_fp( _fp_area ) \
  { \
    memcpy( *_fp_area, &_CPU_Null_fp_context, CPU_CONTEXT_FP_SIZE ); \
  }

/* end of Context handler macros */
499
/*
 *  Fatal Error manager macros
 *
 *  These macros perform the following functions:
 *    + disable interrupts and halt the CPU
 */

/* Halts the system; source and error identify the fatal condition.
 * Never returns.
 */
extern void _CPU_Fatal_halt(uint32_t source, uint32_t error)
  RTEMS_NO_RETURN;

#endif /* ASM */
513
/*
 *  Bitfield handler macros
 *
 *  These macros perform the following functions:
 *     + scan for the highest numbered (MSB) set in a 16 bit bitfield
 */

#define CPU_USE_GENERIC_BITFIELD_CODE FALSE

/*
 * NOTE(review): "bsfw" is Bit Scan Forward, which locates the
 * least-significant set bit, while the comment above says MSB.  The
 * port's priority bit numbering must be arranged accordingly --
 * confirm against the generic bitfield code before changing.
 */
#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
  { \
    uint16_t __value_in_register = ( _value ); \
    uint16_t __output = 0; \
    __asm__ volatile ( "bsfw    %0,%1 " \
                    : "=r" ( __value_in_register ), "=r" ( __output ) \
                    : "0"  ( __value_in_register ), "1"  ( __output ) \
    ); \
    ( _output ) = __output; \
  }

/* end of Bitfield handler macros */
535
/*
 *  Priority handler macros
 *
 *  These macros perform the following functions:
 *    + return a mask with the bit for this major/minor portion of
 *      thread priority set.
 *    + translate the bit number returned by "Bitfield_find_first_bit"
 *      into an index into the thread ready chain bit maps
 */

#define _CPU_Priority_Mask( _bit_number ) \
  ( 1 << (_bit_number) )

/* Identity mapping: the bit number is the ready chain index on this port. */
#define _CPU_Priority_bits_index( _priority ) \
  (_priority)
551
/* functions */

#ifndef ASM
/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 */

void _CPU_Initialize(void);

/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs a "raw" interrupt handler directly into the
 *  processor's vector table.  The previously installed handler is
 *  presumably returned through old_handler -- see the implementation.
 */

void _CPU_ISR_install_raw_handler(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.
 */

void _CPU_ISR_install_vector(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);
587
/*
 *  _CPU_Thread_Idle_body
 *
 *  Use the halt instruction of low power mode of a particular i386 model.
 */

#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)

void *_CPU_Thread_Idle_body( uintptr_t ignored );

#endif /* CPU_PROVIDES_IDLE_THREAD_BODY */

/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner and avoid stack conflicts.  It does not return.
 */

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_NO_RETURN;
621
/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 *
 *  With SSE only the FPU control word and MXCSR are saved; all other
 *  FPU/SSE registers are volatile across a task switch (see the
 *  Context_Control_fp comment above).
 */

#ifdef __SSE__
#define _CPU_Context_save_fp(fp_context_pp) \
  do {                                      \
    __asm__ __volatile__(                   \
      "fstcw %0"                            \
      :"=m"((*(fp_context_pp))->fpucw)      \
    );                                      \
        __asm__ __volatile__(                   \
      "stmxcsr %0"                          \
      :"=m"((*(fp_context_pp))->mxcsr)      \
    );                                      \
  } while (0)
#else
void _CPU_Context_save_fp(
  Context_Control_fp **fp_context_ptr
);
#endif
645
/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 *
 *  With SSE only the FPU control word and MXCSR are restored; all other
 *  FPU/SSE registers are volatile across a task switch (see the
 *  Context_Control_fp comment above).
 *
 *  NOTE: The MXCSR is loaded from the context referenced by the macro
 *  argument for consistency with _CPU_Context_save_fp().  It was
 *  previously read from _Thread_Executing->fp_context, which is only
 *  correct when the caller passes exactly that pointer.
 */
#ifdef __SSE__
#define _CPU_Context_restore_fp(fp_context_pp) \
  do {                                         \
    __asm__ __volatile__(                      \
      "fldcw %0"                               \
      ::"m"((*(fp_context_pp))->fpucw)         \
      :"fpcr"                                  \
    );                                         \
    __builtin_ia32_ldmxcsr((*(fp_context_pp))->mxcsr); \
  } while (0)
#else
void _CPU_Context_restore_fp(
  Context_Control_fp **fp_context_ptr
);
#endif
666
#ifdef __SSE__
/*
 *  At thread start the FPU is re-initialized ("finit") and, if the
 *  thread has an FP context, the saved FPU control word and MXCSR are
 *  restored via _CPU_Context_restore_fp().
 */
#define _CPU_Context_Initialization_at_thread_begin() \
  do {                                                \
    __asm__ __volatile__(                             \
      "finit"                                         \
      :                                               \
      :                                               \
      :"st","st(1)","st(2)","st(3)",                  \
       "st(4)","st(5)","st(6)","st(7)",               \
       "fpsr","fpcr"                                  \
    );                                                \
        if ( _Thread_Executing->fp_context ) {            \
          _CPU_Context_restore_fp(&_Thread_Executing->fp_context); \
   }                                                  \
  } while (0)
#endif
683
/**
 * @brief Placeholder for clobbering the volatile registers with a
 *   pattern; presumably required by the port interface -- this port
 *   leaves it unimplemented.
 */
static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
{
  /* TODO */
  (void) pattern;
}
688
/**
 * @brief Placeholder for validating the context against a pattern;
 *   this port only spins forever and never returns.
 */
static inline void _CPU_Context_validate( uintptr_t pattern )
{
  (void) pattern;

  for ( ;; ) {
    /* TODO */
  }
}
695
/* Prints the given CPU exception frame (implemented elsewhere). */
void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
697
/* Integral type used to hold CPU counter values. */
typedef uint32_t CPU_Counter_ticks;

/* Returns the frequency of the CPU counter. */
uint32_t _CPU_Counter_frequency( void );

/* Returns the current CPU counter value. */
CPU_Counter_ticks _CPU_Counter_read( void );

/*
 * Returns the number of ticks elapsed between the first and second
 * counter reads.  Unsigned subtraction keeps the result correct even
 * when the counter wrapped around between the two reads.
 */
static inline CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
)
{
  CPU_Counter_ticks elapsed;

  elapsed = second - first;

  return elapsed;
}

/** Type that can store a 32-bit integer or a pointer. */
typedef uintptr_t CPU_Uint32ptr;
714
715#endif /* ASM */
716
717#ifdef __cplusplus
718}
719#endif
720
721#endif
Note: See TracBrowser for help on using the repository browser.