source: rtems/cpukit/score/cpu/i386/rtems/score/cpu.h @ a16af0b3

Last change on this file since a16af0b3 was 53e008b, checked in by Sebastian Huber <sebastian.huber@…>, on 04/10/14 at 13:48:05

score: SMP initialization changes

Add and use _CPU_SMP_Start_processor(). Add and use
_CPU_SMP_Finalize_initialization(). This makes most
_CPU_SMP_Initialize() functions a bit simpler, since the high-level
code can compute the minimum of the processor count requested by the
application configuration and the count of physically or virtually
available processors.

The CPU port now has the ability to signal a processor start failure.
With support for clustered/partitioned scheduling, the presence of
particular processors can be configured as optional or mandatory. A
fatal error occurs only if mandatory processors are not present.

The CPU port may use a timeout to monitor the start of a processor.
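
As a hedged sketch (not the actual RTEMS sources), the high-level flow described above might look like the following; configured_count and the function name are illustrative:

static void smp_bringup_sketch( uint32_t configured_count )
{
  /* Count of physically or virtually available processors */
  uint32_t cpu_max = _CPU_SMP_Initialize();

  /* Minimum of the application-configured and available counts */
  uint32_t cpu_count =
    cpu_max < configured_count ? cpu_max : configured_count;
  uint32_t cpu_index;

  for ( cpu_index = 1; cpu_index < cpu_count; ++cpu_index ) {
    if ( !_CPU_SMP_Start_processor( cpu_index ) ) {
      /* Fatal only if this processor is mandatory in the
       * clustered/partitioned scheduler configuration. */
    }
  }

  _CPU_SMP_Finalize_initialization( cpu_count );
}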

/**
 * @file
 *
 * @brief Intel I386 CPU Dependent Source
 *
 * This include file contains information pertaining to the Intel
 * i386 processor.
 */

/*
 *  COPYRIGHT (c) 1989-2011.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

#ifndef ASM
#include <string.h> /* for memcpy */
#endif

#ifdef __cplusplus
extern "C" {
#endif

#include <rtems/score/types.h>
#include <rtems/score/i386.h>

#ifndef ASM
#include <rtems/score/interrupts.h>     /* formerly in libcpu/cpu.h */
#include <rtems/score/registers.h>      /* formerly part of libcpu */
#endif

/* conditional compilation parameters */

#define CPU_INLINE_ENABLE_DISPATCH       TRUE
#define CPU_UNROLL_ENQUEUE_PRIORITY      FALSE

/*
 *  Does the CPU follow the simple vectored interrupt model?
 *
 *  If TRUE, then RTEMS allocates and manages the vector table internally.
 *  If FALSE, then the BSP is assumed to allocate and manage the vector
 *  table.
 *
 *  i386 Specific Information:
 *
 *  The PowerPC and x86 were the first ports to use the PIC interrupt
 *  model.  They do not use the simple vectored interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

/*
 *  i386 has an RTEMS allocated and managed interrupt stack.
 */

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
#define CPU_ALLOCATE_INTERRUPT_STACK     TRUE

/*
 *  Does RTEMS invoke the user's ISR with the vector number and
 *  a pointer to the saved interrupt frame (1) or just the vector
 *  number (0)?
 */

#define CPU_ISR_PASSES_FRAME_POINTER 0

/*
 *  Some family members have no FP, some have an FPU such as the i387
 *  for the i386, others have it built in (i486DX, Pentium).
 */

#ifdef __SSE__
#define CPU_HARDWARE_FP                  TRUE
#define CPU_SOFTWARE_FP                  FALSE

#define CPU_ALL_TASKS_ARE_FP             TRUE
#define CPU_IDLE_TASK_IS_FP              TRUE
#define CPU_USE_DEFERRED_FP_SWITCH       FALSE
#else /* __SSE__ */

#if ( I386_HAS_FPU == 1 )
#define CPU_HARDWARE_FP     TRUE    /* i387 for i386 */
#else
#define CPU_HARDWARE_FP     FALSE
#endif
#define CPU_SOFTWARE_FP     FALSE

#define CPU_ALL_TASKS_ARE_FP             FALSE
#define CPU_IDLE_TASK_IS_FP              FALSE
#if defined(RTEMS_SMP)
  #define CPU_USE_DEFERRED_FP_SWITCH     FALSE
#else
  #define CPU_USE_DEFERRED_FP_SWITCH     TRUE
#endif
#endif /* __SSE__ */

#define CPU_STACK_GROWS_UP               FALSE
#define CPU_STRUCTURE_ALIGNMENT

#define CPU_TIMESTAMP_USE_INT64_INLINE TRUE

/*
 *  Does this port provide a CPU dependent IDLE task implementation?
 *
 *  If TRUE, then the routine _CPU_Thread_Idle_body
 *  must be provided and is the default IDLE thread body instead of
 *  the generic _Thread_Idle_body.
 *
 *  If FALSE, then use the generic IDLE thread body if the BSP does
 *  not provide one.
 */

#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE

/*
 *  Define what is required to specify how the network to host conversion
 *  routines are handled.
 */

#define CPU_BIG_ENDIAN                           FALSE
#define CPU_LITTLE_ENDIAN                        TRUE

#define CPU_PER_CPU_CONTROL_SIZE 0

/* structures */

#ifndef ASM

typedef struct {
  /* There is no CPU specific per-CPU state */
} CPU_Per_CPU_control;

/*
 *  Basic integer context for the i386 family.
 */

typedef struct {
  uint32_t    eflags;   /* extended flags register               */
  void       *esp;      /* extended stack pointer register       */
  void       *ebp;      /* extended base pointer register        */
  uint32_t    ebx;      /* extended bx register                  */
  uint32_t    esi;      /* extended source index register        */
  uint32_t    edi;      /* extended destination index register   */
}   Context_Control;

#define _CPU_Context_Get_SP( _context ) \
  (_context)->esp

/*
 *  FP context save area for the i387 numeric coprocessor.
 */
#ifdef __SSE__
/* All FPU and SSE registers are volatile; hence, as long
 * as we are within normally executing C code (including
 * a task switch) there is no need for saving/restoring
 * any of those registers.
 * We must save/restore the full FPU/SSE context across
 * interrupts and exceptions, however:
 *   -  after ISR execution a _Thread_Dispatch() may happen
 *      and it is therefore necessary to save the FPU/SSE
 *      registers to be restored when control is returned
 *      to the interrupted task.
 *   -  gcc may implicitly use FPU/SSE instructions in
 *      an ISR.
 *
 * Even though the SysV ABI (i386) does not explicitly
 * specify the FPU control word as non-volatile, we
 * maintain MXCSR and the FPU control word for each task.
 */
typedef struct {
        uint32_t  mxcsr;
        uint16_t  fpucw;
} Context_Control_fp;

#else

typedef struct {
  uint8_t     fp_save_area[108];    /* context size area for I80387 */
                                    /*  28 bytes for environment    */
} Context_Control_fp;

#endif


/*
 *  The following structure defines the set of information saved
 *  on the current stack by RTEMS upon receipt of exceptions.
 *
 * idtIndex is either the interrupt number or the trap/exception number.
 * faultCode is the code pushed by the processor on some exceptions.
 *
 * Since the first registers are directly pushed by the CPU they
 * may not respect 16-byte stack alignment, which is, however,
 * mandatory for the SSE register area.
 * Therefore, these registers are stored at an aligned address
 * and a pointer to them is stored in the CPU_Exception_frame.
 * If the executive was compiled without SSE support then
 * this pointer is NULL.
 */

struct Context_Control_sse;

typedef struct {
  struct Context_Control_sse *fp_ctxt;
  uint32_t    edi;
  uint32_t    esi;
  uint32_t    ebp;
  uint32_t    esp0;
  uint32_t    ebx;
  uint32_t    edx;
  uint32_t    ecx;
  uint32_t    eax;
  uint32_t    idtIndex;
  uint32_t    faultCode;
  uint32_t    eip;
  uint32_t    cs;
  uint32_t    eflags;
} CPU_Exception_frame;

#ifdef __SSE__
typedef struct Context_Control_sse {
  uint16_t  fcw;
  uint16_t  fsw;
  uint8_t   ftw;
  uint8_t   res_1;
  uint16_t  fop;
  uint32_t  fpu_ip;
  uint16_t  cs;
  uint16_t  res_2;
  uint32_t  fpu_dp;
  uint16_t  ds;
  uint16_t  res_3;
  uint32_t  mxcsr;
  uint32_t  mxcsr_mask;
  struct {
        uint8_t fpreg[10];
        uint8_t res_4[ 6];
  } fp_mmregs[8];
  uint8_t   xmmregs[8][16];
  uint8_t   res_5[224];
} Context_Control_sse
__attribute__((aligned(16)))
;
#endif
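
#ifdef __SSE__
/* Illustrative build-time checks (an editor's sketch, not part of the
 * original header), assuming a C11 compiler: Context_Control_sse mirrors
 * the 512-byte FXSAVE/FXRSTOR memory image defined by the IA-32
 * architecture, with MXCSR at byte offset 24.
 */
#include <assert.h>   /* static_assert (C11) */
#include <stddef.h>   /* offsetof */

static_assert( sizeof( struct Context_Control_sse ) == 512,
               "FXSAVE area is 512 bytes" );
static_assert( offsetof( struct Context_Control_sse, mxcsr ) == 24,
               "MXCSR lives at byte offset 24 of the FXSAVE image" );
#endif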

typedef void (*cpuExcHandlerType) (CPU_Exception_frame*);
extern cpuExcHandlerType _currentExcHandler;
extern void rtems_exception_init_mngt(void);
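
/* Usage sketch (an editor's illustration, not part of the original
 * header): install a custom exception handler.  It assumes that
 * assigning _currentExcHandler directly is how a handler is swapped in
 * after rtems_exception_init_mngt() sets up the defaults; the names
 * are hypothetical.
 */
static void my_exception_handler( CPU_Exception_frame *frame )
{
  /* Inspect the saved state, e.g. the vector and faulting address */
  uint32_t vector = frame->idtIndex;
  uint32_t eip    = frame->eip;

  (void) vector;
  (void) eip;
}

static void install_my_exception_handler( void )
{
  rtems_exception_init_mngt();               /* default IDT handling */
  _currentExcHandler = my_exception_handler; /* then swap in ours    */
}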

/*
 * This port does not pass any frame info to the
 * interrupt handler.
 */

typedef void CPU_Interrupt_frame;

typedef enum {
  I386_EXCEPTION_DIVIDE_BY_ZERO      = 0,
  I386_EXCEPTION_DEBUG               = 1,
  I386_EXCEPTION_NMI                 = 2,
  I386_EXCEPTION_BREAKPOINT          = 3,
  I386_EXCEPTION_OVERFLOW            = 4,
  I386_EXCEPTION_BOUND               = 5,
  I386_EXCEPTION_ILLEGAL_INSTR       = 6,
  I386_EXCEPTION_MATH_COPROC_UNAVAIL = 7,
  I386_EXCEPTION_DOUBLE_FAULT        = 8,
  I386_EXCEPTION_I386_COPROC_SEG_ERR = 9,
  I386_EXCEPTION_INVALID_TSS         = 10,
  I386_EXCEPTION_SEGMENT_NOT_PRESENT = 11,
  I386_EXCEPTION_STACK_SEGMENT_FAULT = 12,
  I386_EXCEPTION_GENERAL_PROT_ERR    = 13,
  I386_EXCEPTION_PAGE_FAULT          = 14,
  I386_EXCEPTION_INTEL_RES15         = 15,
  I386_EXCEPTION_FLOAT_ERROR         = 16,
  I386_EXCEPTION_ALIGN_CHECK         = 17,
  I386_EXCEPTION_MACHINE_CHECK       = 18,
  I386_EXCEPTION_ENTER_RDBG          = 50     /* to enter RDBG manually */
} Intel_symbolic_exception_name;


/*
 *  Context size area for floating point.
 *
 *  NOTE:  This is out of place on the i386 to avoid a forward reference.
 */

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

/* variables */

SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context;

#endif /* ASM */

/* constants */

/*
 *  This defines the number of levels and the mask used to pick those
 *  bits out of a thread mode.
 */

#define CPU_MODES_INTERRUPT_LEVEL  0x00000001 /* interrupt level in mode */
#define CPU_MODES_INTERRUPT_MASK   0x00000001 /* interrupt level in mode */

/*
 *  Extra stack required by the MPCI receive server thread.
 */

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 1024

/*
 *  The i386 family supports 256 distinct interrupt vectors.
 */

#define CPU_INTERRUPT_NUMBER_OF_VECTORS      256
#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER  (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1)

/*
 *  This is defined if the port has a special way to report the ISR nesting
 *  level.  Most ports maintain the variable _ISR_Nest_level.
 */

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/*
 *  Minimum size of a thread's stack.
 */

#define CPU_STACK_MINIMUM_SIZE          4096

#define CPU_SIZEOF_POINTER 4

/*
 *  i386 is pretty tolerant of alignment.  Just put things on 4-byte boundaries.
 */

#define CPU_ALIGNMENT                    4
#define CPU_HEAP_ALIGNMENT               CPU_ALIGNMENT
#define CPU_PARTITION_ALIGNMENT          CPU_ALIGNMENT

/*
 *  On i386 thread stacks require no further alignment after allocation
 *  from the Workspace.  However, since gcc maintains 16-byte alignment,
 *  we try to respect that.  If you find an option to let gcc squeeze
 *  the stack more tightly, setting CPU_STACK_ALIGNMENT to 16 still
 *  doesn't waste much space, since this only determines the *initial*
 *  alignment.
 */

#define CPU_STACK_ALIGNMENT             16

/* macros */

#ifndef ASM
/*
 *  ISR handler macros
 *
 *  These macros perform the following functions:
 *     + initialize the RTEMS vector table
 *     + disable all maskable CPU interrupts
 *     + restore previous interrupt level (enable)
 *     + temporarily restore interrupts (flash)
 *     + set a particular level
 */

#define _CPU_ISR_Disable( _level ) i386_disable_interrupts( _level )

#define _CPU_ISR_Enable( _level )  i386_enable_interrupts( _level )

#define _CPU_ISR_Flash( _level )   i386_flash_interrupts( _level )

#define _CPU_ISR_Set_level( _new_level ) \
  { \
    if ( _new_level ) __asm__ volatile ( "cli" ); \
    else              __asm__ volatile ( "sti" ); \
  }

uint32_t   _CPU_ISR_Get_level( void );
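
/* Usage sketch (an editor's illustration, not part of the original
 * header): the canonical disable/flash/enable pattern around a critical
 * section; the function name is hypothetical.
 */
static inline void critical_section_sketch( void )
{
  uint32_t level;

  _CPU_ISR_Disable( level );  /* cli; previous state saved in level */
  /* ... access data shared with interrupt handlers ... */
  _CPU_ISR_Flash( level );    /* briefly allow pending interrupts   */
  /* ... continue the critical section ... */
  _CPU_ISR_Enable( level );   /* restore the saved interrupt state  */
}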

/*  Make sure the interrupt stack has space for the ISR
 *  'vector' argument at the top and that it is aligned
 *  properly.
 */

#define _CPU_Interrupt_stack_setup( _lo, _hi )  \
  do { \
    _hi = (void*)(((uintptr_t)(_hi) - 4) & ~ (CPU_STACK_ALIGNMENT - 1)); \
  } while (0)

#endif /* ASM */

/* end of ISR handler macros */

/*
 *  Context handler macros
 *
 *  These macros perform the following functions:
 *     + initialize a context area
 *     + restart the current thread
 *     + calculate the initial pointer into a FP context area
 *     + initialize an FP context area
 */

#define CPU_EFLAGS_INTERRUPTS_ON  0x00003202
#define CPU_EFLAGS_INTERRUPTS_OFF 0x00003002

#ifndef ASM

/*
 * Stack alignment note:
 *
 * We want the stack to look to the '_entry_point' routine
 * like an ordinary stack frame, as if '_entry_point' had been
 * called from C code.
 * Note that '_entry_point' is jumped-to by the 'ret'
 * instruction returning from _CPU_Context_switch() or
 * _CPU_Context_restore(), thus popping the _entry_point
 * from the stack.
 * However, _entry_point expects a frame to look like this:
 *
 *      args        [_Thread_Handler expects no args, however]
 *      ------      (alignment boundary)
 * SP-> return_addr return here when _entry_point returns (which never happens)
 *
 *
 * Hence we must initialize the stack as follows:
 *
 *         [arg1          ]:  n/a
 *         [arg0 (aligned)]:  n/a
 *         [ret. addr     ]:  NULL
 * SP->    [jump-target   ]:  _entry_point
 *
 * When Context_switch returns it pops _entry_point from the
 * stack, and _entry_point then finds a standard frame layout.
 */


#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
                                   _isr, _entry_point, _is_fp, _tls_area ) \
  do { \
    uint32_t   _stack; \
    \
    if ( (_isr) ) (_the_context)->eflags = CPU_EFLAGS_INTERRUPTS_OFF; \
    else          (_the_context)->eflags = CPU_EFLAGS_INTERRUPTS_ON; \
    \
    _stack  = ((uint32_t)(_stack_base)) + (_size); \
    _stack &= ~ (CPU_STACK_ALIGNMENT - 1); \
    _stack -= 2*sizeof(proc_ptr*); /* see above for why we need to do this */ \
    *((proc_ptr *)(_stack)) = (_entry_point); \
    (_the_context)->ebp     = (void *) 0; \
    (_the_context)->esp     = (void *) _stack; \
  } while (0)
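
/* Worked example (an editor's illustration with assumed numbers):
 * _stack_base = 0x00100000, _size = 0x1000, CPU_STACK_ALIGNMENT = 16.
 *
 *   _stack  = 0x00100000 + 0x1000     -> 0x00101000
 *   _stack &= ~(16 - 1)               -> 0x00101000 (already aligned)
 *   _stack -= 2 * sizeof(proc_ptr*)   -> 0x00100ff8
 *
 * giving the layout pictured in the note above:
 *
 *   0x00100ffc:  return-address slot (never used; _entry_point
 *                must not return)
 *   0x00100ff8:  _entry_point, popped by the 'ret' that ends
 *                _CPU_Context_switch()/_CPU_Context_restore()
 *
 * and the context is seeded with esp = 0x00100ff8 and ebp = NULL.
 */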

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

#if defined(RTEMS_SMP)
  uint32_t _CPU_SMP_Initialize( void );

  bool _CPU_SMP_Start_processor( uint32_t cpu_index );

  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

  uint32_t _CPU_SMP_Get_current_processor( void );

  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

  static inline void _CPU_SMP_Processor_event_broadcast( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }

  static inline void _CPU_SMP_Processor_event_receive( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }
#endif
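
#if defined(RTEMS_SMP)
/* Usage sketch (an editor's illustration, not part of the original
 * header): the event broadcast/receive pair around a shared start-up
 * flag.  On i386 both are plain compiler barriers, so the receive side
 * forces a fresh read of the flag on every iteration.  The names are
 * hypothetical.
 */
static volatile bool _Sketch_secondary_ready;

static inline void sketch_signal_ready( void )
{
  _Sketch_secondary_ready = true;
  _CPU_SMP_Processor_event_broadcast();
}

static inline void sketch_wait_for_ready( void )
{
  while ( !_Sketch_secondary_ready ) {
    _CPU_SMP_Processor_event_receive();
  }
}
#endif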

#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

#define _CPU_Context_Initialize_fp( _fp_area ) \
  { \
    memcpy( *_fp_area, &_CPU_Null_fp_context, CPU_CONTEXT_FP_SIZE ); \
  }

/* end of Context handler macros */

/*
 *  Fatal Error manager macros
 *
 *  These macros perform the following functions:
 *    + disable interrupts and halt the CPU
 */

#define _CPU_Fatal_halt( _error ) \
  { \
    uint32_t _error_lvalue = ( _error ); \
    __asm__ volatile ( "cli ; \
                    movl %0,%%eax ; \
                    hlt" \
                    : "=r" ((_error_lvalue)) : "0" ((_error_lvalue)) \
    ); \
  }

#endif /* ASM */

/* end of Fatal Error manager macros */

/*
 *  Bitfield handler macros
 *
 *  These macros perform the following functions:
 *     + scan for the first set bit in a 16 bit bitfield (the i386 BSF
 *       instruction scans from the least significant bit)
 */

#define CPU_USE_GENERIC_BITFIELD_CODE FALSE
#define CPU_USE_GENERIC_BITFIELD_DATA FALSE

#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
  { \
    register uint16_t   __value_in_register = (_value); \
    \
    _output = 0; \
    \
    __asm__ volatile ( "bsfw    %0,%1 " \
                    : "=r" (__value_in_register), "=r" (_output) \
                    : "0"  (__value_in_register), "1"  (_output) \
    ); \
  }

/* end of Bitfield handler macros */
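
/* Usage sketch (an editor's illustration, not part of the original
 * header): bsfw yields the index of the least significant set bit, so
 * for 0x8001 the result is 0.  The function name is hypothetical.
 */
static inline unsigned int find_first_bit_sketch( void )
{
  unsigned int output;

  _CPU_Bitfield_Find_first_bit( 0x8001, output );  /* output == 0 */
  return output;  /* _CPU_Priority_bits_index() below is the identity */
}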

/*
 *  Priority handler macros
 *
 *  These macros perform the following functions:
 *    + return a mask with the bit for this major/minor portion of
 *      thread priority set
 *    + translate the bit number returned by "Bitfield_find_first_bit"
 *      into an index into the thread ready chain bit maps
 */

#define _CPU_Priority_Mask( _bit_number ) \
  ( 1 << (_bit_number) )

#define _CPU_Priority_bits_index( _priority ) \
  (_priority)

/* functions */

#ifndef ASM
/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 */

void _CPU_Initialize(void);

/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs a "raw" interrupt handler directly into the
 *  processor's vector table.
 */

void _CPU_ISR_install_raw_handler(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.
 */

void _CPU_ISR_install_vector(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_Thread_Idle_body
 *
 *  Use the halt instruction or low power mode of a particular i386 model.
 */

#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)

void *_CPU_Thread_Idle_body( uintptr_t ignored );

#endif /* CPU_PROVIDES_IDLE_THREAD_BODY */
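
/* Sketch (an editor's illustration, not the port's actual
 * implementation, which lives in the port sources): a halt-based idle
 * body.  'hlt' stops the processor until the next interrupt arrives,
 * reducing power consumption while the system is idle.
 */
#if 0
void *_CPU_Thread_Idle_body( uintptr_t ignored )
{
  (void) ignored;
  for ( ;; ) {
    __asm__ volatile ( "hlt" );
  }
}
#endif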

/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner and avoid stack conflicts.
 */

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;

/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 */

#ifdef __SSE__
#define _CPU_Context_save_fp(fp_context_pp) \
  do {                                      \
    __asm__ __volatile__(                   \
      "fstcw %0"                            \
      :"=m"((*(fp_context_pp))->fpucw)      \
    );                                      \
    __asm__ __volatile__(                   \
      "stmxcsr %0"                          \
      :"=m"((*(fp_context_pp))->mxcsr)      \
    );                                      \
  } while (0)
#else
void _CPU_Context_save_fp(
  Context_Control_fp **fp_context_ptr
);
#endif

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 */
#ifdef __SSE__
#define _CPU_Context_restore_fp(fp_context_pp) \
  do {                                         \
    __asm__ __volatile__(                      \
      "fldcw %0"                               \
      ::"m"((*(fp_context_pp))->fpucw)         \
      :"fpcr"                                  \
    );                                         \
    __builtin_ia32_ldmxcsr(_Thread_Executing->fp_context->mxcsr);  \
  } while (0)
#else
void _CPU_Context_restore_fp(
  Context_Control_fp **fp_context_ptr
);
#endif

#ifdef __SSE__
#define _CPU_Context_Initialization_at_thread_begin() \
  do {                                                \
    __asm__ __volatile__(                             \
      "finit"                                         \
      :                                               \
      :                                               \
      :"st","st(1)","st(2)","st(3)",                  \
       "st(4)","st(5)","st(6)","st(7)",               \
       "fpsr","fpcr"                                  \
    );                                                \
    if ( _Thread_Executing->fp_context ) {            \
      _CPU_Context_restore_fp(&_Thread_Executing->fp_context); \
    }                                                 \
  } while (0)
#endif

static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
{
  /* TODO */
}

static inline void _CPU_Context_validate( uintptr_t pattern )
{
  while (1) {
    /* TODO */
  }
}

void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

typedef uint32_t CPU_Counter_ticks;

CPU_Counter_ticks _CPU_Counter_read( void );

static inline CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
)
{
  return second - first;
}
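
/* Usage sketch (an editor's illustration, not part of the original
 * header): unsigned subtraction makes the difference wraparound-safe,
 * so even if the counter overflows between the two reads the elapsed
 * tick count is still correct.  The values are illustrative.
 */
static inline CPU_Counter_ticks counter_wrap_sketch( void )
{
  CPU_Counter_ticks first  = 0xfffffff0;  /* just before wraparound */
  CPU_Counter_ticks second = 0x00000010;  /* just after wraparound  */

  return _CPU_Counter_difference( second, first );  /* == 0x20 */
}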

#endif /* ASM */

#ifdef __cplusplus
}
#endif

#endif