source: rtems/cpukit/score/cpu/i386/rtems/score/cpu.h @ ca63ae2

4.115
Last change on this file since ca63ae2 was ca63ae2, checked in by Sebastian Huber <sebastian.huber@…>, on 06/13/13 at 13:41:21

smp: Add and use _CPU_SMP_Send_interrupt()

Delete bsp_smp_interrupt_cpu().

  • Property mode set to 100644
File size: 18.1 KB
Line 
1/**
2 * @file
3 *
4 * @brief Intel I386 CPU Dependent Source
5 *
6 * This include file contains information pertaining to the Intel
7 * i386 processor.
8 */
9
10/*
11 *  COPYRIGHT (c) 1989-2011.
12 *  On-Line Applications Research Corporation (OAR).
13 *
14 *  The license and distribution terms for this file may be
15 *  found in the file LICENSE in this distribution or at
16 *  http://www.rtems.com/license/LICENSE.
17 */
18
19#ifndef _RTEMS_SCORE_CPU_H
20#define _RTEMS_SCORE_CPU_H
21
22#ifndef ASM
23#include <string.h> /* for memcpy */
24#endif
25
26#ifdef __cplusplus
27extern "C" {
28#endif
29
30#include <rtems/score/types.h>
31#include <rtems/score/i386.h>
32
33#ifndef ASM
34#include <rtems/score/interrupts.h>     /* formerly in libcpu/cpu.h> */
35#include <rtems/score/registers.h>      /* formerly part of libcpu */
36#endif
37
/* conditional compilation parameters */

/* Thread dispatch is inlined; priority enqueue is not unrolled. */
#define CPU_INLINE_ENABLE_DISPATCH       TRUE
#define CPU_UNROLL_ENQUEUE_PRIORITY      FALSE

/*
 *  Does the CPU follow the simple vectored interrupt model?
 *
 *  If TRUE, then RTEMS allocates the vector table it internally manages.
 *  If FALSE, then the BSP is assumed to allocate and manage the vector
 *  table
 *
 *  Port Specific Information:
 *
 *  The PowerPC and x86 were the first to use the PIC interrupt model.
 *  They do not use the simple vectored interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

/*
 *  i386 has an RTEMS allocated and managed interrupt stack.
 */

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
#define CPU_ALLOCATE_INTERRUPT_STACK     TRUE

/*
 *  Does the RTEMS invoke the user's ISR with the vector number and
 *  a pointer to the saved interrupt frame (1) or just the vector
 *  number (0)?
 */

#define CPU_ISR_PASSES_FRAME_POINTER 0

/*
 *  Some family members have no FP, some have an FPU such as the i387
 *  for the i386, others have it built in (i486DX, Pentium).
 */

#ifdef __SSE__
/* With SSE enabled every task is treated as a floating point task and
 * FP context is switched eagerly (not deferred).
 */
#define CPU_HARDWARE_FP                  TRUE
#define CPU_SOFTWARE_FP                  FALSE

#define CPU_ALL_TASKS_ARE_FP             TRUE
#define CPU_IDLE_TASK_IS_FP              TRUE
#define CPU_USE_DEFERRED_FP_SWITCH       FALSE
#else /* __SSE__ */

#if ( I386_HAS_FPU == 1 )
#define CPU_HARDWARE_FP     TRUE    /* i387 for i386 */
#else
#define CPU_HARDWARE_FP     FALSE
#endif
#define CPU_SOFTWARE_FP     FALSE

#define CPU_ALL_TASKS_ARE_FP             FALSE
#define CPU_IDLE_TASK_IS_FP              FALSE
#define CPU_USE_DEFERRED_FP_SWITCH       TRUE
#endif /* __SSE__ */

#define CPU_STACK_GROWS_UP               FALSE
/* Intentionally empty: no special alignment attribute is required. */
#define CPU_STRUCTURE_ALIGNMENT

#define CPU_TIMESTAMP_USE_INT64_INLINE TRUE

/*
 *  Does this port provide a CPU dependent IDLE task implementation?
 *
 *  If TRUE, then the routine _CPU_Thread_Idle_body
 *  must be provided and is the default IDLE thread body instead of
 *  the generic _Thread_Idle_body.
 *
 *  If FALSE, then use the generic IDLE thread body if the BSP does
 *  not provide one.
 */

#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE

/*
 *  Define what is required to specify how the network to host conversion
 *  routines are handled.
 */

#define CPU_BIG_ENDIAN                           FALSE
#define CPU_LITTLE_ENDIAN                        TRUE
/* structures */

#ifndef ASM

/*
 *  Basic integer context for the i386 family.
 *
 *  Only the registers this port preserves across a context switch are
 *  stored here; the layout (field order and offsets) is relied upon by
 *  the assembly context switch code, so it must not be changed.
 */

typedef struct {
  uint32_t    eflags;   /* extended flags register                   */
  void       *esp;      /* extended stack pointer register           */
  void       *ebp;      /* extended base pointer register            */
  uint32_t    ebx;      /* extended bx register                      */
  uint32_t    esi;      /* extended source index register            */
  uint32_t    edi;      /* extended destination index register       */
}   Context_Control;

/* Obtain the saved stack pointer from an integer context. */
#define _CPU_Context_Get_SP( _context ) \
  (_context)->esp

/*
 *  FP context save area for the i387 numeric coprocessors.
 */
#ifdef __SSE__
/* All FPU and SSE registers are volatile; hence, as long
 * as we are within normally executing C code (including
 * a task switch) there is no need for saving/restoring
 * any of those registers.
 * We must save/restore the full FPU/SSE context across
 * interrupts and exceptions, however:
 *   -  after ISR execution a _Thread_Dispatch() may happen
 *      and it is therefore necessary to save the FPU/SSE
 *      registers to be restored when control is returned
 *      to the interrupted task.
 *   -  gcc may implicitly use FPU/SSE instructions in
 *      an ISR.
 *
 * Even though there is no explicit mentioning of the FPU
 * control word in the SYSV ABI (i386) being non-volatile
 * we maintain MXCSR and the FPU control-word for each task.
 */
typedef struct {
        uint32_t  mxcsr;   /* SSE control/status register            */
        uint16_t  fpucw;   /* x87 FPU control word                   */
} Context_Control_fp;

#else

typedef struct {
  uint8_t     fp_save_area[108];    /* context size area for I80387 */
                                    /*  28 bytes for environment    */
} Context_Control_fp;

#endif


/*
 *  The following structure defines the set of information saved
 *  on the current stack by RTEMS upon receipt of exceptions.
 *
 * idtIndex is either the interrupt number or the trap/exception number.
 * faultCode is the code pushed by the processor on some exceptions.
 *
 * Since the first registers are directly pushed by the CPU they
 * may not respect 16-byte stack alignment, which is, however,
 * mandatory for the SSE register area.
 * Therefore, these registers are stored at an aligned address
 * and a pointer is stored in the CPU_Exception_frame.
 * If the executive was compiled without SSE support then
 * this pointer is NULL.
 */

struct Context_Control_sse;

typedef struct {
  struct Context_Control_sse *fp_ctxt;  /* aligned SSE area; NULL without SSE */
  /* General registers; the order matches a "pusha" save.  esp0 is the
   * stack pointer value recorded by pusha — assumed, confirm against
   * the exception entry assembly.
   */
  uint32_t    edi;
  uint32_t    esi;
  uint32_t    ebp;
  uint32_t    esp0;
  uint32_t    ebx;
  uint32_t    edx;
  uint32_t    ecx;
  uint32_t    eax;
  uint32_t    idtIndex;    /* interrupt vector or trap/exception number */
  uint32_t    faultCode;   /* error code pushed by the CPU (if any)     */
  /* Pushed automatically by the CPU on an exception. */
  uint32_t    eip;
  uint32_t    cs;
  uint32_t    eflags;
} CPU_Exception_frame;

#ifdef __SSE__
/* Save area for the full FPU/SSE state.  The layout appears to match
 * the 512-byte FXSAVE/FXRSTOR memory image, which requires 16-byte
 * alignment (hence the aligned attribute below).
 */
typedef struct Context_Control_sse {
  uint16_t  fcw;         /* x87 control word                  */
  uint16_t  fsw;         /* x87 status word                   */
  uint8_t   ftw;         /* abridged x87 tag word             */
  uint8_t   res_1;
  uint16_t  fop;         /* last x87 opcode                   */
  uint32_t  fpu_ip;      /* last x87 instruction pointer      */
  uint16_t  cs;
  uint16_t  res_2;
  uint32_t  fpu_dp;      /* last x87 data pointer             */
  uint16_t  ds;
  uint16_t  res_3;
  uint32_t  mxcsr;
  uint32_t  mxcsr_mask;
  struct {
        uint8_t fpreg[10];   /* 80-bit x87/MMX register image */
        uint8_t res_4[ 6];
  } fp_mmregs[8];
  uint8_t   xmmregs[8][16];  /* XMM0..XMM7                    */
  uint8_t   res_5[224];      /* reserved tail of the area     */
} Context_Control_sse
__attribute__((aligned(16)))
;
#endif

/* Current CPU exception handler hook and its initialization routine. */
typedef void (*cpuExcHandlerType) (CPU_Exception_frame*);
extern cpuExcHandlerType _currentExcHandler;
extern void rtems_exception_init_mngt(void);

/*
 * This port does not pass any frame info to the
 * interrupt handler.
 */

typedef void CPU_Interrupt_frame;

/* Symbolic names for the Intel-defined exception vectors (0-18) plus an
 * RTEMS-specific pseudo vector (50) used to enter the remote debugger.
 */
typedef enum {
  I386_EXCEPTION_DIVIDE_BY_ZERO      = 0,
  I386_EXCEPTION_DEBUG               = 1,
  I386_EXCEPTION_NMI                 = 2,
  I386_EXCEPTION_BREAKPOINT          = 3,
  I386_EXCEPTION_OVERFLOW            = 4,
  I386_EXCEPTION_BOUND               = 5,
  I386_EXCEPTION_ILLEGAL_INSTR       = 6,
  I386_EXCEPTION_MATH_COPROC_UNAVAIL = 7,
  I386_EXCEPTION_DOUBLE_FAULT        = 8,
  I386_EXCEPTION_I386_COPROC_SEG_ERR = 9,
  I386_EXCEPTION_INVALID_TSS         = 10,
  I386_EXCEPTION_SEGMENT_NOT_PRESENT = 11,
  I386_EXCEPTION_STACK_SEGMENT_FAULT = 12,
  I386_EXCEPTION_GENERAL_PROT_ERR    = 13,
  I386_EXCEPTION_PAGE_FAULT          = 14,
  I386_EXCEPTION_INTEL_RES15         = 15,
  I386_EXCEPTION_FLOAT_ERROR         = 16,
  I386_EXCEPTION_ALIGN_CHECK         = 17,
  I386_EXCEPTION_MACHINE_CHECK       = 18,
  I386_EXCEPTION_ENTER_RDBG          = 50     /* to enter manually RDBG */

} Intel_symbolic_exception_name;


/*
 *  context size area for floating point
 *
 *  NOTE:  This is out of place on the i386 to avoid a forward reference.
 */

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

/* variables */

/* Pristine FP context copied into each task's FP area on creation
 * (see _CPU_Context_Initialize_fp below).
 */
SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context;

#endif /* ASM */
291
/* constants */

/*
 *  This defines the number of levels and the mask used to pick those
 *  bits out of a thread mode.  On i386 there are only two levels:
 *  interrupts enabled (0) and disabled (1).
 */

#define CPU_MODES_INTERRUPT_LEVEL  0x00000001 /* interrupt level in mode */
#define CPU_MODES_INTERRUPT_MASK   0x00000001 /* interrupt level in mode */

/*
 *  extra stack required by the MPCI receive server thread
 */

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 1024

/*
 *  i386 family supports 256 distinct vectors.
 */

#define CPU_INTERRUPT_NUMBER_OF_VECTORS      256
#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER  (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1)

/*
 *  This is defined if the port has a special way to report the ISR nesting
 *  level.  Most ports maintain the variable _ISR_Nest_level.
 */

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/*
 *  Minimum size of a thread's stack.
 */

#define CPU_STACK_MINIMUM_SIZE          4096

/* Pointers are 32 bits (4 bytes) on this port. */
#define CPU_SIZEOF_POINTER 4

/*
 *  i386 is pretty tolerant of alignment.  Just put things on 4 byte boundaries.
 */

#define CPU_ALIGNMENT                    4
#define CPU_HEAP_ALIGNMENT               CPU_ALIGNMENT
#define CPU_PARTITION_ALIGNMENT          CPU_ALIGNMENT

/*
 *  On i386 thread stacks require no further alignment after allocation
 *  from the Workspace. However, since gcc maintains 16-byte alignment
 *  we try to respect that. If you find an option to let gcc squeeze
 *  the stack more tightly then setting CPU_STACK_ALIGNMENT to 16 still
 *  doesn't waste much space since this only determines the *initial*
 *  alignment.
 */

#define CPU_STACK_ALIGNMENT             16
348
/* macros */

#ifndef ASM
/*
 *  ISR handler macros
 *
 *  These macros perform the following functions:
 *     + initialize the RTEMS vector table
 *     + disable all maskable CPU interrupts
 *     + restore previous interrupt level (enable)
 *     + temporarily restore interrupts (flash)
 *     + set a particular level
 */

#define _CPU_ISR_Disable( _level ) i386_disable_interrupts( _level )

#define _CPU_ISR_Enable( _level )  i386_enable_interrupts( _level )

#define _CPU_ISR_Flash( _level )   i386_flash_interrupts( _level )

/* Only two interrupt levels exist on i386: any non-zero level disables
 * maskable interrupts (cli), level zero enables them (sti).
 */
#define _CPU_ISR_Set_level( _new_level ) \
  { \
    if ( _new_level ) __asm__ volatile ( "cli" ); \
    else              __asm__ volatile ( "sti" ); \
  }

uint32_t   _CPU_ISR_Get_level( void );

/*  Make sure interrupt stack has space for ISR
 *  'vector' arg at the top and that it is aligned
 *  properly.
 */

#define _CPU_Interrupt_stack_setup( _lo, _hi )  \
        do {                                        \
                _hi = (void*)(((uintptr_t)(_hi) - 4) & ~ (CPU_STACK_ALIGNMENT - 1)); \
        } while (0)

#endif /* ASM */

/* end of ISR handler macros */

/*
 *  Context handler macros
 *
 *  These macros perform the following functions:
 *     + initialize a context area
 *     + restart the current thread
 *     + calculate the initial pointer into a FP context area
 *     + initialize an FP context area
 */

/* Initial EFLAGS images: bit 1 is the always-set reserved bit, 0x3000
 * selects IOPL 3, and 0x200 is the interrupt enable (IF) flag.
 */
#define CPU_EFLAGS_INTERRUPTS_ON  0x00003202
#define CPU_EFLAGS_INTERRUPTS_OFF 0x00003002
403
#ifndef ASM

/*
 * Stack alignment note:
 *
 * We want the stack to look to the '_entry_point' routine
 * like an ordinary stack frame as if '_entry_point' was
 * called from C-code.
 * Note that '_entry_point' is jumped-to by the 'ret'
 * instruction returning from _CPU_Context_switch() or
 * _CPU_Context_restore() thus popping the _entry_point
 * from the stack.
 * However, _entry_point expects a frame to look like this:
 *
 *      args        [_Thread_Handler expects no args, however]
 *      ------      (alignment boundary)
 * SP-> return_addr return here when _entry_point returns which (never happens)
 *
 *
 * Hence we must initialize the stack as follows
 *
 *         [arg1          ]:  n/a
 *         [arg0 (aligned)]:  n/a
 *         [ret. addr     ]:  NULL
 * SP->    [jump-target   ]:  _entry_point
 *
 * When Context_switch returns it pops the _entry_point from
 * the stack which then finds a standard layout.
 */


/* Initialize a task context: select the EFLAGS image from _isr (the
 * initial interrupt level), align the stack top, reserve the two slots
 * described above, and plant _entry_point as the jump target.
 * _is_fp is ignored by this port.
 */
#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
                                   _isr, _entry_point, _is_fp ) \
  do { \
    uint32_t   _stack; \
    \
    if ( (_isr) ) (_the_context)->eflags = CPU_EFLAGS_INTERRUPTS_OFF; \
    else          (_the_context)->eflags = CPU_EFLAGS_INTERRUPTS_ON; \
    \
    _stack  = ((uint32_t)(_stack_base)) + (_size); \
        _stack &= ~ (CPU_STACK_ALIGNMENT - 1); \
    _stack -= 2*sizeof(proc_ptr*); /* see above for why we need to do this */ \
    *((proc_ptr *)(_stack)) = (_entry_point); \
    (_the_context)->ebp     = (void *) 0; \
    (_the_context)->esp     = (void *) _stack; \
  } while (0)
450
/* Restart the executing thread by simply restoring its context. */
#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

#if defined(RTEMS_SMP)
  /* Starting the first task on a processor is a plain context restore. */
  #define _CPU_Context_switch_to_first_task_smp( _the_context ) \
     _CPU_Context_restore( (_the_context) );

  RTEMS_COMPILER_PURE_ATTRIBUTE uint32_t _CPU_SMP_Get_current_processor( void );

  /* Issue an inter-processor interrupt to the given processor. */
  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

  /* No special event instruction is used on this port; these are
   * compiler memory barriers only.
   */
  static inline void _CPU_Processor_event_broadcast( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }

  static inline void _CPU_Processor_event_receive( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }
#endif
472
/* Compute the starting address of the FP context area within _base. */
#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

/* Initialize an FP context area by copying in the null FP context. */
#define _CPU_Context_Initialize_fp( _fp_area ) \
  { \
    memcpy( *_fp_area, &_CPU_Null_fp_context, CPU_CONTEXT_FP_SIZE ); \
  }
480
/* end of Context handler macros */

/*
 *  Fatal Error manager macros
 *
 *  These macros perform the following functions:
 *    + disable interrupts and halt the CPU
 */

/* Disable interrupts, copy the error code into EAX (so it is visible
 * to a debugger after the halt), then execute hlt.
 * NOTE(review): "eax" is not listed as a clobber and the "=r"/"0"
 * constraint pair is unusual for a value that is only read — confirm
 * the constraints against the GCC extended asm rules.
 */
#define _CPU_Fatal_halt( _error ) \
  { \
    __asm__ volatile ( "cli ; \
                    movl %0,%%eax ; \
                    hlt" \
                    : "=r" ((_error)) : "0" ((_error)) \
    ); \
  }

#endif /* ASM */
500
/* end of Fatal Error manager macros */

/*
 *  Bitfield handler macros
 *
 *  These macros perform the following functions:
 *     + scan for the highest numbered (MSB) set in a 16 bit bitfield
 */

#define CPU_USE_GENERIC_BITFIELD_CODE FALSE
#define CPU_USE_GENERIC_BITFIELD_DATA FALSE

/* Uses bsfw, which scans from the least significant bit; the RTEMS
 * priority bit maps are arranged so that this yields the desired bit.
 * NOTE: _value must be non-zero — bsf leaves the destination register
 * undefined when the source is zero.
 */
#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
  { \
    register uint16_t   __value_in_register = (_value); \
    \
    _output = 0; \
    \
    __asm__ volatile ( "bsfw    %0,%1 " \
                    : "=r" (__value_in_register), "=r" (_output) \
                    : "0"  (__value_in_register), "1"  (_output) \
    ); \
  }

/* end of Bitfield handler macros */

/*
 *  Priority handler macros
 *
 *  These macros perform the following functions:
 *    + return a mask with the bit for this major/minor portion of
 *      of thread priority set.
 *    + translate the bit number returned by "Bitfield_find_first_bit"
 *      into an index into the thread ready chain bit maps
 */

#define _CPU_Priority_Mask( _bit_number ) \
  ( 1 << (_bit_number) )

#define _CPU_Priority_bits_index( _priority ) \
  (_priority)
542
/* functions */

#ifndef ASM
/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 */

void _CPU_Initialize(void);

/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs a "raw" interrupt handler directly into the
 *  processor's vector table.  The previous handler is returned through
 *  old_handler.
 */

void _CPU_ISR_install_raw_handler(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.  The previous handler is
 *  returned through old_handler.
 */

void _CPU_ISR_install_vector(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_Thread_Idle_body
 *
 *  Use the halt instruction of low power mode of a particular i386 model.
 */

#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)

void *_CPU_Thread_Idle_body( uintptr_t ignored );

#endif /* CPU_PROVIDES_IDLE_THREAD_BODY */

/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner and avoid stack conflicts.  It does not return.
 */

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;
612
/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 */

#ifdef __SSE__
/* With SSE only the FPU control word and MXCSR are saved here; the
 * full FPU/SSE register state is volatile across calls and is saved
 * across interrupts/exceptions instead (see Context_Control_fp above).
 */
#define _CPU_Context_save_fp(fp_context_pp) \
  do {                                      \
    __asm__ __volatile__(                   \
      "fstcw %0"                            \
      :"=m"((*(fp_context_pp))->fpucw)      \
    );                                      \
        __asm__ __volatile__(                   \
      "stmxcsr %0"                          \
      :"=m"((*(fp_context_pp))->mxcsr)      \
    );                                      \
  } while (0)
#else
void _CPU_Context_save_fp(
  Context_Control_fp **fp_context_ptr
);
#endif

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 *
 *  NOTE(review): in the SSE variant the FPU control word is reloaded
 *  from the macro argument, but MXCSR is reloaded from
 *  _Thread_Executing->fp_context — confirm the two always refer to the
 *  same context at every call site.
 */
#ifdef __SSE__
#define _CPU_Context_restore_fp(fp_context_pp) \
  do {                                         \
    __asm__ __volatile__(                      \
      "fldcw %0"                               \
      ::"m"((*(fp_context_pp))->fpucw)         \
      :"fpcr"                                  \
    );                                         \
    __builtin_ia32_ldmxcsr(_Thread_Executing->fp_context->mxcsr);  \
  } while (0)
#else
void _CPU_Context_restore_fp(
  Context_Control_fp **fp_context_ptr
);
#endif
657
#ifdef __SSE__
/* At thread start: reset the x87 FPU with finit (clobbering the whole
 * x87 stack and status/control words), then reload the thread's saved
 * control words if it already owns an FP context.
 */
#define _CPU_Context_Initialization_at_thread_begin() \
  do {                                                \
    __asm__ __volatile__(                             \
      "finit"                                         \
      :                                               \
      :                                               \
      :"st","st(1)","st(2)","st(3)",                  \
       "st(4)","st(5)","st(6)","st(7)",               \
       "fpsr","fpcr"                                  \
    );                                                \
        if ( _Thread_Executing->fp_context ) {            \
          _CPU_Context_restore_fp(&_Thread_Executing->fp_context); \
   }                                                  \
  } while (0)
#endif
674
/*
 *  _CPU_Context_volatile_clobber
 *
 *  Port hook that should overwrite the volatile registers with the
 *  given pattern (presumably for context validation tests — confirm
 *  against the generic score code).  Not implemented on this port yet;
 *  the cast documents the intentionally unused parameter and keeps
 *  -Wunused-parameter quiet until an implementation exists.
 */
static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
{
  (void) pattern; /* TODO: clobber the volatile registers with pattern */
}
679
/*
 *  _CPU_Context_validate
 *
 *  Port hook that should repeatedly verify the register contents
 *  against the given pattern.  Not implemented on this port yet: it
 *  currently spins forever without checking anything, and it never
 *  returns.
 */
static inline void _CPU_Context_validate( uintptr_t pattern )
{
  while (1) {
    /* TODO */
  }
}

/* Print the contents of the given exception frame. */
void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
688
689#endif /* ASM */
690
691#ifdef __cplusplus
692}
693#endif
694
695#endif
Note: See TracBrowser for help on using the repository browser.