source: rtems/cpukit/score/cpu/i386/rtems/score/cpu.h @ 815994f

Last change on this file since 815994f was 815994f, checked in by Sebastian Huber <sebastian.huber@…>, on 11/25/12 at 16:48:11

score: Add CPU_Exception_frame

Add CPU port type CPU_Exception_frame and function
_CPU_Exception_frame_print().

The CPU ports of avr, bfin, h8300, lm32, m32c, m32r, m68k, nios2, sh,
sparc64, and v850 use an empty default implementation of
_CPU_Exception_frame_print().

Add rtems_exception_frame and rtems_exception_frame_print().

Add RTEMS_FATAL_SOURCE_EXCEPTION for CPU exceptions. Use rtems_fatal()
with source RTEMS_FATAL_SOURCE_EXCEPTION in CPU ports of i386, powerpc,
and sparc for unexpected exceptions.

Add third parameter to RTEMS_BSP_CLEANUP_OPTIONS() which controls the
BSP_PRINT_EXCEPTION_CONTEXT define used in the default
bsp_fatal_extension().

Add test sptests/spfatal26.
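
As a rough sketch of how the new pieces fit together (not part of this change set), a user-supplied fatal extension could forward CPU exceptions to the new print routine. The extension name my_fatal_extension is hypothetical, and the signature assumes the fatal extension interface of this development line:

#include <rtems.h>

static void my_fatal_extension(
  rtems_fatal_source source,
  bool               is_internal,
  rtems_fatal_code   code
)
{
  (void) is_internal;

  /* For RTEMS_FATAL_SOURCE_EXCEPTION the code value carries the frame */
  if ( source == RTEMS_FATAL_SOURCE_EXCEPTION ) {
    rtems_exception_frame_print( (const rtems_exception_frame *) code );
  }
}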

  • Property mode set to 100644
File size: 17.8 KB
/**
 * @file rtems/score/cpu.h
 */

/*
 *  This include file contains information pertaining to the Intel
 *  i386 processor.
 *
 *  COPYRIGHT (c) 1989-2011.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

#ifndef ASM
#include <string.h> /* for memcpy */
#endif

#ifdef __cplusplus
extern "C" {
#endif

#include <rtems/score/types.h>
#include <rtems/score/i386.h>

#ifndef ASM
#include <rtems/score/interrupts.h>     /* formerly in libcpu/cpu.h */
#include <rtems/score/registers.h>      /* formerly part of libcpu */
#endif

/* conditional compilation parameters */

#define CPU_INLINE_ENABLE_DISPATCH       TRUE
#define CPU_UNROLL_ENQUEUE_PRIORITY      FALSE

/*
 *  Does the CPU follow the simple vectored interrupt model?
 *
 *  If TRUE, then RTEMS allocates the vector table it internally manages.
 *  If FALSE, then the BSP is assumed to allocate and manage the vector
 *  table.
 *
 *  i386 Specific Information:
 *
 *  The PowerPC and x86 were the first to use the PIC interrupt model.
 *  They do not use the simple vectored interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

/*
 *  i386 has an RTEMS allocated and managed interrupt stack.
 */

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
#define CPU_ALLOCATE_INTERRUPT_STACK     TRUE

/*
 *  Does RTEMS invoke the user's ISR with the vector number and
 *  a pointer to the saved interrupt frame (1) or just the vector
 *  number (0)?
 */

#define CPU_ISR_PASSES_FRAME_POINTER 0

/*
 *  Some family members have no FP, some have an FPU such as the i387
 *  for the i386, others have it built in (i486DX, Pentium).
 */

#ifdef __SSE__
#define CPU_HARDWARE_FP                  TRUE
#define CPU_SOFTWARE_FP                  FALSE

#define CPU_ALL_TASKS_ARE_FP             TRUE
#define CPU_IDLE_TASK_IS_FP              TRUE
#define CPU_USE_DEFERRED_FP_SWITCH       FALSE
#else /* __SSE__ */

#if ( I386_HAS_FPU == 1 )
#define CPU_HARDWARE_FP     TRUE    /* i387 for i386 */
#else
#define CPU_HARDWARE_FP     FALSE
#endif
#define CPU_SOFTWARE_FP     FALSE

#define CPU_ALL_TASKS_ARE_FP             FALSE
#define CPU_IDLE_TASK_IS_FP              FALSE
#define CPU_USE_DEFERRED_FP_SWITCH       TRUE
#endif /* __SSE__ */

#define CPU_STACK_GROWS_UP               FALSE
#define CPU_STRUCTURE_ALIGNMENT

#define CPU_TIMESTAMP_USE_INT64_INLINE TRUE

/*
 *  Does this port provide a CPU dependent IDLE task implementation?
 *
 *  If TRUE, then the routine _CPU_Thread_Idle_body must be provided
 *  and is used as the default IDLE thread body instead of the generic
 *  implementation.
 *
 *  If FALSE, then use the generic IDLE thread body if the BSP does
 *  not provide one.
 */

#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE

/*
 *  Define what is required to specify how the network to host conversion
 *  routines are handled.
 */

#define CPU_BIG_ENDIAN                           FALSE
#define CPU_LITTLE_ENDIAN                        TRUE

/* structures */

#ifndef ASM

/*
 *  Basic integer context for the i386 family.
 */

typedef struct {
  uint32_t    eflags;   /* extended flags register                   */
  void       *esp;      /* extended stack pointer register           */
  void       *ebp;      /* extended base pointer register            */
  uint32_t    ebx;      /* extended bx register                      */
  uint32_t    esi;      /* extended source index register            */
  uint32_t    edi;      /* extended destination index register       */
}   Context_Control;

#define _CPU_Context_Get_SP( _context ) \
  (_context)->esp
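
/*
 * Illustrative sketch (not part of the original header): the generic
 * layers can use _CPU_Context_Get_SP() to read the saved stack pointer
 * of a context, e.g. for stack usage diagnostics.  The helper name
 * demo_context_stack_pointer is hypothetical.
 */
static inline void *demo_context_stack_pointer( const Context_Control *context )
{
  return _CPU_Context_Get_SP( context );
}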

/*
 *  FP context save area for the i387 numeric coprocessors.
 */
#ifdef __SSE__
/* All FPU and SSE registers are volatile; hence, as long
 * as we are within normally executing C code (including
 * a task switch) there is no need for saving/restoring
 * any of those registers.
 * We must save/restore the full FPU/SSE context across
 * interrupts and exceptions, however:
 *   -  after ISR execution a _Thread_Dispatch() may happen
 *      and it is therefore necessary to save the FPU/SSE
 *      registers to be restored when control is returned
 *      to the interrupted task.
 *   -  gcc may implicitly use FPU/SSE instructions in
 *      an ISR.
 *
 * Even though the SYSV ABI (i386) does not explicitly state that the
 * FPU control word is non-volatile, we maintain MXCSR and the FPU
 * control word for each task.
 */
typedef struct {
        uint32_t  mxcsr;
        uint16_t  fpucw;
} Context_Control_fp;

#else

typedef struct {
  uint8_t     fp_save_area[108];    /* context size area for I80387 */
                                    /*  28 bytes for environment    */
} Context_Control_fp;

#endif


/*
 *  The following structure defines the set of information saved
 *  on the current stack by RTEMS upon receipt of exceptions.
 *
 * idtIndex is either the interrupt number or the trap/exception number.
 * faultCode is the code pushed by the processor on some exceptions.
 *
 * Since the first registers are directly pushed by the CPU they
 * may not respect 16-byte stack alignment, which is, however,
 * mandatory for the SSE register area.
 * Therefore, these registers are stored at an aligned address
 * and a pointer is stored in the CPU_Exception_frame.
 * If the executive was compiled without SSE support then
 * this pointer is NULL.
 */

struct Context_Control_sse;

typedef struct {
  struct Context_Control_sse *fp_ctxt;
  uint32_t    edi;
  uint32_t    esi;
  uint32_t    ebp;
  uint32_t    esp0;
  uint32_t    ebx;
  uint32_t    edx;
  uint32_t    ecx;
  uint32_t    eax;
  uint32_t    idtIndex;
  uint32_t    faultCode;
  uint32_t    eip;
  uint32_t    cs;
  uint32_t    eflags;
} CPU_Exception_frame;

#ifdef __SSE__
typedef struct Context_Control_sse {
  uint16_t  fcw;
  uint16_t  fsw;
  uint8_t   ftw;
  uint8_t   res_1;
  uint16_t  fop;
  uint32_t  fpu_ip;
  uint16_t  cs;
  uint16_t  res_2;
  uint32_t  fpu_dp;
  uint16_t  ds;
  uint16_t  res_3;
  uint32_t  mxcsr;
  uint32_t  mxcsr_mask;
  struct {
        uint8_t fpreg[10];
        uint8_t res_4[ 6];
  } fp_mmregs[8];
  uint8_t   xmmregs[8][16];
  uint8_t   res_5[224];
} Context_Control_sse
__attribute__((aligned(16)))
;
#endif

typedef void (*cpuExcHandlerType) (CPU_Exception_frame*);
extern cpuExcHandlerType _currentExcHandler;
extern void rtems_exception_init_mngt(void);
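
/*
 * Illustrative sketch (not part of the original header): a custom
 * exception handler matching cpuExcHandlerType.  Whether assigning
 * _currentExcHandler directly is appropriate depends on the BSP; that
 * is only an assumption of this compiled-out example (printk() would
 * need <rtems/bspIo.h>).
 */
#if 0
static void demo_exception_handler( CPU_Exception_frame *frame )
{
  printk(
    "exception %lu, fault code %lu, eip 0x%08lx\n",
    (unsigned long) frame->idtIndex,
    (unsigned long) frame->faultCode,
    (unsigned long) frame->eip
  );
}

static void demo_install_exception_handler( void )
{
  rtems_exception_init_mngt();
  _currentExcHandler = demo_exception_handler;
}
#endif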

/*
 * This port does not pass any frame info to the
 * interrupt handler.
 */

typedef void CPU_Interrupt_frame;

typedef enum {
  I386_EXCEPTION_DIVIDE_BY_ZERO      = 0,
  I386_EXCEPTION_DEBUG               = 1,
  I386_EXCEPTION_NMI                 = 2,
  I386_EXCEPTION_BREAKPOINT          = 3,
  I386_EXCEPTION_OVERFLOW            = 4,
  I386_EXCEPTION_BOUND               = 5,
  I386_EXCEPTION_ILLEGAL_INSTR       = 6,
  I386_EXCEPTION_MATH_COPROC_UNAVAIL = 7,
  I386_EXCEPTION_DOUBLE_FAULT        = 8,
  I386_EXCEPTION_I386_COPROC_SEG_ERR = 9,
  I386_EXCEPTION_INVALID_TSS         = 10,
  I386_EXCEPTION_SEGMENT_NOT_PRESENT = 11,
  I386_EXCEPTION_STACK_SEGMENT_FAULT = 12,
  I386_EXCEPTION_GENERAL_PROT_ERR    = 13,
  I386_EXCEPTION_PAGE_FAULT          = 14,
  I386_EXCEPTION_INTEL_RES15         = 15,
  I386_EXCEPTION_FLOAT_ERROR         = 16,
  I386_EXCEPTION_ALIGN_CHECK         = 17,
  I386_EXCEPTION_MACHINE_CHECK       = 18,
  I386_EXCEPTION_ENTER_RDBG          = 50     /* to enter RDBG manually */

} Intel_symbolic_exception_name;


/*
 *  context size area for floating point
 *
 *  NOTE:  This is out of place on the i386 to avoid a forward reference.
 */

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

/* variables */

SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context;

#endif /* ASM */

/* constants */

/*
 *  This defines the number of levels and the mask used to pick those
 *  bits out of a thread mode.
 */

#define CPU_MODES_INTERRUPT_LEVEL  0x00000001 /* interrupt level in mode */
#define CPU_MODES_INTERRUPT_MASK   0x00000001 /* interrupt level in mode */

/*
 *  extra stack required by the MPCI receive server thread
 */

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 1024

/*
 *  i386 family supports 256 distinct vectors.
 */

#define CPU_INTERRUPT_NUMBER_OF_VECTORS      256
#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER  (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1)

/*
 *  This is defined if the port has a special way to report the ISR nesting
 *  level.  Most ports maintain the variable _ISR_Nest_level.
 */

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/*
 *  Minimum size of a thread's stack.
 */

#define CPU_STACK_MINIMUM_SIZE          4096

#define CPU_SIZEOF_POINTER 4

/*
 *  i386 is pretty tolerant of alignment.  Just put things on 4 byte boundaries.
 */

#define CPU_ALIGNMENT                    4
#define CPU_HEAP_ALIGNMENT               CPU_ALIGNMENT
#define CPU_PARTITION_ALIGNMENT          CPU_ALIGNMENT

/*
 *  On i386 thread stacks require no further alignment after allocation
 *  from the Workspace. However, since gcc maintains 16-byte alignment
 *  we try to respect that. If you find an option to let gcc squeeze
 *  the stack more tightly then setting CPU_STACK_ALIGNMENT to 16 still
 *  doesn't waste much space since this only determines the *initial*
 *  alignment.
 */

#define CPU_STACK_ALIGNMENT             16

/* macros */

#ifndef ASM
/*
 *  ISR handler macros
 *
 *  These macros perform the following functions:
 *     + initialize the RTEMS vector table
 *     + disable all maskable CPU interrupts
 *     + restore previous interrupt level (enable)
 *     + temporarily restore interrupts (flash)
 *     + set a particular level
 */

#define _CPU_ISR_Disable( _level ) i386_disable_interrupts( _level )

#define _CPU_ISR_Enable( _level )  i386_enable_interrupts( _level )

#define _CPU_ISR_Flash( _level )   i386_flash_interrupts( _level )

#define _CPU_ISR_Set_level( _new_level ) \
  { \
    if ( _new_level ) __asm__ volatile ( "cli" ); \
    else              __asm__ volatile ( "sti" ); \
  }

uint32_t   _CPU_ISR_Get_level( void );
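
/*
 * Illustrative sketch (not part of the original header): the usual
 * pattern for a short critical section built from the macros above.
 * The function name demo_critical_section is hypothetical.
 */
static inline void demo_critical_section( void )
{
  uint32_t level;

  _CPU_ISR_Disable( level );  /* mask interrupts, remember previous state */
  /* ... access data shared with interrupt handlers ... */
  _CPU_ISR_Flash( level );    /* momentarily let pending interrupts in */
  /* ... continue the critical section ... */
  _CPU_ISR_Enable( level );   /* restore the saved interrupt state */
}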

/*  Make sure interrupt stack has space for ISR
 *  'vector' arg at the top and that it is aligned
 *  properly.
 */

#define _CPU_Interrupt_stack_setup( _lo, _hi )  \
        do {                                        \
                _hi = (void*)(((uintptr_t)(_hi) - 4) & ~ (CPU_STACK_ALIGNMENT - 1)); \
        } while (0)

#endif /* ASM */

/* end of ISR handler macros */

/*
 *  Context handler macros
 *
 *  These macros perform the following functions:
 *     + initialize a context area
 *     + restart the current thread
 *     + calculate the initial pointer into a FP context area
 *     + initialize an FP context area
 */

#define CPU_EFLAGS_INTERRUPTS_ON  0x00003202
#define CPU_EFLAGS_INTERRUPTS_OFF 0x00003002

#ifndef ASM

/*
 * Stack alignment note:
 *
 * We want the stack to look to the '_entry_point' routine
 * like an ordinary stack frame as if '_entry_point' was
 * called from C-code.
 * Note that '_entry_point' is jumped-to by the 'ret'
 * instruction returning from _CPU_Context_switch() or
 * _CPU_Context_restore(), thus popping the _entry_point
 * from the stack.
 * However, _entry_point expects a frame to look like this:
 *
 *      args        [_Thread_Handler expects no args, however]
 *      ------      (alignment boundary)
 * SP-> return_addr return here when _entry_point returns (which never happens)
 *
 *
 * Hence we must initialize the stack as follows:
 *
 *         [arg1          ]:  n/a
 *         [arg0 (aligned)]:  n/a
 *         [ret. addr     ]:  NULL
 * SP->    [jump-target   ]:  _entry_point
 *
 * When _CPU_Context_switch() returns, it pops _entry_point from the
 * stack, and _entry_point then finds a standard stack layout.
 */


#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
                                   _isr, _entry_point, _is_fp ) \
  do { \
    uint32_t   _stack; \
    \
    if ( (_isr) ) (_the_context)->eflags = CPU_EFLAGS_INTERRUPTS_OFF; \
    else          (_the_context)->eflags = CPU_EFLAGS_INTERRUPTS_ON; \
    \
    _stack  = ((uint32_t)(_stack_base)) + (_size); \
    _stack &= ~ (CPU_STACK_ALIGNMENT - 1); \
    _stack -= 2*sizeof(proc_ptr*); /* see above for why we need to do this */ \
    *((proc_ptr *)(_stack)) = (_entry_point); \
    (_the_context)->ebp     = (void *) 0; \
    (_the_context)->esp     = (void *) _stack; \
  } while (0)
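
/*
 * Illustrative sketch (not part of the original header): how the macro
 * above might be used to set up a context for a new thread of execution.
 * The names demo_stack, demo_entry and demo_initialize_context are
 * assumptions of this example, which is why it is compiled out.
 */
#if 0
static char demo_stack[ CPU_STACK_MINIMUM_SIZE ]
  __attribute__(( aligned( CPU_STACK_ALIGNMENT ) ));

static void demo_entry( void )
{
  for ( ;; ) {
    /* thread body */
  }
}

static void demo_initialize_context( Context_Control *context )
{
  _CPU_Context_Initialize(
    context,                /* context area to fill in                */
    demo_stack,             /* base of the stack area                 */
    sizeof( demo_stack ),   /* size of the stack area                 */
    0,                      /* ISR level 0: interrupts enabled        */
    (proc_ptr) demo_entry,  /* entry point reached by the 'ret'       */
    0                       /* not a floating point thread            */
  );
}
#endif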

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

#if defined(RTEMS_SMP)
  #define _CPU_Context_switch_to_first_task_smp( _the_context ) \
     _CPU_Context_restore( (_the_context) );

  /* atomic exchange using the lock prefix */
  #define SMP_CPU_SWAP( _address, _value, _previous ) \
    do { \
      asm volatile("lock; xchgl %0, %1" : \
        "+m" (*_address), "=a" (_previous) : \
        "1" (_value) : \
        "cc"); \
    } while (0)
#endif

#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

#define _CPU_Context_Initialize_fp( _fp_area ) \
  { \
    memcpy( *_fp_area, &_CPU_Null_fp_context, CPU_CONTEXT_FP_SIZE ); \
  }

/* end of Context handler macros */

/*
 *  Fatal Error manager macros
 *
 *  These macros perform the following functions:
 *    + disable interrupts and halt the CPU
 */

#define _CPU_Fatal_halt( _error ) \
  { \
    __asm__ volatile ( "cli ; \
                    movl %0,%%eax ; \
                    hlt" \
                    : "=r" ((_error)) : "0" ((_error)) \
    ); \
  }

#endif /* ASM */

/* end of Fatal Error manager macros */

/*
 *  Bitfield handler macros
 *
 *  These macros perform the following functions:
 *     + scan for the highest numbered (MSB) bit set in a 16 bit bitfield
 */

#define CPU_USE_GENERIC_BITFIELD_CODE FALSE
#define CPU_USE_GENERIC_BITFIELD_DATA FALSE

#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
  { \
    register uint16_t   __value_in_register = (_value); \
    \
    _output = 0; \
    \
    __asm__ volatile ( "bsfw    %0,%1 " \
                    : "=r" (__value_in_register), "=r" (_output) \
                    : "0"  (__value_in_register), "1"  (_output) \
    ); \
  }
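
/*
 * Illustrative sketch (not part of the original header): a plain C
 * routine computing the same result as the bsfw-based macro above,
 * i.e. the index of the least significant set bit of a non-zero
 * 16-bit value.  The helper name demo_find_first_bit is hypothetical.
 */
static inline unsigned int demo_find_first_bit( uint16_t value )
{
  unsigned int bit = 0;

  /* precondition: value != 0, just as for the macro */
  while ( ( value & 1 ) == 0 ) {
    value >>= 1;
    ++bit;
  }

  return bit;
}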

/* end of Bitfield handler macros */

/*
 *  Priority handler macros
 *
 *  These macros perform the following functions:
 *    + return a mask with the bit for this major/minor portion of
 *      thread priority set.
 *    + translate the bit number returned by "Bitfield_find_first_bit"
 *      into an index into the thread ready chain bit maps
 */

#define _CPU_Priority_Mask( _bit_number ) \
  ( 1 << (_bit_number) )

#define _CPU_Priority_bits_index( _priority ) \
  (_priority)

/* functions */

#ifndef ASM
/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 */

void _CPU_Initialize(void);

/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs a "raw" interrupt handler directly into the
 *  processor's vector table.
 */

void _CPU_ISR_install_raw_handler(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.
 */

void _CPU_ISR_install_vector(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_Thread_Idle_body
 *
 *  Use the halt instruction or low power mode of a particular i386 model.
 */

#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)

void *_CPU_Thread_Idle_body( uintptr_t ignored );

#endif /* CPU_PROVIDES_IDLE_THREAD_BODY */

/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner and avoid stack conflicts.
 */

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;

/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 */

#ifdef __SSE__
#define _CPU_Context_save_fp(fp_context_pp) \
  do {                                      \
    __asm__ __volatile__(                   \
      "fstcw %0"                            \
      :"=m"((*(fp_context_pp))->fpucw)      \
    );                                      \
    __asm__ __volatile__(                   \
      "stmxcsr %0"                          \
      :"=m"((*(fp_context_pp))->mxcsr)      \
    );                                      \
  } while (0)
#else
void _CPU_Context_save_fp(
  Context_Control_fp **fp_context_ptr
);
#endif

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 */
#ifdef __SSE__
#define _CPU_Context_restore_fp(fp_context_pp) \
  do {                                         \
    __asm__ __volatile__(                      \
      "fldcw %0"                               \
      ::"m"((*(fp_context_pp))->fpucw)         \
      :"fpcr"                                  \
    );                                         \
    __builtin_ia32_ldmxcsr(_Thread_Executing->fp_context->mxcsr);  \
  } while (0)
#else
void _CPU_Context_restore_fp(
  Context_Control_fp **fp_context_ptr
);
#endif

#ifdef __SSE__
#define _CPU_Context_Initialization_at_thread_begin() \
  do {                                                \
    __asm__ __volatile__(                             \
      "finit"                                         \
      :                                               \
      :                                               \
      :"st","st(1)","st(2)","st(3)",                  \
       "st(4)","st(5)","st(6)","st(7)",               \
       "fpsr","fpcr"                                  \
    );                                                \
    if ( _Thread_Executing->fp_context ) {            \
      _CPU_Context_restore_fp(&_Thread_Executing->fp_context); \
    }                                                 \
  } while (0)
#endif

void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

#endif /* ASM */

#ifdef __cplusplus
}
#endif

#endif