source: rtems/cpukit/score/cpu/i386/rtems/score/cpu.h @ c05f6238

4.115
Last change on this file since c05f6238 was c05f6238, checked in by Ralf Corsepius <ralf.corsepius@…>, on 02/11/11 at 09:14:20

2011-02-11 Ralf Corsépius <ralf.corsepius@…>

  • cpu.c, sse_test.c, rtems/score/cpu.h, rtems/score/i386.h, rtems/score/interrupts.h: Use "__asm__" instead of "asm" for improved c99-compliance.
  • Property mode set to 100644
File size: 17.0 KB
Line 
1/**
2 * @file rtems/score/cpu.h
3 */
4
5/*
6 *  This include file contains information pertaining to the Intel
7 *  i386 processor.
8 *
9 *  COPYRIGHT (c) 1989-2008.
10 *  On-Line Applications Research Corporation (OAR).
11 *
12 *  The license and distribution terms for this file may be
13 *  found in the file LICENSE in this distribution or at
14 *  http://www.rtems.com/license/LICENSE.
15 *
16 *  $Id$
17 */
18
#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

/* ASM is defined when this header is included from assembly sources;
 * everything that is not preprocessor-only must be guarded by it.
 */
#ifndef ASM
#include <string.h> /* for memcpy */
#endif

#ifdef __cplusplus
extern "C" {
#endif

#include <rtems/score/types.h>
#include <rtems/score/i386.h>

#ifndef ASM
#include <rtems/score/interrupts.h>     /* formerly in libcpu/cpu.h */
#include <rtems/score/registers.h>      /* formerly part of libcpu */
#endif
37
/* conditional compilation parameters */

#define CPU_INLINE_ENABLE_DISPATCH       TRUE
#define CPU_UNROLL_ENQUEUE_PRIORITY      FALSE

/*
 *  i386 has an RTEMS allocated and managed interrupt stack.
 */

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
#define CPU_ALLOCATE_INTERRUPT_STACK     TRUE

/*
 *  Does the RTEMS invoke the user's ISR with the vector number and
 *  a pointer to the saved interrupt frame (1) or just the vector
 *  number (0)?
 */

#define CPU_ISR_PASSES_FRAME_POINTER 0

/*
 *  Some family members have no FP, some have an FPU such as the i387
 *  for the i386, others have it built in (i486DX, Pentium).
 *
 *  When built with SSE support every task is treated as a floating
 *  point task and deferred FP switching is disabled, because gcc may
 *  emit FPU/SSE instructions in any code (see the Context_Control_fp
 *  comment below).
 */

#ifdef __SSE__
#define CPU_HARDWARE_FP                  TRUE
#define CPU_SOFTWARE_FP                  FALSE

#define CPU_ALL_TASKS_ARE_FP             TRUE
#define CPU_IDLE_TASK_IS_FP              TRUE
#define CPU_USE_DEFERRED_FP_SWITCH       FALSE
#else /* __SSE__ */

#if ( I386_HAS_FPU == 1 )
#define CPU_HARDWARE_FP     TRUE    /* i387 for i386 */
#else
#define CPU_HARDWARE_FP     FALSE
#endif
#define CPU_SOFTWARE_FP     FALSE

#define CPU_ALL_TASKS_ARE_FP             FALSE
#define CPU_IDLE_TASK_IS_FP              FALSE
#define CPU_USE_DEFERRED_FP_SWITCH       TRUE
#endif /* __SSE__ */

#define CPU_STACK_GROWS_UP               FALSE
#define CPU_STRUCTURE_ALIGNMENT

/*
 *  Does this port provide a CPU dependent IDLE task implementation?
 *
 *  If TRUE, then the routine _CPU_Thread_Idle_body
 *  must be provided and is the default IDLE thread body instead of
 *  _Thread_Idle_body.
 *
 *  If FALSE, then use the generic IDLE thread body if the BSP does
 *  not provide one.
 */

#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE

/*
 *  Define what is required to specify how the network to host conversion
 *  routines are handled.
 */

#define CPU_BIG_ENDIAN                           FALSE
#define CPU_LITTLE_ENDIAN                        TRUE
109/* structures */
110
111#ifndef ASM
112
113/*
114 *  Basic integer context for the i386 family.
115 */
116
117typedef struct {
118  uint32_t    eflags;   /* extended flags register                   */
119  void       *esp;      /* extended stack pointer register           */
120  void       *ebp;      /* extended base pointer register            */
121  uint32_t    ebx;      /* extended bx register                      */
122  uint32_t    esi;      /* extended source index register            */
123  uint32_t    edi;      /* extended destination index flags register */
124}   Context_Control;
125
126#define _CPU_Context_Get_SP( _context ) \
127  (_context)->esp
128
/*
 *  FP context save area for the i387 numeric coprocessors.
 */
#ifdef __SSE__
/* All FPU and SSE registers are volatile; hence, as long
 * as we are within normally executing C code (including
 * a task switch) there is no need for saving/restoring
 * any of those registers.
 * We must save/restore the full FPU/SSE context across
 * interrupts and exceptions, however:
 *   -  after ISR execution a _Thread_Dispatch() may happen
 *      and it is therefore necessary to save the FPU/SSE
 *      registers to be restored when control is returned
 *      to the interrupted task.
 *   -  gcc may implicitly use FPU/SSE instructions in
 *      an ISR.
 *
 * Even though there is no explicit mentioning of the FPU
 * control word in the SYSV ABI (i386) being non-volatile
 * we maintain MXCSR and the FPU control-word for each task.
 */
typedef struct {
        uint32_t  mxcsr;    /* SSE control/status register */
        uint16_t  fpucw;    /* i387 FPU control word       */
} Context_Control_fp;

#else

/* Opaque save area for the full i387 state. */
typedef struct {
  uint8_t     fp_save_area[108];    /* context size area for I80387 */
                                    /*  28 bytes for environment    */
} Context_Control_fp;

#endif
163
164
/*
 *  The following structure defines the set of information saved
 *  on the current stack by RTEMS upon receipt of exceptions.
 *
 * idtIndex is either the interrupt number or the trap/exception number.
 * faultCode is the code pushed by the processor on some exceptions.
 *
 * Since the first registers are directly pushed by the CPU they
 * may not respect 16-byte stack alignment, which is, however,
 * mandatory for the SSE register area.
 * Therefore, these registers are stored at an aligned address
 * and a pointer is stored in the CPU_Exception_frame.
 * If the executive was compiled without SSE support then
 * this pointer is NULL.
 */

struct Context_Control_sse;

typedef struct {
  struct Context_Control_sse *fp_ctxt;  /* NULL unless built with __SSE__ */
  uint32_t    edi;
  uint32_t    esi;
  uint32_t    ebp;
  uint32_t    esp0;
  uint32_t    ebx;
  uint32_t    edx;
  uint32_t    ecx;
  uint32_t    eax;
  uint32_t    idtIndex;    /* interrupt number or trap/exception number */
  uint32_t    faultCode;   /* error code pushed by some exceptions      */
  uint32_t    eip;
  uint32_t    cs;
  uint32_t    eflags;
} CPU_Exception_frame;

#ifdef __SSE__
/*
 *  Matches the 512-byte FXSAVE/FXRSTOR memory image, which requires
 *  16-byte alignment (hence the aligned attribute below).
 */
typedef struct Context_Control_sse {
  uint16_t  fcw;
  uint16_t  fsw;
  uint8_t   ftw;
  uint8_t   res_1;
  uint16_t  fop;
  uint32_t  fpu_ip;
  uint16_t  cs;
  uint16_t  res_2;
  uint32_t  fpu_dp;
  uint16_t  ds;
  uint16_t  res_3;
  uint32_t  mxcsr;
  uint32_t  mxcsr_mask;
  struct {
        uint8_t fpreg[10];
        uint8_t res_4[ 6];
  } fp_mmregs[8];           /* x87/MMX register stack, 16 bytes each */
  uint8_t   xmmregs[8][16]; /* XMM0..XMM7                            */
  uint8_t   res_5[224];     /* reserved, pads image to 512 bytes     */
} Context_Control_sse
__attribute__((aligned(16)))
;
#endif
225
/* Type of an exception handler, and the currently installed one. */
typedef void (*cpuExcHandlerType) (CPU_Exception_frame*);
extern cpuExcHandlerType _currentExcHandler;
extern void rtems_exception_init_mngt(void);

/*
 * This port does not pass any frame info to the
 * interrupt handler.
 */

typedef void CPU_Interrupt_frame;
236
/*
 *  Symbolic names for the Intel-defined exception vectors (0-18)
 *  plus an RTEMS-specific vector used to enter RDBG manually (50).
 */
typedef enum {
  I386_EXCEPTION_DIVIDE_BY_ZERO      = 0,
  I386_EXCEPTION_DEBUG               = 1,
  I386_EXCEPTION_NMI                 = 2,
  I386_EXCEPTION_BREAKPOINT          = 3,
  I386_EXCEPTION_OVERFLOW            = 4,
  I386_EXCEPTION_BOUND               = 5,
  I386_EXCEPTION_ILLEGAL_INSTR       = 6,
  I386_EXCEPTION_MATH_COPROC_UNAVAIL = 7,
  I386_EXCEPTION_DOUBLE_FAULT        = 8,
  I386_EXCEPTION_I386_COPROC_SEG_ERR = 9,
  I386_EXCEPTION_INVALID_TSS         = 10,
  I386_EXCEPTION_SEGMENT_NOT_PRESENT = 11,
  I386_EXCEPTION_STACK_SEGMENT_FAULT = 12,
  I386_EXCEPTION_GENERAL_PROT_ERR    = 13,
  I386_EXCEPTION_PAGE_FAULT          = 14,
  I386_EXCEPTION_INTEL_RES15         = 15,
  I386_EXCEPTION_FLOAT_ERROR         = 16,
  I386_EXCEPTION_ALIGN_CHECK         = 17,
  I386_EXCEPTION_MACHINE_CHECK       = 18,
  I386_EXCEPTION_ENTER_RDBG          = 50     /* to enter manually RDBG */

} Intel_symbolic_exception_name;
260
261
/*
 *  context size area for floating point
 *
 *  NOTE:  This is out of place on the i386 to avoid a forward reference.
 */

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

/* variables */

/* "Null" FP context; copied into a task's FP save area by
 * _CPU_Context_Initialize_fp() below.
 */
SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context;
/* Bounds of the RTEMS allocated interrupt stack. */
SCORE_EXTERN void               *_CPU_Interrupt_stack_low;
SCORE_EXTERN void               *_CPU_Interrupt_stack_high;

#endif /* ASM */
277
/* constants */

/*
 *  This defines the number of levels and the mask used to pick those
 *  bits out of a thread mode.  The i386 only distinguishes
 *  interrupts-enabled (0) from interrupts-disabled (1).
 */

#define CPU_MODES_INTERRUPT_LEVEL  0x00000001 /* interrupt level in mode */
#define CPU_MODES_INTERRUPT_MASK   0x00000001 /* interrupt level in mode */

/*
 *  extra stack required by the MPCI receive server thread
 */

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 1024

/*
 *  i386 family supports 256 distinct vectors.
 */

#define CPU_INTERRUPT_NUMBER_OF_VECTORS      256
#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER  (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1)

/*
 *  This is defined if the port has a special way to report the ISR nesting
 *  level.  Most ports maintain the variable _ISR_Nest_level.
 */

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/*
 *  Minimum size of a thread's stack.
 */

#define CPU_STACK_MINIMUM_SIZE          4096

/*
 *  i386 is pretty tolerant of alignment.  Just put things on 4 byte boundaries.
 */

#define CPU_ALIGNMENT                    4
#define CPU_HEAP_ALIGNMENT               CPU_ALIGNMENT
#define CPU_PARTITION_ALIGNMENT          CPU_ALIGNMENT

/*
 *  On i386 thread stacks require no further alignment after allocation
 *  from the Workspace. However, since gcc maintains 16-byte alignment
 *  we try to respect that. If you find an option to let gcc squeeze
 *  the stack more tightly then setting CPU_STACK_ALIGNMENT to 16 still
 *  doesn't waste much space since this only determines the *initial*
 *  alignment.
 */

#define CPU_STACK_ALIGNMENT             16
332
/* macros */

#ifndef ASM
/*
 *  ISR handler macros
 *
 *  These macros perform the following functions:
 *     + initialize the RTEMS vector table
 *     + disable all maskable CPU interrupts
 *     + restore previous interrupt level (enable)
 *     + temporarily restore interrupts (flash)
 *     + set a particular level
 */

/* No-op on this port. */
#define _CPU_Initialize_vectors()

#define _CPU_ISR_Disable( _level ) i386_disable_interrupts( _level )

#define _CPU_ISR_Enable( _level )  i386_enable_interrupts( _level )

#define _CPU_ISR_Flash( _level )   i386_flash_interrupts( _level )

/* Interrupt level is binary on the i386: any non-zero level
 * disables interrupts (cli), level zero enables them (sti).
 */
#define _CPU_ISR_Set_level( _new_level ) \
  { \
    if ( _new_level ) __asm__ volatile ( "cli" ); \
    else              __asm__ volatile ( "sti" ); \
  }

uint32_t   _CPU_ISR_Get_level( void );

/*  Make sure interrupt stack has space for ISR
 *  'vector' arg at the top and that it is aligned
 *  properly.
 *
 *  Only the high end (_hi) is adjusted; _lo is left untouched
 *  by this port.
 */

#define _CPU_Interrupt_stack_setup( _lo, _hi )  \
        do {                                        \
                _hi = (void*)(((uintptr_t)(_hi) - 4) & ~ (CPU_STACK_ALIGNMENT - 1)); \
        } while (0)

#endif /* ASM */
374
375/* end of ISR handler macros */
376
/*
 *  Context handler macros
 *
 *  These macros perform the following functions:
 *     + initialize a context area
 *     + restart the current thread
 *     + calculate the initial pointer into a FP context area
 *     + initialize an FP context area
 */

/* Initial EFLAGS images: IOPL = 3, reserved bit 1 set,
 * with the interrupt flag (IF, 0x200) set or clear respectively.
 */
#define CPU_EFLAGS_INTERRUPTS_ON  0x00003202
#define CPU_EFLAGS_INTERRUPTS_OFF 0x00003002

#ifndef ASM

/*
 * Stack alignment note:
 *
 * We want the stack to look to the '_entry_point' routine
 * like an ordinary stack frame as if '_entry_point' was
 * called from C-code.
 * Note that '_entry_point' is jumped-to by the 'ret'
 * instruction returning from _CPU_Context_switch() or
 * _CPU_Context_restore() thus popping the _entry_point
 * from the stack.
 * However, _entry_point expects a frame to look like this:
 *
 *      args        [_Thread_Handler expects no args, however]
 *      ------      (alignment boundary)
 * SP-> return_addr return here when _entry_point returns which (never happens)
 *
 *
 * Hence we must initialize the stack as follows
 *
 *         [arg1          ]:  n/a
 *         [arg0 (aligned)]:  n/a
 *         [ret. addr     ]:  NULL
 * SP->    [jump-target   ]:  _entry_point
 *
 * When Context_switch returns it pops the _entry_point from
 * the stack which then finds a standard layout.
 */


#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
                                   _isr, _entry_point, _is_fp ) \
  do { \
    uint32_t   _stack; \
    \
    if ( (_isr) ) (_the_context)->eflags = CPU_EFLAGS_INTERRUPTS_OFF; \
    else          (_the_context)->eflags = CPU_EFLAGS_INTERRUPTS_ON; \
    \
    /* top of stack, aligned down, minus room for entry point + return addr */ \
    _stack  = ((uint32_t)(_stack_base)) + (_size); \
        _stack &= ~ (CPU_STACK_ALIGNMENT - 1); \
    _stack -= 2*sizeof(proc_ptr*); /* see above for why we need to do this */ \
    *((proc_ptr *)(_stack)) = (_entry_point); \
    (_the_context)->ebp     = (void *) 0; \
    (_the_context)->esp     = (void *) _stack; \
  } while (0)

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

/* Initialize an FP save area by copying in the null FP context. */
#define _CPU_Context_Initialize_fp( _fp_area ) \
  { \
    memcpy( *_fp_area, &_CPU_Null_fp_context, CPU_CONTEXT_FP_SIZE ); \
  }

/* end of Context handler macros */
449
/*
 *  Fatal Error manager macros
 *
 *  These macros perform the following functions:
 *    + disable interrupts and halt the CPU
 *
 *  The error code is moved into EAX so it can be inspected
 *  with a debugger after the halt.
 */

#define _CPU_Fatal_halt( _error ) \
  { \
    __asm__ volatile ( "cli ; \
                    movl %0,%%eax ; \
                    hlt" \
                    : "=r" ((_error)) : "0" ((_error)) \
    ); \
  }

#endif /* ASM */
467
468/* end of Fatal Error manager macros */
469
/*
 *  Bitfield handler macros
 *
 *  These macros perform the following functions:
 *     + scan for the highest numbered (MSB) set in a 16 bit bitfield
 *
 *  NOTE(review): "bsfw" is bit-scan-FORWARD, i.e. it yields the index
 *  of the least significant set bit -- the "(MSB)" wording above looks
 *  like boilerplate from another port; confirm against the bitmap
 *  conventions used by the scheduler.
 */

#define CPU_USE_GENERIC_BITFIELD_CODE FALSE
#define CPU_USE_GENERIC_BITFIELD_DATA FALSE

#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
  { \
    register uint16_t   __value_in_register = (_value); \
    \
    _output = 0; \
    \
    __asm__ volatile ( "bsfw    %0,%1 " \
                    : "=r" (__value_in_register), "=r" (_output) \
                    : "0"  (__value_in_register), "1"  (_output) \
    ); \
  }

/* end of Bitfield handler macros */

/*
 *  Priority handler macros
 *
 *  These macros perform the following functions:
 *    + return a mask with the bit for this major/minor portion of
 *      of thread priority set.
 *    + translate the bit number returned by "Bitfield_find_first_bit"
 *      into an index into the thread ready chain bit maps
 */

#define _CPU_Priority_Mask( _bit_number ) \
  ( 1 << (_bit_number) )

/* Identity mapping: the bsfw result is used directly as an index. */
#define _CPU_Priority_bits_index( _priority ) \
  (_priority)
509
/* functions */

#ifndef ASM
/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 */

void _CPU_Initialize(void);

/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs a "raw" interrupt handler directly into the
 *  processor's vector table.  The previous handler is returned through
 *  old_handler.
 */

void _CPU_ISR_install_raw_handler(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.  The previous handler
 *  is returned through old_handler.
 */

void _CPU_ISR_install_vector(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_Thread_Idle_body
 *
 *  Use the halt instruction of low power mode of a particular i386 model.
 */

#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)

void *_CPU_Thread_Idle_body( uintptr_t ignored );

#endif /* CPU_PROVIDES_IDLE_THREAD_BODY */
557
/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 *  The current register state is saved into *run and the state in
 *  *heir is loaded.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner and avoid stack conflicts.  It does not return.
 */

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;
579
/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 *
 *  With SSE enabled only the FPU control word and MXCSR are saved
 *  here; all other FPU/SSE registers are volatile across a C-level
 *  context switch (see the Context_Control_fp comment above).
 */

#ifdef __SSE__
#define _CPU_Context_save_fp(fp_context_pp) \
  do {                                      \
    __asm__ __volatile__(                   \
      "fstcw %0"                            \
      :"=m"((*(fp_context_pp))->fpucw)      \
    );                                      \
        __asm__ __volatile__(                   \
      "stmxcsr %0"                          \
      :"=m"((*(fp_context_pp))->mxcsr)      \
    );                                      \
  } while (0)
#else
void _CPU_Context_save_fp(
  Context_Control_fp **fp_context_ptr
);
#endif

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 *
 *  NOTE(review): the MXCSR restore reads _Thread_Executing->fp_context
 *  rather than the fp_context_pp argument; this is only equivalent when
 *  callers pass &_Thread_Executing->fp_context -- confirm against the
 *  thread dispatch code.
 */
#ifdef __SSE__
#define _CPU_Context_restore_fp(fp_context_pp) \
  do {                                         \
    __asm__ __volatile__(                      \
      "fldcw %0"                               \
      ::"m"((*(fp_context_pp))->fpucw)         \
      :"fpcr"                                  \
    );                                         \
    __builtin_ia32_ldmxcsr(_Thread_Executing->fp_context->mxcsr);  \
  } while (0)
#else
void _CPU_Context_restore_fp(
  Context_Control_fp **fp_context_ptr
);
#endif
624
#ifdef __SSE__
/*
 *  Run when a thread starts executing: reinitialize the FPU with
 *  "finit" and, if the thread owns an FP context, restore its FPU
 *  control word and MXCSR from it.
 */
#define _CPU_Context_Initialization_at_thread_begin() \
  do {                                                \
    __asm__ __volatile__(                             \
      "finit"                                         \
      :                                               \
      :                                               \
      :"st","st(1)","st(2)","st(3)",                  \
       "st(4)","st(5)","st(6)","st(7)",               \
       "fpsr","fpcr"                                  \
    );                                                \
        if ( _Thread_Executing->fp_context ) {            \
          _CPU_Context_restore_fp(&_Thread_Executing->fp_context); \
   }                                                  \
  } while (0)
#endif
641
642#endif /* ASM */
643
644#ifdef __cplusplus
645}
646#endif
647
648#endif
Note: See TracBrowser for help on using the repository browser.