source: rtems/cpukit/score/cpu/i386/rtems/score/cpu.h @ dda25b1

5
Last change on this file since dda25b1 was dda25b1, checked in by Joel Sherrill <joel@…>, on 01/09/16 at 21:22:16

i386 ..score/cpu.h: Fix spacing

  • Property mode set to 100644
File size: 19.4 KB
RevLine 
[6d6891e]1/**
[d9e0006]2 * @file
3 *
4 * @brief Intel I386 CPU Dependent Source
5 *
6 * This include file contains information pertaining to the Intel
7 * i386 processor.
[6d6891e]8 */
9
10/*
[06dcaf0]11 *  COPYRIGHT (c) 1989-2011.
[7908ba5b]12 *  On-Line Applications Research Corporation (OAR).
13 *
14 *  The license and distribution terms for this file may be
15 *  found in the file LICENSE in this distribution or at
[c499856]16 *  http://www.rtems.org/license/LICENSE.
[7908ba5b]17 */
18
[7f70d1b7]19#ifndef _RTEMS_SCORE_CPU_H
20#define _RTEMS_SCORE_CPU_H
[7908ba5b]21
[a6d48e3]22#ifndef ASM
[af063f6]23#include <string.h> /* for memcpy */
[a6d48e3]24#endif
[af063f6]25
[7908ba5b]26#ifdef __cplusplus
27extern "C" {
28#endif
29
[89b85e51]30#include <rtems/score/types.h>
31#include <rtems/score/i386.h>
[7908ba5b]32
33#ifndef ASM
[a324355]34#include <rtems/score/interrupts.h>     /* formerly in libcpu/cpu.h> */
35#include <rtems/score/registers.h>      /* formerly part of libcpu */
[7908ba5b]36#endif
37
/* conditional compilation parameters */

/* Inline _Thread_Dispatch() rather than calling it out of line. */
#define CPU_INLINE_ENABLE_DISPATCH       TRUE

/*
 *  Does the CPU follow the simple vectored interrupt model?
 *
 *  If TRUE, then RTEMS allocates the vector table it internally manages.
 *  If FALSE, then the BSP is assumed to allocate and manage the vector
 *  table.
 *
 *  i386 Specific Information:
 *
 *  The PowerPC and x86 were the first to use the PIC interrupt model.
 *  They do not use the simple vectored interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE
55
[7908ba5b]56/*
57 *  i386 has an RTEMS allocated and managed interrupt stack.
58 */
59
60#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
61#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
62#define CPU_ALLOCATE_INTERRUPT_STACK     TRUE
63
64/*
65 *  Does the RTEMS invoke the user's ISR with the vector number and
[84c53452]66 *  a pointer to the saved interrupt frame (1) or just the vector
[7908ba5b]67 *  number (0)?
68 */
69
70#define CPU_ISR_PASSES_FRAME_POINTER 0
71
72/*
73 *  Some family members have no FP, some have an FPU such as the i387
74 *  for the i386, others have it built in (i486DX, Pentium).
75 */
76
[b02f4cc1]77#ifdef __SSE__
78#define CPU_HARDWARE_FP                  TRUE
79#define CPU_SOFTWARE_FP                  FALSE
80
81#define CPU_ALL_TASKS_ARE_FP             TRUE
82#define CPU_IDLE_TASK_IS_FP              TRUE
83#define CPU_USE_DEFERRED_FP_SWITCH       FALSE
84#else /* __SSE__ */
85
[7908ba5b]86#if ( I386_HAS_FPU == 1 )
87#define CPU_HARDWARE_FP     TRUE    /* i387 for i386 */
88#else
89#define CPU_HARDWARE_FP     FALSE
90#endif
[17508d02]91#define CPU_SOFTWARE_FP     FALSE
[7908ba5b]92
93#define CPU_ALL_TASKS_ARE_FP             FALSE
94#define CPU_IDLE_TASK_IS_FP              FALSE
[965ef82]95#if defined(RTEMS_SMP)
96  #define CPU_USE_DEFERRED_FP_SWITCH     FALSE
97#else
98  #define CPU_USE_DEFERRED_FP_SWITCH     TRUE
99#endif
[b02f4cc1]100#endif /* __SSE__ */
[7908ba5b]101
102#define CPU_STACK_GROWS_UP               FALSE
[a8865f8]103
104/* FIXME: The Pentium 4 used 128 bytes, it this processor still relevant? */
105#define CPU_CACHE_LINE_BYTES 64
106
[7908ba5b]107#define CPU_STRUCTURE_ALIGNMENT
108
109/*
110 *  Does this port provide a CPU dependent IDLE task implementation?
[84c53452]111 *
[7908ba5b]112 *  If TRUE, then the routine _CPU_Thread_Idle_body
113 *  must be provided and is the default IDLE thread body instead of
114 *  _CPU_Thread_Idle_body.
115 *
116 *  If FALSE, then use the generic IDLE thread body if the BSP does
117 *  not provide one.
118 */
[84c53452]119
[fd05a05]120#define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE
[7908ba5b]121
122/*
123 *  Define what is required to specify how the network to host conversion
124 *  routines are handled.
125 */
126
127#define CPU_BIG_ENDIAN                           FALSE
128#define CPU_LITTLE_ENDIAN                        TRUE
129
[10fd4aac]130#define CPU_PER_CPU_CONTROL_SIZE 0
131
[38b59a6]132#define I386_CONTEXT_CONTROL_EFLAGS_OFFSET 0
133#define I386_CONTEXT_CONTROL_ESP_OFFSET 4
134#define I386_CONTEXT_CONTROL_EBP_OFFSET 8
135#define I386_CONTEXT_CONTROL_EBX_OFFSET 12
136#define I386_CONTEXT_CONTROL_ESI_OFFSET 16
137#define I386_CONTEXT_CONTROL_EDI_OFFSET 20
138
139#ifdef RTEMS_SMP
140  #define I386_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 24
141#endif
142
[7908ba5b]143/* structures */
144
[a6d48e3]145#ifndef ASM
146
[10fd4aac]147typedef struct {
148  /* There is no CPU specific per-CPU state */
149} CPU_Per_CPU_control;
150
[7908ba5b]151/*
152 *  Basic integer context for the i386 family.
153 */
154
155typedef struct {
[e6aeabd]156  uint32_t    eflags;   /* extended flags register                   */
[7908ba5b]157  void       *esp;      /* extended stack pointer register           */
158  void       *ebp;      /* extended base pointer register            */
[e6aeabd]159  uint32_t    ebx;      /* extended bx register                      */
160  uint32_t    esi;      /* extended source index register            */
161  uint32_t    edi;      /* extended destination index flags register */
[38b59a6]162#ifdef RTEMS_SMP
163  volatile bool is_executing;
164#endif
[7908ba5b]165}   Context_Control;
166
[0ca6d0d9]167#define _CPU_Context_Get_SP( _context ) \
168  (_context)->esp
169
[38b59a6]170#ifdef RTEMS_SMP
[11b05f1]171  static inline bool _CPU_Context_Get_is_executing(
172    const Context_Control *context
173  )
174  {
175    return context->is_executing;
176  }
177
178  static inline void _CPU_Context_Set_is_executing(
179    Context_Control *context,
180    bool is_executing
181  )
182  {
183    context->is_executing = is_executing;
184  }
[38b59a6]185#endif
186
[7908ba5b]187/*
188 *  FP context save area for the i387 numeric coprocessors.
189 */
[b02f4cc1]190#ifdef __SSE__
191/* All FPU and SSE registers are volatile; hence, as long
192 * as we are within normally executing C code (including
193 * a task switch) there is no need for saving/restoring
194 * any of those registers.
195 * We must save/restore the full FPU/SSE context across
196 * interrupts and exceptions, however:
197 *   -  after ISR execution a _Thread_Dispatch() may happen
198 *      and it is therefore necessary to save the FPU/SSE
199 *      registers to be restored when control is returned
200 *      to the interrupted task.
201 *   -  gcc may implicitly use FPU/SSE instructions in
202 *      an ISR.
203 *
204 * Even though there is no explicit mentioning of the FPU
205 * control word in the SYSV ABI (i386) being non-volatile
206 * we maintain MXCSR and the FPU control-word for each task.
207 */
208typedef struct {
209        uint32_t  mxcsr;
210        uint16_t  fpucw;
211} Context_Control_fp;
212
213#else
[7908ba5b]214
215typedef struct {
[e6aeabd]216  uint8_t     fp_save_area[108];    /* context size area for I80387 */
[7908ba5b]217                                    /*  28 bytes for environment    */
218} Context_Control_fp;
219
[b02f4cc1]220#endif
221
[7908ba5b]222
/*
 *  The following structure defines the set of information saved
 *  on the current stack by RTEMS upon receipt of exceptions.
 *
 * idtIndex is either the interrupt number or the trap/exception number.
 * faultCode is the code pushed by the processor on some exceptions.
 *
 * Since the first registers are directly pushed by the CPU they
 * may not respect 16-byte stack alignment, which is, however,
 * mandatory for the SSE register area.
 * Therefore, these registers are stored at an aligned address
 * and a pointer is stored in the CPU_Exception_frame.
 * If the executive was compiled without SSE support then
 * this pointer is NULL.
 */

struct Context_Control_sse;

typedef struct {
  struct Context_Control_sse *fp_ctxt;  /* NULL unless built with __SSE__ */
  uint32_t    edi;
  uint32_t    esi;
  uint32_t    ebp;
  uint32_t    esp0;
  uint32_t    ebx;
  uint32_t    edx;
  uint32_t    ecx;
  uint32_t    eax;
  uint32_t    idtIndex;     /* interrupt or trap/exception number       */
  uint32_t    faultCode;    /* error code pushed by the CPU, if any     */
  uint32_t    eip;
  uint32_t    cs;
  uint32_t    eflags;
} CPU_Exception_frame;

#ifdef __SSE__
/*
 * FPU/SSE register image; the layout corresponds to the processor's
 * FXSAVE area, hence the mandatory 16-byte alignment.
 */
typedef struct Context_Control_sse {
  uint16_t  fcw;
  uint16_t  fsw;
  uint8_t   ftw;
  uint8_t   res_1;
  uint16_t  fop;
  uint32_t  fpu_ip;
  uint16_t  cs;
  uint16_t  res_2;
  uint32_t  fpu_dp;
  uint16_t  ds;
  uint16_t  res_3;
  uint32_t  mxcsr;
  uint32_t  mxcsr_mask;
  struct {
        uint8_t fpreg[10];
        uint8_t res_4[ 6];
  } fp_mmregs[8];
  uint8_t   xmmregs[8][16];
  uint8_t   res_5[224];
} Context_Control_sse
__attribute__((aligned(16)))
;
#endif

/* Global exception handler hook; see rtems_exception_init_mngt(). */
typedef void (*cpuExcHandlerType) (CPU_Exception_frame*);
extern cpuExcHandlerType _currentExcHandler;
extern void rtems_exception_init_mngt(void);
[7908ba5b]287
/*
 * This port does not pass any frame info to the
 * interrupt handler.
 */

typedef void CPU_Interrupt_frame;

/* Symbolic names for the Intel-defined exception vectors, plus the
 * RTEMS-specific RDBG entry vector. */
typedef enum {
  I386_EXCEPTION_DIVIDE_BY_ZERO      = 0,
  I386_EXCEPTION_DEBUG               = 1,
  I386_EXCEPTION_NMI                 = 2,
  I386_EXCEPTION_BREAKPOINT          = 3,
  I386_EXCEPTION_OVERFLOW            = 4,
  I386_EXCEPTION_BOUND               = 5,
  I386_EXCEPTION_ILLEGAL_INSTR       = 6,
  I386_EXCEPTION_MATH_COPROC_UNAVAIL = 7,
  I386_EXCEPTION_DOUBLE_FAULT        = 8,
  I386_EXCEPTION_I386_COPROC_SEG_ERR = 9,
  I386_EXCEPTION_INVALID_TSS         = 10,
  I386_EXCEPTION_SEGMENT_NOT_PRESENT = 11,
  I386_EXCEPTION_STACK_SEGMENT_FAULT = 12,
  I386_EXCEPTION_GENERAL_PROT_ERR    = 13,
  I386_EXCEPTION_PAGE_FAULT          = 14,
  I386_EXCEPTION_INTEL_RES15         = 15,
  I386_EXCEPTION_FLOAT_ERROR         = 16,
  I386_EXCEPTION_ALIGN_CHECK         = 17,
  I386_EXCEPTION_MACHINE_CHECK       = 18,
  I386_EXCEPTION_ENTER_RDBG          = 50     /* to enter manually RDBG */

} Intel_symbolic_exception_name;
[84c53452]318
[7908ba5b]319
/*
 *  context size area for floating point
 *
 *  NOTE:  This is out of place on the i386 to avoid a forward reference.
 */

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

/* variables */

/* Template FP context copied into each new FP task's save area by
 * _CPU_Context_Initialize_fp(). */
extern Context_Control_fp _CPU_Null_fp_context;

#endif /* ASM */
333
[7908ba5b]334/* constants */
335
336/*
337 *  This defines the number of levels and the mask used to pick those
338 *  bits out of a thread mode.
339 */
340
341#define CPU_MODES_INTERRUPT_LEVEL  0x00000001 /* interrupt level in mode */
342#define CPU_MODES_INTERRUPT_MASK   0x00000001 /* interrupt level in mode */
343
344/*
345 *  extra stack required by the MPCI receive server thread
346 */
347
348#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 1024
349
[4db30283]350/*
351 *  This is defined if the port has a special way to report the ISR nesting
352 *  level.  Most ports maintain the variable _ISR_Nest_level.
353 */
354
355#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
356
[7908ba5b]357/*
358 *  Minimum size of a thread's stack.
359 */
360
[6952f3d]361#define CPU_STACK_MINIMUM_SIZE          4096
[7908ba5b]362
[f1738ed]363#define CPU_SIZEOF_POINTER 4
364
[7908ba5b]365/*
366 *  i386 is pretty tolerant of alignment.  Just put things on 4 byte boundaries.
367 */
368
369#define CPU_ALIGNMENT                    4
370#define CPU_HEAP_ALIGNMENT               CPU_ALIGNMENT
371#define CPU_PARTITION_ALIGNMENT          CPU_ALIGNMENT
372
373/*
374 *  On i386 thread stacks require no further alignment after allocation
[a6d48e3]375 *  from the Workspace. However, since gcc maintains 16-byte alignment
376 *  we try to respect that. If you find an option to let gcc squeeze
377 *  the stack more tightly then setting CPU_STACK_ALIGNMENT to 16 still
378 *  doesn't waste much space since this only determines the *initial*
379 *  alignment.
[7908ba5b]380 */
381
[a6d48e3]382#define CPU_STACK_ALIGNMENT             16
[7908ba5b]383
/* macros */

#ifndef ASM
/*
 *  ISR handler macros
 *
 *  These macros perform the following functions:
 *     + initialize the RTEMS vector table
 *     + disable all maskable CPU interrupts
 *     + restore previous interrupt level (enable)
 *     + temporarily restore interrupts (flash)
 *     + set a particular level
 */

#if !defined(RTEMS_PARAVIRT)
#define _CPU_ISR_Disable( _level ) i386_disable_interrupts( _level )

#define _CPU_ISR_Enable( _level )  i386_enable_interrupts( _level )

#define _CPU_ISR_Flash( _level )   i386_flash_interrupts( _level )

/* The interrupt level is binary on the i386: non-zero disables (cli),
 * zero enables (sti). */
#define _CPU_ISR_Set_level( _new_level ) \
  { \
    if ( _new_level ) __asm__ volatile ( "cli" ); \
    else              __asm__ volatile ( "sti" ); \
  }
#else
/* RTEMS_PARAVIRT: the i386_* interrupt primitives are provided as
 * calls; note that i386_disable_interrupts() here RETURNS the previous
 * level rather than storing it through its argument. */
#define _CPU_ISR_Disable( _level ) _level = i386_disable_interrupts()
#define _CPU_ISR_Enable( _level ) i386_enable_interrupts( _level )
#define _CPU_ISR_Flash( _level ) i386_flash_interrupts( _level )
#define _CPU_ISR_Set_level( _new_level ) i386_set_interrupt_level(_new_level)
#endif

uint32_t   _CPU_ISR_Get_level( void );

/*  Make sure interrupt stack has space for ISR
 *  'vector' arg at the top and that it is aligned
 *  properly.
 */

#define _CPU_Interrupt_stack_setup( _lo, _hi )  \
        do {                                        \
                _hi = (void*)(((uintptr_t)(_hi) - 4) & ~ (CPU_STACK_ALIGNMENT - 1)); \
        } while (0)

#endif /* ASM */
430
[7908ba5b]431/* end of ISR handler macros */
432
433/*
434 *  Context handler macros
435 *
436 *  These macros perform the following functions:
437 *     + initialize a context area
438 *     + restart the current thread
439 *     + calculate the initial pointer into a FP context area
440 *     + initialize an FP context area
441 */
442
443#define CPU_EFLAGS_INTERRUPTS_ON  0x00003202
444#define CPU_EFLAGS_INTERRUPTS_OFF 0x00003002
445
[a6d48e3]446#ifndef ASM
447
448/*
449 * Stack alignment note:
[42e243e]450 *
[a6d48e3]451 * We want the stack to look to the '_entry_point' routine
452 * like an ordinary stack frame as if '_entry_point' was
453 * called from C-code.
454 * Note that '_entry_point' is jumped-to by the 'ret'
455 * instruction returning from _CPU_Context_switch() or
456 * _CPU_Context_restore() thus popping the _entry_point
457 * from the stack.
458 * However, _entry_point expects a frame to look like this:
459 *
460 *      args        [_Thread_Handler expects no args, however]
461 *      ------      (alignment boundary)
462 * SP-> return_addr return here when _entry_point returns which (never happens)
463 *
[42e243e]464 *
[a6d48e3]465 * Hence we must initialize the stack as follows
466 *
467 *         [arg1          ]:  n/a
468 *         [arg0 (aligned)]:  n/a
469 *         [ret. addr     ]:  NULL
470 * SP->    [jump-target   ]:  _entry_point
471 *
472 * When Context_switch returns it pops the _entry_point from
473 * the stack which then finds a standard layout.
474 */
475
476
[38b59a6]477
[7908ba5b]478#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
[022851a]479                                   _isr, _entry_point, _is_fp, _tls_area ) \
[7908ba5b]480  do { \
[e6aeabd]481    uint32_t   _stack; \
[7908ba5b]482    \
[6c8e0dc8]483    (void) _is_fp; /* avoid warning for being unused */ \
[7908ba5b]484    if ( (_isr) ) (_the_context)->eflags = CPU_EFLAGS_INTERRUPTS_OFF; \
485    else          (_the_context)->eflags = CPU_EFLAGS_INTERRUPTS_ON; \
486    \
[a6d48e3]487    _stack  = ((uint32_t)(_stack_base)) + (_size); \
488        _stack &= ~ (CPU_STACK_ALIGNMENT - 1); \
489    _stack -= 2*sizeof(proc_ptr*); /* see above for why we need to do this */ \
[7908ba5b]490    *((proc_ptr *)(_stack)) = (_entry_point); \
[020363d]491    (_the_context)->ebp     = (void *) 0; \
[7908ba5b]492    (_the_context)->esp     = (void *) _stack; \
493  } while (0)
494
495#define _CPU_Context_Restart_self( _the_context ) \
496   _CPU_Context_restore( (_the_context) );
497
[06dcaf0]498#if defined(RTEMS_SMP)
[53e008b]499  uint32_t _CPU_SMP_Initialize( void );
500
501  bool _CPU_SMP_Start_processor( uint32_t cpu_index );
502
503  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );
[4627fcd]504
[c34f94f7]505  void _CPU_SMP_Prepare_start_multitasking( void );
506
[47d60134]507  uint32_t _CPU_SMP_Get_current_processor( void );
[39e51758]508
[ca63ae2]509  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );
510
[07f6e419]511  static inline void _CPU_SMP_Processor_event_broadcast( void )
[2f6108f9]512  {
513    __asm__ volatile ( "" : : : "memory" );
514  }
515
[f7740e97]516  static inline void _CPU_SMP_Processor_event_receive( void )
[2f6108f9]517  {
518    __asm__ volatile ( "" : : : "memory" );
519  }
[06dcaf0]520#endif
521
[7908ba5b]522#define _CPU_Context_Fp_start( _base, _offset ) \
523   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
524
525#define _CPU_Context_Initialize_fp( _fp_area ) \
526  { \
[af063f6]527    memcpy( *_fp_area, &_CPU_Null_fp_context, CPU_CONTEXT_FP_SIZE ); \
[7908ba5b]528  }
529
530/* end of Context handler macros */
531
532/*
533 *  Fatal Error manager macros
534 *
535 *  These macros perform the following functions:
536 *    + disable interrupts and halt the CPU
537 */
538
[505dc61]539extern void _CPU_Fatal_halt(uint32_t source, uint32_t error)
540  RTEMS_NO_RETURN;
[7908ba5b]541
[a6d48e3]542#endif /* ASM */
543
[7908ba5b]544/* end of Fatal Error manager macros */
545
546/*
547 *  Bitfield handler macros
548 *
549 *  These macros perform the following functions:
550 *     + scan for the highest numbered (MSB) set in a 16 bit bitfield
551 */
552
553#define CPU_USE_GENERIC_BITFIELD_CODE FALSE
554#define CPU_USE_GENERIC_BITFIELD_DATA FALSE
555
556#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
557  { \
[e6aeabd]558    register uint16_t   __value_in_register = (_value); \
[7908ba5b]559    \
560    _output = 0; \
561    \
[c05f6238]562    __asm__ volatile ( "bsfw    %0,%1 " \
[7908ba5b]563                    : "=r" (__value_in_register), "=r" (_output) \
564                    : "0"  (__value_in_register), "1"  (_output) \
565    ); \
566  }
567
568/* end of Bitfield handler macros */
569
570/*
571 *  Priority handler macros
572 *
573 *  These macros perform the following functions:
574 *    + return a mask with the bit for this major/minor portion of
575 *      of thread priority set.
576 *    + translate the bit number returned by "Bitfield_find_first_bit"
577 *      into an index into the thread ready chain bit maps
578 */
579
580#define _CPU_Priority_Mask( _bit_number ) \
581  ( 1 << (_bit_number) )
582
583#define _CPU_Priority_bits_index( _priority ) \
584  (_priority)
585
/* functions */

#ifndef ASM
/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 */

void _CPU_Initialize(void);

/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs a "raw" interrupt handler directly into the
 *  processor's vector table.
 */

void _CPU_ISR_install_raw_handler(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.  The previous handler,
 *  if any, is returned through old_handler.
 */

void _CPU_ISR_install_vector(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_Thread_Idle_body
 *
 *  Use the halt instruction of low power mode of a particular i386 model.
 */

#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)

void *_CPU_Thread_Idle_body( uintptr_t ignored );

#endif /* CPU_PROVIDES_IDLE_THREAD_BODY */
633
/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner and avoid stack conflicts.  It does not return.
 */

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_NO_RETURN;
[7908ba5b]655
/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 */

#ifdef __SSE__
/* With SSE only the control state must be preserved across a task
 * switch: the x87 control word (fstcw) and MXCSR (stmxcsr); see the
 * Context_Control_fp comment for the rationale. */
#define _CPU_Context_save_fp(fp_context_pp) \
  do {                                      \
    __asm__ __volatile__(                   \
      "fstcw %0"                            \
      :"=m"((*(fp_context_pp))->fpucw)      \
    );                                      \
    __asm__ __volatile__(                   \
      "stmxcsr %0"                          \
      :"=m"((*(fp_context_pp))->mxcsr)      \
    );                                      \
  } while (0)
#else
void _CPU_Context_save_fp(
  Context_Control_fp **fp_context_ptr
);
#endif

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 */
#ifdef __SSE__
/* NOTE(review): the FPU control word is reloaded from *fp_context_pp,
 * but MXCSR is taken from _Thread_Executing->fp_context -- verify that
 * both always refer to the same context at every call site. */
#define _CPU_Context_restore_fp(fp_context_pp) \
  do {                                         \
    __asm__ __volatile__(                      \
      "fldcw %0"                               \
      ::"m"((*(fp_context_pp))->fpucw)         \
      :"fpcr"                                  \
    );                                         \
    __builtin_ia32_ldmxcsr(_Thread_Executing->fp_context->mxcsr);  \
  } while (0)
#else
void _CPU_Context_restore_fp(
  Context_Control_fp **fp_context_ptr
);
#endif

#ifdef __SSE__
/* Reset the x87 FPU when a thread begins execution and, if the thread
 * owns an FP context, reload its saved control state. */
#define _CPU_Context_Initialization_at_thread_begin() \
  do {                                                \
    __asm__ __volatile__(                             \
      "finit"                                         \
      :                                               \
      :                                               \
      :"st","st(1)","st(2)","st(3)",                  \
       "st(4)","st(5)","st(6)","st(7)",               \
       "fpsr","fpcr"                                  \
    );                                                \
    if ( _Thread_Executing->fp_context ) {            \
      _CPU_Context_restore_fp(&_Thread_Executing->fp_context); \
    }                                                 \
  } while (0)
#endif
[7908ba5b]717
[39993d6]718static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
719{
720  /* TODO */
721}
722
723static inline void _CPU_Context_validate( uintptr_t pattern )
724{
725  while (1) {
726    /* TODO */
727  }
728}
729
[815994f]730void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
731
[24bf11e]732typedef uint32_t CPU_Counter_ticks;
733
734CPU_Counter_ticks _CPU_Counter_read( void );
735
736static inline CPU_Counter_ticks _CPU_Counter_difference(
737  CPU_Counter_ticks second,
738  CPU_Counter_ticks first
739)
740{
741  return second - first;
742}
743
[a6d48e3]744#endif /* ASM */
745
[7908ba5b]746#ifdef __cplusplus
747}
748#endif
749
750#endif
Note: See TracBrowser for help on using the repository browser.