source: rtems/cpukit/score/cpu/i386/rtems/score/cpu.h @ 38b59a6

Last change on this file since 38b59a6 was 38b59a6, checked in by Sebastian Huber <sebastian.huber@…>, on 05/02/14 at 08:31:09

score: Implement forced thread migration

The current implementation of task migration in RTEMS has implications for
interrupt latency. It is crucial to preserve the system invariant that a
task can execute on at most one processor in the system at a time. This is
accomplished with a boolean indicator in the task context. The processor
architecture specific low-level task context switch code marks that a task
context is no longer executing and waits until the heir context has stopped
execution before it restores the heir context and resumes execution of the
heir task. So there is one point in time in which a processor is without a
task. This is essential to avoid cyclic dependencies in case multiple tasks
migrate at once. Otherwise some supervising entity would be necessary to
prevent livelocks. Such a global supervisor would lead to scalability
problems, so this approach is not used. Currently the thread dispatch is
performed with interrupts disabled. So if the heir task is currently
executing on another processor, this prolongs the time of disabled
interrupts, since one processor has to wait for the other to make progress.

This interrupt latency issue is difficult to avoid, since interrupts
normally store the context of the interrupted task on its stack. Once a
task is marked as not executing, we must not use its task stack to store
such an interrupt context. We cannot use the heir's stack before it has
stopped execution on another processor. So if we enable interrupts during
this transition, we have to provide an alternative, task independent stack
for this time frame. This issue needs further investigation.
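
The handshake described above can be sketched in C. This is an illustrative model only, assuming the Context_Control structure with the volatile is_executing indicator defined later in this file; the real switch code is architecture specific assembly, and the save/restore helpers below are hypothetical:

    /* Illustrative model of the forced migration handshake, not the
     * actual RTEMS implementation. */
    void example_context_switch( Context_Control *run, Context_Control *heir )
    {
      save_registers( run );            /* hypothetical helper */

      /* This processor no longer executes 'run'. */
      run->is_executing = false;

      /* Wait until the heir has stopped on its previous processor.  In
       * this window the processor executes without a task context. */
      while ( heir->is_executing ) {
        /* busy wait */
      }

      heir->is_executing = true;
      restore_registers( heir );        /* hypothetical helper */
    }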

/**
 * @file
 *
 * @brief Intel I386 CPU Dependent Source
 *
 * This include file contains information pertaining to the Intel
 * i386 processor.
 */

/*
 *  COPYRIGHT (c) 1989-2011.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

#ifndef ASM
#include <string.h> /* for memcpy */
#endif

#ifdef __cplusplus
extern "C" {
#endif

#include <rtems/score/types.h>
#include <rtems/score/i386.h>

#ifndef ASM
#include <rtems/score/interrupts.h>     /* formerly in libcpu/cpu.h */
#include <rtems/score/registers.h>      /* formerly part of libcpu */
#endif

/* conditional compilation parameters */

#define CPU_INLINE_ENABLE_DISPATCH       TRUE
#define CPU_UNROLL_ENQUEUE_PRIORITY      FALSE

/*
 *  Does the CPU follow the simple vectored interrupt model?
 *
 *  If TRUE, then RTEMS allocates the vector table it internally manages.
 *  If FALSE, then the BSP is assumed to allocate and manage the vector
 *  table.
 *
 *  i386 Specific Information:
 *
 *  The PowerPC and x86 were the first to use the PIC interrupt model.
 *  They do not use the simple vectored interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

/*
 *  The i386 has an RTEMS allocated and managed interrupt stack.
 */

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
#define CPU_ALLOCATE_INTERRUPT_STACK     TRUE

/*
 *  Does RTEMS invoke the user's ISR with the vector number and
 *  a pointer to the saved interrupt frame (1) or just the vector
 *  number (0)?
 */

#define CPU_ISR_PASSES_FRAME_POINTER 0
/*
 *  Some family members have no FP, some have an FPU such as the i387
 *  for the i386, others have it built in (i486DX, Pentium).
 */

#ifdef __SSE__
#define CPU_HARDWARE_FP                  TRUE
#define CPU_SOFTWARE_FP                  FALSE

#define CPU_ALL_TASKS_ARE_FP             TRUE
#define CPU_IDLE_TASK_IS_FP              TRUE
#define CPU_USE_DEFERRED_FP_SWITCH       FALSE
#else /* __SSE__ */

#if ( I386_HAS_FPU == 1 )
#define CPU_HARDWARE_FP     TRUE    /* i387 for i386 */
#else
#define CPU_HARDWARE_FP     FALSE
#endif
#define CPU_SOFTWARE_FP     FALSE

#define CPU_ALL_TASKS_ARE_FP             FALSE
#define CPU_IDLE_TASK_IS_FP              FALSE
#if defined(RTEMS_SMP)
  #define CPU_USE_DEFERRED_FP_SWITCH     FALSE
#else
  #define CPU_USE_DEFERRED_FP_SWITCH     TRUE
#endif
#endif /* __SSE__ */
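
/* Note (illustrative, not part of the original file): deferred FP context
 * switches are disabled under SMP because the FP context of a migrating
 * task could still be live in the FPU of the processor it last ran on. */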

#define CPU_STACK_GROWS_UP               FALSE
#define CPU_STRUCTURE_ALIGNMENT

#define CPU_TIMESTAMP_USE_INT64_INLINE TRUE

/*
 *  Does this port provide a CPU dependent IDLE task implementation?
 *
 *  If TRUE, then the routine _CPU_Thread_Idle_body
 *  must be provided and is the default IDLE thread body instead of
 *  the generic _Thread_Idle_body.
 *
 *  If FALSE, then use the generic IDLE thread body if the BSP does
 *  not provide one.
 */

#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE

/*
 *  Define what is required to specify how the network to host conversion
 *  routines are handled.
 */

#define CPU_BIG_ENDIAN                           FALSE
#define CPU_LITTLE_ENDIAN                        TRUE

#define CPU_PER_CPU_CONTROL_SIZE 0

#define I386_CONTEXT_CONTROL_EFLAGS_OFFSET 0
#define I386_CONTEXT_CONTROL_ESP_OFFSET 4
#define I386_CONTEXT_CONTROL_EBP_OFFSET 8
#define I386_CONTEXT_CONTROL_EBX_OFFSET 12
#define I386_CONTEXT_CONTROL_ESI_OFFSET 16
#define I386_CONTEXT_CONTROL_EDI_OFFSET 20

#ifdef RTEMS_SMP
  #define I386_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 24
#endif

/* structures */

#ifndef ASM

typedef struct {
  /* There is no CPU specific per-CPU state */
} CPU_Per_CPU_control;

/*
 *  Basic integer context for the i386 family.
 */

typedef struct {
  uint32_t    eflags;   /* extended flags register                   */
  void       *esp;      /* extended stack pointer register           */
  void       *ebp;      /* extended base pointer register            */
  uint32_t    ebx;      /* extended bx register                      */
  uint32_t    esi;      /* extended source index register            */
  uint32_t    edi;      /* extended destination index register       */
#ifdef RTEMS_SMP
  volatile bool is_executing;
#endif
}   Context_Control;

#define _CPU_Context_Get_SP( _context ) \
  (_context)->esp

#ifdef RTEMS_SMP
  #define _CPU_Context_Get_is_executing( _context ) \
    (_context)->is_executing
#endif
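
/* Illustrative consistency note (not part of the original file): the
 * I386_CONTEXT_CONTROL_*_OFFSET constants above mirror this layout and
 * could be verified at compile time, e.g.:
 *
 *   RTEMS_STATIC_ASSERT(
 *     offsetof( Context_Control, ebx ) == I386_CONTEXT_CONTROL_EBX_OFFSET,
 *     Context_Control_ebx_offset
 *   );
 */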

/*
 *  FP context save area for the i387 numeric coprocessors.
 */
#ifdef __SSE__
/* All FPU and SSE registers are volatile; hence, as long
 * as we are within normally executing C code (including
 * a task switch) there is no need for saving/restoring
 * any of those registers.
 * We must save/restore the full FPU/SSE context across
 * interrupts and exceptions, however:
 *   -  after ISR execution a _Thread_Dispatch() may happen
 *      and it is therefore necessary to save the FPU/SSE
 *      registers to be restored when control is returned
 *      to the interrupted task.
 *   -  gcc may implicitly use FPU/SSE instructions in
 *      an ISR.
 *
 * Even though there is no explicit mention of the FPU
 * control word in the SYSV ABI (i386) being non-volatile,
 * we maintain MXCSR and the FPU control word for each task.
 */
typedef struct {
  uint32_t  mxcsr;
  uint16_t  fpucw;
} Context_Control_fp;

#else

typedef struct {
  uint8_t     fp_save_area[108];    /* context size area for I80387 */
                                    /*  28 bytes for environment    */
} Context_Control_fp;

#endif


/*
 *  The following structure defines the set of information saved
 *  on the current stack by RTEMS upon receipt of exceptions.
 *
 * idtIndex is either the interrupt number or the trap/exception number.
 * faultCode is the code pushed by the processor on some exceptions.
 *
 * Since the first registers are directly pushed by the CPU they
 * may not respect 16-byte stack alignment, which is, however,
 * mandatory for the SSE register area.
 * Therefore, these registers are stored at an aligned address
 * and a pointer is stored in the CPU_Exception_frame.
 * If the executive was compiled without SSE support then
 * this pointer is NULL.
 */

struct Context_Control_sse;

typedef struct {
  struct Context_Control_sse *fp_ctxt;
  uint32_t    edi;
  uint32_t    esi;
  uint32_t    ebp;
  uint32_t    esp0;
  uint32_t    ebx;
  uint32_t    edx;
  uint32_t    ecx;
  uint32_t    eax;
  uint32_t    idtIndex;
  uint32_t    faultCode;
  uint32_t    eip;
  uint32_t    cs;
  uint32_t    eflags;
} CPU_Exception_frame;

#ifdef __SSE__
typedef struct Context_Control_sse {
  uint16_t  fcw;
  uint16_t  fsw;
  uint8_t   ftw;
  uint8_t   res_1;
  uint16_t  fop;
  uint32_t  fpu_ip;
  uint16_t  cs;
  uint16_t  res_2;
  uint32_t  fpu_dp;
  uint16_t  ds;
  uint16_t  res_3;
  uint32_t  mxcsr;
  uint32_t  mxcsr_mask;
  struct {
    uint8_t fpreg[10];
    uint8_t res_4[ 6];
  } fp_mmregs[8];
  uint8_t   xmmregs[8][16];
  uint8_t   res_5[224];
} Context_Control_sse
__attribute__((aligned(16)))
;
#endif

typedef void (*cpuExcHandlerType) (CPU_Exception_frame*);
extern cpuExcHandlerType _currentExcHandler;
extern void rtems_exception_init_mngt(void);

/*
 * This port does not pass any frame info to the
 * interrupt handler.
 */

typedef void CPU_Interrupt_frame;

typedef enum {
  I386_EXCEPTION_DIVIDE_BY_ZERO      = 0,
  I386_EXCEPTION_DEBUG               = 1,
  I386_EXCEPTION_NMI                 = 2,
  I386_EXCEPTION_BREAKPOINT          = 3,
  I386_EXCEPTION_OVERFLOW            = 4,
  I386_EXCEPTION_BOUND               = 5,
  I386_EXCEPTION_ILLEGAL_INSTR       = 6,
  I386_EXCEPTION_MATH_COPROC_UNAVAIL = 7,
  I386_EXCEPTION_DOUBLE_FAULT        = 8,
  I386_EXCEPTION_I386_COPROC_SEG_ERR = 9,
  I386_EXCEPTION_INVALID_TSS         = 10,
  I386_EXCEPTION_SEGMENT_NOT_PRESENT = 11,
  I386_EXCEPTION_STACK_SEGMENT_FAULT = 12,
  I386_EXCEPTION_GENERAL_PROT_ERR    = 13,
  I386_EXCEPTION_PAGE_FAULT          = 14,
  I386_EXCEPTION_INTEL_RES15         = 15,
  I386_EXCEPTION_FLOAT_ERROR         = 16,
  I386_EXCEPTION_ALIGN_CHECK         = 17,
  I386_EXCEPTION_MACHINE_CHECK       = 18,
  I386_EXCEPTION_ENTER_RDBG          = 50     /* to enter RDBG manually */

} Intel_symbolic_exception_name;

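/* Illustrative sketch (not part of the original file): a custom handler
 * has the cpuExcHandlerType signature declared above and could be made
 * current by assigning to _currentExcHandler once
 * rtems_exception_init_mngt() has run (an assumption based on the
 * declarations above).
 */
static inline void _CPU_Exception_example_handler( CPU_Exception_frame *frame )
{
  if ( frame->idtIndex == I386_EXCEPTION_PAGE_FAULT ) {
    /* frame->eip is the faulting instruction, frame->faultCode the
     * error code pushed by the processor */
  }
}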

/*
 *  context size area for floating point
 *
 *  NOTE:  This is out of place on the i386 to avoid a forward reference.
 */

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

/* variables */

SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context;

#endif /* ASM */

/* constants */

/*
 *  This defines the number of levels and the mask used to pick those
 *  bits out of a thread mode.
 */

#define CPU_MODES_INTERRUPT_LEVEL  0x00000001 /* interrupt level in mode */
#define CPU_MODES_INTERRUPT_MASK   0x00000001 /* interrupt level in mode */

/*
 *  extra stack required by the MPCI receive server thread
 */

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 1024

/*
 *  This is defined if the port has a special way to report the ISR nesting
 *  level.  Most ports maintain the variable _ISR_Nest_level.
 */

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/*
 *  Minimum size of a thread's stack.
 */

#define CPU_STACK_MINIMUM_SIZE          4096

#define CPU_SIZEOF_POINTER 4

/*
 *  i386 is pretty tolerant of alignment.  Just put things on 4 byte boundaries.
 */

#define CPU_ALIGNMENT                    4
#define CPU_HEAP_ALIGNMENT               CPU_ALIGNMENT
#define CPU_PARTITION_ALIGNMENT          CPU_ALIGNMENT

/*
 *  On i386 thread stacks require no further alignment after allocation
 *  from the Workspace. However, since gcc maintains 16-byte alignment
 *  we try to respect that. If you find an option to let gcc squeeze
 *  the stack more tightly then setting CPU_STACK_ALIGNMENT to 16 still
 *  doesn't waste much space since this only determines the *initial*
 *  alignment.
 */

#define CPU_STACK_ALIGNMENT             16

/* macros */

#ifndef ASM
/*
 *  ISR handler macros
 *
 *  These macros perform the following functions:
 *     + initialize the RTEMS vector table
 *     + disable all maskable CPU interrupts
 *     + restore previous interrupt level (enable)
 *     + temporarily restore interrupts (flash)
 *     + set a particular level
 */

#define _CPU_ISR_Disable( _level ) i386_disable_interrupts( _level )

#define _CPU_ISR_Enable( _level )  i386_enable_interrupts( _level )

#define _CPU_ISR_Flash( _level )   i386_flash_interrupts( _level )

#define _CPU_ISR_Set_level( _new_level ) \
  { \
    if ( _new_level ) __asm__ volatile ( "cli" ); \
    else              __asm__ volatile ( "sti" ); \
  }

uint32_t   _CPU_ISR_Get_level( void );
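
/* Illustrative sketch (not part of the original file): the canonical
 * save/disable/restore pattern built from the level macros above.
 */
static inline void _CPU_ISR_example_critical_section( void )
{
  uint32_t level;

  _CPU_ISR_Disable( level );   /* save the current level, then disable */
  /* ... critical section; _CPU_ISR_Flash( level ) may briefly re-enable ... */
  _CPU_ISR_Enable( level );    /* restore the saved level */
}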

/*  Make sure interrupt stack has space for ISR
 *  'vector' arg at the top and that it is aligned
 *  properly.
 */

#define _CPU_Interrupt_stack_setup( _lo, _hi )  \
  do {                                          \
    _hi = (void*)(((uintptr_t)(_hi) - 4) & ~ (CPU_STACK_ALIGNMENT - 1)); \
  } while (0)
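
/* Worked example (illustrative, not part of the original file): with
 * _hi == 0x20000 and CPU_STACK_ALIGNMENT == 16 the macro above yields
 * (0x20000 - 4) & ~15 == 0x1fff0, i.e. an aligned top of stack with room
 * for the 'vector' argument above it. */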

#endif /* ASM */

/* end of ISR handler macros */

/*
 *  Context handler macros
 *
 *  These macros perform the following functions:
 *     + initialize a context area
 *     + restart the current thread
 *     + calculate the initial pointer into a FP context area
 *     + initialize an FP context area
 */

#define CPU_EFLAGS_INTERRUPTS_ON  0x00003202
#define CPU_EFLAGS_INTERRUPTS_OFF 0x00003002
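
/* Illustrative decoding (not part of the original file): bits 12-13 set
 * IOPL to 3 and bit 1 is a reserved, always-set bit in both values; the
 * two values differ only in bit 9 (IF), which is set in
 * CPU_EFLAGS_INTERRUPTS_ON and clear in CPU_EFLAGS_INTERRUPTS_OFF. */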

#ifndef ASM

/*
 * Stack alignment note:
 *
 * We want the stack to look to the '_entry_point' routine
 * like an ordinary stack frame as if '_entry_point' was
 * called from C code.
 * Note that '_entry_point' is jumped-to by the 'ret'
 * instruction returning from _CPU_Context_switch() or
 * _CPU_Context_restore() thus popping the _entry_point
 * from the stack.
 * However, _entry_point expects a frame to look like this:
 *
 *      args        [_Thread_Handler expects no args, however]
 *      ------      (alignment boundary)
 * SP-> return_addr return here when _entry_point returns (which never happens)
 *
 *
 * Hence we must initialize the stack as follows:
 *
 *         [arg1          ]:  n/a
 *         [arg0 (aligned)]:  n/a
 *         [ret. addr     ]:  NULL
 * SP->    [jump-target   ]:  _entry_point
 *
 * When _CPU_Context_switch() returns it pops the _entry_point from
 * the stack and _entry_point then finds a standard frame layout.
 */


#ifdef RTEMS_SMP
  #define _I386_Context_Initialize_is_executing( _the_context ) \
    (_the_context)->is_executing = false
#else
  #define _I386_Context_Initialize_is_executing( _the_context )
#endif

#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
                                   _isr, _entry_point, _is_fp, _tls_area ) \
  do { \
    uint32_t   _stack; \
    \
    if ( (_isr) ) (_the_context)->eflags = CPU_EFLAGS_INTERRUPTS_OFF; \
    else          (_the_context)->eflags = CPU_EFLAGS_INTERRUPTS_ON; \
    \
    _stack  = ((uint32_t)(_stack_base)) + (_size); \
    _stack &= ~ (CPU_STACK_ALIGNMENT - 1); \
    _stack -= 2*sizeof(proc_ptr*); /* see above for why we need to do this */ \
    *((proc_ptr *)(_stack)) = (_entry_point); \
    (_the_context)->ebp     = (void *) 0; \
    (_the_context)->esp     = (void *) _stack; \
    _I386_Context_Initialize_is_executing( _the_context ); \
  } while (0)

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

#if defined(RTEMS_SMP)
  uint32_t _CPU_SMP_Initialize( void );

  bool _CPU_SMP_Start_processor( uint32_t cpu_index );

  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

  uint32_t _CPU_SMP_Get_current_processor( void );

  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

  static inline void _CPU_SMP_Processor_event_broadcast( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }

  static inline void _CPU_SMP_Processor_event_receive( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }
#endif

#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

#define _CPU_Context_Initialize_fp( _fp_area ) \
  { \
    memcpy( *_fp_area, &_CPU_Null_fp_context, CPU_CONTEXT_FP_SIZE ); \
  }

/* end of Context handler macros */

/*
 *  Fatal Error manager macros
 *
 *  These macros perform the following functions:
 *    + disable interrupts and halt the CPU
 */

#define _CPU_Fatal_halt( _error ) \
  { \
    uint32_t _error_lvalue = ( _error ); \
    __asm__ volatile ( "cli ; \
                    movl %0,%%eax ; \
                    hlt" \
                    : "=r" ((_error_lvalue)) : "0" ((_error_lvalue)) \
    ); \
  }

#endif /* ASM */

/* end of Fatal Error manager macros */

/*
 *  Bitfield handler macros
 *
 *  These macros perform the following functions:
 *     + scan for the highest numbered (MSB) set in a 16 bit bitfield
 */

#define CPU_USE_GENERIC_BITFIELD_CODE FALSE
#define CPU_USE_GENERIC_BITFIELD_DATA FALSE

#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
  { \
    register uint16_t   __value_in_register = (_value); \
    \
    _output = 0; \
    \
    __asm__ volatile ( "bsfw    %0,%1 " \
                    : "=r" (__value_in_register), "=r" (_output) \
                    : "0"  (__value_in_register), "1"  (_output) \
    ); \
  }

/* end of Bitfield handler macros */

/*
 *  Priority handler macros
 *
 *  These macros perform the following functions:
 *    + return a mask with the bit for this major/minor portion
 *      of thread priority set.
 *    + translate the bit number returned by "Bitfield_find_first_bit"
 *      into an index into the thread ready chain bit maps
 */

#define _CPU_Priority_Mask( _bit_number ) \
  ( 1 << (_bit_number) )

#define _CPU_Priority_bits_index( _priority ) \
  (_priority)
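
#ifndef ASM
/* Illustrative sketch (not part of the original file): how the bitfield
 * and priority macros above compose; 'bit_map' is a hypothetical 16-bit
 * ready chain bit map.
 */
static inline unsigned int _CPU_Priority_example_lookup( uint16_t bit_map )
{
  unsigned int bit_number;

  /* bsfw stores the index of the lowest-order set bit in bit_number */
  _CPU_Bitfield_Find_first_bit( bit_map, bit_number );
  return _CPU_Priority_bits_index( bit_number );
}
#endif /* ASM */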

/* functions */

#ifndef ASM
/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 */

void _CPU_Initialize(void);

/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs a "raw" interrupt handler directly into the
 *  processor's vector table.
 */

void _CPU_ISR_install_raw_handler(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.
 */

void _CPU_ISR_install_vector(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_Thread_Idle_body
 *
 *  Use the halt instruction or low power mode of a particular i386 model.
 */

#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)

void *_CPU_Thread_Idle_body( uintptr_t ignored );

#endif /* CPU_PROVIDES_IDLE_THREAD_BODY */

/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner and avoid stack conflicts.
 */

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;

/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 */

#ifdef __SSE__
#define _CPU_Context_save_fp(fp_context_pp) \
  do {                                      \
    __asm__ __volatile__(                   \
      "fstcw %0"                            \
      :"=m"((*(fp_context_pp))->fpucw)      \
    );                                      \
    __asm__ __volatile__(                   \
      "stmxcsr %0"                          \
      :"=m"((*(fp_context_pp))->mxcsr)      \
    );                                      \
  } while (0)
#else
void _CPU_Context_save_fp(
  Context_Control_fp **fp_context_ptr
);
#endif

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 */
#ifdef __SSE__
#define _CPU_Context_restore_fp(fp_context_pp) \
  do {                                         \
    __asm__ __volatile__(                      \
      "fldcw %0"                               \
      ::"m"((*(fp_context_pp))->fpucw)         \
      :"fpcr"                                  \
    );                                         \
    __builtin_ia32_ldmxcsr(_Thread_Executing->fp_context->mxcsr);  \
  } while (0)
#else
void _CPU_Context_restore_fp(
  Context_Control_fp **fp_context_ptr
);
#endif

#ifdef __SSE__
#define _CPU_Context_Initialization_at_thread_begin() \
  do {                                                \
    __asm__ __volatile__(                             \
      "finit"                                         \
      :                                               \
      :                                               \
      :"st","st(1)","st(2)","st(3)",                  \
       "st(4)","st(5)","st(6)","st(7)",               \
       "fpsr","fpcr"                                  \
    );                                                \
    if ( _Thread_Executing->fp_context ) {            \
      _CPU_Context_restore_fp(&_Thread_Executing->fp_context); \
    }                                                 \
  } while (0)
#endif

static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
{
  /* TODO */
}

static inline void _CPU_Context_validate( uintptr_t pattern )
{
  while (1) {
    /* TODO */
  }
}

void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

typedef uint32_t CPU_Counter_ticks;

CPU_Counter_ticks _CPU_Counter_read( void );

static inline CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
)
{
  return second - first;
}
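
/* Illustrative sketch (not part of the original file): timing a code
 * sequence with the free-running counter; wrap-around is handled by the
 * unsigned subtraction in _CPU_Counter_difference().
 */
static inline CPU_Counter_ticks _CPU_Counter_example_delta( void )
{
  CPU_Counter_ticks first = _CPU_Counter_read();

  /* ... code under measurement ... */

  return _CPU_Counter_difference( _CPU_Counter_read(), first );
}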

#endif /* ASM */

#ifdef __cplusplus
}
#endif

#endif