source: rtems/cpukit/score/cpu/i386/rtems/score/cpu.h @ 3148438

4.115
Last change on this file since 3148438 was 2f6108f9, checked in by Sebastian Huber <sebastian.huber@…>, on 05/28/13 at 08:58:19

smp: Simplify SMP initialization sequence

Delete bsp_smp_wait_for(). Other parts of the system work without
timeout, e.g. the spinlocks. Using a timeout here does not make the
system more robust.

Delete bsp_smp_cpu_state and replace it with Per_CPU_State. The
Per_CPU_State follows the Score naming conventions. Add
_Per_CPU_Change_state() and _Per_CPU_Wait_for_state() functions to
change and observe states.

Use Per_CPU_State in Per_CPU_Control instead of the anonymous integer.

Add _CPU_Processor_event_broadcast() and _CPU_Processor_event_receive()
functions provided by the CPU port. Use these functions in
_Per_CPU_Change_state() and _Per_CPU_Wait_for_state().

Add prototype for _SMP_Send_message().

Delete RTEMS_BSP_SMP_FIRST_TASK message. The first context switch is
now performed in rtems_smp_secondary_cpu_initialize(). Issuing the
first context switch in the context of the inter-processor interrupt is
not possible on systems with a modern interrupt controller. Such an
interrupt controller usually requires a handshake protocol with interrupt
acknowledge and end of interrupt signals. A direct context switch in an
interrupt handler circumvents the interrupt processing epilogue and may
leave the system in an inconsistent state.

Release lock in rtems_smp_process_interrupt() even if no message was
delivered. This prevents deadlock of the system.

Simplify and format _SMP_Send_message(),
_SMP_Request_other_cores_to_perform_first_context_switch(),
_SMP_Request_other_cores_to_dispatch() and
_SMP_Request_other_cores_to_shutdown().

  • Property mode set to 100644
File size: 18.2 KB
Line 
1/**
2 * @file
3 *
4 * @brief Intel I386 CPU Dependent Source
5 *
6 * This include file contains information pertaining to the Intel
7 * i386 processor.
8 */
9
10/*
11 *  COPYRIGHT (c) 1989-2011.
12 *  On-Line Applications Research Corporation (OAR).
13 *
14 *  The license and distribution terms for this file may be
15 *  found in the file LICENSE in this distribution or at
16 *  http://www.rtems.com/license/LICENSE.
17 */
18
19#ifndef _RTEMS_SCORE_CPU_H
20#define _RTEMS_SCORE_CPU_H
21
22#ifndef ASM
23#include <string.h> /* for memcpy */
24#endif
25
26#ifdef __cplusplus
27extern "C" {
28#endif
29
30#include <rtems/score/types.h>
31#include <rtems/score/i386.h>
32
33#ifndef ASM
34#include <rtems/score/interrupts.h>     /* formerly in libcpu/cpu.h> */
35#include <rtems/score/registers.h>      /* formerly part of libcpu */
36#endif
37
/* conditional compilation parameters */

#define CPU_INLINE_ENABLE_DISPATCH       TRUE
#define CPU_UNROLL_ENQUEUE_PRIORITY      FALSE

/*
 *  Does the CPU follow the simple vectored interrupt model?
 *
 *  If TRUE, then RTEMS allocates the vector table it internally manages.
 *  If FALSE, then the BSP is assumed to allocate and manage the vector
 *  table.
 *
 *  i386 Specific Information:
 *
 *  The x86 (like the PowerPC) uses the PIC interrupt model and
 *  therefore does not use the simple vectored interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

/*
 *  The i386 has an RTEMS allocated and managed interrupt stack.
 */
#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
#define CPU_ALLOCATE_INTERRUPT_STACK     TRUE

/*
 *  Does RTEMS invoke the user's ISR with the vector number and
 *  a pointer to the saved interrupt frame (1) or just the vector
 *  number (0)?
 */
#define CPU_ISR_PASSES_FRAME_POINTER 0

/*
 *  Some family members have no FP, some have an FPU such as the i387
 *  for the i386, others have it built in (i486DX, Pentium).
 */
#ifdef __SSE__
#define CPU_HARDWARE_FP                  TRUE
#define CPU_SOFTWARE_FP                  FALSE

#define CPU_ALL_TASKS_ARE_FP             TRUE
#define CPU_IDLE_TASK_IS_FP              TRUE
#define CPU_USE_DEFERRED_FP_SWITCH       FALSE
#else /* __SSE__ */

#if ( I386_HAS_FPU == 1 )
#define CPU_HARDWARE_FP     TRUE    /* i387 for i386 */
#else
#define CPU_HARDWARE_FP     FALSE
#endif
#define CPU_SOFTWARE_FP     FALSE

#define CPU_ALL_TASKS_ARE_FP             FALSE
#define CPU_IDLE_TASK_IS_FP              FALSE
#define CPU_USE_DEFERRED_FP_SWITCH       TRUE
#endif /* __SSE__ */

/* The i386 stack grows towards lower addresses; no special structure
 * alignment attribute is required. */
#define CPU_STACK_GROWS_UP               FALSE
#define CPU_STRUCTURE_ALIGNMENT

#define CPU_TIMESTAMP_USE_INT64_INLINE TRUE

/*
 *  Does this port provide a CPU dependent IDLE task implementation?
 *
 *  If TRUE, then the routine _CPU_Thread_Idle_body must be provided
 *  by this port and it is used instead of the generic IDLE thread body.
 *
 *  If FALSE, then the generic IDLE thread body is used unless the BSP
 *  provides one.
 */
#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE

/*
 *  Define what is required to specify how the network to host conversion
 *  routines are handled.  The i386 is little endian.
 */
#define CPU_BIG_ENDIAN                           FALSE
#define CPU_LITTLE_ENDIAN                        TRUE
124
125/* structures */
126
127#ifndef ASM
128
/*
 *  Basic integer context for the i386 family.
 *
 *  Holds the registers preserved across a context switch plus the
 *  flags word and the stack/frame pointers.
 */
typedef struct {
  uint32_t    eflags;   /* extended flags register             */
  void       *esp;      /* extended stack pointer register     */
  void       *ebp;      /* extended base pointer register      */
  uint32_t    ebx;      /* extended bx register                */
  uint32_t    esi;      /* extended source index register      */
  uint32_t    edi;      /* extended destination index register */
}   Context_Control;

/* Fetch the saved stack pointer out of a context. */
#define _CPU_Context_Get_SP( _context ) \
  (_context)->esp
/*
 *  FP context save area for the i387 numeric coprocessors.
 */
#ifdef __SSE__
/* All FPU and SSE registers are volatile; hence, as long as we are
 * within normally executing C code (including a task switch) there is
 * no need for saving/restoring any of those registers.
 * We must save/restore the full FPU/SSE context across interrupts and
 * exceptions, however:
 *   -  after ISR execution a _Thread_Dispatch() may happen and it is
 *      therefore necessary to save the FPU/SSE registers to be
 *      restored when control is returned to the interrupted task.
 *   -  gcc may implicitly use FPU/SSE instructions in an ISR.
 *
 * Even though there is no explicit mentioning of the FPU control word
 * in the SYSV ABI (i386) being non-volatile, we maintain MXCSR and the
 * FPU control-word for each task.
 */
typedef struct {
        uint32_t  mxcsr;
        uint16_t  fpucw;
} Context_Control_fp;

#else

/* Raw save area large enough for an i387 fsave image. */
typedef struct {
  uint8_t     fp_save_area[108];    /* context size area for I80387 */
                                    /*  28 bytes for environment    */
} Context_Control_fp;

#endif
179
180
/*
 *  The following structure defines the set of information saved
 *  on the current stack by RTEMS upon receipt of exceptions.
 *
 * idtIndex is either the interrupt number or the trap/exception number.
 * faultCode is the code pushed by the processor on some exceptions.
 *
 * Since the first registers are directly pushed by the CPU they
 * may not respect 16-byte stack alignment, which is, however,
 * mandatory for the SSE register area.
 * Therefore, these registers are stored at an aligned address
 * and a pointer is stored in the CPU_Exception_frame.
 * If the executive was compiled without SSE support then
 * this pointer is NULL.
 */

struct Context_Control_sse;

typedef struct {
  struct Context_Control_sse *fp_ctxt; /* NULL if built without SSE */
  uint32_t    edi;
  uint32_t    esi;
  uint32_t    ebp;
  uint32_t    esp0;
  uint32_t    ebx;
  uint32_t    edx;
  uint32_t    ecx;
  uint32_t    eax;
  uint32_t    idtIndex;   /* interrupt or trap/exception number */
  uint32_t    faultCode;  /* error code pushed by some exceptions */
  uint32_t    eip;
  uint32_t    cs;
  uint32_t    eflags;
} CPU_Exception_frame;
215
#ifdef __SSE__
/* Full FPU/SSE register image.  The layout matches the 512-byte,
 * 16-byte aligned memory image used by the fxsave/fxrstor instructions.
 */
typedef struct Context_Control_sse {
  uint16_t  fcw;          /* x87 control word                 */
  uint16_t  fsw;          /* x87 status word                  */
  uint8_t   ftw;          /* x87 tag word (abridged)          */
  uint8_t   res_1;
  uint16_t  fop;          /* last x87 opcode                  */
  uint32_t  fpu_ip;       /* last x87 instruction pointer     */
  uint16_t  cs;
  uint16_t  res_2;
  uint32_t  fpu_dp;       /* last x87 data pointer            */
  uint16_t  ds;
  uint16_t  res_3;
  uint32_t  mxcsr;
  uint32_t  mxcsr_mask;
  struct {
        uint8_t fpreg[10];
        uint8_t res_4[ 6];
  } fp_mmregs[8];         /* x87/MMX register stack           */
  uint8_t   xmmregs[8][16];
  uint8_t   res_5[224];
} Context_Control_sse
__attribute__((aligned(16)))
;
#endif
241
242typedef void (*cpuExcHandlerType) (CPU_Exception_frame*);
243extern cpuExcHandlerType _currentExcHandler;
244extern void rtems_exception_init_mngt(void);
245
246/*
247 * This port does not pass any frame info to the
248 * interrupt handler.
249 */
250
251typedef void CPU_Interrupt_frame;
252
/* Symbolic names for the i386 exception/trap vector numbers. */
typedef enum {
  I386_EXCEPTION_DIVIDE_BY_ZERO      = 0,
  I386_EXCEPTION_DEBUG               = 1,
  I386_EXCEPTION_NMI                 = 2,
  I386_EXCEPTION_BREAKPOINT          = 3,
  I386_EXCEPTION_OVERFLOW            = 4,
  I386_EXCEPTION_BOUND               = 5,
  I386_EXCEPTION_ILLEGAL_INSTR       = 6,
  I386_EXCEPTION_MATH_COPROC_UNAVAIL = 7,
  I386_EXCEPTION_DOUBLE_FAULT        = 8,
  I386_EXCEPTION_I386_COPROC_SEG_ERR = 9,
  I386_EXCEPTION_INVALID_TSS         = 10,
  I386_EXCEPTION_SEGMENT_NOT_PRESENT = 11,
  I386_EXCEPTION_STACK_SEGMENT_FAULT = 12,
  I386_EXCEPTION_GENERAL_PROT_ERR    = 13,
  I386_EXCEPTION_PAGE_FAULT          = 14,
  I386_EXCEPTION_INTEL_RES15         = 15,
  I386_EXCEPTION_FLOAT_ERROR         = 16,
  I386_EXCEPTION_ALIGN_CHECK         = 17,
  I386_EXCEPTION_MACHINE_CHECK       = 18,
  I386_EXCEPTION_ENTER_RDBG          = 50     /* to enter RDBG manually */

} Intel_symbolic_exception_name;
276
277
278/*
279 *  context size area for floating point
280 *
281 *  NOTE:  This is out of place on the i386 to avoid a forward reference.
282 */
283
284#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
285
286/* variables */
287
288SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context;
289
290#endif /* ASM */
291
/* constants */

/*
 *  This defines the number of levels and the mask used to pick those
 *  bits out of a thread mode.
 */
#define CPU_MODES_INTERRUPT_LEVEL  0x00000001 /* interrupt level in mode */
#define CPU_MODES_INTERRUPT_MASK   0x00000001 /* interrupt level in mode */

/*
 *  Extra stack required by the MPCI receive server thread.
 */
#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 1024

/*
 *  The i386 family supports 256 distinct interrupt vectors.
 */
#define CPU_INTERRUPT_NUMBER_OF_VECTORS      256
#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER  (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1)

/*
 *  This is defined if the port has a special way to report the ISR nesting
 *  level.  Most ports maintain the variable _ISR_Nest_level.
 */
#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/*
 *  Minimum size of a thread's stack.
 */
#define CPU_STACK_MINIMUM_SIZE          4096

/* Pointers are 32 bits wide on this port. */
#define CPU_SIZEOF_POINTER 4

/*
 *  i386 is pretty tolerant of alignment.  Just put things on 4 byte
 *  boundaries.
 */
#define CPU_ALIGNMENT                    4
#define CPU_HEAP_ALIGNMENT               CPU_ALIGNMENT
#define CPU_PARTITION_ALIGNMENT          CPU_ALIGNMENT

/*
 *  On i386 thread stacks require no further alignment after allocation
 *  from the Workspace.  However, since gcc maintains 16-byte alignment
 *  we try to respect that.  Using 16 here wastes little space since it
 *  only determines the *initial* alignment.
 */
#define CPU_STACK_ALIGNMENT             16
348
349/* macros */
350
351#ifndef ASM
/*
 *  ISR handler macros
 *
 *  These macros perform the following functions:
 *     + initialize the RTEMS vector table
 *     + disable all maskable CPU interrupts
 *     + restore previous interrupt level (enable)
 *     + temporarily restore interrupts (flash)
 *     + set a particular level
 */

/* Disable interrupts, saving the previous level in _level. */
#define _CPU_ISR_Disable( _level ) i386_disable_interrupts( _level )

/* Restore the interrupt level saved by _CPU_ISR_Disable(). */
#define _CPU_ISR_Enable( _level )  i386_enable_interrupts( _level )

/* Momentarily restore then re-disable interrupts (flash). */
#define _CPU_ISR_Flash( _level )   i386_flash_interrupts( _level )

/* Non-zero level disables interrupts (cli), zero enables them (sti). */
#define _CPU_ISR_Set_level( _new_level ) \
  { \
    if ( _new_level ) __asm__ volatile ( "cli" ); \
    else              __asm__ volatile ( "sti" ); \
  }

uint32_t   _CPU_ISR_Get_level( void );
376
/*  Make sure the interrupt stack has space for the ISR 'vector'
 *  argument at the top and that the resulting top is aligned to
 *  CPU_STACK_ALIGNMENT.  Only the high end (_hi) is adjusted.
 */
#define _CPU_Interrupt_stack_setup( _lo, _hi ) \
  do { \
    _hi = (void*)(((uintptr_t)(_hi) - 4) & ~ (CPU_STACK_ALIGNMENT - 1)); \
  } while (0)
386
387#endif /* ASM */
388
389/* end of ISR handler macros */
390
/*
 *  Context handler macros
 *
 *  These macros perform the following functions:
 *     + initialize a context area
 *     + restart the current thread
 *     + calculate the initial pointer into a FP context area
 *     + initialize an FP context area
 */

/* Initial EFLAGS images: IOPL = 3 with the interrupt flag set or
 * cleared, respectively (bit 1 is the always-one reserved bit).
 */
#define CPU_EFLAGS_INTERRUPTS_ON  0x00003202
#define CPU_EFLAGS_INTERRUPTS_OFF 0x00003002
403
404#ifndef ASM
405
/*
 * Stack alignment note:
 *
 * We want the stack to look to the '_entry_point' routine
 * like an ordinary stack frame as if '_entry_point' was
 * called from C-code.
 * Note that '_entry_point' is jumped-to by the 'ret'
 * instruction returning from _CPU_Context_switch() or
 * _CPU_Context_restore() thus popping the _entry_point
 * from the stack.
 * However, _entry_point expects a frame to look like this:
 *
 *      args        [_Thread_Handler expects no args, however]
 *      ------      (alignment boundary)
 * SP-> return_addr return here when _entry_point returns (which never happens)
 *
 *
 * Hence we must initialize the stack as follows
 *
 *         [arg1          ]:  n/a
 *         [arg0 (aligned)]:  n/a
 *         [ret. addr     ]:  NULL
 * SP->    [jump-target   ]:  _entry_point
 *
 * When Context_switch returns it pops the _entry_point from
 * the stack which then finds a standard layout.
 */

/* Build the initial integer context: pick the EFLAGS image from _isr,
 * align the stack top, reserve two pointer slots (see note above) and
 * seed the stack with the entry point so 'ret' jumps into it.
 */
#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
                                   _isr, _entry_point, _is_fp ) \
  do { \
    uint32_t   _stack; \
    \
    if ( (_isr) ) (_the_context)->eflags = CPU_EFLAGS_INTERRUPTS_OFF; \
    else          (_the_context)->eflags = CPU_EFLAGS_INTERRUPTS_ON; \
    \
    _stack  = ((uint32_t)(_stack_base)) + (_size); \
    _stack &= ~ (CPU_STACK_ALIGNMENT - 1); \
    _stack -= 2*sizeof(proc_ptr*); /* see above for why we need to do this */ \
    *((proc_ptr *)(_stack)) = (_entry_point); \
    (_the_context)->ebp     = (void *) 0; \
    (_the_context)->esp     = (void *) _stack; \
  } while (0)
450
/* Restart the currently executing thread by restoring its own context. */
#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );
453
#if defined(RTEMS_SMP)
  /* The first task on a secondary processor is started by simply
   * restoring its context.
   */
  #define _CPU_Context_switch_to_first_task_smp( _the_context ) \
     _CPU_Context_restore( (_the_context) );

  /* Atomically exchange *_address with _value; the previous content of
   * *_address ends up in _previous.  The lock prefix makes the xchg
   * atomic with respect to the other processors.
   */
  #define SMP_CPU_SWAP( _address, _value, _previous ) \
    do { \
      asm volatile("lock; xchgl %0, %1" : \
        "+m" (*_address), "=a" (_previous) : \
        "1" (_value) : \
        "cc"); \
    } while (0)

  /* Only a compiler memory barrier is issued here — no fence or wakeup
   * instruction; peers are expected to poll.
   */
  static inline void _CPU_Processor_event_broadcast( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }

  /* Compiler memory barrier forcing a re-read of shared state. */
  static inline void _CPU_Processor_event_receive( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }
#endif
477
/* Compute the starting address of an FP context area within _base. */
#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

/* Initialize an FP area by copying in the pristine null FP context. */
#define _CPU_Context_Initialize_fp( _fp_area ) \
  { \
    memcpy( *_fp_area, &_CPU_Null_fp_context, CPU_CONTEXT_FP_SIZE ); \
  }
485
486/* end of Context handler macros */
487
/*
 *  Fatal Error manager macros
 *
 *  Disable interrupts, load the error code into EAX for post-mortem
 *  inspection, and halt the CPU.
 */
#define _CPU_Fatal_halt( _error ) \
  { \
    __asm__ volatile ( "cli ; \
                    movl %0,%%eax ; \
                    hlt" \
                    : "=r" ((_error)) : "0" ((_error)) \
    ); \
  }
503
504#endif /* ASM */
505
506/* end of Fatal Error manager macros */
507
/*
 *  Bitfield handler macros
 *
 *  This port uses the i386 bsfw instruction rather than the generic
 *  bitfield scan code and data tables.
 */
#define CPU_USE_GENERIC_BITFIELD_CODE FALSE
#define CPU_USE_GENERIC_BITFIELD_DATA FALSE

/* Store in _output the bit index found by a forward scan (bsfw) of the
 * 16-bit _value.  _output is pre-set to 0, which also covers the case
 * where _value is zero and the hardware result would be undefined.
 */
#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
  { \
    register uint16_t _scan_value = (_value); \
    \
    _output = 0; \
    \
    __asm__ volatile ( "bsfw    %0,%1 " \
                    : "=r" (_scan_value), "=r" (_output) \
                    : "0"  (_scan_value), "1"  (_output) \
    ); \
  }
529
530/* end of Bitfield handler macros */
531
/*
 *  Priority handler macros
 *
 *  These macros perform the following functions:
 *    + return a mask with the bit for this major/minor portion of
 *      thread priority set.
 *    + translate the bit number returned by "Bitfield_find_first_bit"
 *      into an index into the thread ready chain bit maps
 */

/* Mask with only bit _bit_number set. */
#define _CPU_Priority_Mask( _bit_number ) \
  ( 1 << (_bit_number) )

/* Bit numbers map directly to ready-chain bitmap indices on this port. */
#define _CPU_Priority_bits_index( _priority ) \
  (_priority)
547
548/* functions */
549
550#ifndef ASM
551/*
552 *  _CPU_Initialize
553 *
554 *  This routine performs CPU dependent initialization.
555 */
556
557void _CPU_Initialize(void);
558
559/*
560 *  _CPU_ISR_install_raw_handler
561 *
562 *  This routine installs a "raw" interrupt handler directly into the
563 *  processor's vector table.
564 */
565
566void _CPU_ISR_install_raw_handler(
567  uint32_t    vector,
568  proc_ptr    new_handler,
569  proc_ptr   *old_handler
570);
571
572/*
573 *  _CPU_ISR_install_vector
574 *
575 *  This routine installs an interrupt vector.
576 */
577
578void _CPU_ISR_install_vector(
579  uint32_t    vector,
580  proc_ptr    new_handler,
581  proc_ptr   *old_handler
582);
583
584/*
585 *  _CPU_Thread_Idle_body
586 *
587 *  Use the halt instruction of low power mode of a particular i386 model.
588 */
589
590#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)
591
592void *_CPU_Thread_Idle_body( uintptr_t ignored );
593
594#endif /* CPU_PROVIDES_IDLE_THREAD_BODY */
595
596/*
597 *  _CPU_Context_switch
598 *
599 *  This routine switches from the run context to the heir context.
600 */
601
602void _CPU_Context_switch(
603  Context_Control  *run,
604  Context_Control  *heir
605);
606
607/*
608 *  _CPU_Context_restore
609 *
610 *  This routine is generally used only to restart self in an
611 *  efficient manner and avoid stack conflicts.
612 */
613
614void _CPU_Context_restore(
615  Context_Control *new_context
616) RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;
617
/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 *
 *  With SSE enabled only the FPU control word and MXCSR are saved here;
 *  the remaining FP/SSE registers are volatile (see note above).
 */
#ifdef __SSE__
#define _CPU_Context_save_fp(fp_context_pp) \
  do {                                      \
    __asm__ __volatile__(                   \
      "fstcw %0"                            \
      :"=m"((*(fp_context_pp))->fpucw)      \
    );                                      \
    __asm__ __volatile__(                   \
      "stmxcsr %0"                          \
      :"=m"((*(fp_context_pp))->mxcsr)      \
    );                                      \
  } while (0)
#else
void _CPU_Context_save_fp(
  Context_Control_fp **fp_context_ptr
);
#endif
641
642/*
643 *  _CPU_Context_restore_fp
644 *
645 *  This routine restores the floating point context passed to it.
646 */
647#ifdef __SSE__
648#define _CPU_Context_restore_fp(fp_context_pp) \
649  do {                                         \
650    __asm__ __volatile__(                      \
651      "fldcw %0"                               \
652      ::"m"((*(fp_context_pp))->fpucw)         \
653      :"fpcr"                                  \
654    );                                         \
655    __builtin_ia32_ldmxcsr(_Thread_Executing->fp_context->mxcsr);  \
656  } while (0)
657#else
658void _CPU_Context_restore_fp(
659  Context_Control_fp **fp_context_ptr
660);
661#endif
662
#ifdef __SSE__
/* Reset the x87 unit when a thread starts executing and, if the thread
 * owns an FP context, load it.  finit clobbers the whole x87 state.
 */
#define _CPU_Context_Initialization_at_thread_begin() \
  do {                                                \
    __asm__ __volatile__(                             \
      "finit"                                         \
      :                                               \
      :                                               \
      :"st","st(1)","st(2)","st(3)",                  \
       "st(4)","st(5)","st(6)","st(7)",               \
       "fpsr","fpcr"                                  \
    );                                                \
    if ( _Thread_Executing->fp_context ) {            \
      _CPU_Context_restore_fp(&_Thread_Executing->fp_context); \
    }                                                 \
  } while (0)
#endif
679
/* Overwrite the volatile registers with a test pattern — not yet
 * implemented for this port (no-op placeholder).
 */
static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
{
  /* TODO */
}
684
/* Validate register contents against a test pattern — not implemented
 * for this port; the routine intentionally never returns and simply
 * spins forever.
 */
static inline void _CPU_Context_validate( uintptr_t pattern )
{
  while (1) {
    /* TODO */
  }
}
691
692void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
693
694#endif /* ASM */
695
696#ifdef __cplusplus
697}
698#endif
699
700#endif
Note: See TracBrowser for help on using the repository browser.