source: rtems/cpukit/score/cpu/i386/rtems/score/cpu.h @ f82752a4

Last change on this file was f82752a4, checked in by Daniel Hellstrom <daniel@…>, on Jun 4, 2014 at 9:23:34 AM

Let CPU/BSP Fatal handler have access to source

Without the source, the error code does not say that much.
Let it be up to the CPU/BSP to determine the error code
reported on fatal shutdown.

This patch does not change the current behaviour; it just
adds the option to handle the source of the fatal halt.

/**
 * @file
 *
 * @brief Intel I386 CPU Dependent Source
 *
 * This include file contains information pertaining to the Intel
 * i386 processor.
 */

/*
 *  COPYRIGHT (c) 1989-2011.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

#ifndef ASM
#include <string.h> /* for memcpy */
#endif

#ifdef __cplusplus
extern "C" {
#endif

#include <rtems/score/types.h>
#include <rtems/score/i386.h>

#ifndef ASM
#include <rtems/score/interrupts.h>     /* formerly in libcpu/cpu.h */
#include <rtems/score/registers.h>      /* formerly part of libcpu */
#endif

/* conditional compilation parameters */

#define CPU_INLINE_ENABLE_DISPATCH       TRUE
#define CPU_UNROLL_ENQUEUE_PRIORITY      FALSE

/*
 *  Does the CPU follow the simple vectored interrupt model?
 *
 *  If TRUE, then RTEMS allocates and internally manages the vector table.
 *  If FALSE, then the BSP is assumed to allocate and manage the vector
 *  table.
 *
 *  i386 Specific Information:
 *
 *  The PowerPC and x86 were the first ports to use the PIC interrupt model.
 *  They do not use the simple vectored interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

/*
 *  i386 has an RTEMS allocated and managed interrupt stack.
 */

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
#define CPU_ALLOCATE_INTERRUPT_STACK     TRUE

/*
 *  Does RTEMS invoke the user's ISR with the vector number and
 *  a pointer to the saved interrupt frame (1) or just the vector
 *  number (0)?
 */

#define CPU_ISR_PASSES_FRAME_POINTER 0

/*
 *  Some family members have no FP, some have an FPU such as the i387
 *  for the i386, others have it built in (i486DX, Pentium).
 */

#ifdef __SSE__
#define CPU_HARDWARE_FP                  TRUE
#define CPU_SOFTWARE_FP                  FALSE

#define CPU_ALL_TASKS_ARE_FP             TRUE
#define CPU_IDLE_TASK_IS_FP              TRUE
#define CPU_USE_DEFERRED_FP_SWITCH       FALSE
#else /* __SSE__ */

#if ( I386_HAS_FPU == 1 )
#define CPU_HARDWARE_FP     TRUE    /* i387 for i386 */
#else
#define CPU_HARDWARE_FP     FALSE
#endif
#define CPU_SOFTWARE_FP     FALSE

#define CPU_ALL_TASKS_ARE_FP             FALSE
#define CPU_IDLE_TASK_IS_FP              FALSE
#if defined(RTEMS_SMP)
  #define CPU_USE_DEFERRED_FP_SWITCH     FALSE
#else
  #define CPU_USE_DEFERRED_FP_SWITCH     TRUE
#endif
#endif /* __SSE__ */

#define CPU_STACK_GROWS_UP               FALSE
#define CPU_STRUCTURE_ALIGNMENT

#define CPU_TIMESTAMP_USE_INT64_INLINE TRUE

/*
 *  Does this port provide a CPU dependent IDLE task implementation?
 *
 *  If TRUE, then the routine _CPU_Thread_Idle_body
 *  must be provided and is used as the IDLE thread body instead of
 *  the generic _Thread_Idle_body.
 *
 *  If FALSE, then use the generic IDLE thread body if the BSP does
 *  not provide one.
 */

#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE

/*
 *  Define what is required to specify how the network to host conversion
 *  routines are handled.
 */

#define CPU_BIG_ENDIAN                           FALSE
#define CPU_LITTLE_ENDIAN                        TRUE

#define CPU_PER_CPU_CONTROL_SIZE 0

#define I386_CONTEXT_CONTROL_EFLAGS_OFFSET 0
#define I386_CONTEXT_CONTROL_ESP_OFFSET 4
#define I386_CONTEXT_CONTROL_EBP_OFFSET 8
#define I386_CONTEXT_CONTROL_EBX_OFFSET 12
#define I386_CONTEXT_CONTROL_ESI_OFFSET 16
#define I386_CONTEXT_CONTROL_EDI_OFFSET 20

#ifdef RTEMS_SMP
  #define I386_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 24
#endif

/* structures */

#ifndef ASM

typedef struct {
  /* There is no CPU specific per-CPU state */
} CPU_Per_CPU_control;

/*
 *  Basic integer context for the i386 family.
 */

typedef struct {
  uint32_t    eflags;   /* extended flags register                   */
  void       *esp;      /* extended stack pointer register           */
  void       *ebp;      /* extended base pointer register            */
  uint32_t    ebx;      /* extended bx register                      */
  uint32_t    esi;      /* extended source index register            */
  uint32_t    edi;      /* extended destination index register       */
#ifdef RTEMS_SMP
  volatile bool is_executing;
#endif
}   Context_Control;

#define _CPU_Context_Get_SP( _context ) \
  (_context)->esp

#ifdef RTEMS_SMP
  static inline bool _CPU_Context_Get_is_executing(
    const Context_Control *context
  )
  {
    return context->is_executing;
  }

  static inline void _CPU_Context_Set_is_executing(
    Context_Control *context,
    bool is_executing
  )
  {
    context->is_executing = is_executing;
  }
#endif

/*
 *  FP context save area for the i387 numeric coprocessors.
 */
#ifdef __SSE__
/* All FPU and SSE registers are volatile; hence, as long
 * as we are within normally executing C code (including
 * a task switch) there is no need for saving/restoring
 * any of those registers.
 * We must save/restore the full FPU/SSE context across
 * interrupts and exceptions, however:
 *   -  after ISR execution a _Thread_Dispatch() may happen
 *      and it is therefore necessary to save the FPU/SSE
 *      registers to be restored when control is returned
 *      to the interrupted task.
 *   -  gcc may implicitly use FPU/SSE instructions in
 *      an ISR.
 *
 * Even though the SYSV ABI (i386) makes no explicit mention
 * of the FPU control word being non-volatile, we maintain
 * MXCSR and the FPU control word for each task.
 */
typedef struct {
  uint32_t  mxcsr;
  uint16_t  fpucw;
} Context_Control_fp;

#else

typedef struct {
  uint8_t     fp_save_area[108];    /* context size area for I80387 */
                                    /*  28 bytes for environment    */
} Context_Control_fp;

#endif


/*
 *  The following structure defines the set of information saved
 *  on the current stack by RTEMS upon receipt of exceptions.
 *
 * idtIndex is either the interrupt number or the trap/exception number.
 * faultCode is the code pushed by the processor on some exceptions.
 *
 * Since the first registers are directly pushed by the CPU they
 * may not respect 16-byte stack alignment, which is, however,
 * mandatory for the SSE register area.
 * Therefore, these registers are stored at an aligned address
 * and a pointer is stored in the CPU_Exception_frame.
 * If the executive was compiled without SSE support then
 * this pointer is NULL.
 */

struct Context_Control_sse;

typedef struct {
  struct Context_Control_sse *fp_ctxt;
  uint32_t    edi;
  uint32_t    esi;
  uint32_t    ebp;
  uint32_t    esp0;
  uint32_t    ebx;
  uint32_t    edx;
  uint32_t    ecx;
  uint32_t    eax;
  uint32_t    idtIndex;
  uint32_t    faultCode;
  uint32_t    eip;
  uint32_t    cs;
  uint32_t    eflags;
} CPU_Exception_frame;

#ifdef __SSE__
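/* Note: this structure mirrors the 512-byte memory image used by the
 * FXSAVE/FXRSTOR instructions: 32 bytes of header fields (control/status
 * words, instruction and data pointers, MXCSR), 8 x 16 bytes of x87/MMX
 * registers, 8 x 16 bytes of XMM registers, and 224 reserved bytes.
 */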
typedef struct Context_Control_sse {
  uint16_t  fcw;
  uint16_t  fsw;
  uint8_t   ftw;
  uint8_t   res_1;
  uint16_t  fop;
  uint32_t  fpu_ip;
  uint16_t  cs;
  uint16_t  res_2;
  uint32_t  fpu_dp;
  uint16_t  ds;
  uint16_t  res_3;
  uint32_t  mxcsr;
  uint32_t  mxcsr_mask;
  struct {
    uint8_t fpreg[10];
    uint8_t res_4[ 6];
  } fp_mmregs[8];
  uint8_t   xmmregs[8][16];
  uint8_t   res_5[224];
} Context_Control_sse
__attribute__((aligned(16)))
;
#endif

typedef void (*cpuExcHandlerType) (CPU_Exception_frame*);
extern cpuExcHandlerType _currentExcHandler;
extern void rtems_exception_init_mngt(void);

/*
 * This port does not pass any frame info to the
 * interrupt handler.
 */

typedef void CPU_Interrupt_frame;

typedef enum {
  I386_EXCEPTION_DIVIDE_BY_ZERO      = 0,
  I386_EXCEPTION_DEBUG               = 1,
  I386_EXCEPTION_NMI                 = 2,
  I386_EXCEPTION_BREAKPOINT          = 3,
  I386_EXCEPTION_OVERFLOW            = 4,
  I386_EXCEPTION_BOUND               = 5,
  I386_EXCEPTION_ILLEGAL_INSTR       = 6,
  I386_EXCEPTION_MATH_COPROC_UNAVAIL = 7,
  I386_EXCEPTION_DOUBLE_FAULT        = 8,
  I386_EXCEPTION_I386_COPROC_SEG_ERR = 9,
  I386_EXCEPTION_INVALID_TSS         = 10,
  I386_EXCEPTION_SEGMENT_NOT_PRESENT = 11,
  I386_EXCEPTION_STACK_SEGMENT_FAULT = 12,
  I386_EXCEPTION_GENERAL_PROT_ERR    = 13,
  I386_EXCEPTION_PAGE_FAULT          = 14,
  I386_EXCEPTION_INTEL_RES15         = 15,
  I386_EXCEPTION_FLOAT_ERROR         = 16,
  I386_EXCEPTION_ALIGN_CHECK         = 17,
  I386_EXCEPTION_MACHINE_CHECK       = 18,
  I386_EXCEPTION_ENTER_RDBG          = 50     /* to enter RDBG manually */
} Intel_symbolic_exception_name;


/*
 *  context size area for floating point
 *
 *  NOTE:  This is out of place on the i386 to avoid a forward reference.
 */

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

/* variables */

SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context;

#endif /* ASM */

/* constants */

/*
 *  This defines the number of levels and the mask used to pick those
 *  bits out of a thread mode.
 */

#define CPU_MODES_INTERRUPT_LEVEL  0x00000001 /* interrupt level in mode */
#define CPU_MODES_INTERRUPT_MASK   0x00000001 /* interrupt level in mode */

/*
 *  extra stack required by the MPCI receive server thread
 */

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 1024

/*
 *  This is defined if the port has a special way to report the ISR nesting
 *  level.  Most ports maintain the variable _ISR_Nest_level.
 */

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/*
 *  Minimum size of a thread's stack.
 */

#define CPU_STACK_MINIMUM_SIZE          4096

#define CPU_SIZEOF_POINTER 4

/*
 *  i386 is pretty tolerant of alignment.  Just put things on 4 byte boundaries.
 */

#define CPU_ALIGNMENT                    4
#define CPU_HEAP_ALIGNMENT               CPU_ALIGNMENT
#define CPU_PARTITION_ALIGNMENT          CPU_ALIGNMENT

/*
 *  On i386 thread stacks require no further alignment after allocation
 *  from the Workspace. However, since gcc maintains 16-byte alignment
 *  we try to respect that. If you find an option to let gcc squeeze
 *  the stack more tightly then setting CPU_STACK_ALIGNMENT to 16 still
 *  doesn't waste much space since this only determines the *initial*
 *  alignment.
 */

#define CPU_STACK_ALIGNMENT             16

/* macros */

#ifndef ASM
/*
 *  ISR handler macros
 *
 *  These macros perform the following functions:
 *     + initialize the RTEMS vector table
 *     + disable all maskable CPU interrupts
 *     + restore previous interrupt level (enable)
 *     + temporarily restore interrupts (flash)
 *     + set a particular level
 */

#define _CPU_ISR_Disable( _level ) i386_disable_interrupts( _level )

#define _CPU_ISR_Enable( _level )  i386_enable_interrupts( _level )

#define _CPU_ISR_Flash( _level )   i386_flash_interrupts( _level )

#define _CPU_ISR_Set_level( _new_level ) \
  { \
    if ( _new_level ) __asm__ volatile ( "cli" ); \
    else              __asm__ volatile ( "sti" ); \
  }
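
/* Illustrative sketch (not part of the port interface): the disable/enable
 * macros are used in matched pairs around a critical section, while _Flash
 * briefly re-enables interrupts in the middle of a long section:
 *
 *   uint32_t level;
 *   _CPU_ISR_Disable( level );   // save previous eflags in level, then cli
 *   ... critical section ...
 *   _CPU_ISR_Flash( level );     // momentarily re-enable, then disable again
 *   ... more critical work ...
 *   _CPU_ISR_Enable( level );    // restore the eflags saved in level
 */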

uint32_t   _CPU_ISR_Get_level( void );

/*  Make sure interrupt stack has space for ISR
 *  'vector' arg at the top and that it is aligned
 *  properly.
 */

#define _CPU_Interrupt_stack_setup( _lo, _hi )  \
  do { \
    _hi = (void*)(((uintptr_t)(_hi) - 4) & ~ (CPU_STACK_ALIGNMENT - 1)); \
  } while (0)
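
/* Worked example (illustrative only): with CPU_STACK_ALIGNMENT == 16 and
 * _hi == 0x20F00C, the macro computes 0x20F00C - 4 == 0x20F008 and masks
 * with ~0xF, yielding 0x20F000: a 16-byte aligned stack top that leaves
 * room for the ISR 'vector' argument above it.
 */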

#endif /* ASM */

/* end of ISR handler macros */

/*
 *  Context handler macros
 *
 *  These macros perform the following functions:
 *     + initialize a context area
 *     + restart the current thread
 *     + calculate the initial pointer into a FP context area
 *     + initialize an FP context area
 */

#define CPU_EFLAGS_INTERRUPTS_ON  0x00003202
#define CPU_EFLAGS_INTERRUPTS_OFF 0x00003002

#ifndef ASM

/*
 * Stack alignment note:
 *
 * We want the stack to look to the '_entry_point' routine
 * like an ordinary stack frame as if '_entry_point' was
 * called from C code.
 * Note that '_entry_point' is jumped-to by the 'ret'
 * instruction returning from _CPU_Context_switch() or
 * _CPU_Context_restore(), thus popping the _entry_point
 * from the stack.
 * However, _entry_point expects a frame to look like this:
 *
 *      args        [_Thread_Handler expects no args, however]
 *      ------      (alignment boundary)
 * SP-> return_addr return here when _entry_point returns (which never happens)
 *
 *
 * Hence we must initialize the stack as follows:
 *
 *         [arg1          ]:  n/a
 *         [arg0 (aligned)]:  n/a
 *         [ret. addr     ]:  NULL
 * SP->    [jump-target   ]:  _entry_point
 *
 * When _CPU_Context_switch() returns it pops the _entry_point from
 * the stack, and _entry_point then finds a standard frame layout.
 */

#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
                                   _isr, _entry_point, _is_fp, _tls_area ) \
  do { \
    uint32_t   _stack; \
    \
    if ( (_isr) ) (_the_context)->eflags = CPU_EFLAGS_INTERRUPTS_OFF; \
    else          (_the_context)->eflags = CPU_EFLAGS_INTERRUPTS_ON; \
    \
    _stack  = ((uint32_t)(_stack_base)) + (_size); \
    _stack &= ~ (CPU_STACK_ALIGNMENT - 1); \
    _stack -= 2*sizeof(proc_ptr*); /* see above for why we need to do this */ \
    *((proc_ptr *)(_stack)) = (_entry_point); \
    (_the_context)->ebp     = (void *) 0; \
    (_the_context)->esp     = (void *) _stack; \
  } while (0)
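
/* Worked example (illustrative only): assume _stack_base == 0x100010 and
 * _size == 0x1000. Then _stack starts at 0x101010, which is already 16-byte
 * aligned, and 2*sizeof(proc_ptr*) == 8 bytes are reserved, giving
 * esp == 0x101008 with _entry_point stored there, matching the stack
 * layout pictured in the note above.
 */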

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

#if defined(RTEMS_SMP)
  uint32_t _CPU_SMP_Initialize( void );

  bool _CPU_SMP_Start_processor( uint32_t cpu_index );

  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

  uint32_t _CPU_SMP_Get_current_processor( void );

  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

  static inline void _CPU_SMP_Processor_event_broadcast( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }

  static inline void _CPU_SMP_Processor_event_receive( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }
#endif

#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

#define _CPU_Context_Initialize_fp( _fp_area ) \
  { \
    memcpy( *_fp_area, &_CPU_Null_fp_context, CPU_CONTEXT_FP_SIZE ); \
  }

/* end of Context handler macros */

/*
 *  Fatal Error manager macros
 *
 *  These macros perform the following functions:
 *    + disable interrupts and halt the CPU
 */

#define _CPU_Fatal_halt( _source, _error ) \
  { \
    uint32_t _error_lvalue = ( _error ); \
    __asm__ volatile ( "cli ; \
                    movl %0,%%eax ; \
                    hlt" \
                    : "=r" ((_error_lvalue)) : "0" ((_error_lvalue)) \
    ); \
  }
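
/* Note: per the change described at the top of this page, the macro now
 * receives the fatal source in addition to the error code; this port
 * currently places only the error code in EAX before halting and ignores
 * _source, which preserves the previous behaviour.
 */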

#endif /* ASM */

/* end of Fatal Error manager macros */

/*
 *  Bitfield handler macros
 *
 *  These macros perform the following functions:
 *     + scan a 16 bit bitfield for the first set bit (this port uses
 *       'bsfw', which returns the index of the least significant set bit)
 */

#define CPU_USE_GENERIC_BITFIELD_CODE FALSE
#define CPU_USE_GENERIC_BITFIELD_DATA FALSE

#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
  { \
    register uint16_t   __value_in_register = (_value); \
    \
    _output = 0; \
    \
    __asm__ volatile ( "bsfw    %0,%1 " \
                    : "=r" (__value_in_register), "=r" (_output) \
                    : "0"  (__value_in_register), "1"  (_output) \
    ); \
  }
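
/* Worked example (illustrative only): for _value == 0x0030 (bits 4 and 5
 * set), 'bsfw' scans forward from bit 0 and stores 4 in _output; combined
 * with _CPU_Priority_Mask() and _CPU_Priority_bits_index() below, this
 * maps directly back to the bit position in the priority bit map.
 */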

/* end of Bitfield handler macros */

/*
 *  Priority handler macros
 *
 *  These macros perform the following functions:
 *    + return a mask with the bit for this major/minor portion of
 *      thread priority set.
 *    + translate the bit number returned by "Bitfield_find_first_bit"
 *      into an index into the thread ready chain bit maps
 */

#define _CPU_Priority_Mask( _bit_number ) \
  ( 1 << (_bit_number) )

#define _CPU_Priority_bits_index( _priority ) \
  (_priority)

/* functions */

#ifndef ASM
/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 */

void _CPU_Initialize(void);

/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs a "raw" interrupt handler directly into the
 *  processor's vector table.
 */

void _CPU_ISR_install_raw_handler(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.
 */

void _CPU_ISR_install_vector(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_Thread_Idle_body
 *
 *  Use the halt instruction or the low power mode of a particular i386 model.
 */

#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)

void *_CPU_Thread_Idle_body( uintptr_t ignored );

#endif /* CPU_PROVIDES_IDLE_THREAD_BODY */

/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner and avoid stack conflicts.
 */

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;

/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 */

#ifdef __SSE__
#define _CPU_Context_save_fp(fp_context_pp) \
  do {                                      \
    __asm__ __volatile__(                   \
      "fstcw %0"                            \
      :"=m"((*(fp_context_pp))->fpucw)      \
    );                                      \
    __asm__ __volatile__(                   \
      "stmxcsr %0"                          \
      :"=m"((*(fp_context_pp))->mxcsr)      \
    );                                      \
  } while (0)
#else
void _CPU_Context_save_fp(
  Context_Control_fp **fp_context_ptr
);
#endif

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 */
#ifdef __SSE__
#define _CPU_Context_restore_fp(fp_context_pp) \
  do {                                         \
    __asm__ __volatile__(                      \
      "fldcw %0"                               \
      ::"m"((*(fp_context_pp))->fpucw)         \
      :"fpcr"                                  \
    );                                         \
    __builtin_ia32_ldmxcsr(_Thread_Executing->fp_context->mxcsr);  \
  } while (0)
#else
void _CPU_Context_restore_fp(
  Context_Control_fp **fp_context_ptr
);
#endif

#ifdef __SSE__
#define _CPU_Context_Initialization_at_thread_begin() \
  do {                                                \
    __asm__ __volatile__(                             \
      "finit"                                         \
      :                                               \
      :                                               \
      :"st","st(1)","st(2)","st(3)",                  \
       "st(4)","st(5)","st(6)","st(7)",               \
       "fpsr","fpcr"                                  \
    );                                                \
    if ( _Thread_Executing->fp_context ) {            \
      _CPU_Context_restore_fp(&_Thread_Executing->fp_context); \
    }                                                 \
  } while (0)
#endif

static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
{
  /* TODO */
}

static inline void _CPU_Context_validate( uintptr_t pattern )
{
  while (1) {
    /* TODO */
  }
}

void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

typedef uint32_t CPU_Counter_ticks;

CPU_Counter_ticks _CPU_Counter_read( void );

static inline CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
)
{
  return second - first;
}
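
/* Note (illustrative): because CPU_Counter_ticks is an unsigned 32-bit type,
 * the subtraction above yields the correct elapsed tick count even when the
 * counter wraps between the two reads, e.g. second == 0x00000005 and
 * first == 0xFFFFFFFE gives 7.
 */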

#endif /* ASM */

#ifdef __cplusplus
}
#endif

#endif