source: rtems/cpukit/score/cpu/i386/rtems/score/cpu.h @ 4aa23c96

5
Last change on this file since 4aa23c96 was 4aa23c96, checked in by Sebastian Huber <sebastian.huber@…>, on 01/23/17 at 07:12:18

Remove CPU_BIG_ENDIAN and CPU_LITTLE_ENDIAN

Use de-facto standard BYTE_ORDER instead.

Close #2803.

  • Property mode set to 100644
File size: 19.1 KB
Line 
1/**
2 * @file
3 *
4 * @brief Intel I386 CPU Dependent Source
5 *
6 * This include file contains information pertaining to the Intel
7 * i386 processor.
8 */
9
10/*
11 *  COPYRIGHT (c) 1989-2011.
12 *  On-Line Applications Research Corporation (OAR).
13 *
14 *  The license and distribution terms for this file may be
15 *  found in the file LICENSE in this distribution or at
16 *  http://www.rtems.org/license/LICENSE.
17 */
18
19#ifndef _RTEMS_SCORE_CPU_H
20#define _RTEMS_SCORE_CPU_H
21
22#ifndef ASM
23#include <string.h> /* for memcpy */
24#endif
25
26#ifdef __cplusplus
27extern "C" {
28#endif
29
30#include <rtems/score/types.h>
31#include <rtems/score/i386.h>
32
/* conditional compilation parameters */

/*
 *  Does the CPU follow the simple vectored interrupt model?
 *
 *  If TRUE, then RTEMS allocates the vector table it internally manages.
 *  If FALSE, then the BSP is assumed to allocate and manage the vector
 *  table
 *
 *  i386 Specific Information:
 *
 *  The PowerPC and x86 were the first to use the PIC interrupt model.
 *  They do not use the simple vectored interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

/*
 *  i386 has an RTEMS allocated and managed interrupt stack.
 */

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
#define CPU_ALLOCATE_INTERRUPT_STACK     TRUE

/*
 *  Does the RTEMS invoke the user's ISR with the vector number and
 *  a pointer to the saved interrupt frame (TRUE) or just the vector
 *  number (FALSE)?
 */

#define CPU_ISR_PASSES_FRAME_POINTER FALSE

/*
 *  Some family members have no FP, some have an FPU such as the i387
 *  for the i386, others have it built in (i486DX, Pentium).
 */

#ifdef __SSE__
/* With SSE the FPU is always present and every task may use it. */
#define CPU_HARDWARE_FP                  TRUE
#define CPU_SOFTWARE_FP                  FALSE

#define CPU_ALL_TASKS_ARE_FP             TRUE
#define CPU_IDLE_TASK_IS_FP              TRUE
#define CPU_USE_DEFERRED_FP_SWITCH       FALSE
#else /* __SSE__ */

#if ( I386_HAS_FPU == 1 )
#define CPU_HARDWARE_FP     TRUE    /* i387 for i386 */
#else
#define CPU_HARDWARE_FP     FALSE
#endif
#define CPU_SOFTWARE_FP     FALSE

#define CPU_ALL_TASKS_ARE_FP             FALSE
#define CPU_IDLE_TASK_IS_FP              FALSE
#if defined(RTEMS_SMP)
  /* NOTE(review): deferred FP switch is presumably disabled on SMP because
   * the FP owner may migrate between processors -- confirm. */
  #define CPU_USE_DEFERRED_FP_SWITCH     FALSE
#else
  #define CPU_USE_DEFERRED_FP_SWITCH     TRUE
#endif
#endif /* __SSE__ */

#define CPU_ENABLE_ROBUST_THREAD_DISPATCH FALSE

/* The i386 stack grows from high addresses toward low addresses. */
#define CPU_STACK_GROWS_UP               FALSE

/* FIXME: The Pentium 4 used 128 bytes, it this processor still relevant? */
#define CPU_CACHE_LINE_BYTES 64

#define CPU_STRUCTURE_ALIGNMENT

/*
 *  Does this port provide a CPU dependent IDLE task implementation?
 *
 *  If TRUE, then the routine _CPU_Thread_Idle_body
 *  must be provided and is the default IDLE thread body instead of
 *  the generic _Thread_Idle_body.
 *
 *  If FALSE, then use the generic IDLE thread body if the BSP does
 *  not provide one.
 */

#define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE

#define CPU_MAXIMUM_PROCESSORS 32

/* Byte offsets of the members of Context_Control (defined below); these
 * must stay in sync with that structure's layout. */
#define I386_CONTEXT_CONTROL_EFLAGS_OFFSET 0
#define I386_CONTEXT_CONTROL_ESP_OFFSET 4
#define I386_CONTEXT_CONTROL_EBP_OFFSET 8
#define I386_CONTEXT_CONTROL_EBX_OFFSET 12
#define I386_CONTEXT_CONTROL_ESI_OFFSET 16
#define I386_CONTEXT_CONTROL_EDI_OFFSET 20

#ifdef RTEMS_SMP
  #define I386_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 24
#endif
129
/* structures */

#ifndef ASM

/*
 *  Basic integer context for the i386 family.
 *
 *  Holds the registers preserved across a context switch; the member
 *  offsets must match the I386_CONTEXT_CONTROL_*_OFFSET constants above.
 */

typedef struct {
  uint32_t    eflags;   /* extended flags register                   */
  void       *esp;      /* extended stack pointer register           */
  void       *ebp;      /* extended base pointer register            */
  uint32_t    ebx;      /* extended bx register                      */
  uint32_t    esi;      /* extended source index register            */
  uint32_t    edi;      /* extended destination index register       */
#ifdef RTEMS_SMP
  /* NOTE(review): presumably TRUE while this context executes on a
   * processor, consulted during the SMP context switch -- confirm. */
  volatile bool is_executing;
#endif
}   Context_Control;

/* Retrieve the stack pointer saved in a context. */
#define _CPU_Context_Get_SP( _context ) \
  (_context)->esp
152
#ifdef RTEMS_SMP
  /* Read the is_executing flag of a thread context. */
  static inline bool _CPU_Context_Get_is_executing( const Context_Control *ctx )
  {
    bool executing = ctx->is_executing;

    return executing;
  }

  /* Update the is_executing flag of a thread context. */
  static inline void _CPU_Context_Set_is_executing( Context_Control *ctx, bool value )
  {
    ctx->is_executing = value;
  }
#endif
169
/*
 *  FP context save area for the i387 numeric coprocessors.
 */
#ifdef __SSE__
/* All FPU and SSE registers are volatile; hence, as long
 * as we are within normally executing C code (including
 * a task switch) there is no need for saving/restoring
 * any of those registers.
 * We must save/restore the full FPU/SSE context across
 * interrupts and exceptions, however:
 *   -  after ISR execution a _Thread_Dispatch() may happen
 *      and it is therefore necessary to save the FPU/SSE
 *      registers to be restored when control is returned
 *      to the interrupted task.
 *   -  gcc may implicitly use FPU/SSE instructions in
 *      an ISR.
 *
 * Even though there is no explicit mentioning of the FPU
 * control word in the SYSV ABI (i386) being non-volatile
 * we maintain MXCSR and the FPU control-word for each task.
 */
typedef struct {
        uint32_t  mxcsr;    /* SSE control/status register */
        uint16_t  fpucw;    /* x87 FPU control word        */
} Context_Control_fp;

#else

/* Without SSE the whole i387 state is saved via fsave/frstor-style
 * routines into this opaque buffer. */
typedef struct {
  uint8_t     fp_save_area[108];    /* context size area for I80387 */
                                    /*  28 bytes for environment    */
} Context_Control_fp;

#endif
204
205
/*
 *  The following structure defines the set of information saved
 *  on the current stack by RTEMS upon receipt of exceptions.
 *
 * idtIndex is either the interrupt number or the trap/exception number.
 * faultCode is the code pushed by the processor on some exceptions.
 *
 * Since the first registers are directly pushed by the CPU they
 * may not respect 16-byte stack alignment, which is, however,
 * mandatory for the SSE register area.
 * Therefore, these registers are stored at an aligned address
 * and a pointer is stored in the CPU_Exception_frame.
 * If the executive was compiled without SSE support then
 * this pointer is NULL.
 */

struct Context_Control_sse;

typedef struct {
  struct Context_Control_sse *fp_ctxt;  /* NULL unless built with SSE */
  uint32_t    edi;
  uint32_t    esi;
  uint32_t    ebp;
  uint32_t    esp0;
  uint32_t    ebx;
  uint32_t    edx;
  uint32_t    ecx;
  uint32_t    eax;
  uint32_t    idtIndex;   /* interrupt vector or trap/exception number */
  uint32_t    faultCode;  /* error code pushed by some exceptions      */
  uint32_t    eip;
  uint32_t    cs;
  uint32_t    eflags;
} CPU_Exception_frame;

#ifdef __SSE__
/* FPU/SSE state image; the fields and the 16-byte alignment total 512
 * bytes.  NOTE(review): layout appears to match the FXSAVE/FXRSTOR
 * memory image -- confirm against the Intel SDM. */
typedef struct Context_Control_sse {
  uint16_t  fcw;            /* FPU control word */
  uint16_t  fsw;            /* FPU status word  */
  uint8_t   ftw;            /* FPU tag word     */
  uint8_t   res_1;
  uint16_t  fop;
  uint32_t  fpu_ip;
  uint16_t  cs;
  uint16_t  res_2;
  uint32_t  fpu_dp;
  uint16_t  ds;
  uint16_t  res_3;
  uint32_t  mxcsr;
  uint32_t  mxcsr_mask;
  struct {
        uint8_t fpreg[10];
        uint8_t res_4[ 6];
  } fp_mmregs[8];           /* x87/MMX register stack */
  uint8_t   xmmregs[8][16]; /* XMM0..XMM7             */
  uint8_t   res_5[224];
} Context_Control_sse
__attribute__((aligned(16)))
;
#endif

/* Global hook invoked on exceptions; see rtems_exception_init_mngt(). */
typedef void (*cpuExcHandlerType) (CPU_Exception_frame*);
extern cpuExcHandlerType _currentExcHandler;
extern void rtems_exception_init_mngt(void);
270
/*
 * This port does not pass any frame info to the
 * interrupt handler.
 */

typedef void CPU_Interrupt_frame;

/* Symbolic names for the Intel-defined exception vectors. */
typedef enum {
  I386_EXCEPTION_DIVIDE_BY_ZERO      = 0,
  I386_EXCEPTION_DEBUG               = 1,
  I386_EXCEPTION_NMI                 = 2,
  I386_EXCEPTION_BREAKPOINT          = 3,
  I386_EXCEPTION_OVERFLOW            = 4,
  I386_EXCEPTION_BOUND               = 5,
  I386_EXCEPTION_ILLEGAL_INSTR       = 6,
  I386_EXCEPTION_MATH_COPROC_UNAVAIL = 7,
  I386_EXCEPTION_DOUBLE_FAULT        = 8,
  I386_EXCEPTION_I386_COPROC_SEG_ERR = 9,
  I386_EXCEPTION_INVALID_TSS         = 10,
  I386_EXCEPTION_SEGMENT_NOT_PRESENT = 11,
  I386_EXCEPTION_STACK_SEGMENT_FAULT = 12,
  I386_EXCEPTION_GENERAL_PROT_ERR    = 13,
  I386_EXCEPTION_PAGE_FAULT          = 14,
  I386_EXCEPTION_INTEL_RES15         = 15,
  I386_EXCEPTION_FLOAT_ERROR         = 16,
  I386_EXCEPTION_ALIGN_CHECK         = 17,
  I386_EXCEPTION_MACHINE_CHECK       = 18,
  I386_EXCEPTION_ENTER_RDBG          = 50     /* to enter manually RDBG */

} Intel_symbolic_exception_name;


/*
 *  context size area for floating point
 *
 *  NOTE:  This is out of place on the i386 to avoid a forward reference.
 */

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

/* variables */

/* Pristine FP context image copied into each new task's FP save area
 * by _CPU_Context_Initialize_fp(). */
extern Context_Control_fp _CPU_Null_fp_context;

#endif /* ASM */
316
/* constants */

/*
 *  This defines the number of levels and the mask used to pick those
 *  bits out of a thread mode.
 */

#define CPU_MODES_INTERRUPT_LEVEL  0x00000001 /* interrupt level in mode */
#define CPU_MODES_INTERRUPT_MASK   0x00000001 /* interrupt level in mode */

/*
 *  extra stack required by the MPCI receive server thread
 */

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 1024

/*
 *  This is defined if the port has a special way to report the ISR nesting
 *  level.  Most ports maintain the variable _ISR_Nest_level.
 */

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/*
 *  Minimum size of a thread's stack.
 */

#define CPU_STACK_MINIMUM_SIZE          4096

/* 32-bit pointers on the i386. */
#define CPU_SIZEOF_POINTER 4

/*
 *  i386 is pretty tolerant of alignment.  Just put things on 4 byte boundaries.
 */

#define CPU_ALIGNMENT                    4
#define CPU_HEAP_ALIGNMENT               CPU_ALIGNMENT
#define CPU_PARTITION_ALIGNMENT          CPU_ALIGNMENT

/*
 *  On i386 thread stacks require no further alignment after allocation
 *  from the Workspace. However, since gcc maintains 16-byte alignment
 *  we try to respect that. If you find an option to let gcc squeeze
 *  the stack more tightly then setting CPU_STACK_ALIGNMENT to 16 still
 *  doesn't waste much space since this only determines the *initial*
 *  alignment.
 */

#define CPU_STACK_ALIGNMENT             16
366
/* macros */

#ifndef ASM
/*
 *  ISR handler macros
 *
 *  These macros perform the following functions:
 *     + initialize the RTEMS vector table
 *     + disable all maskable CPU interrupts
 *     + restore previous interrupt level (enable)
 *     + temporarily restore interrupts (flash)
 *     + set a particular level
 */

#if !defined(RTEMS_PARAVIRT)
/* Native build: delegate to the i386 interrupt-flag helpers. */
#define _CPU_ISR_Disable( _level ) i386_disable_interrupts( _level )

#define _CPU_ISR_Enable( _level )  i386_enable_interrupts( _level )

#define _CPU_ISR_Flash( _level )   i386_flash_interrupts( _level )

/* Non-zero level disables maskable interrupts (cli), zero enables (sti). */
#define _CPU_ISR_Set_level( _new_level ) \
  { \
    if ( _new_level ) __asm__ volatile ( "cli" ); \
    else              __asm__ volatile ( "sti" ); \
  }
#else
/* Paravirtualized build: helpers take/return the level by value. */
#define _CPU_ISR_Disable( _level ) _level = i386_disable_interrupts()
#define _CPU_ISR_Enable( _level ) i386_enable_interrupts( _level )
#define _CPU_ISR_Flash( _level ) i386_flash_interrupts( _level )
#define _CPU_ISR_Set_level( _new_level ) i386_set_interrupt_level(_new_level)
#endif
399
400RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
401{
402  return ( level & EFLAGS_INTR_ENABLE ) != 0;
403}
404
/* Returns the current interrupt level encoded by this port. */
uint32_t   _CPU_ISR_Get_level( void );

/*  Make sure interrupt stack has space for ISR
 *  'vector' arg at the top and that it is aligned
 *  properly.
 */

#define _CPU_Interrupt_stack_setup( _lo, _hi )  \
        do {                                        \
                _hi = (void*)(((uintptr_t)(_hi) - 4) & ~ (CPU_STACK_ALIGNMENT - 1)); \
        } while (0)

#endif /* ASM */
418
419/* end of ISR handler macros */
420
/*
 *  Context handler macros
 *
 *  These macros perform the following functions:
 *     + initialize a context area
 *     + restart the current thread
 *     + calculate the initial pointer into a FP context area
 *     + initialize an FP context area
 */

/* Initial EFLAGS images for new threads: reserved bit 1 set, IOPL = 3,
 * and the IF bit (0x200) set or cleared respectively. */
#define CPU_EFLAGS_INTERRUPTS_ON  0x00003202
#define CPU_EFLAGS_INTERRUPTS_OFF 0x00003002
433
#ifndef ASM

/*
 * Stack alignment note:
 *
 * We want the stack to look to the '_entry_point' routine
 * like an ordinary stack frame as if '_entry_point' was
 * called from C-code.
 * Note that '_entry_point' is jumped-to by the 'ret'
 * instruction returning from _CPU_Context_switch() or
 * _CPU_Context_restore() thus popping the _entry_point
 * from the stack.
 * However, _entry_point expects a frame to look like this:
 *
 *      args        [_Thread_Handler expects no args, however]
 *      ------      (alignment boundary)
 * SP-> return_addr return here when _entry_point returns (which never happens)
 *
 *
 * Hence we must initialize the stack as follows
 *
 *         [arg1          ]:  n/a
 *         [arg0 (aligned)]:  n/a
 *         [ret. addr     ]:  NULL
 * SP->    [jump-target   ]:  _entry_point
 *
 * When Context_switch returns it pops the _entry_point from
 * the stack which then finds a standard layout.
 */


/* Initialize _the_context so a context switch to it starts execution at
 * _entry_point on the stack [_stack_base, _stack_base + _size).  _isr
 * selects whether the thread starts with interrupts disabled; _is_fp and
 * _tls_area are not used by this port. */
#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
                                   _isr, _entry_point, _is_fp, _tls_area ) \
  do { \
    uint32_t   _stack; \
    \
    (void) _is_fp; /* avoid warning for being unused */ \
    if ( (_isr) ) (_the_context)->eflags = CPU_EFLAGS_INTERRUPTS_OFF; \
    else          (_the_context)->eflags = CPU_EFLAGS_INTERRUPTS_ON; \
    \
    _stack  = ((uint32_t)(_stack_base)) + (_size); \
    _stack &= ~ (CPU_STACK_ALIGNMENT - 1); \
    _stack -= 2*sizeof(proc_ptr*); /* see above for why we need to do this */ \
    *((proc_ptr *)(_stack)) = (_entry_point); \
    (_the_context)->ebp     = (void *) 0; \
    (_the_context)->esp     = (void *) _stack; \
  } while (0)

/* Restart the calling thread by restoring its own initialized context. */
#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );
485
#if defined(RTEMS_SMP)
  uint32_t _CPU_SMP_Initialize( void );

  bool _CPU_SMP_Start_processor( uint32_t cpu_index );

  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

  /* Nothing to do */
  #define _CPU_SMP_Prepare_start_multitasking() do { } while ( 0 )

  uint32_t _CPU_SMP_Get_current_processor( void );

  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

  /* Compiler barrier only.  NOTE(review): presumably no hardware event
   * signalling is needed on this port -- confirm. */
  static inline void _CPU_SMP_Processor_event_broadcast( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }

  static inline void _CPU_SMP_Processor_event_receive( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }
#endif

/* Address of the FP context at the given offset within a memory area. */
#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

/* Initialize an FP save area by copying the pristine _CPU_Null_fp_context. */
#define _CPU_Context_Initialize_fp( _fp_area ) \
  { \
    memcpy( *_fp_area, &_CPU_Null_fp_context, CPU_CONTEXT_FP_SIZE ); \
  }

/* end of Context handler macros */
520
/*
 *  Fatal Error manager macros
 *
 *  These macros perform the following functions:
 *    + disable interrupts and halt the CPU
 */

/* Halts the system on a fatal error; never returns. */
extern void _CPU_Fatal_halt(uint32_t source, uint32_t error)
  RTEMS_NO_RETURN;

#endif /* ASM */

/* end of Fatal Error manager macros */
534
/*
 *  Bitfield handler macros
 *
 *  These macros perform the following functions:
 *     + scan for the highest numbered (MSB) set in a 16 bit bitfield
 */

/* This port supplies its own bit scan; do not use the generic C code. */
#define CPU_USE_GENERIC_BITFIELD_CODE FALSE

/* Scan a 16-bit value with the BSF instruction and store the index of
 * the first set bit in _output.
 * NOTE(review): BSF's result is undefined when the source is zero --
 * callers presumably guarantee a non-zero _value; confirm. */
#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
  { \
    register uint16_t __value_in_register = ( _value ); \
    uint16_t          __output = 0; \
    __asm__ volatile ( "bsfw    %0,%1 " \
                    : "=r" ( __value_in_register ), "=r" ( __output ) \
                    : "0"  ( __value_in_register ), "1"  ( __output ) \
    ); \
    ( _output ) = __output; \
  }

/* end of Bitfield handler macros */
556
/*
 *  Priority handler macros
 *
 *  These macros perform the following functions:
 *    + return a mask with the bit for this major/minor portion of
 *      of thread priority set.
 *    + translate the bit number returned by "Bitfield_find_first_bit"
 *      into an index into the thread ready chain bit maps
 */

#define _CPU_Priority_Mask( _bit_number ) \
  ( 1 << (_bit_number) )

/* Identity mapping: bit numbers from the scan are already valid indices. */
#define _CPU_Priority_bits_index( _priority ) \
  (_priority)
572
/* functions */

#ifndef ASM
/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 */

void _CPU_Initialize(void);

/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs a "raw" interrupt handler directly into the
 *  processor's vector table.
 */

void _CPU_ISR_install_raw_handler(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.
 */

void _CPU_ISR_install_vector(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_Thread_Idle_body
 *
 *  Use the halt instruction of low power mode of a particular i386 model.
 */

#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)

void *_CPU_Thread_Idle_body( uintptr_t ignored );

#endif /* CPU_PROVIDES_IDLE_THREAD_BODY */
620
/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner and avoid stack conflicts.
 */

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_NO_RETURN;

/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 *
 *  With SSE only the FPU control word and MXCSR are saved here; the
 *  remaining FPU/SSE state is treated as volatile (see the comment on
 *  Context_Control_fp).
 */

#ifdef __SSE__
#define _CPU_Context_save_fp(fp_context_pp) \
  do {                                      \
    __asm__ __volatile__(                   \
      "fstcw %0"                            \
      :"=m"((*(fp_context_pp))->fpucw)      \
    );                                      \
    __asm__ __volatile__(                   \
      "stmxcsr %0"                          \
      :"=m"((*(fp_context_pp))->mxcsr)      \
    );                                      \
  } while (0)
#else
void _CPU_Context_save_fp(
  Context_Control_fp **fp_context_ptr
);
#endif
666
/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 */
#ifdef __SSE__
/* NOTE(review): the MXCSR restore reads _Thread_Executing->fp_context
 * instead of the fp_context_pp argument; presumably equivalent because
 * the executing thread owns the context being restored -- confirm. */
#define _CPU_Context_restore_fp(fp_context_pp) \
  do {                                         \
    __asm__ __volatile__(                      \
      "fldcw %0"                               \
      ::"m"((*(fp_context_pp))->fpucw)         \
      :"fpcr"                                  \
    );                                         \
    __builtin_ia32_ldmxcsr(_Thread_Executing->fp_context->mxcsr);  \
  } while (0)
#else
void _CPU_Context_restore_fp(
  Context_Control_fp **fp_context_ptr
);
#endif

#ifdef __SSE__
/* Reset the FPU at thread start (finit) and restore the thread's saved
 * FPU control word and MXCSR if it has an FP context. */
#define _CPU_Context_Initialization_at_thread_begin() \
  do {                                                \
    __asm__ __volatile__(                             \
      "finit"                                         \
      :                                               \
      :                                               \
      :"st","st(1)","st(2)","st(3)",                  \
       "st(4)","st(5)","st(6)","st(7)",               \
       "fpsr","fpcr"                                  \
    );                                                \
    if ( _Thread_Executing->fp_context ) {            \
      _CPU_Context_restore_fp(&_Thread_Executing->fp_context); \
    }                                                 \
  } while (0)
#endif
704
/**
 * @brief Clobbers the volatile register context with the given pattern.
 *
 * Not implemented by this port; the pattern is currently ignored.
 * The explicit cast silences unused-parameter warnings until then.
 */
static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
{
  (void) pattern;
  /* TODO */
}

/**
 * @brief Validates the register context against the given pattern.
 *
 * Not implemented by this port; spins forever and never returns.
 * The explicit cast silences unused-parameter warnings until then.
 */
static inline void _CPU_Context_validate( uintptr_t pattern )
{
  (void) pattern;

  while (1) {
    /* TODO */
  }
}
716
/* Prints the given exception frame. */
void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
718
/* CPU counter tick type; wraps around modulo 2**32. */
typedef uint32_t CPU_Counter_ticks;

/* Reads the current value of the CPU counter. */
CPU_Counter_ticks _CPU_Counter_read( void );

/* Number of ticks elapsed from first to second.  Unsigned arithmetic
 * gives the correct distance even if the counter wrapped in between. */
static inline CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
)
{
  CPU_Counter_ticks delta;

  delta = second - first;

  return delta;
}
730
731#endif /* ASM */
732
733#ifdef __cplusplus
734}
735#endif
736
737#endif
Note: See TracBrowser for help on using the repository browser.