source: rtems/cpukit/score/cpu/powerpc/rtems/score/cpu.h @ e6cea88

Last change on this file since e6cea88 was e6cea88, checked in by Ralf Corsepius <ralf.corsepius@…>, on 02/15/05 at 14:29:40

(CPU_Get_timebase_low, rtems_bsp_delay, rtems_bsp_delay_in_bus_cycles): New.

/**
 * @file rtems/score/cpu.h
 */

/*
 * $Id$
 */

#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

#include <rtems/score/powerpc.h>              /* pick up machine definitions */
#ifndef ASM
#include <rtems/score/types.h>
#endif

/* conditional compilation parameters */

/*
 *  Should the calls to _Thread_Enable_dispatch be inlined?
 *
 *  If TRUE, then they are inlined.
 *  If FALSE, then a subroutine call is made.
 *
 *  Basically this is an example of the classic trade-off of size
 *  versus speed.  Inlining the call (TRUE) typically increases the
 *  size of RTEMS while speeding up the enabling of dispatching.
 *  [NOTE: In general, the _Thread_Dispatch_disable_level will
 *  only be 0 or 1 unless you are in an interrupt handler and that
 *  interrupt handler invokes the executive.]  When not inlined,
 *  something calls _Thread_Enable_dispatch which in turn calls
 *  _Thread_Dispatch.  If the enable dispatch is inlined, then
 *  one subroutine call is avoided entirely.
 */

#define CPU_INLINE_ENABLE_DISPATCH       FALSE

/*
 *  Should the body of the search loops in _Thread_queue_Enqueue_priority
 *  be unrolled one time?  If unrolled, each iteration of the loop examines
 *  two "nodes" on the chain being searched.  Otherwise, only one node
 *  is examined per iteration.
 *
 *  If TRUE, then the loops are unrolled.
 *  If FALSE, then the loops are not unrolled.
 *
 *  The primary factor in making this decision is the cost of disabling
 *  and enabling interrupts (_ISR_Flash) versus the cost of the rest of the
 *  body of the loop.  On some CPUs, the flash is more expensive than
 *  one iteration of the loop body.  In this case, it might be desirable
 *  to unroll the loop.  It is important to note that on some CPUs, this
 *  code is the longest interrupt disable period in RTEMS.  So it is
 *  necessary to strike a balance when setting this parameter.
 */

#define CPU_UNROLL_ENQUEUE_PRIORITY      FALSE

#ifdef _OLD_EXCEPTIONS
#include <rtems/old-exceptions/cpu.h>
#else
#include <rtems/new-exceptions/cpu.h>
#endif

#ifndef ASM
/*  The following routine swaps the endian format of an unsigned int.
 *  It must be static because it is referenced indirectly.
 *
 *  This version will work on any processor, but if there is a better
 *  way for your CPU PLEASE use it.  The most common way to do this is to:
 *
 *     swap least significant two bytes with 16-bit rotate
 *     swap upper and lower 16-bits
 *     swap most significant two bytes with 16-bit rotate
 *
 *  Some CPUs have special instructions which swap a 32-bit quantity in
 *  a single instruction (e.g. i486).  It is probably best to avoid
 *  an "endian swapping control bit" in the CPU.  One good reason is
 *  that interrupts would probably have to be disabled to ensure that
 *  an interrupt does not try to access the same "chunk" with the wrong
 *  endian.  Another good reason is that on some CPUs, the endian bit
 *  controls the endianness of ALL fetches -- both code and data -- so
 *  the code will be fetched incorrectly.
 */

static inline uint32_t CPU_swap_u32(
  uint32_t value
)
{
  uint32_t   swapped;

  asm volatile("rlwimi %0,%1,8,24,31;"
               "rlwimi %0,%1,24,16,23;"
               "rlwimi %0,%1,8,8,15;"
               "rlwimi %0,%1,24,0,7;" :
               "=&r" ((swapped)) : "r" ((value)));

  return( swapped );
}
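
/*  A minimal portable sketch of the generic shift-and-mask byte swap that
 *  the comment above describes, shown for illustration only.  The helper
 *  name CPU_swap_u32_portable is an assumption and is not part of this
 *  port, which uses the rlwimi version above.
 */
static inline uint32_t CPU_swap_u32_portable(
  uint32_t value
)
{
  /* isolate each byte, then reassemble them in the opposite order */
  uint32_t b0 =  value        & 0xff;   /* least significant byte */
  uint32_t b1 = (value >> 8)  & 0xff;
  uint32_t b2 = (value >> 16) & 0xff;
  uint32_t b3 = (value >> 24) & 0xff;   /* most significant byte  */

  return (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
}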

#define CPU_swap_u16( value ) \
  ((((value) & 0xff) << 8) | (((value) >> 8) & 0xff))

#endif /* ASM */

#ifndef ASM
/*
 *  Macros to access PowerPC specific additions to the CPU Table
 */

#define rtems_cpu_configuration_get_clicks_per_usec() \
   (_CPU_Table.clicks_per_usec)

#define rtems_cpu_configuration_get_exceptions_in_ram() \
   (_CPU_Table.exceptions_in_RAM)

#endif /* ASM */

#ifndef ASM
/*
 *  Simple spin delay in microsecond units for device drivers.
 *  This is very dependent on the clock speed of the target.
 */

#define CPU_Get_timebase_low( _value ) \
    asm volatile( "mftb  %0" : "=r" (_value) )

#define rtems_bsp_delay( _microseconds ) \
  do { \
    uint32_t   start, ticks, now; \
    CPU_Get_timebase_low( start ); \
    ticks = (_microseconds) * rtems_cpu_configuration_get_clicks_per_usec(); \
    do \
      CPU_Get_timebase_low( now ); \
    while (now - start < ticks); \
  } while (0)

#define rtems_bsp_delay_in_bus_cycles( _cycles ) \
  do { \
    uint32_t   start, now; \
    CPU_Get_timebase_low( start ); \
    do \
      CPU_Get_timebase_low( now ); \
    while (now - start < (_cycles)); \
  } while (0)
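
/*  A minimal usage sketch, assuming a hypothetical device driver: the spin
 *  delays above can be used to let hardware settle before the next access.
 *  The helper name example_ppc_settle_delay and the 50 microsecond figure
 *  are illustrative assumptions, not part of the original header.
 */
static inline void example_ppc_settle_delay( void )
{
  /* busy-wait roughly 50 microseconds using the time base register */
  rtems_bsp_delay( 50 );
}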

#endif /* ASM */

#endif /* _RTEMS_SCORE_CPU_H */
