source: rtems/cpukit/score/cpu/i386/rtems/score/cpuatomic.h @ 45c6d53

Last changed in 45c6d53, checked in by WeiY <wei.a.yang@…> on 01/25/13 at 15:56:40

score: atomic support for RTEMS. Atomic operations for i386.

/**
 * @file  rtems/score/cpuatomic.h
 *
 * This include file implements the atomic operations for i386 and defines
 * the atomic data types which are used by the atomic operations API file.
 * This file must use the fixed name cpuatomic.h and is included by the
 * atomic operations API file atomic.h. Most of the implementation is
 * imported from the FreeBSD kernel.
 */

/*
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _RTEMS_SCORE_ATOMIC_CPU_H
#define _RTEMS_SCORE_ATOMIC_CPU_H

#include <rtems/score/genericcpuatomic.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @defgroup RTEMS atomic implementation
 *
 */

/**@{*/

#if defined(RTEMS_SMP)
#define MPLOCKED        "lock ; "
#else
#define MPLOCKED
#endif

#if !defined(RTEMS_SMP)
/*
 * We assume that a = b will do atomic loads and stores.  However, on a
 * PentiumPro or higher, reads may pass writes, so for that case we have
 * to use a serializing instruction (i.e. with LOCK) to do the load in
 * SMP kernels.  For UP kernels, however, the cache of the single processor
 * is always consistent, so we only need to take care of the compiler.
 * (An illustrative expansion of the generated functions is sketched after
 * the #endif below.)
 */
#define ATOMIC_STORE_LOAD(NAME, TYPE, LOP, SOP)               \
static inline Atomic_##TYPE                           \
_CPU_Atomic_Load_##NAME(volatile Atomic_##TYPE *p)      \
{                                                       \
  Atomic_##TYPE tmp;                                    \
                                                        \
  tmp = *p;                                             \
  __asm __volatile("" : : : "memory");                  \
  return (tmp);                                         \
}                                                       \
                                                        \
static inline Atomic_##TYPE                           \
_CPU_Atomic_Load_acq_##NAME(volatile Atomic_##TYPE *p) \
{                                                       \
  Atomic_##TYPE tmp;                                    \
                                                        \
  tmp = *p;                                             \
  __asm __volatile("" : : : "memory");                  \
  return (tmp);                                         \
}                                                       \
                                                        \
static inline void                                    \
_CPU_Atomic_Store_##NAME(volatile Atomic_##TYPE *p, Atomic_##TYPE v) \
{                                                                    \
  __asm __volatile("" : : : "memory");                               \
  *p = v;                                                            \
}                                                                    \
                                                        \
static inline void                                    \
_CPU_Atomic_Store_rel_##NAME(volatile Atomic_##TYPE *p, Atomic_##TYPE v) \
{                                                                        \
  __asm __volatile("" : : : "memory");                                   \
  *p = v;                                                                \
}                                                                        \

#else /* !(!SMP) */

#define ATOMIC_STORE_LOAD(NAME, TYPE, LOP, SOP)               \
static inline Atomic_##TYPE                           \
_CPU_Atomic_Load_##NAME(volatile Atomic_##TYPE *p)      \
{                                                       \
  Atomic_##TYPE res;                                    \
                                                        \
  __asm __volatile(MPLOCKED LOP                         \
  : "=a" (res),                 /* 0 */                 \
  "=m" (*p)                     /* 1 */                 \
  : "m" (*p)                    /* 2 */                 \
  : "memory", "cc");                                    \
                                                        \
  return (res);                                         \
}                                                       \
                                                        \
static inline Atomic_##TYPE                           \
_CPU_Atomic_Load_acq_##NAME(volatile Atomic_##TYPE *p)  \
{                                                       \
  Atomic_##TYPE res;                                    \
                                                        \
  __asm __volatile(MPLOCKED LOP                         \
  : "=a" (res),                 /* 0 */                 \
  "=m" (*p)                     /* 1 */                 \
  : "m" (*p)                    /* 2 */                 \
  : "memory", "cc");                                    \
                                                        \
  return (res);                                         \
}                                                       \
                                                        \
/*                                                      \
 * The XCHG instruction asserts LOCK automagically.     \
 */                                                     \
static inline void                                    \
_CPU_Atomic_Store_##NAME(volatile Atomic_##TYPE *p, Atomic_##TYPE v) \
{                                                                    \
  __asm __volatile(SOP                                               \
  : "=m" (*p),                  /* 0 */                              \
  "+r" (v)                      /* 1 */                              \
  : "m" (*p)                    /* 2 */                              \
  : "memory");                                                       \
}                                                                    \
static inline void                                                   \
_CPU_Atomic_Store_rel_##NAME(volatile Atomic_##TYPE *p, Atomic_##TYPE v) \
{                                                                        \
  __asm __volatile(SOP                                                   \
  : "=m" (*p),                  /* 0 */                                  \
  "+r" (v)                      /* 1 */                                  \
  : "m" (*p)                    /* 2 */                                  \
  : "memory");                                                           \
}                                                                        \

#endif /* !SMP */
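
/*
 * For reference (illustrative only, not additional API): on a non-SMP build
 * the instantiation ATOMIC_STORE_LOAD(int, Int, ...) further below expands
 * the load to a plain read guarded only by a compiler barrier, roughly:
 *
 *   static inline Atomic_Int
 *   _CPU_Atomic_Load_int(volatile Atomic_Int *p)
 *   {
 *     Atomic_Int tmp;
 *
 *     tmp = *p;
 *     __asm __volatile("" : : : "memory");
 *     return (tmp);
 *   }
 *
 * On an SMP build the same function is instead backed by a locked CMPXCHGL
 * for the load and an XCHGL for the store, as defined above.
 */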

/*
 * The assembly is volatilized to avoid code chunk removal by the compiler.
 * GCC aggressively reorders operations and memory clobbering is necessary
 * in order to avoid that for memory barriers.
 */
#define ATOMIC_FETCH_GENERIC(NAME, TYPENAME, TYPE, OP, CONS, V)                         \
static inline void                                                                      \
_CPU_Atomic_Fetch_##NAME##_##TYPENAME(volatile Atomic_##TYPE *p, Atomic_##TYPE v) \
{                                                                             \
  __asm __volatile(MPLOCKED OP                                                \
  : "=m" (*p)                                                                 \
  : CONS (V), "m" (*p)                                                        \
  : "cc");                                                                    \
}                                                                             \
                                                                              \
static inline void                                                            \
_CPU_Atomic_Fetch_##NAME##_barr_##TYPENAME(volatile Atomic_##TYPE *p, Atomic_##TYPE v)\
{                                                                             \
  __asm __volatile(MPLOCKED OP                                                \
  : "=m" (*p)                                                                 \
  : CONS (V), "m" (*p)                                                        \
  : "memory", "cc");                                                          \
}                                                                             \

/*
 * Atomic compare and set, used by the mutex functions
 *
 * if (*dst == expect) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */
static inline int
_CPU_Atomic_Compare_exchange_int(volatile Atomic_Int *dst, Atomic_Int expect, Atomic_Int src)
{
  unsigned char res;

  __asm __volatile(
  "    " MPLOCKED "    "
  "    cmpxchgl %2,%1 ;    "
  "    sete     %0 ;       "
  "1:                      "
  "# atomic_cmpset_int"
  : "=a" (res),              /* 0 */
    "=m" (*dst)              /* 1 */
  : "r" (src),               /* 2 */
    "a" (expect),            /* 3 */
    "m" (*dst)               /* 4 */
  : "memory", "cc");

  return (res);
}
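
/*
 * A minimal usage sketch of the compare-and-set primitive above
 * (illustrative only; the example_lock variable and the spin policy are
 * assumptions made for this example, not part of this header).  The
 * acquire path retries the 0 -> 1 transition until the primitive reports
 * success (non-zero); the release path uses the release-store variant:
 *
 *   static volatile Atomic_Int example_lock = 0;
 *
 *   static void example_acquire(void)
 *   {
 *     while (!_CPU_Atomic_Compare_exchange_int(&example_lock, 0, 1))
 *       ;
 *   }
 *
 *   static void example_release(void)
 *   {
 *     _CPU_Atomic_Store_rel_int(&example_lock, 0);
 *   }
 */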

static inline int
_CPU_Atomic_Compare_exchange_long(volatile Atomic_Long *dst, Atomic_Long expect, Atomic_Long src)
{

  return (_CPU_Atomic_Compare_exchange_int((volatile Atomic_Int *)dst, (Atomic_Int)expect,
         (Atomic_Int)src));
}

ATOMIC_STORE_LOAD(int, Int,     "cmpxchgl %0,%1",  "xchgl %1,%0");
ATOMIC_STORE_LOAD(long, Long,   "cmpxchgl %0,%1",  "xchgl %1,%0");

ATOMIC_FETCH_GENERIC(add, int, Int, "addl %1,%0", "ir", v);
ATOMIC_FETCH_GENERIC(sub, int, Int, "subl %1,%0", "ir", v);
ATOMIC_FETCH_GENERIC(or,  int, Int, "orl %1,%0",  "ir", v);
ATOMIC_FETCH_GENERIC(and, int, Int, "andl %1,%0", "ir", v);

ATOMIC_FETCH_GENERIC(add, long, Long, "addl %1,%0", "ir", v);
ATOMIC_FETCH_GENERIC(sub, long, Long, "subl %1,%0", "ir", v);
ATOMIC_FETCH_GENERIC(or,  long, Long, "orl %1,%0",  "ir", v);
ATOMIC_FETCH_GENERIC(and, long, Long, "andl %1,%0", "ir", v);
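
/*
 * The instantiations above generate _CPU_Atomic_Fetch_add_int,
 * _CPU_Atomic_Fetch_sub_int, _CPU_Atomic_Fetch_or_int,
 * _CPU_Atomic_Fetch_and_int, their _barr_ variants and the corresponding
 * _long functions.  They perform the read-modify-write atomically and do
 * not return the previous value.  A small illustrative use, assuming an
 * application-defined counter and flag word (not part of this header):
 *
 *   static volatile Atomic_Int example_counter = 0;
 *   static volatile Atomic_Int example_flags = 0;
 *
 *   static void example_update(void)
 *   {
 *     _CPU_Atomic_Fetch_add_int(&example_counter, 1);
 *     _CPU_Atomic_Fetch_or_int(&example_flags, 0x4);
 *   }
 */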

#define _CPU_Atomic_Fetch_or_acq_int            _CPU_Atomic_Fetch_or_barr_int
#define _CPU_Atomic_Fetch_or_rel_int            _CPU_Atomic_Fetch_or_barr_int
#define _CPU_Atomic_Fetch_and_acq_int           _CPU_Atomic_Fetch_and_barr_int
#define _CPU_Atomic_Fetch_and_rel_int           _CPU_Atomic_Fetch_and_barr_int
#define _CPU_Atomic_Fetch_add_acq_int           _CPU_Atomic_Fetch_add_barr_int
#define _CPU_Atomic_Fetch_add_rel_int           _CPU_Atomic_Fetch_add_barr_int
#define _CPU_Atomic_Fetch_sub_acq_int           _CPU_Atomic_Fetch_sub_barr_int
#define _CPU_Atomic_Fetch_sub_rel_int           _CPU_Atomic_Fetch_sub_barr_int
#define _CPU_Atomic_Compare_exchange_acq_int  _CPU_Atomic_Compare_exchange_int
#define _CPU_Atomic_Compare_exchange_rel_int  _CPU_Atomic_Compare_exchange_int

#define _CPU_Atomic_Fetch_or_acq_long           _CPU_Atomic_Fetch_or_barr_long
#define _CPU_Atomic_Fetch_or_rel_long           _CPU_Atomic_Fetch_or_barr_long
#define _CPU_Atomic_Fetch_and_acq_long          _CPU_Atomic_Fetch_and_barr_long
#define _CPU_Atomic_Fetch_and_rel_long          _CPU_Atomic_Fetch_and_barr_long
#define _CPU_Atomic_Fetch_add_acq_long          _CPU_Atomic_Fetch_add_barr_long
#define _CPU_Atomic_Fetch_add_rel_long          _CPU_Atomic_Fetch_add_barr_long
#define _CPU_Atomic_Fetch_sub_acq_long          _CPU_Atomic_Fetch_sub_barr_long
#define _CPU_Atomic_Fetch_sub_rel_long          _CPU_Atomic_Fetch_sub_barr_long
#define _CPU_Atomic_Compare_exchange_acq_long _CPU_Atomic_Compare_exchange_long
#define _CPU_Atomic_Compare_exchange_rel_long _CPU_Atomic_Compare_exchange_long

/* Operations on 32-bit double words. */
#define _CPU_Atomic_Fetch_or_32(p, v)  \
    _CPU_Atomic_Fetch_or_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define _CPU_Atomic_Fetch_or_acq_32(p, v)  \
    _CPU_Atomic_Fetch_or_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define _CPU_Atomic_Fetch_or_rel_32(p, v)  \
    _CPU_Atomic_Fetch_or_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define _CPU_Atomic_Fetch_and_32(p, v)  \
    _CPU_Atomic_Fetch_and_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define _CPU_Atomic_Fetch_and_acq_32(p, v)  \
    _CPU_Atomic_Fetch_and_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define _CPU_Atomic_Fetch_and_rel_32(p, v)  \
    _CPU_Atomic_Fetch_and_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define _CPU_Atomic_Fetch_add_32(p, v)  \
    _CPU_Atomic_Fetch_add_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define _CPU_Atomic_Fetch_add_acq_32(p, v)  \
    _CPU_Atomic_Fetch_add_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define _CPU_Atomic_Fetch_add_rel_32(p, v)  \
    _CPU_Atomic_Fetch_add_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define _CPU_Atomic_Fetch_sub_32(p, v)  \
    _CPU_Atomic_Fetch_sub_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define _CPU_Atomic_Fetch_sub_acq_32(p, v)  \
    _CPU_Atomic_Fetch_sub_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define _CPU_Atomic_Fetch_sub_rel_32(p, v)  \
    _CPU_Atomic_Fetch_sub_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define _CPU_Atomic_Load_32(p)  \
    _CPU_Atomic_Load_int((volatile Atomic_Int *)(p))
#define _CPU_Atomic_Load_acq_32(p)  \
    _CPU_Atomic_Load_acq_int((volatile Atomic_Int *)(p))
#define _CPU_Atomic_Store_32(p, v)  \
    _CPU_Atomic_Store_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define _CPU_Atomic_Store_rel_32(p, v)  \
    _CPU_Atomic_Store_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define _CPU_Atomic_Compare_exchange_32(dst, old, new)  \
    _CPU_Atomic_Compare_exchange_int((volatile Atomic_Int *)(dst), (Atomic_Int)(old), (Atomic_Int)(new))
#define _CPU_Atomic_Compare_exchange_acq_32(dst, old, new)  \
    _CPU_Atomic_Compare_exchange_acq_int((volatile Atomic_Int *)(dst), (Atomic_Int)(old), (Atomic_Int)(new))
#define _CPU_Atomic_Compare_exchange_rel_32(dst, old, new)  \
    _CPU_Atomic_Compare_exchange_rel_int((volatile Atomic_Int *)(dst), (Atomic_Int)(old), (Atomic_Int)(new))

/* Operations on pointers. */
#define _CPU_Atomic_Fetch_or_ptr(p, v) \
    _CPU_Atomic_Fetch_or_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define _CPU_Atomic_Fetch_or_acq_ptr(p, v) \
    _CPU_Atomic_Fetch_or_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define _CPU_Atomic_Fetch_or_rel_ptr(p, v) \
    _CPU_Atomic_Fetch_or_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define _CPU_Atomic_Fetch_and_ptr(p, v) \
    _CPU_Atomic_Fetch_and_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define _CPU_Atomic_Fetch_and_acq_ptr(p, v) \
    _CPU_Atomic_Fetch_and_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define _CPU_Atomic_Fetch_and_rel_ptr(p, v) \
    _CPU_Atomic_Fetch_and_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define _CPU_Atomic_Fetch_add_ptr(p, v) \
    _CPU_Atomic_Fetch_add_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define _CPU_Atomic_Fetch_add_acq_ptr(p, v) \
    _CPU_Atomic_Fetch_add_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define _CPU_Atomic_Fetch_add_rel_ptr(p, v) \
    _CPU_Atomic_Fetch_add_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define _CPU_Atomic_Fetch_sub_ptr(p, v) \
    _CPU_Atomic_Fetch_sub_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define _CPU_Atomic_Fetch_sub_acq_ptr(p, v) \
    _CPU_Atomic_Fetch_sub_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define _CPU_Atomic_Fetch_sub_rel_ptr(p, v) \
    _CPU_Atomic_Fetch_sub_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define _CPU_Atomic_Load_ptr(p) \
    _CPU_Atomic_Load_int((volatile Atomic_Int *)(p))
#define _CPU_Atomic_Load_acq_ptr(p) \
    _CPU_Atomic_Load_acq_int((volatile Atomic_Int *)(p))
#define _CPU_Atomic_Store_ptr(p, v) \
    _CPU_Atomic_Store_int((volatile Atomic_Int *)(p), (v))
#define _CPU_Atomic_Store_rel_ptr(p, v) \
    _CPU_Atomic_Store_rel_int((volatile Atomic_Int *)(p), (v))
#define _CPU_Atomic_Compare_exchange_ptr(dst, old, new) \
    _CPU_Atomic_Compare_exchange_int((volatile Atomic_Int *)(dst), (Atomic_Int)(old), (Atomic_Int)(new))
#define _CPU_Atomic_Compare_exchange_acq_ptr(dst, old, new) \
    _CPU_Atomic_Compare_exchange_acq_int((volatile Atomic_Int *)(dst), (Atomic_Int)(old), \
            (Atomic_Int)(new))
#define _CPU_Atomic_Compare_exchange_rel_ptr(dst, old, new) \
    _CPU_Atomic_Compare_exchange_rel_int((volatile Atomic_Int *)(dst), (Atomic_Int)(old), \
            (Atomic_Int)(new))
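
/*
 * The pointer wrappers above reuse the int operations, which matches the
 * 32-bit pointer size of i386.  A hedged sketch of conditionally publishing
 * a new object, assuming an application-defined node type (illustrative
 * only, not part of this header); the call returns non-zero on success and
 * 0 if another writer intervened:
 *
 *   typedef struct example_node { int value; } example_node;
 *   static example_node * volatile example_current = NULL;
 *
 *   static int example_publish(example_node *old_node, example_node *new_node)
 *   {
 *     return _CPU_Atomic_Compare_exchange_ptr(&example_current, old_node, new_node);
 *   }
 */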

#ifdef __cplusplus
}
#endif

/**@}*/
#endif
/*  end of include file */