/*
 * Copyright 2011-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_EPOCH_H
#define CK_EPOCH_H

/*
 * The implementation here is inspired by the work described in:
 *   Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University
 *   of Cambridge Computing Laboratory.
 */

#include <ck_cc.h>
#include <ck_md.h>
#include <ck_pr.h>
#include <ck_stack.h>
#include <ck_stdbool.h>

#ifndef CK_EPOCH_LENGTH
#define CK_EPOCH_LENGTH 4
#endif

/*
 * This is used for sense detection with respect to concurrent
 * epoch sections.
 */
#define CK_EPOCH_SENSE (2)

struct ck_epoch_entry;
typedef struct ck_epoch_entry ck_epoch_entry_t;
typedef void ck_epoch_cb_t(ck_epoch_entry_t *);

/*
 * This should be embedded into objects you wish to be the target of
 * ck_epoch_cb_t functions (with ck_epoch_call).
 */
struct ck_epoch_entry {
	ck_epoch_cb_t *function;
	ck_stack_entry_t stack_entry;
};

/*
 * A section object may be passed to every begin-end pair to allow for
 * forward progress guarantees within prolonged active sections.
 */
struct ck_epoch_section {
	unsigned int bucket;
};
typedef struct ck_epoch_section ck_epoch_section_t;

/*
 * Returns a pointer to the object containing the given ck_epoch_entry.
 */
#define CK_EPOCH_CONTAINER(T, M, N) \
	CK_CC_CONTAINER(struct ck_epoch_entry, T, M, N)

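/*
 * Example usage (a minimal sketch; struct object, its fields,
 * object_container and object_destructor are hypothetical and not part
 * of this interface; free() requires <stdlib.h>):
 *
 *	struct object {
 *		void *data;
 *		ck_epoch_entry_t epoch_entry;
 *	};
 *	CK_EPOCH_CONTAINER(struct object, epoch_entry, object_container)
 *
 *	static void
 *	object_destructor(ck_epoch_entry_t *e)
 *	{
 *		struct object *o = object_container(e);
 *
 *		free(o);
 *	}
 */
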
struct ck_epoch_ref {
	unsigned int epoch;
	unsigned int count;
};

struct ck_epoch_record {
	ck_stack_entry_t record_next;
	struct ck_epoch *global;
	unsigned int state;
	unsigned int epoch;
	unsigned int active;
	struct {
		struct ck_epoch_ref bucket[CK_EPOCH_SENSE];
	} local CK_CC_CACHELINE;
	unsigned int n_pending;
	unsigned int n_peak;
	unsigned int n_dispatch;
	void *ct;
	ck_stack_t pending[CK_EPOCH_LENGTH];
} CK_CC_CACHELINE;
typedef struct ck_epoch_record ck_epoch_record_t;

struct ck_epoch {
	unsigned int epoch;
	unsigned int n_free;
	ck_stack_t records;
};
typedef struct ck_epoch ck_epoch_t;

/*
 * Internal functions.
 */
void _ck_epoch_addref(ck_epoch_record_t *, ck_epoch_section_t *);
bool _ck_epoch_delref(ck_epoch_record_t *, ck_epoch_section_t *);

CK_CC_FORCE_INLINE static void *
ck_epoch_record_ct(const ck_epoch_record_t *record)
{

	return ck_pr_load_ptr(&record->ct);
}

/*
 * Marks the beginning of an epoch-protected section.
 */
CK_CC_FORCE_INLINE static void
ck_epoch_begin(ck_epoch_record_t *record, ck_epoch_section_t *section)
{
	struct ck_epoch *epoch = record->global;

	/*
	 * Only observe a new epoch if the thread is not recursing into a
	 * read section.
	 */
	if (record->active == 0) {
		unsigned int g_epoch;

		/*
		 * It is possible for loads to be re-ordered before the store
		 * is committed into the caller's epoch and active fields.
		 * For this reason, store-to-load serialization is necessary.
		 */
#if defined(CK_MD_TSO)
		ck_pr_fas_uint(&record->active, 1);
		ck_pr_fence_atomic_load();
#else
		ck_pr_store_uint(&record->active, 1);
		ck_pr_fence_memory();
#endif

		/*
		 * This load is allowed to be re-ordered prior to setting the
		 * active flag due to the monotonic nature of the global
		 * epoch. However, stale values lead to measurable performance
		 * degradation in some torture tests, so we disallow early
		 * load of the global epoch.
		 */
		g_epoch = ck_pr_load_uint(&epoch->epoch);
		ck_pr_store_uint(&record->epoch, g_epoch);
	} else {
		ck_pr_store_uint(&record->active, record->active + 1);
	}

	if (section != NULL)
		_ck_epoch_addref(record, section);

	return;
}

/*
 * Marks the end of an epoch-protected section. Returns true if no more
 * sections exist for the caller.
 */
CK_CC_FORCE_INLINE static bool
ck_epoch_end(ck_epoch_record_t *record, ck_epoch_section_t *section)
{

	ck_pr_fence_release();
	ck_pr_store_uint(&record->active, record->active - 1);

	if (section != NULL)
		return _ck_epoch_delref(record, section);

	return record->active == 0;
}

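/*
 * Example of an epoch-protected read-side section (a minimal sketch;
 * "head", struct object and consume() are hypothetical application
 * state, and record is assumed to have been registered by the calling
 * thread):
 *
 *	struct object *o;
 *
 *	ck_epoch_begin(record, NULL);
 *	o = ck_pr_load_ptr(&head);
 *	if (o != NULL)
 *		consume(o);
 *	ck_epoch_end(record, NULL);
 *
 * Passing a ck_epoch_section_t instead of NULL to the begin-end pair
 * enables the forward-progress guarantees described above for prolonged
 * active sections.
 */
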
/*
 * Defers execution of the function pointed to by the "function"
 * argument until the epoch counter loops around. This allows for a
 * non-blocking deferral.
 *
 * We can get away without a fence here due to the monotonic nature
 * of the epoch counter. Worst case, this will result in some delays
 * before object destruction.
 */
CK_CC_FORCE_INLINE static void
ck_epoch_call(ck_epoch_record_t *record,
	      ck_epoch_entry_t *entry,
	      ck_epoch_cb_t *function)
{
	struct ck_epoch *epoch = record->global;
	unsigned int e = ck_pr_load_uint(&epoch->epoch);
	unsigned int offset = e & (CK_EPOCH_LENGTH - 1);

	record->n_pending++;
	entry->function = function;
	ck_stack_push_spnc(&record->pending[offset], &entry->stack_entry);
	return;
}

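/*
 * Example of deferred reclamation (a minimal sketch; it builds on the
 * hypothetical struct object and object_destructor shown earlier, and
 * unlink_object stands in for removing an object from a shared
 * structure):
 *
 *	struct object *o = unlink_object();
 *
 *	ck_epoch_call(record, &o->epoch_entry, object_destructor);
 *	ck_epoch_poll(record);
 *
 * ck_epoch_poll attempts to dispatch ripe callbacks without blocking;
 * object_destructor runs only once no active section can still hold a
 * reference to the object.
 */
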
/*
 * Same as ck_epoch_call, but allows for records to be shared and is reentrant.
 */
CK_CC_FORCE_INLINE static void
ck_epoch_call_strict(ck_epoch_record_t *record,
	      ck_epoch_entry_t *entry,
	      ck_epoch_cb_t *function)
{
	struct ck_epoch *epoch = record->global;
	unsigned int e = ck_pr_load_uint(&epoch->epoch);
	unsigned int offset = e & (CK_EPOCH_LENGTH - 1);

	ck_pr_inc_uint(&record->n_pending);
	entry->function = function;

	/* Store fence is implied by push operation. */
	ck_stack_push_upmc(&record->pending[offset], &entry->stack_entry);
	return;
}

/*
 * This callback is used by ck_epoch_synchronize_wait and
 * ck_epoch_barrier_wait to allow for custom blocking behavior.
 */
typedef void ck_epoch_wait_cb_t(ck_epoch_t *, ck_epoch_record_t *,
    void *);

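/*
 * Example of a wait callback (a minimal sketch; global_epoch is a
 * hypothetical ck_epoch_t, and sched_yield from <sched.h> simply yields
 * the CPU while a slow thread holds up epoch progression):
 *
 *	static void
 *	epoch_yield(ck_epoch_t *global, ck_epoch_record_t *blocked,
 *	    void *arg)
 *	{
 *
 *		(void)global;
 *		(void)blocked;
 *		(void)arg;
 *		sched_yield();
 *	}
 *
 *	ck_epoch_synchronize_wait(&global_epoch, epoch_yield, NULL);
 */
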
/*
 * Returns the latest epoch value. This operation provides load ordering.
 */
CK_CC_FORCE_INLINE static unsigned int
ck_epoch_value(const ck_epoch_t *ep)
{

	ck_pr_fence_load();
	return ck_pr_load_uint(&ep->epoch);
}

void ck_epoch_init(ck_epoch_t *);

/*
 * Attempts to recycle an unused epoch record. If one is successfully
 * allocated, the record context pointer is also updated.
 */
ck_epoch_record_t *ck_epoch_recycle(ck_epoch_t *, void *);

/*
 * Registers an epoch record. An optional context pointer may be passed that
 * is retrievable with ck_epoch_record_ct.
 */
void ck_epoch_register(ck_epoch_t *, ck_epoch_record_t *, void *);

/*
 * Marks a record as available for re-use by a subsequent recycle operation.
 * Note that the record cannot be physically destroyed.
 */
void ck_epoch_unregister(ck_epoch_record_t *);

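/*
 * Example of per-thread record setup (a minimal sketch; global_epoch and
 * the use of malloc from <stdlib.h> are hypothetical; recycling avoids
 * unbounded record growth as threads come and go):
 *
 *	ck_epoch_record_t *record;
 *
 *	record = ck_epoch_recycle(&global_epoch, NULL);
 *	if (record == NULL) {
 *		record = malloc(sizeof *record);
 *		ck_epoch_register(&global_epoch, record, NULL);
 *	}
 *
 * When the thread exits, ck_epoch_unregister(record) returns the record
 * to the recycle pool.
 */
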
bool ck_epoch_poll(ck_epoch_record_t *);
bool ck_epoch_poll_deferred(ck_epoch_record_t *, ck_stack_t *);
void ck_epoch_synchronize(ck_epoch_record_t *);
void ck_epoch_synchronize_wait(ck_epoch_t *, ck_epoch_wait_cb_t *, void *);
void ck_epoch_barrier(ck_epoch_record_t *);
void ck_epoch_barrier_wait(ck_epoch_record_t *, ck_epoch_wait_cb_t *, void *);

/*
 * Reclaims entries associated with a record. This is safe to call only on
 * the caller's own record, or on records whose owners use call_strict.
 */
void ck_epoch_reclaim(ck_epoch_record_t *);

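/*
 * Example of blocking reclamation (a minimal sketch; ck_epoch_synchronize
 * waits for all active sections to pass but leaves dispatch to a later
 * poll, reclaim or barrier, so the reclaim call here dispatches the
 * record's pending callbacks; ck_epoch_barrier combines both steps):
 *
 *	ck_epoch_synchronize(record);
 *	ck_epoch_reclaim(record);
 */
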
#endif /* CK_EPOCH_H */