source: rtems-libbsd/rtemsbsd/rtems/rtems-kernel-epoch.c @ 3489e3b

Last change on this file was 3489e3b, checked in by Sebastian Huber <sebastian.huber@…> on 08/22/18 at 12:59:50

Update to FreeBSD head 2018-09-17

Git mirror commit 6c2192b1ef8c50788c751f878552526800b1e319.

Update #3472.

/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
 * Copyright (c) 2018, embedded brains GmbH
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <machine/rtems-bsd-kernel-space.h>

#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/epoch.h>

#include <machine/cpu.h>

#include <rtems.h>
#include <rtems/irq-extension.h>
#include <rtems/score/smpimpl.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/watchdogimpl.h>

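/*
 * RTEMS implementation of the FreeBSD epoch(9) interface.  Read-side
 * sections and deferred callbacks are mapped onto Concurrency Kit epochs
 * (ck_epoch), RTEMS per-processor data, a per-processor watchdog, and the
 * interrupt server.
 *
 * Illustrative read-side usage with a preemptible epoch "e" (a sketch for
 * orientation only, not taken from this file):
 *
 *        struct epoch_tracker et;
 *
 *        epoch_enter_preempt(e, &et);
 *        // dereference epoch-protected data
 *        epoch_exit_preempt(e, &et);
 *
 * Writers unlink objects and then either call epoch_wait_preempt(e) before
 * freeing them or defer the free via epoch_call().
 */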
EPOCH_DEFINE(_bsd_global_epoch_preempt, EPOCH_PREEMPT);

EPOCH_DEFINE(_bsd_global_epoch, 0);

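/*
 * Per-processor state: the count of epoch callbacks not yet dispatched on
 * this processor, a watchdog which fires every clock tick, and the
 * interrupt server request used to run the callbacks in task context.
 */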
struct epoch_pcpu {
        int cb_count;
        Watchdog_Control wdg;
        rtems_interrupt_server_request irq_srv_req;
};

static PER_CPU_DATA_ITEM(struct epoch_pcpu, epoch);

static SLIST_HEAD(, epoch) epoch_list = SLIST_HEAD_INITIALIZER(epoch_list);

CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
    ck_epoch_entry_container)

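/*
 * Epoch initialization: register one ck_epoch record per configured
 * processor and add the epoch to the global list visited by
 * epoch_call_handler().
 */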
void
_bsd_epoch_init(epoch_t epoch, uintptr_t pcpu_record_offset, int flags)
{
        uint32_t cpu_count;
        uint32_t cpu_index;

        ck_epoch_init(&epoch->e_epoch);
        epoch->e_flags = flags;
        epoch->e_pcpu_record_offset = pcpu_record_offset;

        cpu_count = rtems_get_processor_count();

        for (cpu_index = 0; cpu_index < cpu_count; ++cpu_index) {
                Per_CPU_Control *cpu;
                struct epoch_record *er;

                cpu = _Per_CPU_Get_by_index(cpu_index);
                er = EPOCH_GET_RECORD(cpu, epoch);
                bzero(er, sizeof(*er));
                ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
                TAILQ_INIT(__DEVOLATILE(struct epoch_tdlist *,
                    &er->er_tdlist));
                er->er_cpuid = cpu_index;
        }

        SLIST_INSERT_HEAD(&epoch_list, epoch, e_link);
}

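/*
 * Per-processor watchdog routine.  It re-arms itself for the next clock
 * tick and, if callbacks are pending on this processor, submits the
 * interrupt server request which runs epoch_call_handler().
 */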
static void
epoch_watchdog(Watchdog_Control *wdg)
{
        struct epoch_pcpu *epcpu;

        epcpu = __containerof(wdg, struct epoch_pcpu, wdg);
        _Watchdog_Per_CPU_insert_ticks(&epcpu->wdg,
            _Watchdog_Get_CPU(&epcpu->wdg), 1);

        if (RTEMS_PREDICT_FALSE(epcpu->cb_count != 0)) {
                rtems_interrupt_server_request_submit(&epcpu->irq_srv_req);
        }
}

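/*
 * Runs in the context of the interrupt server.  For each epoch, poll the
 * record of the current processor for callbacks which are ready to run,
 * collect them on a local stack, and finally invoke them.
 */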
static void
epoch_call_handler(void *arg)
{
        struct epoch_pcpu *epcpu;
        struct epoch *epoch;
        ck_stack_entry_t *cursor, *head, *next;
        ck_stack_t cb_stack;

        epcpu = arg;
        ck_stack_init(&cb_stack);

        SLIST_FOREACH(epoch, &epoch_list, e_link) {
                Per_CPU_Control *cpu_self;
                struct epoch_record *er;
                int npending;

                cpu_self = _Thread_Dispatch_disable();
                er = EPOCH_GET_RECORD(cpu_self, epoch);
                npending = er->er_record.n_pending;

                if (npending != 0) {
                        ck_epoch_poll_deferred(&er->er_record, &cb_stack);
                        epcpu->cb_count -= npending - er->er_record.n_pending;
                }

                _Thread_Dispatch_enable(cpu_self);
        }

        head = ck_stack_batch_pop_npsc(&cb_stack);
        for (cursor = head; cursor != NULL; cursor = next) {
                struct ck_epoch_entry *entry;

                entry = ck_epoch_entry_container(cursor);

                next = CK_STACK_NEXT(cursor);
                (*entry->function)(entry);
        }
}

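/*
 * System initialization: for each configured processor, start the
 * per-processor watchdog and initialize the interrupt server request which
 * dispatches the epoch callbacks on that processor.
 */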
static void
epoch_sysinit(void)
{
        uint32_t cpu_count;
        uint32_t cpu_index;

        cpu_count = rtems_get_processor_count();

        for (cpu_index = 0; cpu_index < cpu_count; ++cpu_index) {
                Per_CPU_Control *cpu;
                struct epoch_pcpu *epcpu;

                cpu = _Per_CPU_Get_by_index(cpu_index);
                epcpu = PER_CPU_DATA_GET(cpu, struct epoch_pcpu, epoch);

                _Watchdog_Preinitialize(&epcpu->wdg, cpu);
                _Watchdog_Initialize(&epcpu->wdg, epoch_watchdog);
                _Watchdog_Per_CPU_insert_ticks(&epcpu->wdg, cpu, 1);

                rtems_interrupt_server_request_initialize(cpu_index,
                    &epcpu->irq_srv_req, epoch_call_handler, epcpu);
        }
}
SYSINIT(epoch, SI_SUB_TUNABLES, SI_ORDER_SECOND, epoch_sysinit, NULL);

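/*
 * Enter a preemptible epoch section.  The executing thread is appended to
 * the per-processor tracker list and pinned to its processor for the
 * duration of the section, so that epoch_wait_preempt() can identify and
 * wait for threads which delay a grace period.
 */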
void
epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et)
{
        Per_CPU_Control *cpu_self;
        ISR_lock_Context lock_context;
        Thread_Control *executing;
        struct epoch_record *er;

        SLIST_INIT(&et->et_mtx);

        _ISR_lock_ISR_disable(&lock_context);
        cpu_self = _Thread_Dispatch_disable_critical(&lock_context);
        executing = _Per_CPU_Get_executing(cpu_self);
        er = EPOCH_GET_RECORD(cpu_self, epoch);
        TAILQ_INSERT_TAIL(&er->er_tdlist, et, et_link);
        et->et_td = executing;
        _ISR_lock_ISR_enable(&lock_context);

        ck_epoch_begin(&er->er_record, &et->et_section);

        _Thread_Pin(executing);
        _Thread_Dispatch_enable(cpu_self);
}

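/*
 * Leave a preemptible epoch section.  The tracker is removed from the
 * per-processor list, the thread is unpinned, and any mutexes registered
 * by epoch_register_mutex() are unlocked to release threads waiting in
 * epoch_wait_preempt().
 */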
void
epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et)
{
        Per_CPU_Control *cpu_self;
        ISR_lock_Context lock_context;
        Thread_Control *executing;
        struct epoch_record *er;
        struct epoch_tracker_mutex *etm;

        _ISR_lock_ISR_disable(&lock_context);
        cpu_self = _Thread_Dispatch_disable_critical(&lock_context);
        executing = _Per_CPU_Get_executing(cpu_self);
        er = EPOCH_GET_RECORD(cpu_self, epoch);
        TAILQ_REMOVE(&er->er_tdlist, et, et_link);
        _ISR_lock_ISR_enable(&lock_context);

        ck_epoch_end(&er->er_record, &et->et_section);

        _Thread_Unpin(executing, cpu_self);
        _Thread_Dispatch_enable(cpu_self);

        SLIST_FOREACH(etm, &et->et_mtx, etm_link) {
                rtems_mutex_unlock(&etm->etm_mtx);
        }
}

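/*
 * Grace period wait for a non-preemptible epoch.  Readers of such an epoch
 * cannot block, so busy waiting with cpu_spinwait() is sufficient.
 */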
static void
epoch_block_handler(struct ck_epoch *g __unused, ck_epoch_record_t *c __unused,
    void *arg __unused)
{
        cpu_spinwait();
}

void
epoch_wait(epoch_t epoch)
{
        Per_CPU_Control *cpu_self;

        cpu_self = _Thread_Dispatch_disable();
        ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL);
        _Thread_Dispatch_enable(cpu_self);
}

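/*
 * Runs on the processor owning the blocking epoch record.  If a thread is
 * currently inside an epoch section on that processor, ownership of the
 * mutex prepared by epoch_block_handler_preempt() is handed to it, so the
 * waiter blocks until that thread leaves the section and unlocks the mutex
 * in epoch_exit_preempt().
 */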
static void
epoch_register_mutex(void *arg)
{
        struct epoch_tracker_mutex *etm;
        struct epoch_record *er;
        struct epoch_tracker *et;

        etm = arg;
        er = etm->etm_record;
        et = TAILQ_FIRST(&er->er_tdlist);

        if (et != NULL) {
                etm->etm_mtx._Queue._owner = et->et_td;
                _Thread_Resource_count_increment( et->et_td );
                SLIST_INSERT_HEAD(&et->et_mtx, etm, etm_link);
        }
}

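/*
 * Called by ck_epoch_synchronize_wait() when a record of a preemptible
 * epoch delays the grace period.  A temporary mutex is registered with the
 * first tracker on the record's processor (via a multicast action if that
 * processor is not the current one); locking it then blocks the caller
 * until the corresponding epoch section is left.  Thread dispatching is
 * re-enabled around the blocking mutex operations.
 */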
static void
epoch_block_handler_preempt(struct ck_epoch *g __unused,
    ck_epoch_record_t *cr, void *arg __unused)
{
        struct epoch_record *er;
        Per_CPU_Control *cpu_self;
        uint32_t cpu_self_index;
        struct epoch_tracker_mutex etm;

        er = __containerof(cr, struct epoch_record, er_record);
        cpu_self = _Per_CPU_Get();
        cpu_self_index = _Per_CPU_Get_index(cpu_self);

        rtems_mutex_init(&etm.etm_mtx, "epoch");
        etm.etm_record = er;

#ifdef RTEMS_SMP
        if (cpu_self_index != er->er_cpuid) {
                cpu_set_t set;

                CPU_ZERO(&set);
                CPU_SET((int)er->er_cpuid, &set);
                _SMP_Multicast_action(sizeof(set), &set, epoch_register_mutex,
                    &etm);
        } else {
                epoch_register_mutex(&etm);
        }
#else
        epoch_register_mutex(&etm);
#endif

        _Thread_Dispatch_enable(cpu_self);

        rtems_mutex_lock(&etm.etm_mtx);
        rtems_mutex_unlock(&etm.etm_mtx);
        rtems_mutex_destroy(&etm.etm_mtx);

        _Thread_Dispatch_disable();
}

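/*
 * Grace period wait for a preemptible epoch.  Readers may be preempted or
 * block, so the block handler above sleeps on a mutex instead of busy
 * waiting.
 */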
void
epoch_wait_preempt(epoch_t epoch)
{

        _Thread_Dispatch_disable();
        ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler_preempt,
            NULL);
        _Thread_Dispatch_enable(_Per_CPU_Get());
}

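/*
 * Schedule a callback to run after a grace period.  The callback is queued
 * on the record of the current processor; the per-processor callback count
 * makes the watchdog submit the interrupt server request which eventually
 * dispatches it via epoch_call_handler().
 */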
void
epoch_call(epoch_t epoch, epoch_context_t ctx,
    void (*callback) (epoch_context_t))
{
        Per_CPU_Control *cpu_self;
        struct epoch_record *er;
        struct epoch_pcpu *epcpu;

        cpu_self = _Thread_Dispatch_disable();
        epcpu = PER_CPU_DATA_GET(cpu_self, struct epoch_pcpu, epoch);
        epcpu->cb_count += 1;
        er = EPOCH_GET_RECORD(cpu_self, epoch);
        ck_epoch_call(&er->er_record, ctx, callback);
        _Thread_Dispatch_enable(cpu_self);
}