/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2019 embedded brains GmbH
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
---|
27 | |
---|
28 | #ifdef HAVE_CONFIG_H |
---|
29 | #include "config.h" |
---|
30 | #endif |
---|
31 | |
---|
32 | #include <rtems/score/smpimpl.h> |
---|
33 | #include <rtems/score/assert.h> |
---|
34 | #include <rtems/score/threaddispatch.h> |
---|
35 | |
---|
/* Forward declarations so the two structures can reference each other. */
typedef struct Per_CPU_Job Per_CPU_Job;

typedef struct Per_CPU_Jobs Per_CPU_Jobs;

/*
 * Value for the Per_CPU_Job::done member to indicate that a job is done
 * (handler was called on the target processor).  Must not be a valid pointer
 * value since it overlaps with the Per_CPU_Job::next member.
 */
#define PER_CPU_JOB_DONE 1
---|
46 | |
---|
/**
 * @brief A per-processor job.
 *
 * While the job is enqueued, the anonymous union holds the list link; once
 * the handler has run, the same storage is reused as the done flag.  This
 * works only because PER_CPU_JOB_DONE is not a valid pointer value.
 */
struct Per_CPU_Job {
  union {
    /**
     * @brief The next job in the corresponding per-processor job list.
     */
    Per_CPU_Job *next;

    /**
     * @brief Indication if the job is done.
     *
     * A job is done if this member has the value PER_CPU_JOB_DONE.  This
     * assumes that PER_CPU_JOB_DONE is not a valid pointer value.
     */
    Atomic_Ulong done;
  };

  /**
   * @brief Back pointer to the jobs to get the handler and argument.
   */
  Per_CPU_Jobs *jobs;
};
---|
71 | |
---|
/**
 * @brief A collection of jobs, one for each processor.
 *
 * One instance lives on the stack of the processor issuing a multicast
 * action; each target processor gets its own Per_CPU_Job element so that
 * the targets never write to a shared cache line while completing.
 */
struct Per_CPU_Jobs {
  /**
   * @brief The job handler.
   */
  SMP_Action_handler handler;

  /**
   * @brief The job handler argument.
   */
  void *arg;

  /**
   * @brief One job for each potential processor.
   */
  Per_CPU_Job Jobs[ CPU_MAXIMUM_PROCESSORS ];
};
---|
91 | |
---|
/* Disable interrupts and acquire the per-processor job list lock. */
#define _Per_CPU_Jobs_ISR_disable_and_acquire( cpu, lock_context ) \
  _ISR_lock_ISR_disable_and_acquire( &( cpu )->Jobs.Lock, lock_context )

/* Release the per-processor job list lock and restore interrupts. */
#define _Per_CPU_Jobs_release_and_ISR_enable( cpu, lock_context ) \
  _ISR_lock_Release_and_ISR_enable( &( cpu )->Jobs.Lock, lock_context )
---|
97 | |
---|
/*
 * Drains the job list of the processor: pops each job, calls its handler,
 * and marks the job as done.
 *
 * The job list lock protects only the list head manipulation; it is
 * released around the handler call, so the handler itself may issue or
 * perform further jobs.
 */
void _Per_CPU_Perform_jobs( Per_CPU_Control *cpu )
{
  ISR_lock_Context lock_context;
  Per_CPU_Job     *job;

  _Per_CPU_Jobs_ISR_disable_and_acquire( cpu, &lock_context );

  while ( ( job = cpu->Jobs.head ) != NULL ) {
    Per_CPU_Jobs *jobs;

    /* Pop the job while the lock is still held */
    cpu->Jobs.head = job->next;
    _Per_CPU_Jobs_release_and_ISR_enable( cpu, &lock_context );

    jobs = job->jobs;
    ( *jobs->handler )( jobs->arg );

    /*
     * The release store pairs with the acquire load in
     * _SMP_Wait_for_action_jobs() and publishes the handler's side effects.
     * The job storage may cease to exist right after this store, since the
     * issuing context may then return and pop its stack frame.
     */
    _Atomic_Store_ulong( &job->done, PER_CPU_JOB_DONE, ATOMIC_ORDER_RELEASE );

    _Per_CPU_Jobs_ISR_disable_and_acquire( cpu, &lock_context );
  }

  _Per_CPU_Jobs_release_and_ISR_enable( cpu, &lock_context );
}
---|
120 | |
---|
121 | static void _Per_CPU_Try_perform_jobs( Per_CPU_Control *cpu_self ) |
---|
122 | { |
---|
123 | unsigned long message; |
---|
124 | |
---|
125 | message = _Atomic_Load_ulong( &cpu_self->message, ATOMIC_ORDER_RELAXED ); |
---|
126 | |
---|
127 | if ( ( message & SMP_MESSAGE_PERFORM_JOBS ) != 0 ) { |
---|
128 | bool success; |
---|
129 | |
---|
130 | success = _Atomic_Compare_exchange_ulong( |
---|
131 | &cpu_self->message, &message, |
---|
132 | message & ~SMP_MESSAGE_PERFORM_JOBS, ATOMIC_ORDER_RELAXED, |
---|
133 | ATOMIC_ORDER_RELAXED |
---|
134 | ); |
---|
135 | |
---|
136 | if ( success ) { |
---|
137 | _Per_CPU_Perform_jobs( cpu_self ); |
---|
138 | } |
---|
139 | } |
---|
140 | } |
---|
141 | |
---|
/*
 * For each target processor: initializes its dedicated job, appends the job
 * to the target's FIFO job list, and sends a SMP_MESSAGE_PERFORM_JOBS
 * inter-processor message.
 */
static void _SMP_Issue_action_jobs(
  const Processor_mask *targets,
  Per_CPU_Jobs         *jobs,
  uint32_t              cpu_max
)
{
  uint32_t cpu_index;

  for ( cpu_index = 0; cpu_index < cpu_max; ++cpu_index ) {
    if ( _Processor_mask_Is_set( targets, cpu_index ) ) {
      ISR_lock_Context lock_context;
      Per_CPU_Job     *job;
      Per_CPU_Control *cpu;

      job = &jobs->Jobs[ cpu_index ];

      /*
       * Storing zero to done also clears the overlapping next member; the
       * assertion checks that this yields a null pointer (union layout
       * assumption documented at Per_CPU_Job).
       */
      _Atomic_Store_ulong( &job->done, 0, ATOMIC_ORDER_RELAXED );
      _Assert( job->next == NULL );
      job->jobs = jobs;

      cpu = _Per_CPU_Get_by_index( cpu_index );
      _Per_CPU_Jobs_ISR_disable_and_acquire( cpu, &lock_context );

      /* Append to the tail of the target's job list */
      if ( cpu->Jobs.head == NULL ) {
        cpu->Jobs.head = job;
      } else {
        *cpu->Jobs.tail = job;
      }

      cpu->Jobs.tail = &job->next;

      _Per_CPU_Jobs_release_and_ISR_enable( cpu, &lock_context );
      _SMP_Send_message( cpu_index, SMP_MESSAGE_PERFORM_JOBS );
    }
  }
}
---|
177 | |
---|
/*
 * Busy-waits until each job issued to a target processor is marked done.
 *
 * While spinning, the executing processor performs its own pending jobs,
 * so the action can complete even if inter-processor interrupts are not
 * working (see comment in the PER_CPU_STATE_UP case); presumably this also
 * avoids a deadlock when two processors multicast to each other
 * concurrently.  Targets which are not yet up are woken via a processor
 * event broadcast; an unexpected target state is a fatal error.
 */
static void _SMP_Wait_for_action_jobs(
  const Processor_mask *targets,
  const Per_CPU_Jobs   *jobs,
  uint32_t              cpu_max,
  Per_CPU_Control      *cpu_self
)
{
  uint32_t cpu_index;

  for ( cpu_index = 0; cpu_index < cpu_max; ++cpu_index ) {
    if ( _Processor_mask_Is_set( targets, cpu_index ) ) {
      const Per_CPU_Job *job;
      Per_CPU_Control   *cpu;

      job = &jobs->Jobs[ cpu_index ];
      cpu = _Per_CPU_Get_by_index( cpu_index );

      /* Acquire pairs with the release store in _Per_CPU_Perform_jobs() */
      while (
        _Atomic_Load_ulong( &job->done, ATOMIC_ORDER_ACQUIRE )
          != PER_CPU_JOB_DONE
      ) {
        switch ( cpu->state ) {
          case PER_CPU_STATE_INITIAL:
          case PER_CPU_STATE_READY_TO_START_MULTITASKING:
          case PER_CPU_STATE_REQUEST_START_MULTITASKING:
            _CPU_SMP_Processor_event_broadcast();
            /* Fall through */
          case PER_CPU_STATE_UP:
            /*
             * Calling this function with the current processor is intentional.
             * We have to perform our own jobs here in case inter-processor
             * interrupts are not working.
             */
            _Per_CPU_Try_perform_jobs( cpu_self );
            break;
          default:
            _SMP_Fatal( SMP_FATAL_WRONG_CPU_STATE_TO_PERFORM_JOBS );
            break;
        }
      }
    }
  }
}
---|
221 | |
---|
222 | void _SMP_Multicast_action( |
---|
223 | const Processor_mask *targets, |
---|
224 | SMP_Action_handler handler, |
---|
225 | void *arg |
---|
226 | ) |
---|
227 | { |
---|
228 | Per_CPU_Jobs jobs; |
---|
229 | uint32_t cpu_max; |
---|
230 | Per_CPU_Control *cpu_self; |
---|
231 | uint32_t isr_level; |
---|
232 | |
---|
233 | cpu_max = _SMP_Get_processor_maximum(); |
---|
234 | _Assert( cpu_max <= CPU_MAXIMUM_PROCESSORS ); |
---|
235 | |
---|
236 | jobs.handler = handler; |
---|
237 | jobs.arg = arg; |
---|
238 | isr_level = _ISR_Get_level(); |
---|
239 | |
---|
240 | if ( isr_level == 0 ) { |
---|
241 | cpu_self = _Thread_Dispatch_disable(); |
---|
242 | } else { |
---|
243 | cpu_self = _Per_CPU_Get(); |
---|
244 | } |
---|
245 | |
---|
246 | _SMP_Issue_action_jobs( targets, &jobs, cpu_max ); |
---|
247 | _SMP_Wait_for_action_jobs( targets, &jobs, cpu_max, cpu_self ); |
---|
248 | |
---|
249 | if ( isr_level == 0 ) { |
---|
250 | _Thread_Dispatch_enable( cpu_self ); |
---|
251 | } |
---|
252 | } |
---|
253 | |
---|
254 | void _SMP_Broadcast_action( |
---|
255 | SMP_Action_handler handler, |
---|
256 | void *arg |
---|
257 | ) |
---|
258 | { |
---|
259 | _SMP_Multicast_action( _SMP_Get_online_processors(), handler, arg ); |
---|
260 | } |
---|