/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2019 embedded brains GmbH
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
---|
27 | |
---|
28 | #ifdef HAVE_CONFIG_H |
---|
29 | #include "config.h" |
---|
30 | #endif |
---|
31 | |
---|
32 | #include <rtems/score/smpimpl.h> |
---|
33 | #include <rtems/score/assert.h> |
---|
34 | #include <rtems/score/threaddispatch.h> |
---|
35 | |
---|
typedef struct Per_CPU_Job Per_CPU_Job;

typedef struct Per_CPU_Jobs Per_CPU_Jobs;

/**
 * @brief A per-processor job.
 *
 * An instance of this structure is an element of a singly-linked list headed
 * by a processor's Jobs member.  The storage lives inside a Per_CPU_Jobs
 * collection on the stack of the processor which issued the action.
 */
struct Per_CPU_Job {
  union {
    /**
     * @brief The next job in the corresponding per-processor job list.
     *
     * NOTE: This member shares storage with @a done.  Writing zero to
     * @a done therefore also clears @a next, and writing one to @a done
     * destroys @a next — the list linkage must no longer be used once the
     * job is marked done.
     */
    Per_CPU_Job *next;

    /**
     * @brief Indication if the job is done.
     *
     * A job is done if this member has the value one.  This assumes that one
     * is not a valid pointer value.
     */
    Atomic_Ulong done;
  };

  /**
   * @brief Back pointer to the jobs to get the handler and argument.
   */
  Per_CPU_Jobs *jobs;
};
---|
64 | |
---|
/**
 * @brief A collection of jobs, one for each processor.
 *
 * Instances of this structure are placed on the stack of the processor which
 * issues a multicast action (see _SMP_Multicast_action()).  The storage must
 * remain valid until every targeted processor has marked its job as done.
 */
struct Per_CPU_Jobs {
  /**
   * @brief The job handler.
   */
  SMP_Action_handler handler;

  /**
   * @brief The job handler argument.
   */
  void *arg;

  /**
   * @brief One job for each potential processor.
   *
   * Indexed by processor index; only the entries of targeted processors are
   * initialized and enqueued.
   */
  Per_CPU_Job Jobs[ CPU_MAXIMUM_PROCESSORS ];
};
---|
84 | |
---|
/*
 * Drain the job list of the specified processor, invoking each job's handler
 * with its argument and marking the job as done afterwards.
 *
 * The per-CPU lock protects only the list manipulation; it is dropped while a
 * handler runs so that handlers may themselves use per-CPU services without
 * deadlocking.  New jobs may be enqueued concurrently and are picked up by
 * the next loop iteration.
 */
void _Per_CPU_Perform_jobs( Per_CPU_Control *cpu )
{
  ISR_lock_Context lock_context;
  Per_CPU_Job *job;

  _ISR_lock_ISR_disable( &lock_context );
  _Per_CPU_Acquire( cpu, &lock_context );

  while ( ( job = cpu->Jobs.head ) != NULL ) {
    Per_CPU_Jobs *jobs;

    /* Dequeue under the lock, then release it before running the handler. */
    cpu->Jobs.head = job->next;
    _Per_CPU_Release( cpu, &lock_context );
    _ISR_lock_ISR_enable( &lock_context );

    jobs = job->jobs;
    ( *jobs->handler )( jobs->arg );

    /*
     * The release store pairs with the acquire load in
     * _SMP_Wait_for_action_jobs() so the handler's effects are visible to
     * the waiter.  Since done and next share a union, this store also
     * destroys the list linkage; the job storage (on the issuer's stack)
     * must not be touched afterwards.
     */
    _Atomic_Store_ulong( &job->done, 1, ATOMIC_ORDER_RELEASE );

    _ISR_lock_ISR_disable( &lock_context );
    _Per_CPU_Acquire( cpu, &lock_context );
  }

  _Per_CPU_Release( cpu, &lock_context );
  _ISR_lock_ISR_enable( &lock_context );
}
---|
111 | |
---|
112 | static void _Per_CPU_Try_perform_jobs( Per_CPU_Control *cpu_self ) |
---|
113 | { |
---|
114 | unsigned long message; |
---|
115 | |
---|
116 | message = _Atomic_Load_ulong( &cpu_self->message, ATOMIC_ORDER_RELAXED ); |
---|
117 | |
---|
118 | if ( ( message & SMP_MESSAGE_PERFORM_JOBS ) != 0 ) { |
---|
119 | bool success; |
---|
120 | |
---|
121 | success = _Atomic_Compare_exchange_ulong( |
---|
122 | &cpu_self->message, &message, |
---|
123 | message & ~SMP_MESSAGE_PERFORM_JOBS, ATOMIC_ORDER_RELAXED, |
---|
124 | ATOMIC_ORDER_RELAXED |
---|
125 | ); |
---|
126 | |
---|
127 | if ( success ) { |
---|
128 | _Per_CPU_Perform_jobs( cpu_self ); |
---|
129 | } |
---|
130 | } |
---|
131 | } |
---|
132 | |
---|
/*
 * Enqueue one job on the FIFO job list of each targeted processor and send
 * it a SMP_MESSAGE_PERFORM_JOBS message.
 */
static void _SMP_Issue_action_jobs(
  const Processor_mask *targets,
  Per_CPU_Jobs *jobs,
  uint32_t cpu_max
)
{
  uint32_t cpu_index;

  for ( cpu_index = 0; cpu_index < cpu_max; ++cpu_index ) {
    if ( _Processor_mask_Is_set( targets, cpu_index ) ) {
      ISR_lock_Context lock_context;
      Per_CPU_Job *job;
      Per_CPU_Control *cpu;

      job = &jobs->Jobs[ cpu_index ];
      /*
       * done and next share a union, so this store also zeroes next, which
       * is what makes the following assertion valid for the otherwise
       * uninitialized stack storage.
       */
      _Atomic_Store_ulong( &job->done, 0, ATOMIC_ORDER_RELAXED );
      _Assert( job->next == NULL );
      job->jobs = jobs;

      cpu = _Per_CPU_Get_by_index( cpu_index );
      _ISR_lock_ISR_disable( &lock_context );
      _Per_CPU_Acquire( cpu, &lock_context );

      /* Append to the tail; an empty list is detected via the head. */
      if ( cpu->Jobs.head == NULL ) {
        cpu->Jobs.head = job;
      } else {
        *cpu->Jobs.tail = job;
      }

      cpu->Jobs.tail = &job->next;

      _Per_CPU_Release( cpu, &lock_context );
      _ISR_lock_ISR_enable( &lock_context );
      _SMP_Send_message( cpu_index, SMP_MESSAGE_PERFORM_JOBS );
    }
  }
}
---|
170 | |
---|
/*
 * Busy-wait until every targeted processor has marked its job as done.
 *
 * While polling, the waiter prods not-yet-started processors with a
 * processor event and performs its own pending jobs, so progress is made
 * even if inter-processor interrupts are unavailable.  A target processor in
 * an unexpected state is a fatal error, since its job would never complete.
 */
static void _SMP_Wait_for_action_jobs(
  const Processor_mask *targets,
  const Per_CPU_Jobs *jobs,
  uint32_t cpu_max,
  Per_CPU_Control *cpu_self
)
{
  uint32_t cpu_index;

  for ( cpu_index = 0; cpu_index < cpu_max; ++cpu_index ) {
    if ( _Processor_mask_Is_set( targets, cpu_index ) ) {
      const Per_CPU_Job *job;
      Per_CPU_Control *cpu;

      job = &jobs->Jobs[ cpu_index ];
      cpu = _Per_CPU_Get_by_index( cpu_index );

      /* Acquire load pairs with the release store in _Per_CPU_Perform_jobs(). */
      while ( _Atomic_Load_ulong( &job->done, ATOMIC_ORDER_ACQUIRE ) == 0 ) {
        switch ( cpu->state ) {
          case PER_CPU_STATE_INITIAL:
          case PER_CPU_STATE_READY_TO_START_MULTITASKING:
          case PER_CPU_STATE_REQUEST_START_MULTITASKING:
            /* Wake a processor which may still be waiting to start. */
            _CPU_SMP_Processor_event_broadcast();
            /* Fall through */
          case PER_CPU_STATE_UP:
            /*
             * Calling this function with the current processor is intentional.
             * We have to perform our own jobs here in case inter-processor
             * interrupts are not working.
             */
            _Per_CPU_Try_perform_jobs( cpu_self );
            break;
          default:
            _SMP_Fatal( SMP_FATAL_WRONG_CPU_STATE_TO_PERFORM_JOBS );
            break;
        }
      }
    }
  }
}
---|
211 | |
---|
212 | void _SMP_Multicast_action( |
---|
213 | const Processor_mask *targets, |
---|
214 | SMP_Action_handler handler, |
---|
215 | void *arg |
---|
216 | ) |
---|
217 | { |
---|
218 | Per_CPU_Jobs jobs; |
---|
219 | uint32_t cpu_max; |
---|
220 | Per_CPU_Control *cpu_self; |
---|
221 | uint32_t isr_level; |
---|
222 | |
---|
223 | cpu_max = _SMP_Get_processor_maximum(); |
---|
224 | _Assert( cpu_max <= CPU_MAXIMUM_PROCESSORS ); |
---|
225 | |
---|
226 | if ( targets == NULL ) { |
---|
227 | targets = _SMP_Get_online_processors(); |
---|
228 | } |
---|
229 | |
---|
230 | jobs.handler = handler; |
---|
231 | jobs.arg = arg; |
---|
232 | isr_level = _ISR_Get_level(); |
---|
233 | |
---|
234 | if ( isr_level == 0 ) { |
---|
235 | cpu_self = _Thread_Dispatch_disable(); |
---|
236 | } else { |
---|
237 | cpu_self = _Per_CPU_Get(); |
---|
238 | } |
---|
239 | |
---|
240 | _SMP_Issue_action_jobs( targets, &jobs, cpu_max ); |
---|
241 | _SMP_Wait_for_action_jobs( targets, &jobs, cpu_max, cpu_self ); |
---|
242 | |
---|
243 | if ( isr_level == 0 ) { |
---|
244 | _Thread_Dispatch_enable( cpu_self ); |
---|
245 | } |
---|
246 | } |
---|