1 | /** |
---|
2 | * @file |
---|
3 | * |
---|
4 | * @brief SMP Scheduler Implementation |
---|
5 | * |
---|
6 | * @ingroup ScoreSchedulerSMP |
---|
7 | */ |
---|
8 | |
---|
9 | /* |
---|
10 | * Copyright (c) 2013-2014 embedded brains GmbH. All rights reserved. |
---|
11 | * |
---|
12 | * embedded brains GmbH |
---|
13 | * Dornierstr. 4 |
---|
14 | * 82178 Puchheim |
---|
15 | * Germany |
---|
16 | * <rtems@embedded-brains.de> |
---|
17 | * |
---|
18 | * The license and distribution terms for this file may be |
---|
19 | * found in the file LICENSE in this distribution or at |
---|
20 | * http://www.rtems.org/license/LICENSE. |
---|
21 | */ |
---|
22 | |
---|
23 | #ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H |
---|
24 | #define _RTEMS_SCORE_SCHEDULERSMPIMPL_H |
---|
25 | |
---|
26 | #include <rtems/score/schedulersmp.h> |
---|
27 | #include <rtems/score/assert.h> |
---|
28 | #include <rtems/score/chainimpl.h> |
---|
29 | #include <rtems/score/schedulersimpleimpl.h> |
---|
30 | |
---|
31 | #ifdef __cplusplus |
---|
32 | extern "C" { |
---|
33 | #endif /* __cplusplus */ |
---|
34 | |
---|
35 | /** |
---|
36 | * @addtogroup ScoreSchedulerSMP |
---|
37 | * |
---|
38 | * @{ |
---|
39 | */ |
---|
40 | |
---|
/**
 * @brief Callback to obtain the highest priority ready thread of the
 * scheduler instance, or NULL if the ready set is empty.
 *
 * Callers (e.g. _Scheduler_SMP_Enqueue_ordered()) check the result for NULL.
 */
typedef Thread_Control *( *Scheduler_SMP_Get_highest_ready )(
  Scheduler_SMP_Context *self
);

/**
 * @brief Callback to remove a thread from a set (ready or scheduled) of the
 * scheduler instance.
 */
typedef void ( *Scheduler_SMP_Extract )(
  Scheduler_SMP_Context *self,
  Thread_Control *thread
);

/**
 * @brief Callback to insert a thread into a set (ready or scheduled) of the
 * scheduler instance.
 */
typedef void ( *Scheduler_SMP_Insert )(
  Scheduler_SMP_Context *self,
  Thread_Control *thread_to_insert
);

/**
 * @brief Callback to move a thread from one set to another (ready to
 * scheduled or vice versa) of the scheduler instance.
 */
typedef void ( *Scheduler_SMP_Move )(
  Scheduler_SMP_Context *self,
  Thread_Control *thread_to_move
);
59 | |
---|
60 | static inline void _Scheduler_SMP_Initialize( |
---|
61 | Scheduler_SMP_Context *self |
---|
62 | ) |
---|
63 | { |
---|
64 | _Chain_Initialize_empty( &self->Scheduled ); |
---|
65 | } |
---|
66 | |
---|
/**
 * @brief Returns the SMP scheduler node of the thread.
 *
 * The generic scheduler node of the thread is cast to the SMP-specific node
 * type.
 *
 * @param[in] thread The thread of interest.
 */
static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_get(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Scheduler_Node_get( thread );
}
73 | |
---|
74 | static inline void _Scheduler_SMP_Node_initialize( |
---|
75 | Scheduler_SMP_Node *node |
---|
76 | ) |
---|
77 | { |
---|
78 | node->state = SCHEDULER_SMP_NODE_BLOCKED; |
---|
79 | } |
---|
80 | |
---|
/**
 * @brief Table of valid scheduler node state transitions, indexed by
 * [current state][new state].  Defined in the scheduler implementation file.
 */
extern const bool _Scheduler_SMP_Node_valid_state_changes[ 4 ][ 4 ];

/**
 * @brief Changes the state of a scheduler node.
 *
 * In configurations with assertions enabled the transition is validated
 * against the table of valid state changes.
 *
 * @param[in, out] node The node whose state changes.
 * @param[in] new_state The new state of the node.
 */
static inline void _Scheduler_SMP_Node_change_state(
  Scheduler_SMP_Node *node,
  Scheduler_SMP_Node_state new_state
)
{
  _Assert(
    _Scheduler_SMP_Node_valid_state_changes[ node->state ][ new_state ]
  );

  node->state = new_state;
}
94 | |
---|
95 | static inline bool _Scheduler_SMP_Is_processor_owned_by_us( |
---|
96 | const Scheduler_SMP_Context *self, |
---|
97 | const Per_CPU_Control *cpu |
---|
98 | ) |
---|
99 | { |
---|
100 | return cpu->scheduler_context == &self->Base; |
---|
101 | } |
---|
102 | |
---|
/**
 * @brief Updates the heir thread of a processor and requests a thread
 * dispatch if necessary.
 *
 * If the processor to update is not the executing processor, an
 * inter-processor interrupt is sent to it so it notices the new heir.
 *
 * @param[in] cpu_self The processor of the executing thread.
 * @param[in, out] cpu_for_heir The processor which gets the new heir.
 * @param[in] heir The new heir thread.
 */
static inline void _Scheduler_SMP_Update_heir(
  Per_CPU_Control *cpu_self,
  Per_CPU_Control *cpu_for_heir,
  Thread_Control *heir
)
{
  cpu_for_heir->heir = heir;

  /*
   * It is critical that we first update the heir and then the dispatch
   * necessary so that _Thread_Get_heir_and_make_it_executing() cannot miss an
   * update.
   */
  _Atomic_Fence( ATOMIC_ORDER_SEQ_CST );

  /*
   * Only update the dispatch necessary indicator if not already set to
   * avoid superfluous inter-processor interrupts.
   */
  if ( !cpu_for_heir->dispatch_necessary ) {
    cpu_for_heir->dispatch_necessary = true;

    if ( cpu_for_heir != cpu_self ) {
      _Per_CPU_Send_interrupt( cpu_for_heir );
    }
  }
}
130 | |
---|
/**
 * @brief Allocates the processor of the victim thread to the scheduled
 * thread.
 *
 * The scheduled thread's node moves to the scheduled state.  If the
 * scheduled thread currently executes on a processor owned by this scheduler
 * instance, the two processors simply exchange heirs; otherwise the
 * scheduled thread must migrate to the victim's processor.
 *
 * @param[in, out] self The SMP scheduler context.
 * @param[in, out] scheduled The thread which gains a processor.
 * @param[in, out] victim The thread which loses its processor.
 */
static inline void _Scheduler_SMP_Allocate_processor(
  Scheduler_SMP_Context *self,
  Thread_Control *scheduled,
  Thread_Control *victim
)
{
  Scheduler_SMP_Node *scheduled_node = _Scheduler_SMP_Node_get( scheduled );
  Per_CPU_Control *cpu_of_scheduled = _Thread_Get_CPU( scheduled );
  Per_CPU_Control *cpu_of_victim = _Thread_Get_CPU( victim );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();
  Thread_Control *heir;

  _Scheduler_SMP_Node_change_state(
    scheduled_node,
    SCHEDULER_SMP_NODE_SCHEDULED
  );

  /* NOTE(review): a non-zero ISR level presumably means interrupts are
     disabled here — confirm against the ISR API. */
  _Assert( _ISR_Get_level() != 0 );

  if ( _Thread_Is_executing_on_a_processor( scheduled ) ) {
    if ( _Scheduler_SMP_Is_processor_owned_by_us( self, cpu_of_scheduled ) ) {
      /* Keep the scheduled thread where it executes and hand its previous
         heir over to the victim's processor below. */
      heir = cpu_of_scheduled->heir;
      _Scheduler_SMP_Update_heir( cpu_self, cpu_of_scheduled, scheduled );
    } else {
      /* We have to force a migration to our processor set */
      _Assert( scheduled->debug_real_cpu->heir != scheduled );
      heir = scheduled;
    }
  } else {
    heir = scheduled;
  }

  if ( heir != victim ) {
    _Thread_Set_CPU( heir, cpu_of_victim );
    _Scheduler_SMP_Update_heir( cpu_self, cpu_of_victim, heir );
  }
}
168 | |
---|
169 | static inline Thread_Control *_Scheduler_SMP_Get_lowest_scheduled( |
---|
170 | Scheduler_SMP_Context *self |
---|
171 | ) |
---|
172 | { |
---|
173 | Thread_Control *lowest_ready = NULL; |
---|
174 | Chain_Control *scheduled = &self->Scheduled; |
---|
175 | |
---|
176 | if ( !_Chain_Is_empty( scheduled ) ) { |
---|
177 | lowest_ready = (Thread_Control *) _Chain_Last( scheduled ); |
---|
178 | } |
---|
179 | |
---|
180 | return lowest_ready; |
---|
181 | } |
---|
182 | |
---|
/**
 * @brief Enqueues a thread according to the specified order function.
 *
 * A thread in the "in the air" state competes against the highest ready
 * thread for its processor; any other thread competes against the lowest
 * priority scheduled thread.  The loser of each comparison ends up on the
 * ready chain.
 *
 * @param[in, out] self The SMP scheduler context.
 * @param[in, out] thread The thread to enqueue.
 * @param[in] order The order function.
 * @param[in] get_highest_ready Function to get the highest ready thread.
 * @param[in] insert_ready Function to insert a thread into the set of ready
 *   threads.
 * @param[in] insert_scheduled Function to insert a thread into the set of
 *   scheduled threads.
 * @param[in] move_from_ready_to_scheduled Function to move a thread from the
 *   set of ready threads to the set of scheduled threads.
 * @param[in] move_from_scheduled_to_ready Function to move a thread from the
 *   set of scheduled threads to the set of ready threads.
 */
static inline void _Scheduler_SMP_Enqueue_ordered(
  Scheduler_SMP_Context *self,
  Thread_Control *thread,
  Chain_Node_order order,
  Scheduler_SMP_Get_highest_ready get_highest_ready,
  Scheduler_SMP_Insert insert_ready,
  Scheduler_SMP_Insert insert_scheduled,
  Scheduler_SMP_Move move_from_ready_to_scheduled,
  Scheduler_SMP_Move move_from_scheduled_to_ready
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );

  if ( node->state == SCHEDULER_SMP_NODE_IN_THE_AIR ) {
    Thread_Control *highest_ready = ( *get_highest_ready )( self );

    /*
     * The thread has been extracted from the scheduled chain. We have to
     * place it now on the scheduled or ready chain.
     *
     * NOTE: Do not exchange parameters to do the negation of the order check.
     */
    if (
      highest_ready != NULL
      && !( *order )( &thread->Object.Node, &highest_ready->Object.Node )
    ) {
      /* The highest ready thread wins: it gets the processor, this thread
         becomes ready. */
      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
      _Scheduler_SMP_Allocate_processor( self, highest_ready, thread );
      ( *insert_ready )( self, thread );
      ( *move_from_ready_to_scheduled )( self, highest_ready );
    } else {
      /* This thread keeps its processor and stays scheduled. */
      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
      ( *insert_scheduled )( self, thread );
    }
  } else {
    Thread_Control *lowest_scheduled = _Scheduler_SMP_Get_lowest_scheduled( self );

    /*
     * The scheduled chain is empty if nested interrupts change the priority of
     * all scheduled threads. These threads are in the air.
     */
    if (
      lowest_scheduled != NULL
      && ( *order )( &thread->Object.Node, &lowest_scheduled->Object.Node )
    ) {
      /* This thread preempts the lowest priority scheduled thread. */
      Scheduler_SMP_Node *lowest_scheduled_node =
        _Scheduler_SMP_Node_get( lowest_scheduled );

      _Scheduler_SMP_Node_change_state(
        lowest_scheduled_node,
        SCHEDULER_SMP_NODE_READY
      );
      _Scheduler_SMP_Allocate_processor( self, thread, lowest_scheduled );
      ( *insert_scheduled )( self, thread );
      ( *move_from_scheduled_to_ready )( self, lowest_scheduled );
    } else {
      /* This thread does not preempt anyone and becomes ready. */
      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
      ( *insert_ready )( self, thread );
    }
  }
}
244 | |
---|
245 | static inline void _Scheduler_SMP_Schedule_highest_ready( |
---|
246 | Scheduler_SMP_Context *self, |
---|
247 | Thread_Control *victim, |
---|
248 | Scheduler_SMP_Get_highest_ready get_highest_ready, |
---|
249 | Scheduler_SMP_Move move_from_ready_to_scheduled |
---|
250 | ) |
---|
251 | { |
---|
252 | Thread_Control *highest_ready = ( *get_highest_ready )( self ); |
---|
253 | |
---|
254 | _Scheduler_SMP_Allocate_processor( self, highest_ready, victim ); |
---|
255 | |
---|
256 | ( *move_from_ready_to_scheduled )( self, highest_ready ); |
---|
257 | } |
---|
258 | |
---|
/**
 * @brief Schedules the highest ready thread in case the given thread is in
 * the "in the air" state.
 *
 * The thread's node moves to the blocked state and its processor is handed
 * over to the highest ready thread.  Threads in other states are left
 * untouched.
 *
 * @param[in, out] self The SMP scheduler context.
 * @param[in, out] thread The thread which may lose its processor.
 * @param[in] get_highest_ready Function to get the highest ready thread.
 * @param[in] move_from_ready_to_scheduled Function to move a thread from the
 *   set of ready threads to the set of scheduled threads.
 */
static inline void _Scheduler_SMP_Schedule(
  Scheduler_SMP_Context *self,
  Thread_Control *thread,
  Scheduler_SMP_Get_highest_ready get_highest_ready,
  Scheduler_SMP_Move move_from_ready_to_scheduled
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );

  if ( node->state == SCHEDULER_SMP_NODE_IN_THE_AIR ) {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );

    _Scheduler_SMP_Schedule_highest_ready(
      self,
      thread,
      get_highest_ready,
      move_from_ready_to_scheduled
    );
  }
}
279 | |
---|
280 | static inline void _Scheduler_SMP_Block( |
---|
281 | Scheduler_SMP_Context *self, |
---|
282 | Thread_Control *thread, |
---|
283 | Scheduler_SMP_Extract extract, |
---|
284 | Scheduler_SMP_Get_highest_ready get_highest_ready, |
---|
285 | Scheduler_SMP_Move move_from_ready_to_scheduled |
---|
286 | ) |
---|
287 | { |
---|
288 | ( *extract )( self, thread ); |
---|
289 | |
---|
290 | _Scheduler_SMP_Schedule( |
---|
291 | self, |
---|
292 | thread, |
---|
293 | get_highest_ready, |
---|
294 | move_from_ready_to_scheduled |
---|
295 | ); |
---|
296 | } |
---|
297 | |
---|
298 | static inline void _Scheduler_SMP_Extract( |
---|
299 | Scheduler_SMP_Context *self, |
---|
300 | Thread_Control *thread, |
---|
301 | Scheduler_SMP_Extract extract |
---|
302 | ) |
---|
303 | { |
---|
304 | ( *extract )( self, thread ); |
---|
305 | } |
---|
306 | |
---|
307 | static inline void _Scheduler_SMP_Insert_scheduled_lifo( |
---|
308 | Scheduler_SMP_Context *self, |
---|
309 | Thread_Control *thread |
---|
310 | ) |
---|
311 | { |
---|
312 | _Chain_Insert_ordered_unprotected( |
---|
313 | &self->Scheduled, |
---|
314 | &thread->Object.Node, |
---|
315 | _Scheduler_simple_Insert_priority_lifo_order |
---|
316 | ); |
---|
317 | } |
---|
318 | |
---|
319 | static inline void _Scheduler_SMP_Insert_scheduled_fifo( |
---|
320 | Scheduler_SMP_Context *self, |
---|
321 | Thread_Control *thread |
---|
322 | ) |
---|
323 | { |
---|
324 | _Chain_Insert_ordered_unprotected( |
---|
325 | &self->Scheduled, |
---|
326 | &thread->Object.Node, |
---|
327 | _Scheduler_simple_Insert_priority_fifo_order |
---|
328 | ); |
---|
329 | } |
---|
330 | |
---|
/**
 * @brief Starts an idle thread on the given processor.
 *
 * The thread is bound to the processor and appended to the chain of
 * scheduled threads.
 *
 * NOTE(review): the node state is assigned directly instead of going through
 * _Scheduler_SMP_Node_change_state() — presumably because this runs during
 * system initialization where the transition is not covered by the valid
 * state change table; confirm before changing.
 *
 * @param[in, out] self The SMP scheduler context.
 * @param[in, out] thread The idle thread to start.
 * @param[in, out] cpu The processor for the idle thread.
 */
static inline void _Scheduler_SMP_Start_idle(
  Scheduler_SMP_Context *self,
  Thread_Control *thread,
  Per_CPU_Control *cpu
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );

  node->state = SCHEDULER_SMP_NODE_SCHEDULED;

  _Thread_Set_CPU( thread, cpu );
  _Chain_Append_unprotected( &self->Scheduled, &thread->Object.Node );
}
344 | |
---|
345 | /** @} */ |
---|
346 | |
---|
347 | #ifdef __cplusplus |
---|
348 | } |
---|
349 | #endif /* __cplusplus */ |
---|
350 | |
---|
351 | #endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */ |
---|