/**
 * @file
 *
 * @brief SMP Scheduler Implementation
 *
 * @ingroup ScoreSchedulerSMP
 */

/*
 * Copyright (c) 2013, 2016 embedded brains GmbH. All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
#define _RTEMS_SCORE_SCHEDULERSMPIMPL_H

#include <rtems/score/schedulersmp.h>
#include <rtems/score/assert.h>
#include <rtems/score/chainimpl.h>
#include <rtems/score/schedulersimpleimpl.h>
#include <rtems/bspIo.h>

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/**
 * @addtogroup ScoreSchedulerSMP
 *
 * The scheduler nodes can be in three states
 * - @ref SCHEDULER_SMP_NODE_BLOCKED,
 * - @ref SCHEDULER_SMP_NODE_SCHEDULED, and
 * - @ref SCHEDULER_SMP_NODE_READY.
 *
 * State transitions are triggered via basic operations
 * - _Scheduler_SMP_Enqueue_ordered(),
 * - _Scheduler_SMP_Enqueue_scheduled_ordered(), and
 * - _Scheduler_SMP_Block().
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *
 *   bs [label="BLOCKED"];
 *   ss [label="SCHEDULED", fillcolor="green"];
 *   rs [label="READY", fillcolor="red"];
 *
 *   edge [label="enqueue"];
 *   edge [fontcolor="darkgreen", color="darkgreen"];
 *
 *   bs -> ss;
 *
 *   edge [fontcolor="red", color="red"];
 *
 *   bs -> rs;
 *
 *   edge [label="enqueue other"];
 *
 *   ss -> rs;
 *
 *   edge [label="block"];
 *   edge [fontcolor="black", color="black"];
 *
 *   ss -> bs;
 *   rs -> bs;
 *
 *   edge [label="block other"];
 *   edge [fontcolor="darkgreen", color="darkgreen"];
 *
 *   rs -> ss;
 * }
 * @enddot
 *
 * During system initialization each processor of the scheduler instance
 * starts with an idle thread assigned to it.  Let's have a look at an example
 * with two idle threads I and J with priority 5.  We also have blocked
 * threads A, B and C with priorities 1, 2 and 3 respectively.  The scheduler
 * nodes are ordered with respect to the thread priority from left to right in
 * the diagrams below.  The highest priority node (lowest priority number) is
 * the leftmost node.  Since the processor assignment is independent of the
 * thread priority, the processor assignments may change from one diagram to
 * the next.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *   subgraph {
 *     rank = same;
 *
 *     i [label="I (5)", fillcolor="green"];
 *     j [label="J (5)", fillcolor="green"];
 *     a [label="A (1)"];
 *     b [label="B (2)"];
 *     c [label="C (3)"];
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   i -> p0;
 *   j -> p1;
 * }
 * @enddot
 *
 * Let's start A.  For this, an enqueue operation is performed.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     i [label="I (5)", fillcolor="green"];
 *     j [label="J (5)", fillcolor="red"];
 *     a [label="A (1)", fillcolor="green"];
 *     b [label="B (2)"];
 *     c [label="C (3)"];
 *     a -> i;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   i -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's start C.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     a [label="A (1)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="green"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b [label="B (2)"];
 *     a -> c;
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   c -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's start B.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     a [label="A (1)", fillcolor="green"];
 *     b [label="B (2)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="red"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     a -> b;
 *     c -> i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   b -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's change the priority of thread A to 4.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     b [label="B (2)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="green"];
 *     a [label="A (4)", fillcolor="red"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b -> c;
 *     a -> i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   b -> p0;
 *   c -> p1;
 * }
 * @enddot
 *
 * Now perform a blocking operation with thread B.  Please note that thread A
 * has now migrated from processor 1 to processor 0 and thread C still
 * executes on processor 1.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     c [label="C (3)", fillcolor="green"];
 *     a [label="A (4)", fillcolor="green"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b [label="B (2)"];
 *     c -> a;
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   a -> p0;
 *   c -> p1;
 * }
 * @enddot
 *
 * @{
 */

typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )(
  Scheduler_Context *context,
  Scheduler_Node *node
);

typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_scheduled )(
  Scheduler_Context *context,
  Scheduler_Node *filter,
  Chain_Node_order order
);

typedef void ( *Scheduler_SMP_Extract )(
  Scheduler_Context *context,
  Scheduler_Node *node_to_extract
);

typedef void ( *Scheduler_SMP_Insert )(
  Scheduler_Context *context,
  Scheduler_Node *node_to_insert
);

typedef void ( *Scheduler_SMP_Move )(
  Scheduler_Context *context,
  Scheduler_Node *node_to_move
);

typedef bool ( *Scheduler_SMP_Ask_for_help )(
  Scheduler_Context *context,
  Thread_Control *thread,
  Scheduler_Node *node
);

typedef void ( *Scheduler_SMP_Update )(
  Scheduler_Context *context,
  Scheduler_Node *node_to_update,
  Priority_Control new_priority
);

typedef Thread_Control *( *Scheduler_SMP_Enqueue )(
  Scheduler_Context *context,
  Scheduler_Node *node_to_enqueue,
  Thread_Control *needs_help
);

typedef Thread_Control *( *Scheduler_SMP_Enqueue_scheduled )(
  Scheduler_Context *context,
  Scheduler_Node *node_to_enqueue
);

typedef void ( *Scheduler_SMP_Allocate_processor )(
  Scheduler_Context *context,
  Thread_Control *scheduled_thread,
  Thread_Control *victim_thread,
  Per_CPU_Control *victim_cpu
);
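
/*
 * The function pointer types above parameterize the generic operations below
 * with the ready set implementation of a concrete scheduler.  A minimal
 * usage sketch follows; the _My_scheduler_*() callbacks are hypothetical
 * placeholders for scheduler-specific ready set operations and are not part
 * of this file.
 *
 *   static Thread_Control *_My_scheduler_Enqueue_fifo(
 *     Scheduler_Context *context,
 *     Scheduler_Node *node,
 *     Thread_Control *needs_help
 *   )
 *   {
 *     return _Scheduler_SMP_Enqueue_ordered(
 *       context,
 *       node,
 *       needs_help,
 *       _Scheduler_SMP_Insert_priority_fifo_order,
 *       _My_scheduler_Insert_ready_fifo,
 *       _Scheduler_SMP_Insert_scheduled_fifo,
 *       _My_scheduler_Move_from_scheduled_to_ready,
 *       _Scheduler_SMP_Get_lowest_scheduled,
 *       _Scheduler_SMP_Allocate_processor_lazy
 *     );
 *   }
 */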

static inline bool _Scheduler_SMP_Insert_priority_lifo_order(
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  const Scheduler_SMP_Node *node_to_insert =
    (const Scheduler_SMP_Node *) to_insert;
  const Scheduler_SMP_Node *node_next =
    (const Scheduler_SMP_Node *) next;

  return node_to_insert->priority <= node_next->priority;
}

static inline bool _Scheduler_SMP_Insert_priority_fifo_order(
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  const Scheduler_SMP_Node *node_to_insert =
    (const Scheduler_SMP_Node *) to_insert;
  const Scheduler_SMP_Node *node_next =
    (const Scheduler_SMP_Node *) next;

  return node_to_insert->priority < node_next->priority;
}
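
/*
 * Note that the two order functions above differ only in the handling of
 * equal priorities when used with _Chain_Insert_ordered_unprotected(): with
 * the LIFO order (<=) a newly inserted node is placed before already queued
 * nodes of the same priority, with the FIFO order (<) it is placed behind
 * them.  For example, a node of priority 3 inserted into a chain that
 * already contains a priority 3 node ends up in front of that node with the
 * LIFO order and behind it with the FIFO order.
 */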

static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
  Scheduler_Context *context
)
{
  return (Scheduler_SMP_Context *) context;
}

static inline void _Scheduler_SMP_Initialize(
  Scheduler_SMP_Context *self
)
{
  _Chain_Initialize_empty( &self->Scheduled );
  _Chain_Initialize_empty( &self->Idle_threads );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_node(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Scheduler_Thread_get_node( thread );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_own_node(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Thread_Scheduler_get_own_node( thread );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast(
  Scheduler_Node *node
)
{
  return (Scheduler_SMP_Node *) node;
}

static inline Scheduler_SMP_Node_state _Scheduler_SMP_Node_state(
  const Scheduler_Node *node
)
{
  return ( (const Scheduler_SMP_Node *) node )->state;
}

static inline Priority_Control _Scheduler_SMP_Node_priority(
  const Scheduler_Node *node
)
{
  return ( (const Scheduler_SMP_Node *) node )->priority;
}

static inline void _Scheduler_SMP_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_SMP_Node *node,
  Thread_Control *thread,
  Priority_Control priority
)
{
  _Scheduler_Node_do_initialize( scheduler, &node->Base, thread, priority );
  node->state = SCHEDULER_SMP_NODE_BLOCKED;
  node->priority = priority;
}

static inline void _Scheduler_SMP_Node_update_priority(
  Scheduler_SMP_Node *node,
  Priority_Control new_priority
)
{
  node->priority = new_priority;
}

extern const bool _Scheduler_SMP_Node_valid_state_changes[ 3 ][ 3 ];

static inline void _Scheduler_SMP_Node_change_state(
  Scheduler_Node *node,
  Scheduler_SMP_Node_state new_state
)
{
  Scheduler_SMP_Node *the_node;

  the_node = _Scheduler_SMP_Node_downcast( node );
  _Assert(
    _Scheduler_SMP_Node_valid_state_changes[ the_node->state ][ new_state ]
  );

  the_node->state = new_state;
}

static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
  const Scheduler_Context *context,
  const Per_CPU_Control *cpu
)
{
  return cpu->scheduler_context == context;
}

static inline Thread_Control *_Scheduler_SMP_Get_idle_thread(
  Scheduler_Context *context
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Thread_Control *idle = (Thread_Control *)
    _Chain_Get_first_unprotected( &self->Idle_threads );

  _Assert( &idle->Object.Node != _Chain_Tail( &self->Idle_threads ) );

  return idle;
}

static inline void _Scheduler_SMP_Release_idle_thread(
  Scheduler_Context *context,
  Thread_Control *idle
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  _Chain_Prepend_unprotected( &self->Idle_threads, &idle->Object.Node );
}

static inline void _Scheduler_SMP_Allocate_processor_lazy(
  Scheduler_Context *context,
  Thread_Control *scheduled_thread,
  Thread_Control *victim_thread,
  Per_CPU_Control *victim_cpu
)
{
  Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();
  Thread_Control *heir;

  _Assert( _ISR_Get_level() != 0 );

  if ( _Thread_Is_executing_on_a_processor( scheduled_thread ) ) {
    if ( _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu ) ) {
      heir = scheduled_cpu->heir;
      _Thread_Dispatch_update_heir(
        cpu_self,
        scheduled_cpu,
        scheduled_thread
      );
    } else {
      /* We have to force a migration to our processor set */
      heir = scheduled_thread;
    }
  } else {
    heir = scheduled_thread;
  }

  if ( heir != victim_thread ) {
    _Thread_Set_CPU( heir, victim_cpu );
    _Thread_Dispatch_update_heir( cpu_self, victim_cpu, heir );
  }
}

/*
 * This method is slightly different from
 * _Scheduler_SMP_Allocate_processor_lazy() in that it does what it is asked to
 * do.  _Scheduler_SMP_Allocate_processor_lazy() attempts to prevent migrations
 * but does not take into account affinity.
 */
static inline void _Scheduler_SMP_Allocate_processor_exact(
  Scheduler_Context *context,
  Thread_Control *scheduled_thread,
  Thread_Control *victim_thread,
  Per_CPU_Control *victim_cpu
)
{
  Per_CPU_Control *cpu_self = _Per_CPU_Get();

  (void) context;

  _Thread_Set_CPU( scheduled_thread, victim_cpu );
  _Thread_Dispatch_update_heir( cpu_self, victim_cpu, scheduled_thread );
}

static inline void _Scheduler_SMP_Allocate_processor(
  Scheduler_Context *context,
  Scheduler_Node *scheduled,
  Thread_Control *victim_thread,
  Per_CPU_Control *victim_cpu,
  Scheduler_SMP_Allocate_processor allocate_processor
)
{
  Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );

  _Scheduler_SMP_Node_change_state( scheduled, SCHEDULER_SMP_NODE_SCHEDULED );

  ( *allocate_processor )(
    context,
    scheduled_thread,
    victim_thread,
    victim_cpu
  );
}

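/**
 * @brief Preempts the victim node and allocates its processor to the
 * scheduled node.
 *
 * The victim node changes to the ready state.  If the victim thread is
 * currently scheduled, its scheduler state changes to ready as well and, if
 * it uses helping nodes, it is registered on its processor as a thread in
 * need for help.  Finally the processor of the victim thread is allocated to
 * the user of the scheduled node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] scheduled The node which gets the processor.
 * @param[in] victim The node which loses the processor.
 * @param[in] allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
 *
 * @return The thread of the victim node.
 */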
static inline Thread_Control *_Scheduler_SMP_Preempt(
  Scheduler_Context *context,
  Scheduler_Node *scheduled,
  Scheduler_Node *victim,
  Scheduler_SMP_Allocate_processor allocate_processor
)
{
  Thread_Control *victim_thread;
  ISR_lock_Context lock_context;
  Per_CPU_Control *victim_cpu;

  victim_thread = _Scheduler_Node_get_user( victim );
  _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );

  _Thread_Scheduler_acquire_critical( victim_thread, &lock_context );

  victim_cpu = _Thread_Get_CPU( victim_thread );

  if ( victim_thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED ) {
    _Scheduler_Thread_change_state( victim_thread, THREAD_SCHEDULER_READY );

    if ( victim_thread->Scheduler.helping_nodes > 0 ) {
      _Per_CPU_Acquire( victim_cpu );
      _Chain_Append_unprotected(
        &victim_cpu->Threads_in_need_for_help,
        &victim_thread->Scheduler.Help_node
      );
      _Per_CPU_Release( victim_cpu );
    }
  }

  _Thread_Scheduler_release_critical( victim_thread, &lock_context );

  _Scheduler_SMP_Allocate_processor(
    context,
    scheduled,
    victim_thread,
    victim_cpu,
    allocate_processor
  );

  return victim_thread;
}

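/**
 * @brief Returns the lowest priority scheduled node, which is the last node
 * of the scheduled chain.
 *
 * The filter node and order function arguments are ignored by this
 * implementation.
 */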
static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node *filter,
  Chain_Node_order order
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Chain_Control *scheduled = &self->Scheduled;
  Scheduler_Node *lowest_scheduled =
    (Scheduler_Node *) _Chain_Last( scheduled );

  (void) filter;
  (void) order;

  _Assert( &lowest_scheduled->Node != _Chain_Tail( scheduled ) );
  _Assert(
    _Chain_Next( &lowest_scheduled->Node ) == _Chain_Tail( scheduled )
  );

  return lowest_scheduled;
}

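/**
 * @brief Tries to schedule the node in place of the lowest scheduled node.
 *
 * If the node can be scheduled, the lowest scheduled node is preempted and
 * moved to the set of ready nodes, and the node is inserted into the set of
 * scheduled nodes.  A node which cannot be scheduled is blocked.
 *
 * @return The thread needing help, or NULL.
 */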
static inline Thread_Control *_Scheduler_SMP_Enqueue_to_scheduled(
  Scheduler_Context *context,
  Scheduler_Node *node,
  Scheduler_Node *lowest_scheduled,
  Scheduler_SMP_Insert insert_scheduled,
  Scheduler_SMP_Move move_from_scheduled_to_ready,
  Scheduler_SMP_Allocate_processor allocate_processor
)
{
  Thread_Control *needs_help;
  Scheduler_Try_to_schedule_action action;

  action = _Scheduler_Try_to_schedule_node(
    context,
    node,
    _Scheduler_Node_get_idle( lowest_scheduled ),
    _Scheduler_SMP_Get_idle_thread
  );

  if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
    Thread_Control *lowest_scheduled_user;
    Thread_Control *idle;

    lowest_scheduled_user = _Scheduler_SMP_Preempt(
      context,
      node,
      lowest_scheduled,
      allocate_processor
    );

    ( *insert_scheduled )( context, node );
    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

    idle = _Scheduler_Release_idle_thread(
      context,
      lowest_scheduled,
      _Scheduler_SMP_Release_idle_thread
    );
    if ( idle == NULL ) {
      needs_help = lowest_scheduled_user;
    } else {
      needs_help = NULL;
    }
  } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
    _Scheduler_SMP_Node_change_state(
      lowest_scheduled,
      SCHEDULER_SMP_NODE_READY
    );
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );

    ( *insert_scheduled )( context, node );
    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

    _Scheduler_Exchange_idle_thread(
      node,
      lowest_scheduled,
      _Scheduler_Node_get_idle( lowest_scheduled )
    );

    needs_help = NULL;
  } else {
    _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
    needs_help = NULL;
  }

  return needs_help;
}

/**
 * @brief Enqueues a node according to the specified order function.
 *
 * The node must not be in the scheduled state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node to enqueue.
 * @param[in] needs_help The thread needing help in case the node cannot be
 *   scheduled.
 * @param[in] order The order function.
 * @param[in] insert_ready Function to insert a node into the set of ready
 *   nodes.
 * @param[in] insert_scheduled Function to insert a node into the set of
 *   scheduled nodes.
 * @param[in] move_from_scheduled_to_ready Function to move a node from the set
 *   of scheduled nodes to the set of ready nodes.
 * @param[in] get_lowest_scheduled Function to select the node from the
 *   scheduled nodes to replace.  It may not be possible to find one; in this
 *   case a pointer must be returned so that the order function returns false
 *   if this pointer is passed as the second argument to the order function.
 * @param[in] allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
 */
static inline Thread_Control *_Scheduler_SMP_Enqueue_ordered(
  Scheduler_Context *context,
  Scheduler_Node *node,
  Thread_Control *needs_help,
  Chain_Node_order order,
  Scheduler_SMP_Insert insert_ready,
  Scheduler_SMP_Insert insert_scheduled,
  Scheduler_SMP_Move move_from_scheduled_to_ready,
  Scheduler_SMP_Get_lowest_scheduled get_lowest_scheduled,
  Scheduler_SMP_Allocate_processor allocate_processor
)
{
  Scheduler_Node *lowest_scheduled =
    ( *get_lowest_scheduled )( context, node, order );

  if ( ( *order )( &node->Node, &lowest_scheduled->Node ) ) {
    needs_help = _Scheduler_SMP_Enqueue_to_scheduled(
      context,
      node,
      lowest_scheduled,
      insert_scheduled,
      move_from_scheduled_to_ready,
      allocate_processor
    );
  } else {
    ( *insert_ready )( context, node );
  }

  return needs_help;
}

/**
 * @brief Enqueues a scheduled node according to the specified order
 * function.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node to enqueue.
 * @param[in] order The order function.
 * @param[in] extract_from_ready Function to extract a node from the set of
 *   ready nodes.
 * @param[in] get_highest_ready Function to get the highest ready node.
 * @param[in] insert_ready Function to insert a node into the set of ready
 *   nodes.
 * @param[in] insert_scheduled Function to insert a node into the set of
 *   scheduled nodes.
 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
 *   of ready nodes to the set of scheduled nodes.
 * @param[in] allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
 */
static inline Thread_Control *_Scheduler_SMP_Enqueue_scheduled_ordered(
  Scheduler_Context *context,
  Scheduler_Node *node,
  Chain_Node_order order,
  Scheduler_SMP_Extract extract_from_ready,
  Scheduler_SMP_Get_highest_ready get_highest_ready,
  Scheduler_SMP_Insert insert_ready,
  Scheduler_SMP_Insert insert_scheduled,
  Scheduler_SMP_Move move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor allocate_processor
)
{
  while ( true ) {
    Scheduler_Node *highest_ready;
    Scheduler_Try_to_schedule_action action;

    highest_ready = ( *get_highest_ready )( context, node );

    /*
     * The node has been extracted from the scheduled chain.  We have to place
     * it now on the scheduled or ready set.
     */
    if ( ( *order )( &node->Node, &highest_ready->Node ) ) {
      ( *insert_scheduled )( context, node );
      return NULL;
    }

    action = _Scheduler_Try_to_schedule_node(
      context,
      highest_ready,
      _Scheduler_Node_get_idle( node ),
      _Scheduler_SMP_Get_idle_thread
    );

    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
      Thread_Control *user;
      Thread_Control *idle;

      user = _Scheduler_SMP_Preempt(
        context,
        highest_ready,
        node,
        allocate_processor
      );

      ( *insert_ready )( context, node );
      ( *move_from_ready_to_scheduled )( context, highest_ready );

      idle = _Scheduler_Release_idle_thread(
        context,
        node,
        _Scheduler_SMP_Release_idle_thread
      );

      if ( idle == NULL ) {
        return user;
      } else {
        return NULL;
      }
    } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
      _Scheduler_SMP_Node_change_state(
        highest_ready,
        SCHEDULER_SMP_NODE_SCHEDULED
      );

      ( *insert_ready )( context, node );
      ( *move_from_ready_to_scheduled )( context, highest_ready );

      _Scheduler_Exchange_idle_thread(
        highest_ready,
        node,
        _Scheduler_Node_get_idle( node )
      );
      return NULL;
    } else {
      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );

      _Scheduler_SMP_Node_change_state(
        highest_ready,
        SCHEDULER_SMP_NODE_BLOCKED
      );

      ( *extract_from_ready )( context, highest_ready );
    }
  }
}

static inline void _Scheduler_SMP_Extract_from_scheduled(
  Scheduler_Node *node
)
{
  _Chain_Extract_unprotected( &node->Node );
}

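/**
 * @brief Schedules the highest ready node on the processor of the victim
 * node.
 *
 * Ready nodes which cannot be scheduled are blocked and extracted from the
 * set of ready nodes until a node is found that can be scheduled.
 */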
static inline void _Scheduler_SMP_Schedule_highest_ready(
  Scheduler_Context *context,
  Scheduler_Node *victim,
  Per_CPU_Control *victim_cpu,
  Scheduler_SMP_Extract extract_from_ready,
  Scheduler_SMP_Get_highest_ready get_highest_ready,
  Scheduler_SMP_Move move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor allocate_processor
)
{
  Scheduler_Try_to_schedule_action action;

  do {
    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );

    action = _Scheduler_Try_to_schedule_node(
      context,
      highest_ready,
      NULL,
      _Scheduler_SMP_Get_idle_thread
    );

    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
      _Scheduler_SMP_Allocate_processor(
        context,
        highest_ready,
        _Scheduler_Node_get_user( victim ),
        victim_cpu,
        allocate_processor
      );

      ( *move_from_ready_to_scheduled )( context, highest_ready );
    } else {
      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );

      _Scheduler_SMP_Node_change_state(
        highest_ready,
        SCHEDULER_SMP_NODE_BLOCKED
      );

      ( *extract_from_ready )( context, highest_ready );
    }
  } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
}

/**
 * @brief Blocks a thread.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread of the scheduling operation.
 * @param[in] node The scheduler node of the thread to block.
 * @param[in] extract_from_ready Function to extract a node from the set of
 *   ready nodes.
 * @param[in] get_highest_ready Function to get the highest ready node.
 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
 *   of ready nodes to the set of scheduled nodes.
 * @param[in] allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
 */
static inline void _Scheduler_SMP_Block(
  Scheduler_Context *context,
  Thread_Control *thread,
  Scheduler_Node *node,
  Scheduler_SMP_Extract extract_from_ready,
  Scheduler_SMP_Get_highest_ready get_highest_ready,
  Scheduler_SMP_Move move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor allocate_processor
)
{
  Scheduler_SMP_Node_state node_state;
  Per_CPU_Control *thread_cpu;

  node_state = _Scheduler_SMP_Node_state( node );

  thread_cpu = _Scheduler_Block_node(
    context,
    thread,
    node,
    node_state == SCHEDULER_SMP_NODE_SCHEDULED,
    _Scheduler_SMP_Get_idle_thread
  );

  if ( thread_cpu != NULL ) {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );

    if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
      _Scheduler_SMP_Extract_from_scheduled( node );
      _Scheduler_SMP_Schedule_highest_ready(
        context,
        node,
        thread_cpu,
        extract_from_ready,
        get_highest_ready,
        move_from_ready_to_scheduled,
        allocate_processor
      );
    } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
      ( *extract_from_ready )( context, node );
    }
  }
}

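/**
 * @brief Unblocks a thread.
 *
 * If the node priority changed while the thread was blocked, the node is
 * updated first.  A blocked node becomes ready and is enqueued in FIFO
 * order.  If the node is already in the ready state, only a potential help
 * request for the thread is returned.
 *
 * @return The thread needing help, or NULL.
 */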
static inline Thread_Control *_Scheduler_SMP_Unblock(
  Scheduler_Context *context,
  Thread_Control *thread,
  Scheduler_Node *node,
  Scheduler_SMP_Update update,
  Scheduler_SMP_Enqueue enqueue_fifo
)
{
  Scheduler_SMP_Node_state node_state;
  bool unblock;
  Thread_Control *needs_help;

  node_state = _Scheduler_SMP_Node_state( node );
  unblock = _Scheduler_Unblock_node(
    context,
    thread,
    node,
    node_state == SCHEDULER_SMP_NODE_SCHEDULED,
    _Scheduler_SMP_Release_idle_thread
  );

  if ( unblock ) {
    Priority_Control new_priority;
    bool prepend_it;

    new_priority = _Scheduler_Node_get_priority( node, &prepend_it );
    (void) prepend_it;

    if ( new_priority != _Scheduler_SMP_Node_priority( node ) ) {
      ( *update )( context, node, new_priority );
    }

    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );

      needs_help = ( *enqueue_fifo )( context, node, thread );
    } else {
      _Assert( node_state == SCHEDULER_SMP_NODE_READY );
      _Assert(
        node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
          || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
      );
      _Assert( node->idle == NULL );

      if ( node->accepts_help == thread ) {
        needs_help = thread;
      } else {
        needs_help = NULL;
      }
    }
  } else {
    needs_help = NULL;
  }

  return needs_help;
}

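/**
 * @brief Updates the priority of the node and re-inserts it into the set of
 * scheduled or ready nodes if necessary.
 *
 * If the priority is unchanged, or the node is neither scheduled nor ready,
 * at most a help request is issued for a ready thread.
 */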
static inline void _Scheduler_SMP_Update_priority(
  Scheduler_Context *context,
  Thread_Control *thread,
  Scheduler_Node *node,
  Scheduler_SMP_Extract extract_from_ready,
  Scheduler_SMP_Update update,
  Scheduler_SMP_Enqueue enqueue_fifo,
  Scheduler_SMP_Enqueue enqueue_lifo,
  Scheduler_SMP_Enqueue_scheduled enqueue_scheduled_fifo,
  Scheduler_SMP_Enqueue_scheduled enqueue_scheduled_lifo,
  Scheduler_SMP_Ask_for_help ask_for_help
)
{
  Priority_Control new_priority;
  bool prepend_it;
  Scheduler_SMP_Node_state node_state;

  new_priority = _Scheduler_Node_get_priority( node, &prepend_it );

  if ( new_priority == _Scheduler_SMP_Node_priority( node ) ) {
    if ( _Thread_Is_ready( thread ) ) {
      ( *ask_for_help )( context, thread, node );
    }

    return;
  }

  node_state = _Scheduler_SMP_Node_state( node );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Extract_from_scheduled( node );

    ( *update )( context, node, new_priority );

    if ( prepend_it ) {
      ( *enqueue_scheduled_lifo )( context, node );
    } else {
      ( *enqueue_scheduled_fifo )( context, node );
    }
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    ( *extract_from_ready )( context, node );

    ( *update )( context, node, new_priority );

    if ( prepend_it ) {
      ( *enqueue_lifo )( context, node, NULL );
    } else {
      ( *enqueue_fifo )( context, node, NULL );
    }
  } else {
    ( *update )( context, node, new_priority );

    if ( _Thread_Is_ready( thread ) ) {
      ( *ask_for_help )( context, thread, node );
    }
  }
}

static inline Thread_Control *_Scheduler_SMP_Ask_for_help_X(
  Scheduler_Context *context,
  Thread_Control *offers_help,
  Thread_Control *needs_help,
  Scheduler_SMP_Enqueue enqueue_fifo
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_own_node( offers_help );
  Thread_Control *next_needs_help = NULL;
  Thread_Control *previous_accepts_help;

  previous_accepts_help = node->Base.accepts_help;
  node->Base.accepts_help = needs_help;

  switch ( node->state ) {
    case SCHEDULER_SMP_NODE_READY:
      next_needs_help =
        _Scheduler_Ask_ready_node_for_help( &node->Base, needs_help );
      break;
    case SCHEDULER_SMP_NODE_SCHEDULED:
      next_needs_help = _Scheduler_Ask_scheduled_node_for_help(
        context,
        &node->Base,
        offers_help,
        needs_help,
        previous_accepts_help,
        _Scheduler_SMP_Release_idle_thread
      );
      break;
    case SCHEDULER_SMP_NODE_BLOCKED:
      if (
        _Scheduler_Ask_blocked_node_for_help(
          context,
          &node->Base,
          offers_help,
          needs_help
        )
      ) {
        _Scheduler_SMP_Node_change_state(
          &node->Base,
          SCHEDULER_SMP_NODE_READY
        );

        next_needs_help = ( *enqueue_fifo )(
          context,
          &node->Base,
          needs_help
        );
      }
      break;
  }

  return next_needs_help;
}

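/**
 * @brief Yields the processor of the thread.
 *
 * The node of the thread is extracted from the set of scheduled or ready
 * nodes and enqueued again in FIFO order.  If the node is blocked, nothing
 * is done and the thread itself is returned as needing help.
 *
 * @return The thread needing help, or NULL.
 */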
static inline Thread_Control *_Scheduler_SMP_Yield(
  Scheduler_Context *context,
  Thread_Control *thread,
  Scheduler_Node *node,
  Scheduler_SMP_Extract extract_from_ready,
  Scheduler_SMP_Enqueue enqueue_fifo,
  Scheduler_SMP_Enqueue_scheduled enqueue_scheduled_fifo
)
{
  Thread_Control *needs_help;
  Scheduler_SMP_Node_state node_state;

  node_state = _Scheduler_SMP_Node_state( node );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Extract_from_scheduled( node );

    needs_help = ( *enqueue_scheduled_fifo )( context, node );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    ( *extract_from_ready )( context, node );

    needs_help = ( *enqueue_fifo )( context, node, NULL );
  } else {
    needs_help = thread;
  }

  return needs_help;
}

static inline void _Scheduler_SMP_Insert_scheduled_lifo(
  Scheduler_Context *context,
  Scheduler_Node *node_to_insert
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  _Chain_Insert_ordered_unprotected(
    &self->Scheduled,
    &node_to_insert->Node,
    _Scheduler_SMP_Insert_priority_lifo_order
  );
}

static inline void _Scheduler_SMP_Insert_scheduled_fifo(
  Scheduler_Context *context,
  Scheduler_Node *node_to_insert
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  _Chain_Insert_ordered_unprotected(
    &self->Scheduled,
    &node_to_insert->Node,
    _Scheduler_SMP_Insert_priority_fifo_order
  );
}

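/**
 * @brief Asks for help to get a processor for the thread.
 *
 * If the thread is ready and its node is blocked, the node either preempts
 * the lowest scheduled node, provided the order function places it before
 * that node, or it is inserted into the set of ready nodes.
 *
 * @return True if the thread obtained a processor, otherwise false.
 */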
static inline bool _Scheduler_SMP_Ask_for_help(
  Scheduler_Context *context,
  Thread_Control *thread,
  Scheduler_Node *node,
  Chain_Node_order order,
  Scheduler_SMP_Insert insert_ready,
  Scheduler_SMP_Insert insert_scheduled,
  Scheduler_SMP_Move move_from_scheduled_to_ready,
  Scheduler_SMP_Get_lowest_scheduled get_lowest_scheduled,
  Scheduler_SMP_Allocate_processor allocate_processor
)
{
  Scheduler_Node *lowest_scheduled;
  ISR_lock_Context lock_context;
  bool success;

  lowest_scheduled = ( *get_lowest_scheduled )( context, node, order );

  _Thread_Scheduler_acquire_critical( thread, &lock_context );

  if (
    thread->Scheduler.state == THREAD_SCHEDULER_READY
      && _Scheduler_SMP_Node_state( node ) == SCHEDULER_SMP_NODE_BLOCKED
  ) {
    if ( ( *order )( &node->Node, &lowest_scheduled->Node ) ) {
      _Thread_Scheduler_cancel_need_for_help(
        thread,
        _Thread_Get_CPU( thread )
      );
      _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
      _Thread_Scheduler_release_critical( thread, &lock_context );

      _Scheduler_SMP_Preempt(
        context,
        node,
        lowest_scheduled,
        allocate_processor
      );

      ( *insert_scheduled )( context, node );
      ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

      _Scheduler_Release_idle_thread(
        context,
        lowest_scheduled,
        _Scheduler_SMP_Release_idle_thread
      );
      success = true;
    } else {
      _Thread_Scheduler_release_critical( thread, &lock_context );
      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
      ( *insert_ready )( context, node );
      success = false;
    }
  } else {
    _Thread_Scheduler_release_critical( thread, &lock_context );
    success = false;
  }

  return success;
}

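/**
 * @brief Reconsiders a help request of the thread.
 *
 * If the thread is already scheduled through another node and this node is
 * only in the ready state, the node is blocked and extracted from the set of
 * ready nodes.
 */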
static inline void _Scheduler_SMP_Reconsider_help_request(
  Scheduler_Context *context,
  Thread_Control *thread,
  Scheduler_Node *node,
  Scheduler_SMP_Extract extract_from_ready
)
{
  ISR_lock_Context lock_context;

  _Thread_Scheduler_acquire_critical( thread, &lock_context );

  if (
    thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
      && _Scheduler_SMP_Node_state( node ) == SCHEDULER_SMP_NODE_READY
  ) {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
    ( *extract_from_ready )( context, node );
  }

  _Thread_Scheduler_release_critical( thread, &lock_context );
}

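/**
 * @brief Withdraws the node from the thread, i.e. the node becomes blocked.
 *
 * If the node was scheduled, the thread changes to the next state and the
 * highest ready node is scheduled on the processor of the thread.  If the
 * node was ready, it is simply extracted from the set of ready nodes.
 */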
static inline void _Scheduler_SMP_Withdraw_node(
  Scheduler_Context *context,
  Thread_Control *thread,
  Scheduler_Node *node,
  Thread_Scheduler_state next_state,
  Scheduler_SMP_Extract extract_from_ready,
  Scheduler_SMP_Get_highest_ready get_highest_ready,
  Scheduler_SMP_Move move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor allocate_processor
)
{
  ISR_lock_Context lock_context;
  Scheduler_SMP_Node_state node_state;

  _Thread_Scheduler_acquire_critical( thread, &lock_context );

  node_state = _Scheduler_SMP_Node_state( node );
  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    Per_CPU_Control *thread_cpu;

    thread_cpu = _Thread_Get_CPU( thread );
    _Scheduler_Thread_change_state( thread, next_state );
    _Thread_Scheduler_release_critical( thread, &lock_context );

    _Scheduler_SMP_Extract_from_scheduled( node );
    _Scheduler_SMP_Schedule_highest_ready(
      context,
      node,
      thread_cpu,
      extract_from_ready,
      get_highest_ready,
      move_from_ready_to_scheduled,
      allocate_processor
    );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    _Thread_Scheduler_release_critical( thread, &lock_context );
    ( *extract_from_ready )( context, node );
  } else {
    _Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
    _Thread_Scheduler_release_critical( thread, &lock_context );
  }
}

/** @} */

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */