[9d83f58a] | 1 | /** |
---|
| 2 | * @file |
---|
| 3 | * |
---|
| 4 | * @brief SMP Scheduler Implementation |
---|
| 5 | * |
---|
| 6 | * @ingroup ScoreSchedulerSMP |
---|
| 7 | */ |
---|
| 8 | |
---|
| 9 | /* |
---|
[494c2e3] | 10 | * Copyright (c) 2013-2014 embedded brains GmbH. All rights reserved. |
---|
[9d83f58a] | 11 | * |
---|
| 12 | * embedded brains GmbH |
---|
| 13 | * Dornierstr. 4 |
---|
| 14 | * 82178 Puchheim |
---|
| 15 | * Germany |
---|
| 16 | * <rtems@embedded-brains.de> |
---|
| 17 | * |
---|
| 18 | * The license and distribution terms for this file may be |
---|
| 19 | * found in the file LICENSE in this distribution or at |
---|
[c499856] | 20 | * http://www.rtems.org/license/LICENSE. |
---|
[9d83f58a] | 21 | */ |
---|
| 22 | |
---|
| 23 | #ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H |
---|
| 24 | #define _RTEMS_SCORE_SCHEDULERSMPIMPL_H |
---|
| 25 | |
---|
| 26 | #include <rtems/score/schedulersmp.h> |
---|
[38b59a6] | 27 | #include <rtems/score/assert.h> |
---|
[48c4a55] | 28 | #include <rtems/score/chainimpl.h> |
---|
[38b59a6] | 29 | #include <rtems/score/schedulersimpleimpl.h> |
---|
[9d83f58a] | 30 | |
---|
| 31 | #ifdef __cplusplus |
---|
| 32 | extern "C" { |
---|
| 33 | #endif /* __cplusplus */ |
---|
| 34 | |
---|
| 35 | /** |
---|
| 36 | * @addtogroup ScoreSchedulerSMP |
---|
| 37 | * |
---|
[c6522a65] | 38 | * The scheduler nodes can be in four states |
---|
| 39 | * - @ref SCHEDULER_SMP_NODE_BLOCKED, |
---|
[f39f667a] | 40 | * - @ref SCHEDULER_SMP_NODE_SCHEDULED, and |
---|
| 41 | * - @ref SCHEDULER_SMP_NODE_READY. |
---|
[c6522a65] | 42 | * |
---|
[f39f667a] | 43 | * State transitions are triggered via basic operations |
---|
[c0bff5e] | 44 | * - _Scheduler_SMP_Enqueue_ordered(), |
---|
| 45 | * - _Scheduler_SMP_Enqueue_scheduled_ordered(), and |
---|
[f39f667a] | 46 | * - _Scheduler_SMP_Block(). |
---|
[c6522a65] | 47 | * |
---|
| 48 | * @dot |
---|
| 49 | * digraph { |
---|
| 50 | * node [style="filled"]; |
---|
| 51 | * |
---|
| 52 | * bs [label="BLOCKED"]; |
---|
| 53 | * ss [label="SCHEDULED", fillcolor="green"]; |
---|
| 54 | * rs [label="READY", fillcolor="red"]; |
---|
| 55 | * |
---|
| 56 | * edge [label="enqueue"]; |
---|
| 57 | * edge [fontcolor="darkgreen", color="darkgreen"]; |
---|
| 58 | * |
---|
| 59 | * bs -> ss; |
---|
| 60 | * |
---|
| 61 | * edge [fontcolor="red", color="red"]; |
---|
| 62 | * |
---|
| 63 | * bs -> rs; |
---|
| 64 | * |
---|
| 65 | * edge [label="enqueue other"]; |
---|
| 66 | * |
---|
| 67 | * ss -> rs; |
---|
| 68 | * |
---|
[f39f667a] | 69 | * edge [label="block"]; |
---|
[c6522a65] | 70 | * edge [fontcolor="black", color="black"]; |
---|
| 71 | * |
---|
[b532bb2c] | 72 | * ss -> bs; |
---|
[c6522a65] | 73 | * rs -> bs; |
---|
| 74 | * |
---|
[f39f667a] | 75 | * edge [label="block other"]; |
---|
[c6522a65] | 76 | * edge [fontcolor="darkgreen", color="darkgreen"]; |
---|
| 77 | * |
---|
| 78 | * rs -> ss; |
---|
| 79 | * } |
---|
| 80 | * @enddot |
---|
| 81 | * |
---|
| 82 | * During system initialization each processor of the scheduler instance starts |
---|
| 83 | * with an idle thread assigned to it. Lets have a look at an example with two |
---|
| 84 | * idle threads I and J with priority 5. We also have blocked threads A, B and |
---|
[2d96533] | 85 | * C with priorities 1, 2 and 3 respectively. The scheduler nodes are ordered |
---|
| 86 | * with respect to the thread priority from left to right in the below |
---|
| 87 | * diagrams. The highest priority node (lowest priority number) is the |
---|
| 88 | * leftmost node. Since the processor assignment is independent of the thread |
---|
| 89 | * priority the processor indices may move from one state to the other. |
---|
[c6522a65] | 90 | * |
---|
| 91 | * @dot |
---|
| 92 | * digraph { |
---|
| 93 | * node [style="filled"]; |
---|
| 94 | * edge [dir="none"]; |
---|
| 95 | * subgraph { |
---|
| 96 | * rank = same; |
---|
| 97 | * |
---|
| 98 | * i [label="I (5)", fillcolor="green"]; |
---|
| 99 | * j [label="J (5)", fillcolor="green"]; |
---|
| 100 | * a [label="A (1)"]; |
---|
| 101 | * b [label="B (2)"]; |
---|
| 102 | * c [label="C (3)"]; |
---|
| 103 | * i -> j; |
---|
| 104 | * } |
---|
| 105 | * |
---|
| 106 | * subgraph { |
---|
| 107 | * rank = same; |
---|
| 108 | * |
---|
| 109 | * p0 [label="PROCESSOR 0", shape="box"]; |
---|
| 110 | * p1 [label="PROCESSOR 1", shape="box"]; |
---|
| 111 | * } |
---|
| 112 | * |
---|
| 113 | * i -> p0; |
---|
| 114 | * j -> p1; |
---|
| 115 | * } |
---|
| 116 | * @enddot |
---|
| 117 | * |
---|
| 118 | * Lets start A. For this an enqueue operation is performed. |
---|
| 119 | * |
---|
| 120 | * @dot |
---|
| 121 | * digraph { |
---|
| 122 | * node [style="filled"]; |
---|
| 123 | * edge [dir="none"]; |
---|
| 124 | * |
---|
| 125 | * subgraph { |
---|
| 126 | * rank = same; |
---|
| 127 | * |
---|
| 128 | * i [label="I (5)", fillcolor="green"]; |
---|
| 129 | * j [label="J (5)", fillcolor="red"]; |
---|
| 130 | * a [label="A (1)", fillcolor="green"]; |
---|
| 131 | * b [label="B (2)"]; |
---|
| 132 | * c [label="C (3)"]; |
---|
| 133 | * a -> i; |
---|
| 134 | * } |
---|
| 135 | * |
---|
| 136 | * subgraph { |
---|
| 137 | * rank = same; |
---|
| 138 | * |
---|
| 139 | * p0 [label="PROCESSOR 0", shape="box"]; |
---|
| 140 | * p1 [label="PROCESSOR 1", shape="box"]; |
---|
| 141 | * } |
---|
| 142 | * |
---|
| 143 | * i -> p0; |
---|
| 144 | * a -> p1; |
---|
| 145 | * } |
---|
| 146 | * @enddot |
---|
| 147 | * |
---|
| 148 | * Lets start C. |
---|
| 149 | * |
---|
| 150 | * @dot |
---|
| 151 | * digraph { |
---|
| 152 | * node [style="filled"]; |
---|
| 153 | * edge [dir="none"]; |
---|
| 154 | * |
---|
| 155 | * subgraph { |
---|
| 156 | * rank = same; |
---|
| 157 | * |
---|
| 158 | * a [label="A (1)", fillcolor="green"]; |
---|
| 159 | * c [label="C (3)", fillcolor="green"]; |
---|
| 160 | * i [label="I (5)", fillcolor="red"]; |
---|
| 161 | * j [label="J (5)", fillcolor="red"]; |
---|
| 162 | * b [label="B (2)"]; |
---|
| 163 | * a -> c; |
---|
| 164 | * i -> j; |
---|
| 165 | * } |
---|
| 166 | * |
---|
| 167 | * subgraph { |
---|
| 168 | * rank = same; |
---|
| 169 | * |
---|
| 170 | * p0 [label="PROCESSOR 0", shape="box"]; |
---|
| 171 | * p1 [label="PROCESSOR 1", shape="box"]; |
---|
| 172 | * } |
---|
| 173 | * |
---|
| 174 | * c -> p0; |
---|
| 175 | * a -> p1; |
---|
| 176 | * } |
---|
| 177 | * @enddot |
---|
| 178 | * |
---|
| 179 | * Lets start B. |
---|
| 180 | * |
---|
| 181 | * @dot |
---|
| 182 | * digraph { |
---|
| 183 | * node [style="filled"]; |
---|
| 184 | * edge [dir="none"]; |
---|
| 185 | * |
---|
| 186 | * subgraph { |
---|
| 187 | * rank = same; |
---|
| 188 | * |
---|
| 189 | * a [label="A (1)", fillcolor="green"]; |
---|
| 190 | * b [label="B (2)", fillcolor="green"]; |
---|
| 191 | * c [label="C (3)", fillcolor="red"]; |
---|
| 192 | * i [label="I (5)", fillcolor="red"]; |
---|
| 193 | * j [label="J (5)", fillcolor="red"]; |
---|
| 194 | * a -> b; |
---|
| 195 | * c -> i -> j; |
---|
| 196 | * } |
---|
| 197 | * |
---|
| 198 | * subgraph { |
---|
| 199 | * rank = same; |
---|
| 200 | * |
---|
| 201 | * p0 [label="PROCESSOR 0", shape="box"]; |
---|
| 202 | * p1 [label="PROCESSOR 1", shape="box"]; |
---|
| 203 | * } |
---|
| 204 | * |
---|
| 205 | * b -> p0; |
---|
| 206 | * a -> p1; |
---|
| 207 | * } |
---|
| 208 | * @enddot |
---|
| 209 | * |
---|
[f39f667a] | 210 | * Lets change the priority of thread A to 4. |
---|
[c6522a65] | 211 | * |
---|
| 212 | * @dot |
---|
| 213 | * digraph { |
---|
| 214 | * node [style="filled"]; |
---|
| 215 | * edge [dir="none"]; |
---|
| 216 | * |
---|
| 217 | * subgraph { |
---|
| 218 | * rank = same; |
---|
| 219 | * |
---|
| 220 | * b [label="B (2)", fillcolor="green"]; |
---|
| 221 | * c [label="C (3)", fillcolor="green"]; |
---|
| 222 | * a [label="A (4)", fillcolor="red"]; |
---|
| 223 | * i [label="I (5)", fillcolor="red"]; |
---|
| 224 | * j [label="J (5)", fillcolor="red"]; |
---|
| 225 | * b -> c; |
---|
| 226 | * a -> i -> j; |
---|
| 227 | * } |
---|
| 228 | * |
---|
| 229 | * subgraph { |
---|
| 230 | * rank = same; |
---|
| 231 | * |
---|
| 232 | * p0 [label="PROCESSOR 0", shape="box"]; |
---|
| 233 | * p1 [label="PROCESSOR 1", shape="box"]; |
---|
| 234 | * } |
---|
| 235 | * |
---|
| 236 | * b -> p0; |
---|
| 237 | * c -> p1; |
---|
| 238 | * } |
---|
| 239 | * @enddot |
---|
| 240 | * |
---|
[f39f667a] | 241 | * Now perform a blocking operation with thread B. Please note that thread A |
---|
| 242 | * migrated now from processor 0 to processor 1 and thread C still executes on |
---|
| 243 | * processor 1. |
---|
[c6522a65] | 244 | * |
---|
| 245 | * @dot |
---|
| 246 | * digraph { |
---|
| 247 | * node [style="filled"]; |
---|
| 248 | * edge [dir="none"]; |
---|
| 249 | * |
---|
| 250 | * subgraph { |
---|
| 251 | * rank = same; |
---|
| 252 | * |
---|
| 253 | * c [label="C (3)", fillcolor="green"]; |
---|
[f39f667a] | 254 | * a [label="A (4)", fillcolor="green"]; |
---|
[c6522a65] | 255 | * i [label="I (5)", fillcolor="red"]; |
---|
| 256 | * j [label="J (5)", fillcolor="red"]; |
---|
[f39f667a] | 257 | * b [label="B (2)"]; |
---|
| 258 | * c -> a; |
---|
[c6522a65] | 259 | * i -> j; |
---|
| 260 | * } |
---|
| 261 | * |
---|
| 262 | * subgraph { |
---|
| 263 | * rank = same; |
---|
| 264 | * |
---|
| 265 | * p0 [label="PROCESSOR 0", shape="box"]; |
---|
| 266 | * p1 [label="PROCESSOR 1", shape="box"]; |
---|
| 267 | * } |
---|
| 268 | * |
---|
[f39f667a] | 269 | * a -> p0; |
---|
[c6522a65] | 270 | * c -> p1; |
---|
| 271 | * } |
---|
| 272 | * @enddot |
---|
| 273 | * |
---|
[9d83f58a] | 274 | * @{ |
---|
| 275 | */ |
---|
| 276 | |
---|
[8f0c7a46] | 277 | typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )( |
---|
[238629f] | 278 | Scheduler_Context *context, |
---|
[8f0c7a46] | 279 | Scheduler_Node *node |
---|
[238629f] | 280 | ); |
---|
| 281 | |
---|
[8f0c7a46] | 282 | typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_scheduled )( |
---|
[238629f] | 283 | Scheduler_Context *context, |
---|
[8f0c7a46] | 284 | Scheduler_Node *filter, |
---|
[238629f] | 285 | Chain_Node_order order |
---|
[48c4a55] | 286 | ); |
---|
| 287 | |
---|
| 288 | typedef void ( *Scheduler_SMP_Extract )( |
---|
[3730a07f] | 289 | Scheduler_Context *context, |
---|
[8f0c7a46] | 290 | Scheduler_Node *node_to_extract |
---|
[48c4a55] | 291 | ); |
---|
| 292 | |
---|
| 293 | typedef void ( *Scheduler_SMP_Insert )( |
---|
[3730a07f] | 294 | Scheduler_Context *context, |
---|
[8f0c7a46] | 295 | Scheduler_Node *node_to_insert |
---|
[48c4a55] | 296 | ); |
---|
| 297 | |
---|
| 298 | typedef void ( *Scheduler_SMP_Move )( |
---|
[3730a07f] | 299 | Scheduler_Context *context, |
---|
[8f0c7a46] | 300 | Scheduler_Node *node_to_move |
---|
[48c4a55] | 301 | ); |
---|
| 302 | |
---|
[f39f667a] | 303 | typedef void ( *Scheduler_SMP_Update )( |
---|
| 304 | Scheduler_Context *context, |
---|
[8f0c7a46] | 305 | Scheduler_Node *node_to_update, |
---|
[d9b54da] | 306 | Priority_Control new_priority |
---|
[f39f667a] | 307 | ); |
---|
| 308 | |
---|
[8568341] | 309 | typedef Thread_Control *( *Scheduler_SMP_Enqueue )( |
---|
| 310 | Scheduler_Context *context, |
---|
| 311 | Scheduler_Node *node_to_enqueue, |
---|
| 312 | Thread_Control *needs_help |
---|
| 313 | ); |
---|
| 314 | |
---|
| 315 | typedef Thread_Control *( *Scheduler_SMP_Enqueue_scheduled )( |
---|
[f39f667a] | 316 | Scheduler_Context *context, |
---|
[8f0c7a46] | 317 | Scheduler_Node *node_to_enqueue |
---|
[f39f667a] | 318 | ); |
---|
| 319 | |
---|
[238629f] | 320 | typedef void ( *Scheduler_SMP_Allocate_processor )( |
---|
[8f0c7a46] | 321 | Scheduler_Context *context, |
---|
[19e41767] | 322 | Thread_Control *scheduled, |
---|
| 323 | Thread_Control *victim |
---|
[238629f] | 324 | ); |
---|
| 325 | |
---|
[8f0c7a46] | 326 | static inline bool _Scheduler_SMP_Insert_priority_lifo_order( |
---|
| 327 | const Chain_Node *to_insert, |
---|
| 328 | const Chain_Node *next |
---|
| 329 | ) |
---|
| 330 | { |
---|
| 331 | const Scheduler_SMP_Node *node_to_insert = |
---|
| 332 | (const Scheduler_SMP_Node *) to_insert; |
---|
| 333 | const Scheduler_SMP_Node *node_next = |
---|
| 334 | (const Scheduler_SMP_Node *) next; |
---|
| 335 | |
---|
| 336 | return node_to_insert->priority <= node_next->priority; |
---|
| 337 | } |
---|
| 338 | |
---|
| 339 | static inline bool _Scheduler_SMP_Insert_priority_fifo_order( |
---|
| 340 | const Chain_Node *to_insert, |
---|
| 341 | const Chain_Node *next |
---|
| 342 | ) |
---|
| 343 | { |
---|
| 344 | const Scheduler_SMP_Node *node_to_insert = |
---|
| 345 | (const Scheduler_SMP_Node *) to_insert; |
---|
| 346 | const Scheduler_SMP_Node *node_next = |
---|
| 347 | (const Scheduler_SMP_Node *) next; |
---|
| 348 | |
---|
| 349 | return node_to_insert->priority < node_next->priority; |
---|
| 350 | } |
---|
| 351 | |
---|
[3730a07f] | 352 | static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self( |
---|
| 353 | Scheduler_Context *context |
---|
| 354 | ) |
---|
| 355 | { |
---|
| 356 | return (Scheduler_SMP_Context *) context; |
---|
| 357 | } |
---|
| 358 | |
---|
[494c2e3] | 359 | static inline void _Scheduler_SMP_Initialize( |
---|
[e1598a6] | 360 | Scheduler_SMP_Context *self |
---|
[494c2e3] | 361 | ) |
---|
[9d83f58a] | 362 | { |
---|
[494c2e3] | 363 | _Chain_Initialize_empty( &self->Scheduled ); |
---|
[5c3d250] | 364 | _Chain_Initialize_empty( &self->Idle_threads ); |
---|
[9d83f58a] | 365 | } |
---|
| 366 | |
---|
[08d9760] | 367 | static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_node( |
---|
[beab7329] | 368 | Thread_Control *thread |
---|
| 369 | ) |
---|
| 370 | { |
---|
[08d9760] | 371 | return (Scheduler_SMP_Node *) _Scheduler_Thread_get_node( thread ); |
---|
[beab7329] | 372 | } |
---|
| 373 | |
---|
[5c3d250] | 374 | static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_own_node( |
---|
| 375 | Thread_Control *thread |
---|
| 376 | ) |
---|
| 377 | { |
---|
| 378 | return (Scheduler_SMP_Node *) _Scheduler_Thread_get_own_node( thread ); |
---|
| 379 | } |
---|
| 380 | |
---|
[8f0c7a46] | 381 | static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast( |
---|
| 382 | Scheduler_Node *node |
---|
| 383 | ) |
---|
| 384 | { |
---|
| 385 | return (Scheduler_SMP_Node *) node; |
---|
| 386 | } |
---|
| 387 | |
---|
[beab7329] | 388 | static inline void _Scheduler_SMP_Node_initialize( |
---|
[8f0c7a46] | 389 | Scheduler_SMP_Node *node, |
---|
| 390 | Thread_Control *thread |
---|
[beab7329] | 391 | ) |
---|
| 392 | { |
---|
[8f0c7a46] | 393 | _Scheduler_Node_do_initialize( &node->Base, thread ); |
---|
[beab7329] | 394 | node->state = SCHEDULER_SMP_NODE_BLOCKED; |
---|
| 395 | } |
---|
| 396 | |
---|
[8f0c7a46] | 397 | static inline void _Scheduler_SMP_Node_update_priority( |
---|
| 398 | Scheduler_SMP_Node *node, |
---|
| 399 | Priority_Control new_priority |
---|
| 400 | ) |
---|
| 401 | { |
---|
| 402 | node->priority = new_priority; |
---|
| 403 | } |
---|
| 404 | |
---|
[f39f667a] | 405 | extern const bool _Scheduler_SMP_Node_valid_state_changes[ 3 ][ 3 ]; |
---|
[beab7329] | 406 | |
---|
| 407 | static inline void _Scheduler_SMP_Node_change_state( |
---|
[d9b54da] | 408 | Scheduler_SMP_Node *node, |
---|
[beab7329] | 409 | Scheduler_SMP_Node_state new_state |
---|
| 410 | ) |
---|
| 411 | { |
---|
| 412 | _Assert( |
---|
| 413 | _Scheduler_SMP_Node_valid_state_changes[ node->state ][ new_state ] |
---|
| 414 | ); |
---|
| 415 | |
---|
| 416 | node->state = new_state; |
---|
| 417 | } |
---|
| 418 | |
---|
[38b59a6] | 419 | static inline bool _Scheduler_SMP_Is_processor_owned_by_us( |
---|
[8f0c7a46] | 420 | const Scheduler_Context *context, |
---|
| 421 | const Per_CPU_Control *cpu |
---|
[38b59a6] | 422 | ) |
---|
| 423 | { |
---|
[8f0c7a46] | 424 | return cpu->scheduler_context == context; |
---|
[38b59a6] | 425 | } |
---|
| 426 | |
---|
[5c3d250] | 427 | static inline Thread_Control *_Scheduler_SMP_Get_idle_thread( |
---|
| 428 | Scheduler_Context *context, |
---|
| 429 | Scheduler_SMP_Extract extract_from_ready |
---|
| 430 | ) |
---|
| 431 | { |
---|
| 432 | Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context ); |
---|
| 433 | Thread_Control *idle = (Thread_Control *) |
---|
| 434 | _Chain_Get_first_unprotected( &self->Idle_threads ); |
---|
| 435 | Scheduler_Node *own_node = _Scheduler_Thread_get_own_node( idle ); |
---|
| 436 | |
---|
| 437 | ( *extract_from_ready )( &self->Base, own_node ); |
---|
| 438 | |
---|
| 439 | _Assert( &idle->Object.Node != _Chain_Tail( &self->Idle_threads ) ); |
---|
| 440 | |
---|
| 441 | return idle; |
---|
| 442 | } |
---|
| 443 | |
---|
| 444 | static inline void _Scheduler_SMP_Release_idle_thread( |
---|
| 445 | Scheduler_Context *context, |
---|
| 446 | Thread_Control *idle, |
---|
| 447 | Scheduler_SMP_Insert insert_ready |
---|
| 448 | ) |
---|
| 449 | { |
---|
| 450 | Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context ); |
---|
| 451 | Scheduler_Node *own_node = _Scheduler_Thread_get_own_node( idle ); |
---|
| 452 | |
---|
| 453 | _Chain_Prepend_unprotected( &self->Idle_threads, &idle->Object.Node ); |
---|
| 454 | ( *insert_ready )( context, own_node ); |
---|
| 455 | } |
---|
| 456 | |
---|
[19e41767] | 457 | static inline void _Scheduler_SMP_Allocate_processor_lazy( |
---|
[8f0c7a46] | 458 | Scheduler_Context *context, |
---|
[19e41767] | 459 | Thread_Control *scheduled_thread, |
---|
| 460 | Thread_Control *victim_thread |
---|
[fc2ad63] | 461 | ) |
---|
| 462 | { |
---|
[8f0c7a46] | 463 | Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread ); |
---|
| 464 | Per_CPU_Control *victim_cpu = _Thread_Get_CPU( victim_thread ); |
---|
[38b59a6] | 465 | Per_CPU_Control *cpu_self = _Per_CPU_Get(); |
---|
[fc2ad63] | 466 | Thread_Control *heir; |
---|
| 467 | |
---|
[38b59a6] | 468 | _Assert( _ISR_Get_level() != 0 ); |
---|
[fc2ad63] | 469 | |
---|
[8f0c7a46] | 470 | if ( _Thread_Is_executing_on_a_processor( scheduled_thread ) ) { |
---|
| 471 | if ( _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu ) ) { |
---|
| 472 | heir = scheduled_cpu->heir; |
---|
[835b88b] | 473 | _Thread_Dispatch_update_heir( |
---|
[8f0c7a46] | 474 | cpu_self, |
---|
| 475 | scheduled_cpu, |
---|
| 476 | scheduled_thread |
---|
| 477 | ); |
---|
[38b59a6] | 478 | } else { |
---|
| 479 | /* We have to force a migration to our processor set */ |
---|
[8f0c7a46] | 480 | _Assert( |
---|
| 481 | scheduled_thread->Scheduler.debug_real_cpu->heir != scheduled_thread |
---|
| 482 | ); |
---|
| 483 | heir = scheduled_thread; |
---|
[38b59a6] | 484 | } |
---|
[fc2ad63] | 485 | } else { |
---|
[8f0c7a46] | 486 | heir = scheduled_thread; |
---|
[fc2ad63] | 487 | } |
---|
| 488 | |
---|
[8f0c7a46] | 489 | if ( heir != victim_thread ) { |
---|
| 490 | _Thread_Set_CPU( heir, victim_cpu ); |
---|
[835b88b] | 491 | _Thread_Dispatch_update_heir( cpu_self, victim_cpu, heir ); |
---|
[fc2ad63] | 492 | } |
---|
| 493 | } |
---|
| 494 | |
---|
[19e41767] | 495 | static inline void _Scheduler_SMP_Allocate_processor( |
---|
| 496 | Scheduler_Context *context, |
---|
| 497 | Scheduler_Node *scheduled, |
---|
| 498 | Scheduler_Node *victim, |
---|
| 499 | Scheduler_SMP_Allocate_processor allocate_processor |
---|
| 500 | ) |
---|
| 501 | { |
---|
[ac532f3] | 502 | Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled ); |
---|
| 503 | Thread_Control *victim_thread = _Scheduler_Node_get_user( victim ); |
---|
[19e41767] | 504 | |
---|
| 505 | _Scheduler_SMP_Node_change_state( |
---|
| 506 | _Scheduler_SMP_Node_downcast( scheduled ), |
---|
| 507 | SCHEDULER_SMP_NODE_SCHEDULED |
---|
| 508 | ); |
---|
[5c3d250] | 509 | _Scheduler_Thread_change_state( scheduled_thread, THREAD_SCHEDULER_SCHEDULED ); |
---|
[19e41767] | 510 | |
---|
| 511 | ( *allocate_processor )( context, scheduled_thread, victim_thread ); |
---|
| 512 | } |
---|
| 513 | |
---|
[8f0c7a46] | 514 | static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled( |
---|
[238629f] | 515 | Scheduler_Context *context, |
---|
[8f0c7a46] | 516 | Scheduler_Node *filter, |
---|
[238629f] | 517 | Chain_Node_order order |
---|
[aea4a91] | 518 | ) |
---|
| 519 | { |
---|
[238629f] | 520 | Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context ); |
---|
[494c2e3] | 521 | Chain_Control *scheduled = &self->Scheduled; |
---|
[8f0c7a46] | 522 | Scheduler_Node *lowest_scheduled = |
---|
| 523 | (Scheduler_Node *) _Chain_Last( scheduled ); |
---|
[aea4a91] | 524 | |
---|
[8f0c7a46] | 525 | (void) filter; |
---|
| 526 | (void) order; |
---|
[aea4a91] | 527 | |
---|
[5c632c4] | 528 | _Assert( &lowest_scheduled->Node != _Chain_Tail( scheduled ) ); |
---|
[238629f] | 529 | |
---|
[8f0c7a46] | 530 | return lowest_scheduled; |
---|
[aea4a91] | 531 | } |
---|
| 532 | |
---|
[5c3d250] | 533 | static inline Thread_Control *_Scheduler_SMP_Enqueue_to_scheduled( |
---|
| 534 | Scheduler_Context *context, |
---|
| 535 | Scheduler_Node *node, |
---|
| 536 | Scheduler_Node *lowest_scheduled, |
---|
| 537 | Scheduler_SMP_Insert insert_scheduled, |
---|
| 538 | Scheduler_SMP_Move move_from_scheduled_to_ready, |
---|
| 539 | Scheduler_SMP_Allocate_processor allocate_processor, |
---|
| 540 | Scheduler_Release_idle_thread release_idle_thread |
---|
| 541 | ) |
---|
| 542 | { |
---|
| 543 | Thread_Control *user = _Scheduler_Node_get_user( node ); |
---|
| 544 | Thread_Control *lowest_scheduled_user = |
---|
| 545 | _Scheduler_Node_get_user( lowest_scheduled ); |
---|
| 546 | Thread_Control *needs_help; |
---|
| 547 | Thread_Control *idle; |
---|
| 548 | |
---|
| 549 | _Scheduler_SMP_Node_change_state( |
---|
| 550 | _Scheduler_SMP_Node_downcast( lowest_scheduled ), |
---|
| 551 | SCHEDULER_SMP_NODE_READY |
---|
| 552 | ); |
---|
| 553 | _Scheduler_Thread_change_state( |
---|
| 554 | lowest_scheduled_user, |
---|
| 555 | THREAD_SCHEDULER_READY |
---|
| 556 | ); |
---|
| 557 | |
---|
| 558 | _Scheduler_Thread_set_node( user, node ); |
---|
| 559 | |
---|
| 560 | _Scheduler_SMP_Allocate_processor( |
---|
| 561 | context, |
---|
| 562 | node, |
---|
| 563 | lowest_scheduled, |
---|
| 564 | allocate_processor |
---|
| 565 | ); |
---|
| 566 | |
---|
| 567 | ( *insert_scheduled )( context, node ); |
---|
| 568 | ( *move_from_scheduled_to_ready )( context, lowest_scheduled ); |
---|
| 569 | |
---|
| 570 | idle = _Scheduler_Release_idle_thread( |
---|
| 571 | context, |
---|
| 572 | lowest_scheduled, |
---|
| 573 | release_idle_thread |
---|
| 574 | ); |
---|
| 575 | if ( idle == NULL ) { |
---|
| 576 | needs_help = lowest_scheduled_user; |
---|
| 577 | } else { |
---|
| 578 | needs_help = NULL; |
---|
| 579 | } |
---|
| 580 | |
---|
| 581 | return needs_help; |
---|
| 582 | } |
---|
| 583 | |
---|
[c6522a65] | 584 | /** |
---|
[8f0c7a46] | 585 | * @brief Enqueues a node according to the specified order function. |
---|
[c6522a65] | 586 | * |
---|
[8f0c7a46] | 587 | * The node must not be in the scheduled state. |
---|
[c0bff5e] | 588 | * |
---|
[c6522a65] | 589 | * @param[in] context The scheduler instance context. |
---|
[8f0c7a46] | 590 | * @param[in] node The node to enqueue. |
---|
[8568341] | 591 | * @param[in] needs_help The thread needing help in case the node cannot be |
---|
| 592 | * scheduled. |
---|
[c6522a65] | 593 | * @param[in] order The order function. |
---|
| 594 | * @param[in] insert_ready Function to insert a node into the set of ready |
---|
[238629f] | 595 | * nodes. |
---|
[c6522a65] | 596 | * @param[in] insert_scheduled Function to insert a node into the set of |
---|
[238629f] | 597 | * scheduled nodes. |
---|
[c6522a65] | 598 | * @param[in] move_from_scheduled_to_ready Function to move a node from the set |
---|
[238629f] | 599 | * of scheduled nodes to the set of ready nodes. |
---|
[8f0c7a46] | 600 | * @param[in] get_lowest_scheduled Function to select the node from the |
---|
[82df6f3] | 601 | * scheduled nodes to replace. It may not be possible to find one, in this |
---|
| 602 | * case a pointer must be returned so that the order functions returns false |
---|
| 603 | * if this pointer is passed as the second argument to the order function. |
---|
[8f0c7a46] | 604 | * @param[in] allocate_processor Function to allocate a processor to a node |
---|
[238629f] | 605 | * based on the rules of the scheduler. |
---|
[5c3d250] | 606 | * @param[in] release_idle_thread Function to release an idle thread. |
---|
[c6522a65] | 607 | */ |
---|
[8568341] | 608 | static inline Thread_Control *_Scheduler_SMP_Enqueue_ordered( |
---|
[d9b54da] | 609 | Scheduler_Context *context, |
---|
[8f0c7a46] | 610 | Scheduler_Node *node, |
---|
[8568341] | 611 | Thread_Control *needs_help, |
---|
[d9b54da] | 612 | Chain_Node_order order, |
---|
[238629f] | 613 | Scheduler_SMP_Insert insert_ready, |
---|
| 614 | Scheduler_SMP_Insert insert_scheduled, |
---|
| 615 | Scheduler_SMP_Move move_from_scheduled_to_ready, |
---|
| 616 | Scheduler_SMP_Get_lowest_scheduled get_lowest_scheduled, |
---|
[5c3d250] | 617 | Scheduler_SMP_Allocate_processor allocate_processor, |
---|
| 618 | Scheduler_Release_idle_thread release_idle_thread |
---|
[48c4a55] | 619 | ) |
---|
[c0bff5e] | 620 | { |
---|
[8f0c7a46] | 621 | Scheduler_Node *lowest_scheduled = |
---|
| 622 | ( *get_lowest_scheduled )( context, node, order ); |
---|
[c0bff5e] | 623 | |
---|
[8f0c7a46] | 624 | if ( ( *order )( &node->Node, &lowest_scheduled->Node ) ) { |
---|
[5c3d250] | 625 | needs_help = _Scheduler_SMP_Enqueue_to_scheduled( |
---|
[19e41767] | 626 | context, |
---|
| 627 | node, |
---|
| 628 | lowest_scheduled, |
---|
[5c3d250] | 629 | insert_scheduled, |
---|
| 630 | move_from_scheduled_to_ready, |
---|
| 631 | allocate_processor, |
---|
| 632 | release_idle_thread |
---|
[19e41767] | 633 | ); |
---|
[c0bff5e] | 634 | } else { |
---|
[8f0c7a46] | 635 | ( *insert_ready )( context, node ); |
---|
[c0bff5e] | 636 | } |
---|
[8568341] | 637 | |
---|
| 638 | return needs_help; |
---|
[c0bff5e] | 639 | } |
---|
| 640 | |
---|
| 641 | /** |
---|
[8f0c7a46] | 642 | * @brief Enqueues a scheduled node according to the specified order |
---|
[c0bff5e] | 643 | * function. |
---|
| 644 | * |
---|
| 645 | * @param[in] context The scheduler instance context. |
---|
[8f0c7a46] | 646 | * @param[in] node The node to enqueue. |
---|
[c0bff5e] | 647 | * @param[in] order The order function. |
---|
[5c3d250] | 648 | * @param[in] extract_from_ready Function to extract a node from the set of |
---|
| 649 | * ready nodes. |
---|
[c0bff5e] | 650 | * @param[in] get_highest_ready Function to get the highest ready node. |
---|
| 651 | * @param[in] insert_ready Function to insert a node into the set of ready |
---|
[238629f] | 652 | * nodes. |
---|
[c0bff5e] | 653 | * @param[in] insert_scheduled Function to insert a node into the set of |
---|
[238629f] | 654 | * scheduled nodes. |
---|
[c0bff5e] | 655 | * @param[in] move_from_ready_to_scheduled Function to move a node from the set |
---|
[238629f] | 656 | * of ready nodes to the set of scheduled nodes. |
---|
[8f0c7a46] | 657 | * @param[in] allocate_processor Function to allocate a processor to a node |
---|
[238629f] | 658 | * based on the rules of the scheduler. |
---|
[5c3d250] | 659 | * @param[in] get_idle_thread Function to get an idle thread. |
---|
| 660 | * @param[in] release_idle_thread Function to release an idle thread. |
---|
[c0bff5e] | 661 | */ |
---|
[8568341] | 662 | static inline Thread_Control *_Scheduler_SMP_Enqueue_scheduled_ordered( |
---|
[d9b54da] | 663 | Scheduler_Context *context, |
---|
[8f0c7a46] | 664 | Scheduler_Node *node, |
---|
[238629f] | 665 | Chain_Node_order order, |
---|
[5c3d250] | 666 | Scheduler_SMP_Extract extract_from_ready, |
---|
[238629f] | 667 | Scheduler_SMP_Get_highest_ready get_highest_ready, |
---|
| 668 | Scheduler_SMP_Insert insert_ready, |
---|
| 669 | Scheduler_SMP_Insert insert_scheduled, |
---|
| 670 | Scheduler_SMP_Move move_from_ready_to_scheduled, |
---|
[5c3d250] | 671 | Scheduler_SMP_Allocate_processor allocate_processor, |
---|
| 672 | Scheduler_Get_idle_thread get_idle_thread, |
---|
| 673 | Scheduler_Release_idle_thread release_idle_thread |
---|
[c0bff5e] | 674 | ) |
---|
[48c4a55] | 675 | { |
---|
[8568341] | 676 | Thread_Control *needs_help; |
---|
[48c4a55] | 677 | |
---|
[5c3d250] | 678 | while ( true ) { |
---|
| 679 | Scheduler_Node *highest_ready = ( *get_highest_ready )( context, node ); |
---|
| 680 | |
---|
| 681 | _Assert( highest_ready != NULL ); |
---|
| 682 | |
---|
| 683 | /* |
---|
| 684 | * The node has been extracted from the scheduled chain. We have to place |
---|
| 685 | * it now on the scheduled or ready set. |
---|
| 686 | */ |
---|
| 687 | if ( ( *order )( &node->Node, &highest_ready->Node ) ) { |
---|
| 688 | ( *insert_scheduled )( context, node ); |
---|
| 689 | |
---|
| 690 | needs_help = NULL; |
---|
| 691 | |
---|
| 692 | break; |
---|
| 693 | } else if ( |
---|
| 694 | _Scheduler_Try_to_schedule_node( |
---|
| 695 | context, |
---|
| 696 | highest_ready, |
---|
| 697 | get_idle_thread |
---|
| 698 | ) |
---|
| 699 | ) { |
---|
| 700 | Thread_Control *user = _Scheduler_Node_get_user( node ); |
---|
| 701 | Thread_Control *idle; |
---|
| 702 | |
---|
| 703 | _Scheduler_SMP_Node_change_state( |
---|
| 704 | _Scheduler_SMP_Node_downcast( node ), |
---|
| 705 | SCHEDULER_SMP_NODE_READY |
---|
| 706 | ); |
---|
| 707 | _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_READY ); |
---|
[c0bff5e] | 708 | |
---|
[5c3d250] | 709 | _Scheduler_SMP_Allocate_processor( |
---|
| 710 | context, |
---|
| 711 | highest_ready, |
---|
| 712 | node, |
---|
| 713 | allocate_processor |
---|
| 714 | ); |
---|
[8568341] | 715 | |
---|
[5c3d250] | 716 | ( *insert_ready )( context, node ); |
---|
| 717 | ( *move_from_ready_to_scheduled )( context, highest_ready ); |
---|
[19e41767] | 718 | |
---|
[5c3d250] | 719 | idle = _Scheduler_Release_idle_thread( |
---|
| 720 | context, |
---|
| 721 | node, |
---|
| 722 | release_idle_thread |
---|
| 723 | ); |
---|
| 724 | if ( idle == NULL ) { |
---|
| 725 | needs_help = user; |
---|
| 726 | } else { |
---|
| 727 | needs_help = NULL; |
---|
| 728 | } |
---|
[19e41767] | 729 | |
---|
[5c3d250] | 730 | break; |
---|
| 731 | } else { |
---|
| 732 | _Scheduler_SMP_Node_change_state( |
---|
| 733 | _Scheduler_SMP_Node_downcast( highest_ready ), |
---|
| 734 | SCHEDULER_SMP_NODE_BLOCKED |
---|
| 735 | ); |
---|
[8568341] | 736 | |
---|
[5c3d250] | 737 | ( *extract_from_ready )( context, highest_ready ); |
---|
| 738 | } |
---|
[48c4a55] | 739 | } |
---|
[8568341] | 740 | |
---|
| 741 | return needs_help; |
---|
[48c4a55] | 742 | } |
---|
| 743 | |
---|
[c0bff5e] | 744 | static inline void _Scheduler_SMP_Extract_from_scheduled( |
---|
[8f0c7a46] | 745 | Scheduler_Node *node |
---|
[c0bff5e] | 746 | ) |
---|
[f39f667a] | 747 | { |
---|
[8f0c7a46] | 748 | _Chain_Extract_unprotected( &node->Node ); |
---|
[f39f667a] | 749 | } |
---|
| 750 | |
---|
[48c4a55] | 751 | static inline void _Scheduler_SMP_Schedule_highest_ready( |
---|
[d9b54da] | 752 | Scheduler_Context *context, |
---|
[8f0c7a46] | 753 | Scheduler_Node *victim, |
---|
[5c3d250] | 754 | Scheduler_SMP_Extract extract_from_ready, |
---|
[d9b54da] | 755 | Scheduler_SMP_Get_highest_ready get_highest_ready, |
---|
| 756 | Scheduler_SMP_Move move_from_ready_to_scheduled, |
---|
[5c3d250] | 757 | Scheduler_SMP_Allocate_processor allocate_processor, |
---|
| 758 | Scheduler_Get_idle_thread get_idle_thread |
---|
[48c4a55] | 759 | ) |
---|
| 760 | { |
---|
[5c3d250] | 761 | while ( true ) { |
---|
| 762 | Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim ); |
---|
| 763 | |
---|
| 764 | _Assert( highest_ready != NULL ); |
---|
| 765 | |
---|
| 766 | if ( |
---|
| 767 | _Scheduler_Try_to_schedule_node( |
---|
| 768 | context, |
---|
| 769 | highest_ready, |
---|
| 770 | get_idle_thread |
---|
| 771 | ) |
---|
| 772 | ) { |
---|
| 773 | _Scheduler_SMP_Allocate_processor( |
---|
| 774 | context, |
---|
| 775 | highest_ready, |
---|
| 776 | victim, |
---|
| 777 | allocate_processor |
---|
| 778 | ); |
---|
[48c4a55] | 779 | |
---|
[5c3d250] | 780 | ( *move_from_ready_to_scheduled )( context, highest_ready ); |
---|
| 781 | |
---|
| 782 | break; |
---|
| 783 | } else { |
---|
| 784 | _Scheduler_SMP_Node_change_state( |
---|
| 785 | _Scheduler_SMP_Node_downcast( highest_ready ), |
---|
| 786 | SCHEDULER_SMP_NODE_BLOCKED |
---|
| 787 | ); |
---|
[19e41767] | 788 | |
---|
[5c3d250] | 789 | ( *extract_from_ready )( context, highest_ready ); |
---|
| 790 | } |
---|
| 791 | } |
---|
[48c4a55] | 792 | } |
---|
| 793 | |
---|
/**
 * @brief Blocks a thread.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread of the scheduling operation.
 * @param[in] extract_from_ready Function to extract a node from the set of
 *   ready nodes.
 * @param[in] get_highest_ready Function to get the highest ready node.
 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
 *   of ready nodes to the set of scheduled nodes.
 * @param[in] allocate_processor Function to allocate a processor to a node.
 * @param[in] get_idle_thread Function to get an idle thread.
 */
---|
[f39f667a] | 806 | static inline void _Scheduler_SMP_Block( |
---|
[d9b54da] | 807 | Scheduler_Context *context, |
---|
| 808 | Thread_Control *thread, |
---|
| 809 | Scheduler_SMP_Extract extract_from_ready, |
---|
| 810 | Scheduler_SMP_Get_highest_ready get_highest_ready, |
---|
| 811 | Scheduler_SMP_Move move_from_ready_to_scheduled, |
---|
[5c3d250] | 812 | Scheduler_SMP_Allocate_processor allocate_processor, |
---|
| 813 | Scheduler_Get_idle_thread get_idle_thread |
---|
[48c4a55] | 814 | ) |
---|
| 815 | { |
---|
[08d9760] | 816 | Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node( thread ); |
---|
[f39f667a] | 817 | bool is_scheduled = node->state == SCHEDULER_SMP_NODE_SCHEDULED; |
---|
[5c3d250] | 818 | bool block = _Scheduler_Block_node( |
---|
| 819 | context, |
---|
| 820 | &node->Base, |
---|
| 821 | is_scheduled, |
---|
| 822 | get_idle_thread |
---|
| 823 | ); |
---|
[f39f667a] | 824 | |
---|
[5c3d250] | 825 | if ( block ) { |
---|
| 826 | _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED ); |
---|
[beab7329] | 827 | |
---|
[5c3d250] | 828 | if ( is_scheduled ) { |
---|
| 829 | _Scheduler_SMP_Extract_from_scheduled( &node->Base ); |
---|
[48c4a55] | 830 | |
---|
[5c3d250] | 831 | _Scheduler_SMP_Schedule_highest_ready( |
---|
| 832 | context, |
---|
| 833 | &node->Base, |
---|
| 834 | extract_from_ready, |
---|
| 835 | get_highest_ready, |
---|
| 836 | move_from_ready_to_scheduled, |
---|
| 837 | allocate_processor, |
---|
| 838 | get_idle_thread |
---|
| 839 | ); |
---|
| 840 | } else { |
---|
| 841 | ( *extract_from_ready )( context, &node->Base ); |
---|
| 842 | } |
---|
[48c4a55] | 843 | } |
---|
| 844 | } |
---|
| 845 | |
---|
[8568341] | 846 | static inline Thread_Control *_Scheduler_SMP_Unblock( |
---|
[5c3d250] | 847 | Scheduler_Context *context, |
---|
| 848 | Thread_Control *thread, |
---|
| 849 | Scheduler_SMP_Enqueue enqueue_fifo, |
---|
| 850 | Scheduler_Release_idle_thread release_idle_thread |
---|
[c0bff5e] | 851 | ) |
---|
| 852 | { |
---|
[08d9760] | 853 | Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node( thread ); |
---|
[5c3d250] | 854 | bool is_scheduled = node->state == SCHEDULER_SMP_NODE_SCHEDULED; |
---|
| 855 | bool unblock = _Scheduler_Unblock_node( |
---|
| 856 | context, |
---|
| 857 | thread, |
---|
| 858 | &node->Base, |
---|
| 859 | is_scheduled, |
---|
| 860 | release_idle_thread |
---|
| 861 | ); |
---|
| 862 | Thread_Control *needs_help; |
---|
[c0bff5e] | 863 | |
---|
[5c3d250] | 864 | if ( unblock ) { |
---|
| 865 | _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY ); |
---|
[c0bff5e] | 866 | |
---|
[5c3d250] | 867 | needs_help = ( *enqueue_fifo )( context, &node->Base, thread ); |
---|
| 868 | } else { |
---|
| 869 | needs_help = NULL; |
---|
| 870 | } |
---|
| 871 | |
---|
| 872 | return needs_help; |
---|
[c0bff5e] | 873 | } |
---|
| 874 | |
---|
[8568341] | 875 | static inline Thread_Control *_Scheduler_SMP_Change_priority( |
---|
| 876 | Scheduler_Context *context, |
---|
| 877 | Thread_Control *thread, |
---|
| 878 | Priority_Control new_priority, |
---|
| 879 | bool prepend_it, |
---|
| 880 | Scheduler_SMP_Extract extract_from_ready, |
---|
| 881 | Scheduler_SMP_Update update, |
---|
| 882 | Scheduler_SMP_Enqueue enqueue_fifo, |
---|
| 883 | Scheduler_SMP_Enqueue enqueue_lifo, |
---|
| 884 | Scheduler_SMP_Enqueue_scheduled enqueue_scheduled_fifo, |
---|
| 885 | Scheduler_SMP_Enqueue_scheduled enqueue_scheduled_lifo |
---|
[48c4a55] | 886 | ) |
---|
| 887 | { |
---|
[5c3d250] | 888 | Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_own_node( thread ); |
---|
[8568341] | 889 | Thread_Control *needs_help; |
---|
[a336d51] | 890 | |
---|
[c0bff5e] | 891 | if ( node->state == SCHEDULER_SMP_NODE_SCHEDULED ) { |
---|
[8f0c7a46] | 892 | _Scheduler_SMP_Extract_from_scheduled( &node->Base ); |
---|
[c0bff5e] | 893 | |
---|
| 894 | ( *update )( context, &node->Base, new_priority ); |
---|
| 895 | |
---|
| 896 | if ( prepend_it ) { |
---|
[8568341] | 897 | needs_help = ( *enqueue_scheduled_lifo )( context, &node->Base ); |
---|
[c0bff5e] | 898 | } else { |
---|
[8568341] | 899 | needs_help = ( *enqueue_scheduled_fifo )( context, &node->Base ); |
---|
[c0bff5e] | 900 | } |
---|
[5c3d250] | 901 | } else if ( node->state == SCHEDULER_SMP_NODE_READY ) { |
---|
[8f0c7a46] | 902 | ( *extract_from_ready )( context, &node->Base ); |
---|
[48c4a55] | 903 | |
---|
[c0bff5e] | 904 | ( *update )( context, &node->Base, new_priority ); |
---|
[f39f667a] | 905 | |
---|
[c0bff5e] | 906 | if ( prepend_it ) { |
---|
[8568341] | 907 | needs_help = ( *enqueue_lifo )( context, &node->Base, NULL ); |
---|
[c0bff5e] | 908 | } else { |
---|
[8568341] | 909 | needs_help = ( *enqueue_fifo )( context, &node->Base, NULL ); |
---|
[c0bff5e] | 910 | } |
---|
[5c3d250] | 911 | } else { |
---|
| 912 | ( *update )( context, &node->Base, new_priority ); |
---|
| 913 | |
---|
| 914 | needs_help = NULL; |
---|
[f39f667a] | 915 | } |
---|
[8568341] | 916 | |
---|
| 917 | return needs_help; |
---|
[48c4a55] | 918 | } |
---|
| 919 | |
---|
/**
 * @brief Asks the scheduler node of @a offers_help for help on behalf of
 *   @a needs_help.
 *
 * The previous accepts_help thread is saved and then replaced by
 * @a needs_help before the state dispatch, since the scheduled-node help
 * path receives the previous value explicitly.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] offers_help The thread offering help; its own node is used.
 * @param[in] needs_help The thread that needs help.
 * @param[in] enqueue_fifo Function to enqueue a node in FIFO order.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @return The thread that still needs help after this operation, or NULL.
 */
static inline Thread_Control *_Scheduler_SMP_Ask_for_help(
  Scheduler_Context *context,
  Thread_Control *offers_help,
  Thread_Control *needs_help,
  Scheduler_SMP_Enqueue enqueue_fifo,
  Scheduler_Release_idle_thread release_idle_thread
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_own_node( offers_help );
  Thread_Control *next_needs_help = NULL;
  Thread_Control *previous_accepts_help;

  /* Remember whom this node helped before; the scheduled case needs it. */
  previous_accepts_help = node->Base.accepts_help;
  node->Base.accepts_help = needs_help;

  switch ( node->state ) {
    case SCHEDULER_SMP_NODE_READY:
      next_needs_help =
        _Scheduler_Ask_ready_node_for_help( &node->Base, needs_help );
      break;
    case SCHEDULER_SMP_NODE_SCHEDULED:
      next_needs_help = _Scheduler_Ask_scheduled_node_for_help(
        context,
        &node->Base,
        offers_help,
        needs_help,
        previous_accepts_help,
        release_idle_thread
      );
      break;
    case SCHEDULER_SMP_NODE_BLOCKED:
      /* A blocked node may become ready to run on behalf of needs_help;
         if so, it re-enters the ready set via the FIFO enqueue. */
      if (
        _Scheduler_Ask_blocked_node_for_help(
          context,
          &node->Base,
          offers_help,
          needs_help
        )
      ) {
        _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );

        next_needs_help = ( *enqueue_fifo )(
          context,
          &node->Base,
          needs_help
        );
      }
      break;
  }

  return next_needs_help;
}
---|
| 972 | |
---|
[8568341] | 973 | static inline Thread_Control *_Scheduler_SMP_Yield( |
---|
| 974 | Scheduler_Context *context, |
---|
| 975 | Thread_Control *thread, |
---|
| 976 | Scheduler_SMP_Extract extract_from_ready, |
---|
| 977 | Scheduler_SMP_Enqueue enqueue_fifo, |
---|
| 978 | Scheduler_SMP_Enqueue_scheduled enqueue_scheduled_fifo |
---|
[701dd96f] | 979 | ) |
---|
| 980 | { |
---|
[08d9760] | 981 | Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node( thread ); |
---|
[8568341] | 982 | Thread_Control *needs_help; |
---|
[701dd96f] | 983 | |
---|
| 984 | if ( node->state == SCHEDULER_SMP_NODE_SCHEDULED ) { |
---|
[8f0c7a46] | 985 | _Scheduler_SMP_Extract_from_scheduled( &node->Base ); |
---|
[701dd96f] | 986 | |
---|
[8568341] | 987 | needs_help = ( *enqueue_scheduled_fifo )( context, &node->Base ); |
---|
[701dd96f] | 988 | } else { |
---|
[8f0c7a46] | 989 | ( *extract_from_ready )( context, &node->Base ); |
---|
[701dd96f] | 990 | |
---|
[8568341] | 991 | needs_help = ( *enqueue_fifo )( context, &node->Base, NULL ); |
---|
[701dd96f] | 992 | } |
---|
[8568341] | 993 | |
---|
| 994 | return needs_help; |
---|
[701dd96f] | 995 | } |
---|
| 996 | |
---|
[48c4a55] | 997 | static inline void _Scheduler_SMP_Insert_scheduled_lifo( |
---|
[3730a07f] | 998 | Scheduler_Context *context, |
---|
[8f0c7a46] | 999 | Scheduler_Node *node_to_insert |
---|
[48c4a55] | 1000 | ) |
---|
| 1001 | { |
---|
[3730a07f] | 1002 | Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context ); |
---|
| 1003 | |
---|
[48c4a55] | 1004 | _Chain_Insert_ordered_unprotected( |
---|
[494c2e3] | 1005 | &self->Scheduled, |
---|
[8f0c7a46] | 1006 | &node_to_insert->Node, |
---|
| 1007 | _Scheduler_SMP_Insert_priority_lifo_order |
---|
[48c4a55] | 1008 | ); |
---|
| 1009 | } |
---|
| 1010 | |
---|
| 1011 | static inline void _Scheduler_SMP_Insert_scheduled_fifo( |
---|
[3730a07f] | 1012 | Scheduler_Context *context, |
---|
[8f0c7a46] | 1013 | Scheduler_Node *node_to_insert |
---|
[48c4a55] | 1014 | ) |
---|
| 1015 | { |
---|
[3730a07f] | 1016 | Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context ); |
---|
| 1017 | |
---|
[48c4a55] | 1018 | _Chain_Insert_ordered_unprotected( |
---|
[494c2e3] | 1019 | &self->Scheduled, |
---|
[8f0c7a46] | 1020 | &node_to_insert->Node, |
---|
| 1021 | _Scheduler_SMP_Insert_priority_fifo_order |
---|
[48c4a55] | 1022 | ); |
---|
| 1023 | } |
---|
| 1024 | |
---|
[9d83f58a] | 1025 | /** @} */ |
---|
| 1026 | |
---|
| 1027 | #ifdef __cplusplus |
---|
| 1028 | } |
---|
| 1029 | #endif /* __cplusplus */ |
---|
| 1030 | |
---|
| 1031 | #endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */ |
---|