source: rtems/cpukit/score/src/scheduleredfsmp.c @ 834a86fe

Last change on this file since 834a86fe was 834a86fe, checked in by Sebastian Huber <sebastian.huber@…>, on 11/15/21 at 09:20:30

score: Restrict affinity for EDF SMP scheduler

The SMP EDF scheduler supports a one-to-one and a one-to-all thread to
processor affinity. It also accepted affinity sets which are a proper
subset of the online processors and contain at least two processors owned
by the scheduler. In this case it used a one-to-one thread to processor
affinity. This led to undefined behaviour if a processor was removed,
since the higher level check in rtems_scheduler_remove_processor() does
not account for this implementation detail.

Restrict the affinity set accepted by the SMP EDF scheduler to

  1. all online processors, or
  2. exactly one processor owned by the scheduler.

Close #4545.
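
As a usage illustration (not part of the commit), here is a minimal sketch of
the two affinity shapes that remain valid after this change. It assumes the
Classic API directive rtems_task_set_affinity(), a hypothetical task
identifier task_id, and that all configured processors are online; error
handling is omitted:

#include <rtems.h>

/* Sketch: bind a task to exactly one processor (one-to-one affinity). */
static rtems_status_code pin_to_cpu( rtems_id task_id, int cpu_index )
{
  cpu_set_t affinity;

  CPU_ZERO( &affinity );
  CPU_SET( cpu_index, &affinity );

  return rtems_task_set_affinity( task_id, sizeof( affinity ), &affinity );
}

/* Sketch: allow a task to run on all processors (one-to-all affinity);
   assumes all configured processors are online. */
static rtems_status_code allow_all_cpus( rtems_id task_id )
{
  cpu_set_t affinity;
  uint32_t  cpu_index;

  CPU_ZERO( &affinity );

  for (
    cpu_index = 0;
    cpu_index < rtems_scheduler_get_processor_maximum();
    ++cpu_index
  ) {
    CPU_SET( (int) cpu_index, &affinity );
  }

  return rtems_task_set_affinity( task_id, sizeof( affinity ), &affinity );
}

Any other proper subset of the online processors is now rejected by this
scheduler with STATUS_INVALID_NUMBER, which the Classic API reports as
RTEMS_INVALID_NUMBER.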

/**
 * @file
 *
 * @ingroup RTEMSScoreSchedulerSMPEDF
 *
 * @brief This source file contains the implementation of
 *   _Scheduler_EDF_SMP_Add_processor(), _Scheduler_EDF_SMP_Ask_for_help(),
 *   _Scheduler_EDF_SMP_Block(), _Scheduler_EDF_SMP_Initialize(),
 *   _Scheduler_EDF_SMP_Node_initialize(), _Scheduler_EDF_SMP_Pin(),
 *   _Scheduler_EDF_SMP_Reconsider_help_request(),
 *   _Scheduler_EDF_SMP_Remove_processor(), _Scheduler_EDF_SMP_Set_affinity(),
 *   _Scheduler_EDF_SMP_Start_idle(), _Scheduler_EDF_SMP_Unblock(),
 *   _Scheduler_EDF_SMP_Unpin(), _Scheduler_EDF_SMP_Update_priority(),
 *   _Scheduler_EDF_SMP_Withdraw_node(), _Scheduler_EDF_SMP_Make_sticky(),
 *   _Scheduler_EDF_SMP_Clean_sticky(), and _Scheduler_EDF_SMP_Yield().
 */

/*
 * Copyright (c) 2017 embedded brains GmbH.
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/scheduleredfsmp.h>
#include <rtems/score/schedulersmpimpl.h>

static inline Scheduler_EDF_SMP_Context *
_Scheduler_EDF_SMP_Get_context( const Scheduler_Control *scheduler )
{
  return (Scheduler_EDF_SMP_Context *) _Scheduler_Get_context( scheduler );
}

static inline Scheduler_EDF_SMP_Context *
_Scheduler_EDF_SMP_Get_self( Scheduler_Context *context )
{
  return (Scheduler_EDF_SMP_Context *) context;
}

static inline Scheduler_EDF_SMP_Node *
_Scheduler_EDF_SMP_Node_downcast( Scheduler_Node *node )
{
  return (Scheduler_EDF_SMP_Node *) node;
}

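/*
 * The two comparators below order nodes by their priority value first.  In
 * the EDF scheduler, the priority value reflects the deadline, so a smaller
 * value means an earlier deadline.  Ties are broken by the insertion
 * generation, see _Scheduler_EDF_SMP_Update_generation().
 */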
static inline bool _Scheduler_EDF_SMP_Priority_less_equal(
  const void        *left,
  const RBTree_Node *right
)
{
  const Priority_Control   *the_left;
  const Scheduler_SMP_Node *the_right;
  Priority_Control          prio_left;
  Priority_Control          prio_right;

  the_left = left;
  the_right = RTEMS_CONTAINER_OF( right, Scheduler_SMP_Node, Base.Node.RBTree );

  prio_left = *the_left;
  prio_right = the_right->priority;

  return prio_left <= prio_right;
}

static inline bool _Scheduler_EDF_SMP_Overall_less_equal(
  const void       *key,
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  Priority_Control              insert_priority;
  Priority_Control              next_priority;
  const Scheduler_EDF_SMP_Node *node_to_insert;
  const Scheduler_EDF_SMP_Node *node_next;

  insert_priority = *(const Priority_Control *) key;
  insert_priority = SCHEDULER_PRIORITY_PURIFY( insert_priority );
  node_to_insert = (const Scheduler_EDF_SMP_Node *) to_insert;
  node_next = (const Scheduler_EDF_SMP_Node *) next;
  next_priority = node_next->Base.priority;

  return insert_priority < next_priority ||
    ( insert_priority == next_priority &&
      node_to_insert->generation <= node_next->generation );
}

void _Scheduler_EDF_SMP_Initialize( const Scheduler_Control *scheduler )
{
  Scheduler_EDF_SMP_Context *self =
    _Scheduler_EDF_SMP_Get_context( scheduler );

  _Scheduler_SMP_Initialize( &self->Base );
  _Chain_Initialize_empty( &self->Affine_queues );
  /* The ready queues are zero initialized and thus empty */
}

void _Scheduler_EDF_SMP_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_SMP_Node *smp_node;

  smp_node = _Scheduler_SMP_Node_downcast( node );
  _Scheduler_SMP_Node_initialize( scheduler, smp_node, the_thread, priority );
}

static inline void _Scheduler_EDF_SMP_Do_update(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Priority_Control   new_priority
)
{
  Scheduler_SMP_Node *smp_node;

  (void) context;

  smp_node = _Scheduler_SMP_Node_downcast( node );
  _Scheduler_SMP_Node_update_priority( smp_node, new_priority );
}

static inline bool _Scheduler_EDF_SMP_Has_ready( Scheduler_Context *context )
{
  Scheduler_EDF_SMP_Context *self = _Scheduler_EDF_SMP_Get_self( context );

  return !_RBTree_Is_empty( &self->Ready[ 0 ].Queue );
}

static inline bool _Scheduler_EDF_SMP_Overall_less(
  const Scheduler_EDF_SMP_Node *left,
  const Scheduler_EDF_SMP_Node *right
)
{
  Priority_Control lp;
  Priority_Control rp;

  lp = left->Base.priority;
  rp = right->Base.priority;

  return lp < rp || ( lp == rp && left->generation < right->generation );
}

static inline Scheduler_EDF_SMP_Node *
_Scheduler_EDF_SMP_Challenge_highest_ready(
  Scheduler_EDF_SMP_Context *self,
  Scheduler_EDF_SMP_Node    *highest_ready,
  RBTree_Control            *ready_queue
)
{
  Scheduler_EDF_SMP_Node *other;

  other = (Scheduler_EDF_SMP_Node *) _RBTree_Minimum( ready_queue );
  _Assert( other != NULL );

  if ( _Scheduler_EDF_SMP_Overall_less( other, highest_ready ) ) {
    return other;
  }

  return highest_ready;
}

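/*
 * The chain of affine ready queues contains exactly the non-empty affine
 * ready queues of processors which currently have no affine scheduled node.
 * Only these queues can contribute a candidate for the highest ready node
 * in addition to the general ready queue.
 */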
static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_highest_ready(
  Scheduler_Context *context,
  Scheduler_Node    *filter
)
{
  Scheduler_EDF_SMP_Context *self;
  Scheduler_EDF_SMP_Node    *highest_ready;
  Scheduler_EDF_SMP_Node    *node;
  uint8_t                    rqi;
  const Chain_Node          *tail;
  Chain_Node                *next;

  self = _Scheduler_EDF_SMP_Get_self( context );
  highest_ready = (Scheduler_EDF_SMP_Node *)
    _RBTree_Minimum( &self->Ready[ 0 ].Queue );
  _Assert( highest_ready != NULL );

  /*
   * The filter node is a scheduled node which is no longer on the scheduled
   * chain.  If it belongs to an affine thread, then the corresponding affine
   * ready queue has to be checked as well.
   */

  node = (Scheduler_EDF_SMP_Node *) filter;
  rqi = node->ready_queue_index;

  if ( rqi != 0 && !_RBTree_Is_empty( &self->Ready[ rqi ].Queue ) ) {
    highest_ready = _Scheduler_EDF_SMP_Challenge_highest_ready(
      self,
      highest_ready,
      &self->Ready[ rqi ].Queue
    );
  }

  tail = _Chain_Immutable_tail( &self->Affine_queues );
  next = _Chain_First( &self->Affine_queues );

  while ( next != tail ) {
    Scheduler_EDF_SMP_Ready_queue *ready_queue;

    ready_queue = (Scheduler_EDF_SMP_Ready_queue *) next;
    highest_ready = _Scheduler_EDF_SMP_Challenge_highest_ready(
      self,
      highest_ready,
      &ready_queue->Queue
    );

    next = _Chain_Next( next );
  }

  return &highest_ready->Base.Base;
}

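/*
 * Ready queue index zero is the queue of nodes without a processor
 * affinity; ready queue index i + 1 belongs to the processor with index i.
 */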
static inline void _Scheduler_EDF_SMP_Set_allocated(
  Scheduler_EDF_SMP_Context *self,
  Scheduler_EDF_SMP_Node    *allocated,
  const Per_CPU_Control     *cpu
)
{
  self->Ready[ _Per_CPU_Get_index( cpu ) + 1 ].allocated = allocated;
}

static inline Scheduler_EDF_SMP_Node *_Scheduler_EDF_SMP_Get_allocated(
  const Scheduler_EDF_SMP_Context *self,
  uint8_t                          rqi
)
{
  return self->Ready[ rqi ].allocated;
}

static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *filter_base
)
{
  Scheduler_EDF_SMP_Node *filter;
  uint8_t                 rqi;

  filter = _Scheduler_EDF_SMP_Node_downcast( filter_base );
  rqi = filter->ready_queue_index;

  if ( rqi != 0 ) {
    Scheduler_EDF_SMP_Context *self;
    Scheduler_EDF_SMP_Node    *affine_scheduled;

    self = _Scheduler_EDF_SMP_Get_self( context );
    affine_scheduled = self->Ready[ rqi ].affine_scheduled;

    if ( affine_scheduled != NULL ) {
      _Assert( affine_scheduled->ready_queue_index == rqi );
      return &affine_scheduled->Base.Base;
    }
  }

  return _Scheduler_SMP_Get_lowest_scheduled( context, filter_base );
}

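/*
 * The generation counters run in opposite directions: the append generation
 * counts upwards ( increment +1 ) and the prepend generation counts
 * downwards ( increment -1 ).  Together with the <= comparison in
 * _Scheduler_EDF_SMP_Overall_less_equal(), this orders appended nodes
 * behind and prepended nodes in front of nodes of equal priority.
 */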
static inline void _Scheduler_EDF_SMP_Update_generation(
  Scheduler_Context *context,
  Scheduler_Node    *node_base,
  Priority_Control   insert_priority
)
{
  Scheduler_EDF_SMP_Context *self;
  Scheduler_EDF_SMP_Node    *node;
  int                        generation_index;
  int                        increment;
  int64_t                    generation;

  self = _Scheduler_EDF_SMP_Get_self( context );
  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
  generation_index = SCHEDULER_PRIORITY_IS_APPEND( insert_priority );
  increment = ( generation_index << 1 ) - 1;

  generation = self->generations[ generation_index ];
  node->generation = generation;
  self->generations[ generation_index ] = generation + increment;
}

static inline void _Scheduler_EDF_SMP_Insert_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *node_base,
  Priority_Control   priority_to_insert
)
{
  Scheduler_EDF_SMP_Context     *self;
  Scheduler_EDF_SMP_Node        *node;
  uint8_t                        rqi;
  Scheduler_EDF_SMP_Ready_queue *ready_queue;

  self = _Scheduler_EDF_SMP_Get_self( context );
  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
  rqi = node->ready_queue_index;
  ready_queue = &self->Ready[ rqi ];

  _Chain_Insert_ordered_unprotected(
    &self->Base.Scheduled,
    &node_base->Node.Chain,
    &priority_to_insert,
    _Scheduler_EDF_SMP_Overall_less_equal
  );

  if ( rqi != 0 ) {
    ready_queue->affine_scheduled = node;

    if ( !_RBTree_Is_empty( &ready_queue->Queue ) ) {
      _Chain_Extract_unprotected( &ready_queue->Node );
    }
  }
}

static inline void _Scheduler_EDF_SMP_Activate_ready_queue_if_necessary(
  Scheduler_EDF_SMP_Context     *self,
  uint8_t                        rqi,
  Scheduler_EDF_SMP_Ready_queue *ready_queue
)
{
  if (
    rqi != 0 &&
    _RBTree_Is_empty( &ready_queue->Queue ) &&
    ready_queue->affine_scheduled == NULL
  ) {
    _Chain_Append_unprotected( &self->Affine_queues, &ready_queue->Node );
  }
}

static inline void _Scheduler_EDF_SMP_Insert_ready(
  Scheduler_Context *context,
  Scheduler_Node    *node_base,
  Priority_Control   insert_priority
)
{
  Scheduler_EDF_SMP_Context     *self;
  Scheduler_EDF_SMP_Node        *node;
  uint8_t                        rqi;
  Scheduler_EDF_SMP_Ready_queue *ready_queue;

  self = _Scheduler_EDF_SMP_Get_self( context );
  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
  rqi = node->ready_queue_index;
  ready_queue = &self->Ready[ rqi ];

  _Scheduler_EDF_SMP_Activate_ready_queue_if_necessary( self, rqi, ready_queue );
  _RBTree_Initialize_node( &node->Base.Base.Node.RBTree );
  _RBTree_Insert_inline(
    &ready_queue->Queue,
    &node->Base.Base.Node.RBTree,
    &insert_priority,
    _Scheduler_EDF_SMP_Priority_less_equal
  );
}

static inline void _Scheduler_EDF_SMP_Extract_from_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_extract
)
{
  Scheduler_EDF_SMP_Context     *self;
  Scheduler_EDF_SMP_Node        *node;
  uint8_t                        rqi;
  Scheduler_EDF_SMP_Ready_queue *ready_queue;

  self = _Scheduler_EDF_SMP_Get_self( context );
  node = _Scheduler_EDF_SMP_Node_downcast( node_to_extract );

  _Scheduler_SMP_Extract_from_scheduled( &self->Base.Base, &node->Base.Base );

  rqi = node->ready_queue_index;
  ready_queue = &self->Ready[ rqi ];

  if ( rqi != 0 && !_RBTree_Is_empty( &ready_queue->Queue ) ) {
    _Chain_Append_unprotected( &self->Affine_queues, &ready_queue->Node );
  }

  ready_queue->affine_scheduled = NULL;
}

static inline void _Scheduler_EDF_SMP_Extract_from_ready(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_extract
)
{
  Scheduler_EDF_SMP_Context     *self;
  Scheduler_EDF_SMP_Node        *node;
  uint8_t                        rqi;
  Scheduler_EDF_SMP_Ready_queue *ready_queue;

  self = _Scheduler_EDF_SMP_Get_self( context );
  node = _Scheduler_EDF_SMP_Node_downcast( node_to_extract );
  rqi = node->ready_queue_index;
  ready_queue = &self->Ready[ rqi ];

  _RBTree_Extract( &ready_queue->Queue, &node->Base.Base.Node.RBTree );
  _Chain_Initialize_node( &node->Base.Base.Node.Chain );

  if (
    rqi != 0
      && _RBTree_Is_empty( &ready_queue->Queue )
      && ready_queue->affine_scheduled == NULL
  ) {
    _Chain_Extract_unprotected( &ready_queue->Node );
  }
}

static inline void _Scheduler_EDF_SMP_Move_from_scheduled_to_ready(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled_to_ready
)
{
  Scheduler_EDF_SMP_Context     *self;
  Scheduler_EDF_SMP_Node        *node;
  uint8_t                        rqi;
  Scheduler_EDF_SMP_Ready_queue *ready_queue;

  _Scheduler_EDF_SMP_Extract_from_scheduled( context, scheduled_to_ready );

  self = _Scheduler_EDF_SMP_Get_self( context );
  node = _Scheduler_EDF_SMP_Node_downcast( scheduled_to_ready );
  rqi = node->ready_queue_index;
  ready_queue = &self->Ready[ rqi ];

  _Scheduler_EDF_SMP_Activate_ready_queue_if_necessary( self, rqi, ready_queue );
  _RBTree_Initialize_node( &node->Base.Base.Node.RBTree );
  _RBTree_Prepend( &ready_queue->Queue, &node->Base.Base.Node.RBTree );
}

static inline void _Scheduler_EDF_SMP_Move_from_ready_to_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *ready_to_scheduled
)
{
  Priority_Control insert_priority;

  _Scheduler_EDF_SMP_Extract_from_ready( context, ready_to_scheduled );
  insert_priority = _Scheduler_SMP_Node_priority( ready_to_scheduled );
  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
  _Scheduler_EDF_SMP_Insert_scheduled(
    context,
    ready_to_scheduled,
    insert_priority
  );
}

static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_idle( void *arg )
{
  Scheduler_EDF_SMP_Context *self;
  Scheduler_Node            *lowest_ready;

  self = _Scheduler_EDF_SMP_Get_self( arg );
  lowest_ready = (Scheduler_Node *) _RBTree_Maximum( &self->Ready[ 0 ].Queue );
  _Assert( lowest_ready != NULL );
  _RBTree_Extract( &self->Ready[ 0 ].Queue, &lowest_ready->Node.RBTree );
  _Chain_Initialize_node( &lowest_ready->Node.Chain );

  return lowest_ready;
}

static inline void _Scheduler_EDF_SMP_Release_idle(
  Scheduler_Node *node,
  void           *arg
)
{
  Scheduler_EDF_SMP_Context *self;

  self = _Scheduler_EDF_SMP_Get_self( arg );
  _RBTree_Initialize_node( &node->Node.RBTree );
  _RBTree_Append( &self->Ready[ 0 ].Queue, &node->Node.RBTree );
}

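/*
 * If a node with a one-to-one affinity is offered a processor other than
 * its affine one, then the node currently allocated to the affine processor
 * ( asserted to be non-affine ) is moved to the offered processor and the
 * affine node takes its own processor instead.
 */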
static inline void _Scheduler_EDF_SMP_Allocate_processor(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled_base,
  Per_CPU_Control   *cpu
)
{
  Scheduler_EDF_SMP_Context     *self;
  Scheduler_EDF_SMP_Node        *scheduled;
  uint8_t                        rqi;

  self = _Scheduler_EDF_SMP_Get_self( context );
  scheduled = _Scheduler_EDF_SMP_Node_downcast( scheduled_base );
  rqi = scheduled->ready_queue_index;

  if ( rqi != 0 ) {
    Per_CPU_Control *affine_cpu;

    affine_cpu = _Per_CPU_Get_by_index( rqi - 1 );

    if ( cpu != affine_cpu ) {
      Scheduler_EDF_SMP_Node *node;

      node = _Scheduler_EDF_SMP_Get_allocated( self, rqi );
      _Assert( node->ready_queue_index == 0 );
      _Scheduler_EDF_SMP_Set_allocated( self, node, cpu );
      _Scheduler_SMP_Allocate_processor_exact(
        context,
        &node->Base.Base,
        cpu
      );
      cpu = affine_cpu;
    }
  }

  _Scheduler_EDF_SMP_Set_allocated( self, scheduled, cpu );
  _Scheduler_SMP_Allocate_processor_exact(
    context,
    &scheduled->Base.Base,
    cpu
  );
}

void _Scheduler_EDF_SMP_Block(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Block(
    context,
    thread,
    node,
    _Scheduler_EDF_SMP_Extract_from_scheduled,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Get_highest_ready,
    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor,
    _Scheduler_EDF_SMP_Get_idle
  );
}

static inline bool _Scheduler_EDF_SMP_Enqueue(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Priority_Control   insert_priority
)
{
  _Scheduler_EDF_SMP_Update_generation( context, node, insert_priority );

  return _Scheduler_SMP_Enqueue(
    context,
    node,
    insert_priority,
    _Scheduler_EDF_SMP_Overall_less_equal,
    _Scheduler_EDF_SMP_Insert_ready,
    _Scheduler_EDF_SMP_Insert_scheduled,
    _Scheduler_EDF_SMP_Move_from_scheduled_to_ready,
    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
    _Scheduler_EDF_SMP_Get_lowest_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor,
    _Scheduler_EDF_SMP_Get_idle,
    _Scheduler_EDF_SMP_Release_idle
  );
}

static inline void _Scheduler_EDF_SMP_Enqueue_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Priority_Control   insert_priority
)
{
  _Scheduler_EDF_SMP_Update_generation( context, node, insert_priority );
  _Scheduler_SMP_Enqueue_scheduled(
    context,
    node,
    insert_priority,
    _Scheduler_EDF_SMP_Overall_less_equal,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Get_highest_ready,
    _Scheduler_EDF_SMP_Insert_ready,
    _Scheduler_EDF_SMP_Insert_scheduled,
    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor,
    _Scheduler_EDF_SMP_Get_idle,
    _Scheduler_EDF_SMP_Release_idle
  );
}

void _Scheduler_EDF_SMP_Unblock(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Unblock(
    context,
    thread,
    node,
    _Scheduler_EDF_SMP_Do_update,
    _Scheduler_EDF_SMP_Enqueue,
    _Scheduler_EDF_SMP_Release_idle
  );
}

static inline bool _Scheduler_EDF_SMP_Do_ask_for_help(
  Scheduler_Context *context,
  Thread_Control    *the_thread,
  Scheduler_Node    *node
)
{
  return _Scheduler_SMP_Ask_for_help(
    context,
    the_thread,
    node,
    _Scheduler_EDF_SMP_Overall_less_equal,
    _Scheduler_EDF_SMP_Insert_ready,
    _Scheduler_EDF_SMP_Insert_scheduled,
    _Scheduler_EDF_SMP_Move_from_scheduled_to_ready,
    _Scheduler_EDF_SMP_Get_lowest_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor,
    _Scheduler_EDF_SMP_Release_idle
  );
}

void _Scheduler_EDF_SMP_Update_priority(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Update_priority(
    context,
    thread,
    node,
    _Scheduler_EDF_SMP_Extract_from_scheduled,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Do_update,
    _Scheduler_EDF_SMP_Enqueue,
    _Scheduler_EDF_SMP_Enqueue_scheduled,
    _Scheduler_EDF_SMP_Do_ask_for_help
  );
}

bool _Scheduler_EDF_SMP_Ask_for_help(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Scheduler_EDF_SMP_Do_ask_for_help( context, the_thread, node );
}

void _Scheduler_EDF_SMP_Reconsider_help_request(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Reconsider_help_request(
    context,
    the_thread,
    node,
    _Scheduler_EDF_SMP_Extract_from_ready
  );
}

void _Scheduler_EDF_SMP_Withdraw_node(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  Thread_Scheduler_state   next_state
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Withdraw_node(
    context,
    the_thread,
    node,
    next_state,
    _Scheduler_EDF_SMP_Extract_from_scheduled,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Get_highest_ready,
    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor,
    _Scheduler_EDF_SMP_Get_idle
  );
}

void _Scheduler_EDF_SMP_Make_sticky(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  _Scheduler_SMP_Make_sticky(
    scheduler,
    the_thread,
    node,
    _Scheduler_EDF_SMP_Do_update,
    _Scheduler_EDF_SMP_Enqueue
  );
}

void _Scheduler_EDF_SMP_Clean_sticky(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  _Scheduler_SMP_Clean_sticky(
    scheduler,
    the_thread,
    node,
    _Scheduler_EDF_SMP_Extract_from_scheduled,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Get_highest_ready,
    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor,
    _Scheduler_EDF_SMP_Get_idle,
    _Scheduler_EDF_SMP_Release_idle
  );
}

static inline void _Scheduler_EDF_SMP_Register_idle(
  Scheduler_Context *context,
  Scheduler_Node    *idle_base,
  Per_CPU_Control   *cpu
)
{
  Scheduler_EDF_SMP_Context *self;
  Scheduler_EDF_SMP_Node    *idle;

  self = _Scheduler_EDF_SMP_Get_self( context );
  idle = _Scheduler_EDF_SMP_Node_downcast( idle_base );
  _Scheduler_EDF_SMP_Set_allocated( self, idle, cpu );
  _Scheduler_EDF_SMP_Update_generation(
    context,
    idle_base,
    PRIORITY_GROUP_LAST
  );
}

void _Scheduler_EDF_SMP_Add_processor(
  const Scheduler_Control *scheduler,
  Thread_Control          *idle
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Add_processor(
    context,
    idle,
    _Scheduler_EDF_SMP_Has_ready,
    _Scheduler_EDF_SMP_Enqueue_scheduled,
    _Scheduler_EDF_SMP_Register_idle
  );
}

Thread_Control *_Scheduler_EDF_SMP_Remove_processor(
  const Scheduler_Control *scheduler,
  Per_CPU_Control         *cpu
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Scheduler_SMP_Remove_processor(
    context,
    cpu,
    _Scheduler_EDF_SMP_Extract_from_scheduled,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Enqueue,
    _Scheduler_EDF_SMP_Get_idle,
    _Scheduler_EDF_SMP_Release_idle
  );
}

void _Scheduler_EDF_SMP_Yield(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Yield(
    context,
    thread,
    node,
    _Scheduler_EDF_SMP_Extract_from_scheduled,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Enqueue,
    _Scheduler_EDF_SMP_Enqueue_scheduled
  );
}

static inline void _Scheduler_EDF_SMP_Do_set_affinity(
  Scheduler_Context *context,
  Scheduler_Node    *node_base,
  void              *arg
)
{
  Scheduler_EDF_SMP_Node *node;
  const uint8_t          *rqi;

  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
  rqi = arg;
  node->ready_queue_index = *rqi;
}

void _Scheduler_EDF_SMP_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control          *idle,
  Per_CPU_Control         *cpu
)
{
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Do_start_idle(
    context,
    idle,
    cpu,
    _Scheduler_EDF_SMP_Register_idle
  );
}

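/*
 * Pinning overrides the affinity: the ready queue index is redirected to the
 * affine queue of the pinning processor.  The index selected through
 * _Scheduler_EDF_SMP_Set_affinity() is kept in affinity_ready_queue_index
 * and restored by _Scheduler_EDF_SMP_Unpin().
 */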
void _Scheduler_EDF_SMP_Pin(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node_base,
  struct Per_CPU_Control  *cpu
)
{
  Scheduler_EDF_SMP_Node *node;
  uint8_t                 rqi;

  (void) scheduler;
  node = _Scheduler_EDF_SMP_Node_downcast( node_base );

  _Assert(
    _Scheduler_SMP_Node_state( &node->Base.Base ) == SCHEDULER_SMP_NODE_BLOCKED
  );

  rqi = (uint8_t) _Per_CPU_Get_index( cpu ) + 1;
  node->ready_queue_index = rqi;
  node->pinning_ready_queue_index = rqi;
}

void _Scheduler_EDF_SMP_Unpin(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node_base,
  struct Per_CPU_Control  *cpu
)
{
  Scheduler_EDF_SMP_Node *node;

  (void) scheduler;
  (void) cpu;
  node = _Scheduler_EDF_SMP_Node_downcast( node_base );

  _Assert(
    _Scheduler_SMP_Node_state( &node->Base.Base ) == SCHEDULER_SMP_NODE_BLOCKED
  );

  node->ready_queue_index = node->affinity_ready_queue_index;
  node->pinning_ready_queue_index = 0;
}

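/*
 * Note that _Processor_mask_Find_last_set() yields a one-based processor
 * index, so for a one-to-one affinity the ready queue index below is the
 * processor index plus one, consistent with the mapping used by
 * _Scheduler_EDF_SMP_Pin().
 */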
Status_Control _Scheduler_EDF_SMP_Set_affinity(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node_base,
  const Processor_mask    *affinity
)
{
  Scheduler_Context      *context;
  Scheduler_EDF_SMP_Node *node;
  uint8_t                 rqi;

  context = _Scheduler_Get_context( scheduler );

  /*
   * We support a thread to processor affinity to all online processors and
   * an affinity to exactly one processor.  This restriction is necessary to
   * avoid issues if processors are added to or removed from the scheduler.
   */

  if ( _Processor_mask_Is_equal( affinity, &_SMP_Online_processors ) ) {
    rqi = 0;
  } else {
    Processor_mask local_affinity;
    Processor_mask one_to_one;
    uint32_t       last;

    _Processor_mask_And( &local_affinity, &context->Processors, affinity );

    if ( _Processor_mask_Is_zero( &local_affinity ) ) {
      return STATUS_INVALID_NUMBER;
    }

    last = _Processor_mask_Find_last_set( affinity );
    _Processor_mask_From_index( &one_to_one, last - 1 );

    /*
     * Use the global affinity set and not the affinity set local to the
     * scheduler to check for a one-to-one affinity.
     */
    if ( !_Processor_mask_Is_equal( &one_to_one, affinity ) ) {
      return STATUS_INVALID_NUMBER;
    }

    rqi = last;
  }

  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
  node->affinity_ready_queue_index = rqi;

  if ( node->pinning_ready_queue_index == 0 ) {
    _Scheduler_SMP_Set_affinity(
      context,
      thread,
      node_base,
      &rqi,
      _Scheduler_EDF_SMP_Do_set_affinity,
      _Scheduler_EDF_SMP_Extract_from_scheduled,
      _Scheduler_EDF_SMP_Extract_from_ready,
      _Scheduler_EDF_SMP_Get_highest_ready,
      _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
      _Scheduler_EDF_SMP_Enqueue,
      _Scheduler_EDF_SMP_Allocate_processor,
      _Scheduler_EDF_SMP_Get_idle,
      _Scheduler_EDF_SMP_Release_idle
    );
  }

  return STATUS_SUCCESSFUL;
}