source: rtems/cpukit/score/src/scheduleredfsmp.c @ 3aad9d9b

Last change on this file was 3aad9d9b, checked in by Sebastian Huber <sebastian.huber@…> on 09/03/18 at 07:31:19

score: Generalize SMP scheduler block support

Add an extract from scheduled handler to the _Scheduler_SMP_Block()
operation. This allows a scheduler implementation to do extra work in
case a scheduled node is blocked.
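
As a rough sketch of what this enables (the scheduler name and the extra
bookkeeping are hypothetical; only _Scheduler_SMP_Block() and the handler
signature are taken from this file), a scheduler implementation may now
supply its own handler and perform additional work before delegating to the
default extraction:

static inline void _Scheduler_XYZ_SMP_Extract_from_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  /* Hypothetical extra work for the blocked scheduled node goes here */
  _Scheduler_SMP_Extract_from_scheduled( context, node );
}

The EDF SMP scheduler below simply passes
_Scheduler_SMP_Extract_from_scheduled() itself, see
_Scheduler_EDF_SMP_Block().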

/**
 * @file
 *
 * @brief EDF SMP Scheduler Implementation
 *
 * @ingroup ScoreSchedulerSMPEDF
 */

/*
 * Copyright (c) 2017 embedded brains GmbH.
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
  #include "config.h"
#endif

#include <rtems/score/scheduleredfsmp.h>
#include <rtems/score/schedulersmpimpl.h>

static inline Scheduler_EDF_SMP_Context *
_Scheduler_EDF_SMP_Get_context( const Scheduler_Control *scheduler )
{
  return (Scheduler_EDF_SMP_Context *) _Scheduler_Get_context( scheduler );
}

static inline Scheduler_EDF_SMP_Context *
_Scheduler_EDF_SMP_Get_self( Scheduler_Context *context )
{
  return (Scheduler_EDF_SMP_Context *) context;
}

static inline Scheduler_EDF_SMP_Node *
_Scheduler_EDF_SMP_Node_downcast( Scheduler_Node *node )
{
  return (Scheduler_EDF_SMP_Node *) node;
}

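/*
 * RB-tree comparator for the ready queues.  The left argument is the
 * insert priority, the right argument is an already enqueued node.  In EDF
 * the priority value encodes the deadline, so the tree minimum is the node
 * with the earliest deadline.
 */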
static inline bool _Scheduler_EDF_SMP_Priority_less_equal(
  const void        *left,
  const RBTree_Node *right
)
{
  const Priority_Control   *the_left;
  const Scheduler_SMP_Node *the_right;
  Priority_Control          prio_left;
  Priority_Control          prio_right;

  the_left = left;
  the_right = RTEMS_CONTAINER_OF( right, Scheduler_SMP_Node, Base.Node.RBTree );

  prio_left = *the_left;
  prio_right = the_right->priority;

  return prio_left <= prio_right;
}

void _Scheduler_EDF_SMP_Initialize( const Scheduler_Control *scheduler )
{
  Scheduler_EDF_SMP_Context *self =
    _Scheduler_EDF_SMP_Get_context( scheduler );

  _Scheduler_SMP_Initialize( &self->Base );
  _Chain_Initialize_empty( &self->Affine_queues );
  /* The ready queues are zero initialized and thus empty */
}

void _Scheduler_EDF_SMP_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_SMP_Node *smp_node;

  smp_node = _Scheduler_SMP_Node_downcast( node );
  _Scheduler_SMP_Node_initialize( scheduler, smp_node, the_thread, priority );
}

static inline void _Scheduler_EDF_SMP_Do_update(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Priority_Control   new_priority
)
{
  Scheduler_SMP_Node *smp_node;

  (void) context;

  smp_node = _Scheduler_SMP_Node_downcast( node );
  _Scheduler_SMP_Node_update_priority( smp_node, new_priority );
}

static inline bool _Scheduler_EDF_SMP_Has_ready( Scheduler_Context *context )
{
  Scheduler_EDF_SMP_Context *self = _Scheduler_EDF_SMP_Get_self( context );

  return !_RBTree_Is_empty( &self->Ready[ 0 ].Queue );
}

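/*
 * Compares two ready nodes which may stem from different ready queues.
 * Ties in priority are broken by the insertion generation assigned in
 * _Scheduler_EDF_SMP_Insert_ready().
 */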
static inline bool _Scheduler_EDF_SMP_Overall_less(
  const Scheduler_EDF_SMP_Node *left,
  const Scheduler_EDF_SMP_Node *right
)
{
  Priority_Control lp;
  Priority_Control rp;

  lp = left->Base.priority;
  rp = right->Base.priority;

  return lp < rp || ( lp == rp && left->generation < right->generation );
}

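/*
 * Lets the minimum of the specified affine ready queue compete with the
 * current highest ready node and returns the overall winner.
 */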
static inline Scheduler_EDF_SMP_Node *
_Scheduler_EDF_SMP_Challenge_highest_ready(
  Scheduler_EDF_SMP_Context *self,
  Scheduler_EDF_SMP_Node    *highest_ready,
  RBTree_Control            *ready_queue
)
{
  Scheduler_EDF_SMP_Node *other;

  other = (Scheduler_EDF_SMP_Node *) _RBTree_Minimum( ready_queue );
  _Assert( other != NULL );

  if ( _Scheduler_EDF_SMP_Overall_less( other, highest_ready ) ) {
    return other;
  }

  return highest_ready;
}

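/*
 * Starts with the minimum of the global ready queue and challenges it with
 * the affine ready queue of the filter node and with the non-empty affine
 * ready queues whose processor currently executes a non-affine node.
 */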
static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_highest_ready(
  Scheduler_Context *context,
  Scheduler_Node    *filter
)
{
  Scheduler_EDF_SMP_Context *self;
  Scheduler_EDF_SMP_Node    *highest_ready;
  Scheduler_EDF_SMP_Node    *node;
  uint32_t                   rqi;
  const Chain_Node          *tail;
  Chain_Node                *next;

  self = _Scheduler_EDF_SMP_Get_self( context );
  highest_ready = (Scheduler_EDF_SMP_Node *)
    _RBTree_Minimum( &self->Ready[ 0 ].Queue );
  _Assert( highest_ready != NULL );

  /*
   * The filter node is a scheduled node which is no longer on the scheduled
   * chain.  In case this is an affine thread, then we have to check the
   * corresponding affine ready queue.
   */

  node = (Scheduler_EDF_SMP_Node *) filter;
  rqi = node->ready_queue_index;

  if ( rqi != 0 && !_RBTree_Is_empty( &self->Ready[ rqi ].Queue ) ) {
    highest_ready = _Scheduler_EDF_SMP_Challenge_highest_ready(
      self,
      highest_ready,
      &self->Ready[ rqi ].Queue
    );
  }

  tail = _Chain_Immutable_tail( &self->Affine_queues );
  next = _Chain_First( &self->Affine_queues );

  while ( next != tail ) {
    Scheduler_EDF_SMP_Ready_queue *ready_queue;

    ready_queue = (Scheduler_EDF_SMP_Ready_queue *) next;
    highest_ready = _Scheduler_EDF_SMP_Challenge_highest_ready(
      self,
      highest_ready,
      &ready_queue->Queue
    );

    next = _Chain_Next( next );
  }

  return &highest_ready->Base.Base;
}

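/*
 * Ready queue index zero is the global ready queue of the nodes without a
 * processor affinity.  The ready queue of the processor with index i is
 * located at index i + 1 and additionally records the node scheduled on
 * this processor.
 */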
static inline void _Scheduler_EDF_SMP_Set_scheduled(
  Scheduler_EDF_SMP_Context *self,
  Scheduler_EDF_SMP_Node    *scheduled,
  const Per_CPU_Control     *cpu
)
{
  self->Ready[ _Per_CPU_Get_index( cpu ) + 1 ].scheduled = scheduled;
}

static inline Scheduler_EDF_SMP_Node *_Scheduler_EDF_SMP_Get_scheduled(
  const Scheduler_EDF_SMP_Context *self,
  uint32_t                         rqi
)
{
  return self->Ready[ rqi ].scheduled;
}

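/*
 * An affine filter node can only run on its dedicated processor.  If the
 * node currently scheduled there is affine as well, then it is the only
 * preemption candidate, otherwise fall back to the generic search for the
 * lowest scheduled node.
 */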
static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *filter_base
)
{
  Scheduler_EDF_SMP_Node *filter;
  uint32_t                rqi;

  filter = _Scheduler_EDF_SMP_Node_downcast( filter_base );
  rqi = filter->ready_queue_index;

  if ( rqi != 0 ) {
    Scheduler_EDF_SMP_Context *self;
    Scheduler_EDF_SMP_Node    *node;

    self = _Scheduler_EDF_SMP_Get_self( context );
    node = _Scheduler_EDF_SMP_Get_scheduled( self, rqi );

    if ( node->ready_queue_index > 0 ) {
      _Assert( node->ready_queue_index == rqi );
      return &node->Base.Base;
    }
  }

  return _Scheduler_SMP_Get_lowest_scheduled( context, filter_base );
}

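/*
 * Inserts the node into the ready queue selected by its ready queue index.
 * The generation counters record the insertion order: appends use an
 * increasing and prepends a decreasing counter, which yields FIFO
 * respectively LIFO behaviour among nodes of equal priority.
 */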
static inline void _Scheduler_EDF_SMP_Insert_ready(
  Scheduler_Context *context,
  Scheduler_Node    *node_base,
  Priority_Control   insert_priority
)
{
  Scheduler_EDF_SMP_Context     *self;
  Scheduler_EDF_SMP_Node        *node;
  uint32_t                       rqi;
  Scheduler_EDF_SMP_Ready_queue *ready_queue;
  int                            generation_index;
  int                            increment;
  int64_t                        generation;

  self = _Scheduler_EDF_SMP_Get_self( context );
  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
  rqi = node->ready_queue_index;
  generation_index = SCHEDULER_PRIORITY_IS_APPEND( insert_priority );
  increment = ( generation_index << 1 ) - 1;
  ready_queue = &self->Ready[ rqi ];

  generation = self->generations[ generation_index ];
  node->generation = generation;
  self->generations[ generation_index ] = generation + increment;

  _RBTree_Initialize_node( &node->Base.Base.Node.RBTree );
  _RBTree_Insert_inline(
    &ready_queue->Queue,
    &node->Base.Base.Node.RBTree,
    &insert_priority,
    _Scheduler_EDF_SMP_Priority_less_equal
  );

  if ( rqi != 0 && _Chain_Is_node_off_chain( &ready_queue->Node ) ) {
    Scheduler_EDF_SMP_Node *scheduled;

    scheduled = _Scheduler_EDF_SMP_Get_scheduled( self, rqi );

    if ( scheduled->ready_queue_index == 0 ) {
      _Chain_Append_unprotected( &self->Affine_queues, &ready_queue->Node );
    }
  }
}

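/*
 * Extracts the node from its ready queue.  An affine ready queue which
 * became empty is removed from the chain of non-empty affine queues.
 */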
static inline void _Scheduler_EDF_SMP_Extract_from_ready(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_extract
)
{
  Scheduler_EDF_SMP_Context     *self;
  Scheduler_EDF_SMP_Node        *node;
  uint32_t                       rqi;
  Scheduler_EDF_SMP_Ready_queue *ready_queue;

  self = _Scheduler_EDF_SMP_Get_self( context );
  node = _Scheduler_EDF_SMP_Node_downcast( node_to_extract );
  rqi = node->ready_queue_index;
  ready_queue = &self->Ready[ rqi ];

  _RBTree_Extract( &ready_queue->Queue, &node->Base.Base.Node.RBTree );
  _Chain_Initialize_node( &node->Base.Base.Node.Chain );

  if (
    rqi != 0
      && _RBTree_Is_empty( &ready_queue->Queue )
      && !_Chain_Is_node_off_chain( &ready_queue->Node )
  ) {
    _Chain_Extract_unprotected( &ready_queue->Node );
    _Chain_Set_off_chain( &ready_queue->Node );
  }
}

static inline void _Scheduler_EDF_SMP_Move_from_scheduled_to_ready(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled_to_ready
)
{
  Priority_Control insert_priority;

  _Scheduler_SMP_Extract_from_scheduled( context, scheduled_to_ready );
  insert_priority = _Scheduler_SMP_Node_priority( scheduled_to_ready );
  _Scheduler_EDF_SMP_Insert_ready(
    context,
    scheduled_to_ready,
    insert_priority
  );
}

static inline void _Scheduler_EDF_SMP_Move_from_ready_to_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *ready_to_scheduled
)
{
  Priority_Control insert_priority;

  _Scheduler_EDF_SMP_Extract_from_ready( context, ready_to_scheduled );
  insert_priority = _Scheduler_SMP_Node_priority( ready_to_scheduled );
  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
  _Scheduler_SMP_Insert_scheduled(
    context,
    ready_to_scheduled,
    insert_priority
  );
}

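/*
 * Assigns the processor to the scheduled node.  An affine node must run on
 * its dedicated processor; in case the victim processor differs, the
 * non-affine node currently scheduled on the dedicated processor moves
 * over to the victim processor first.
 */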
static inline void _Scheduler_EDF_SMP_Allocate_processor(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled_base,
  Scheduler_Node    *victim_base,
  Per_CPU_Control   *victim_cpu
)
{
  Scheduler_EDF_SMP_Context     *self;
  Scheduler_EDF_SMP_Node        *scheduled;
  uint32_t                       rqi;

  (void) victim_base;
  self = _Scheduler_EDF_SMP_Get_self( context );
  scheduled = _Scheduler_EDF_SMP_Node_downcast( scheduled_base );
  rqi = scheduled->ready_queue_index;

  if ( rqi != 0 ) {
    Scheduler_EDF_SMP_Ready_queue *ready_queue;
    Per_CPU_Control               *desired_cpu;

    ready_queue = &self->Ready[ rqi ];

    if ( !_Chain_Is_node_off_chain( &ready_queue->Node ) ) {
      _Chain_Extract_unprotected( &ready_queue->Node );
      _Chain_Set_off_chain( &ready_queue->Node );
    }

    desired_cpu = _Per_CPU_Get_by_index( rqi - 1 );

    if ( victim_cpu != desired_cpu ) {
      Scheduler_EDF_SMP_Node *node;

      node = _Scheduler_EDF_SMP_Get_scheduled( self, rqi );
      _Assert( node->ready_queue_index == 0 );
      _Scheduler_EDF_SMP_Set_scheduled( self, node, victim_cpu );
      _Scheduler_SMP_Allocate_processor_exact(
        context,
        &node->Base.Base,
        NULL,
        victim_cpu
      );
      victim_cpu = desired_cpu;
    }
  }

  _Scheduler_EDF_SMP_Set_scheduled( self, scheduled, victim_cpu );
  _Scheduler_SMP_Allocate_processor_exact(
    context,
    &scheduled->Base.Base,
    NULL,
    victim_cpu
  );
}

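/*
 * The extract from scheduled handler is passed explicitly to
 * _Scheduler_SMP_Block(), see the change description above.  The remaining
 * operations below likewise delegate to the generic SMP scheduler
 * framework.
 */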
void _Scheduler_EDF_SMP_Block(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Block(
    context,
    thread,
    node,
    _Scheduler_SMP_Extract_from_scheduled,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Get_highest_ready,
    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor
  );
}

static inline bool _Scheduler_EDF_SMP_Enqueue(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Priority_Control   insert_priority
)
{
  return _Scheduler_SMP_Enqueue(
    context,
    node,
    insert_priority,
    _Scheduler_SMP_Priority_less_equal,
    _Scheduler_EDF_SMP_Insert_ready,
    _Scheduler_SMP_Insert_scheduled,
    _Scheduler_EDF_SMP_Move_from_scheduled_to_ready,
    _Scheduler_EDF_SMP_Get_lowest_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor
  );
}

static inline bool _Scheduler_EDF_SMP_Enqueue_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Priority_Control   insert_priority
)
{
  return _Scheduler_SMP_Enqueue_scheduled(
    context,
    node,
    insert_priority,
    _Scheduler_SMP_Priority_less_equal,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Get_highest_ready,
    _Scheduler_EDF_SMP_Insert_ready,
    _Scheduler_SMP_Insert_scheduled,
    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor
  );
}

void _Scheduler_EDF_SMP_Unblock(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Unblock(
    context,
    thread,
    node,
    _Scheduler_EDF_SMP_Do_update,
    _Scheduler_EDF_SMP_Enqueue
  );
}

static inline bool _Scheduler_EDF_SMP_Do_ask_for_help(
  Scheduler_Context *context,
  Thread_Control    *the_thread,
  Scheduler_Node    *node
)
{
  return _Scheduler_SMP_Ask_for_help(
    context,
    the_thread,
    node,
    _Scheduler_SMP_Priority_less_equal,
    _Scheduler_EDF_SMP_Insert_ready,
    _Scheduler_SMP_Insert_scheduled,
    _Scheduler_EDF_SMP_Move_from_scheduled_to_ready,
    _Scheduler_EDF_SMP_Get_lowest_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor
  );
}

void _Scheduler_EDF_SMP_Update_priority(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Update_priority(
    context,
    thread,
    node,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Do_update,
    _Scheduler_EDF_SMP_Enqueue,
    _Scheduler_EDF_SMP_Enqueue_scheduled,
    _Scheduler_EDF_SMP_Do_ask_for_help
  );
}

bool _Scheduler_EDF_SMP_Ask_for_help(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Scheduler_EDF_SMP_Do_ask_for_help( context, the_thread, node );
}

void _Scheduler_EDF_SMP_Reconsider_help_request(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Reconsider_help_request(
    context,
    the_thread,
    node,
    _Scheduler_EDF_SMP_Extract_from_ready
  );
}

void _Scheduler_EDF_SMP_Withdraw_node(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  Thread_Scheduler_state   next_state
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Withdraw_node(
    context,
    the_thread,
    node,
    next_state,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Get_highest_ready,
    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor
  );
}

static inline void _Scheduler_EDF_SMP_Register_idle(
  Scheduler_Context *context,
  Scheduler_Node    *idle_base,
  Per_CPU_Control   *cpu
)
{
  Scheduler_EDF_SMP_Context *self;
  Scheduler_EDF_SMP_Node    *idle;

  self = _Scheduler_EDF_SMP_Get_self( context );
  idle = _Scheduler_EDF_SMP_Node_downcast( idle_base );
  _Scheduler_EDF_SMP_Set_scheduled( self, idle, cpu );
}

void _Scheduler_EDF_SMP_Add_processor(
  const Scheduler_Control *scheduler,
  Thread_Control          *idle
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Add_processor(
    context,
    idle,
    _Scheduler_EDF_SMP_Has_ready,
    _Scheduler_EDF_SMP_Enqueue_scheduled,
    _Scheduler_EDF_SMP_Register_idle
  );
}

Thread_Control *_Scheduler_EDF_SMP_Remove_processor(
  const Scheduler_Control *scheduler,
  Per_CPU_Control         *cpu
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Scheduler_SMP_Remove_processor(
    context,
    cpu,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Enqueue
  );
}

void _Scheduler_EDF_SMP_Yield(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Yield(
    context,
    thread,
    node,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Enqueue,
    _Scheduler_EDF_SMP_Enqueue_scheduled
  );
}

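/*
 * Helper for _Scheduler_EDF_SMP_Set_affinity(): the argument points to the
 * new ready queue index of the node.
 */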
static inline void _Scheduler_EDF_SMP_Do_set_affinity(
  Scheduler_Context *context,
  Scheduler_Node    *node_base,
  void              *arg
)
{
  Scheduler_EDF_SMP_Node *node;
  const uint32_t         *rqi;

  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
  rqi = arg;
  node->ready_queue_index = *rqi;
}

void _Scheduler_EDF_SMP_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control          *idle,
  Per_CPU_Control         *cpu
)
{
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Do_start_idle(
    context,
    idle,
    cpu,
    _Scheduler_EDF_SMP_Register_idle
  );
}

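/*
 * Maps the affinity set to a ready queue index: an affinity equal to the
 * set of online processors selects the global ready queue (index zero),
 * otherwise the node is bound to a single processor and the ready queue
 * index is the processor index plus one.
 */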
bool _Scheduler_EDF_SMP_Set_affinity(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node,
  const Processor_mask    *affinity
)
{
  Scheduler_Context *context;
  Processor_mask     local_affinity;
  uint32_t           rqi;

  context = _Scheduler_Get_context( scheduler );
  _Processor_mask_And( &local_affinity, &context->Processors, affinity );

  if ( _Processor_mask_Is_zero( &local_affinity ) ) {
    return false;
  }

  if ( _Processor_mask_Is_equal( affinity, &_SMP_Online_processors ) ) {
    rqi = 0;
  } else {
    rqi = _Processor_mask_Find_last_set( &local_affinity );
  }

  _Scheduler_SMP_Set_affinity(
    context,
    thread,
    node,
    &rqi,
    _Scheduler_EDF_SMP_Do_set_affinity,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Get_highest_ready,
    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
    _Scheduler_EDF_SMP_Enqueue,
    _Scheduler_EDF_SMP_Allocate_processor
  );

  return true;
}