source: rtems/cpukit/score/src/scheduleredfsmp.c @ 4edcede7

Last change on this file was 4edcede7, checked in by Sebastian Huber <sebastian.huber@…> on 10/27/17 at 07:03:31

score: Simplify SMP get lowest scheduled

There is no need to pass in the order relation since the scheduled
threads reside on an already ordered chain. The caller will decide what
to do with the lowest scheduled thread.

/**
 * @file
 *
 * @brief EDF SMP Scheduler Implementation
 *
 * @ingroup ScoreSchedulerSMPEDF
 */

/*
 * Copyright (c) 2017 embedded brains GmbH.
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
  #include "config.h"
#endif

#include <rtems/score/scheduleredfsmp.h>
#include <rtems/score/schedulersmpimpl.h>

static inline Scheduler_EDF_SMP_Context *
_Scheduler_EDF_SMP_Get_context( const Scheduler_Control *scheduler )
{
  return (Scheduler_EDF_SMP_Context *) _Scheduler_Get_context( scheduler );
}

static inline Scheduler_EDF_SMP_Context *
_Scheduler_EDF_SMP_Get_self( Scheduler_Context *context )
{
  return (Scheduler_EDF_SMP_Context *) context;
}

static inline Scheduler_EDF_SMP_Node *
_Scheduler_EDF_SMP_Node_downcast( Scheduler_Node *node )
{
  return (Scheduler_EDF_SMP_Node *) node;
}

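/*
 * Explanatory note: for EDF, the priority value reflects the deadline, so a
 * numerically smaller value means an earlier deadline.  The strict order
 * relation below is used for FIFO insertion and the non-strict variant for
 * LIFO insertion, see _Scheduler_EDF_SMP_Insert_ready().
 */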
static inline bool _Scheduler_EDF_SMP_Less(
  const void        *left,
  const RBTree_Node *right
)
{
  const Priority_Control   *the_left;
  const Scheduler_SMP_Node *the_right;
  Priority_Control          prio_left;
  Priority_Control          prio_right;

  the_left = left;
  the_right = RTEMS_CONTAINER_OF( right, Scheduler_SMP_Node, Base.Node.RBTree );

  prio_left = *the_left;
  prio_right = the_right->priority;

  return prio_left < prio_right;
}

static inline bool _Scheduler_EDF_SMP_Less_or_equal(
  const void        *left,
  const RBTree_Node *right
)
{
  const Priority_Control   *the_left;
  const Scheduler_SMP_Node *the_right;
  Priority_Control          prio_left;
  Priority_Control          prio_right;

  the_left = left;
  the_right = RTEMS_CONTAINER_OF( right, Scheduler_SMP_Node, Base.Node.RBTree );

  prio_left = *the_left;
  prio_right = the_right->priority;

  return prio_left <= prio_right;
}

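/*
 * Explanatory note on the ready queue layout: Ready[ 0 ] holds the threads
 * which may run on any processor, while Ready[ i + 1 ] holds the threads
 * pinned to processor i, see _Scheduler_EDF_SMP_Set_scheduled() and
 * _Scheduler_EDF_SMP_Set_affinity().
 */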
void _Scheduler_EDF_SMP_Initialize( const Scheduler_Control *scheduler )
{
  Scheduler_EDF_SMP_Context *self =
    _Scheduler_EDF_SMP_Get_context( scheduler );

  _Scheduler_SMP_Initialize( &self->Base );
  _Chain_Initialize_empty( &self->Affine_queues );
  /* The ready queues are zero initialized and thus empty */
}

void _Scheduler_EDF_SMP_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_SMP_Node *smp_node;

  smp_node = _Scheduler_SMP_Node_downcast( node );
  _Scheduler_SMP_Node_initialize( scheduler, smp_node, the_thread, priority );
}

static inline void _Scheduler_EDF_SMP_Do_update(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Priority_Control   new_priority
)
{
  Scheduler_SMP_Node *smp_node;

  (void) context;

  smp_node = _Scheduler_SMP_Node_downcast( node );
  _Scheduler_SMP_Node_update_priority( smp_node, new_priority );
}

static inline bool _Scheduler_EDF_SMP_Has_ready( Scheduler_Context *context )
{
  Scheduler_EDF_SMP_Context *self = _Scheduler_EDF_SMP_Get_self( context );

  return !_RBTree_Is_empty( &self->Ready[ 0 ].Queue );
}

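/*
 * Establishes a total order across the ready queues: earlier deadline
 * (smaller priority value) first, ties broken by the insertion generation,
 * which preserves the FIFO respectively LIFO ordering of equal-priority
 * nodes, see _Scheduler_EDF_SMP_Insert_ready().
 */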
static inline bool _Scheduler_EDF_SMP_Overall_less(
  const Scheduler_EDF_SMP_Node *left,
  const Scheduler_EDF_SMP_Node *right
)
{
  Priority_Control lp;
  Priority_Control rp;

  lp = left->Base.priority;
  rp = right->Base.priority;

  return lp < rp || ( lp == rp && left->generation < right->generation );
}

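/*
 * Returns the minimum of the specified ready queue if it precedes the
 * current candidate in the overall order, otherwise returns the current
 * candidate.
 */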
static inline Scheduler_EDF_SMP_Node *
_Scheduler_EDF_SMP_Challenge_highest_ready(
  Scheduler_EDF_SMP_Context *self,
  Scheduler_EDF_SMP_Node    *highest_ready,
  RBTree_Control            *ready_queue
)
{
  Scheduler_EDF_SMP_Node *other;

  other = (Scheduler_EDF_SMP_Node *) _RBTree_Minimum( ready_queue );
  _Assert( other != NULL );

  if ( _Scheduler_EDF_SMP_Overall_less( other, highest_ready ) ) {
    return other;
  }

  return highest_ready;
}

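/*
 * Explanatory note: the highest ready node is the minimum of the general
 * ready queue, challenged by the affine ready queue of the filter node (if
 * any) and by the queues on the Affine_queues chain, which contains the
 * non-empty affine ready queues whose processor currently executes a thread
 * without an affinity restriction.
 */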
static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_highest_ready(
  Scheduler_Context *context,
  Scheduler_Node    *filter
)
{
  Scheduler_EDF_SMP_Context *self;
  Scheduler_EDF_SMP_Node    *highest_ready;
  Scheduler_EDF_SMP_Node    *node;
  uint32_t                   rqi;
  const Chain_Node          *tail;
  Chain_Node                *next;

  self = _Scheduler_EDF_SMP_Get_self( context );
  highest_ready = (Scheduler_EDF_SMP_Node *)
    _RBTree_Minimum( &self->Ready[ 0 ].Queue );
  _Assert( highest_ready != NULL );

  /*
   * The filter node is a scheduled node which is no longer on the scheduled
   * chain.  If it is an affine thread, we have to check the corresponding
   * affine ready queue as well.
   */

  node = (Scheduler_EDF_SMP_Node *) filter;
  rqi = node->ready_queue_index;

  if ( rqi != 0 && !_RBTree_Is_empty( &self->Ready[ rqi ].Queue ) ) {
    highest_ready = _Scheduler_EDF_SMP_Challenge_highest_ready(
      self,
      highest_ready,
      &self->Ready[ rqi ].Queue
    );
  }

  tail = _Chain_Immutable_tail( &self->Affine_queues );
  next = _Chain_First( &self->Affine_queues );

  while ( next != tail ) {
    Scheduler_EDF_SMP_Ready_queue *ready_queue;

    ready_queue = (Scheduler_EDF_SMP_Ready_queue *) next;
    highest_ready = _Scheduler_EDF_SMP_Challenge_highest_ready(
      self,
      highest_ready,
      &ready_queue->Queue
    );

    next = _Chain_Next( next );
  }

  return &highest_ready->Base.Base;
}

static inline void _Scheduler_EDF_SMP_Set_scheduled(
  Scheduler_EDF_SMP_Context *self,
  Scheduler_EDF_SMP_Node    *scheduled,
  const Per_CPU_Control     *cpu
)
{
  self->Ready[ _Per_CPU_Get_index( cpu ) + 1 ].scheduled = scheduled;
}

static inline Scheduler_EDF_SMP_Node *_Scheduler_EDF_SMP_Get_scheduled(
  const Scheduler_EDF_SMP_Context *self,
  uint32_t                         rqi
)
{
  return self->Ready[ rqi ].scheduled;
}

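/*
 * Explanatory note: an affine filter node may only preempt the thread
 * scheduled on its pinned processor, provided that thread is pinned as
 * well.  Otherwise, we fall back to the generic SMP implementation, which
 * picks the lowest scheduled thread from the already ordered chain of
 * scheduled threads (see the commit message above).
 */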
static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *filter_base
)
{
  Scheduler_EDF_SMP_Node *filter;
  uint32_t                rqi;

  filter = _Scheduler_EDF_SMP_Node_downcast( filter_base );
  rqi = filter->ready_queue_index;

  if ( rqi != 0 ) {
    Scheduler_EDF_SMP_Context *self;
    Scheduler_EDF_SMP_Node    *node;

    self = _Scheduler_EDF_SMP_Get_self( context );
    node = _Scheduler_EDF_SMP_Get_scheduled( self, rqi );

    if ( node->ready_queue_index > 0 ) {
      _Assert( node->ready_queue_index == rqi );
      return &node->Base.Base;
    }
  }

  return _Scheduler_SMP_Get_lowest_scheduled( context, filter_base );
}

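/*
 * The generation counter is the tie-breaker for equal priorities.  FIFO
 * insertion uses generations[ 0 ] counting upwards together with the strict
 * order relation, so earlier insertions rank first; LIFO insertion uses
 * generations[ 1 ] counting downwards together with the non-strict relation,
 * so later insertions rank first.  If the affected affine ready queue is not
 * yet on the Affine_queues chain and its processor currently executes a
 * thread without an affinity restriction, then the queue is appended to the
 * chain so that _Scheduler_EDF_SMP_Get_highest_ready() takes it into
 * account.
 */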
static inline void _Scheduler_EDF_SMP_Insert_ready(
  Scheduler_Context *context,
  Scheduler_Node    *node_base,
  size_t             generation_index,
  int                increment,
  bool            ( *less )( const void *, const RBTree_Node * )
)
{
  Scheduler_EDF_SMP_Context     *self;
  Scheduler_EDF_SMP_Node        *node;
  uint32_t                       rqi;
  Scheduler_EDF_SMP_Ready_queue *ready_queue;
  int64_t                        generation;

  self = _Scheduler_EDF_SMP_Get_self( context );
  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
  rqi = node->ready_queue_index;
  ready_queue = &self->Ready[ rqi ];

  generation = self->generations[ generation_index ];
  node->generation = generation;
  self->generations[ generation_index ] = generation + increment;

  _RBTree_Initialize_node( &node->Base.Base.Node.RBTree );
  _RBTree_Insert_inline(
    &ready_queue->Queue,
    &node->Base.Base.Node.RBTree,
    &node->Base.priority,
    less
  );

  if ( rqi != 0 && _Chain_Is_node_off_chain( &ready_queue->Node ) ) {
    Scheduler_EDF_SMP_Node *scheduled;

    scheduled = _Scheduler_EDF_SMP_Get_scheduled( self, rqi );

    if ( scheduled->ready_queue_index == 0 ) {
      _Chain_Append_unprotected( &self->Affine_queues, &ready_queue->Node );
    }
  }
}

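/*
 * Once an affine ready queue becomes empty, it no longer needs to be
 * visible to _Scheduler_EDF_SMP_Get_highest_ready(), so it is removed from
 * the Affine_queues chain.
 */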
static inline void _Scheduler_EDF_SMP_Extract_from_ready(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_extract
)
{
  Scheduler_EDF_SMP_Context     *self;
  Scheduler_EDF_SMP_Node        *node;
  uint32_t                       rqi;
  Scheduler_EDF_SMP_Ready_queue *ready_queue;

  self = _Scheduler_EDF_SMP_Get_self( context );
  node = _Scheduler_EDF_SMP_Node_downcast( node_to_extract );
  rqi = node->ready_queue_index;
  ready_queue = &self->Ready[ rqi ];

  _RBTree_Extract( &ready_queue->Queue, &node->Base.Base.Node.RBTree );
  _Chain_Initialize_node( &node->Base.Base.Node.Chain );

  if (
    rqi != 0
      && _RBTree_Is_empty( &ready_queue->Queue )
      && !_Chain_Is_node_off_chain( &ready_queue->Node )
  ) {
    _Chain_Extract_unprotected( &ready_queue->Node );
    _Chain_Set_off_chain( &ready_queue->Node );
  }
}

static inline void _Scheduler_EDF_SMP_Move_from_scheduled_to_ready(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled_to_ready
)
{
  _Chain_Extract_unprotected( &scheduled_to_ready->Node.Chain );
  _Scheduler_EDF_SMP_Insert_ready(
    context,
    scheduled_to_ready,
    0,
    1,
    _Scheduler_EDF_SMP_Less
  );
}

static inline void _Scheduler_EDF_SMP_Move_from_ready_to_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *ready_to_scheduled
)
{
  _Scheduler_EDF_SMP_Extract_from_ready( context, ready_to_scheduled );
  _Scheduler_SMP_Insert_scheduled_fifo( context, ready_to_scheduled );
}

static inline void _Scheduler_EDF_SMP_Insert_ready_lifo(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert
)
{
  _Scheduler_EDF_SMP_Insert_ready(
    context,
    node_to_insert,
    1,
    -1,
    _Scheduler_EDF_SMP_Less_or_equal
  );
}

static inline void _Scheduler_EDF_SMP_Insert_ready_fifo(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert
)
{
  _Scheduler_EDF_SMP_Insert_ready(
    context,
    node_to_insert,
    0,
    1,
    _Scheduler_EDF_SMP_Less
  );
}

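/*
 * If a thread pinned to a processor is allocated a different processor,
 * then the thread currently scheduled on the pinned processor cannot be
 * pinned itself (see the _Assert() below) and the two are swapped: the
 * unpinned thread moves to the victim processor and the pinned thread takes
 * its own processor.
 */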
static inline void _Scheduler_EDF_SMP_Allocate_processor(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled_base,
  Scheduler_Node    *victim_base,
  Per_CPU_Control   *victim_cpu
)
{
  Scheduler_EDF_SMP_Context     *self;
  Scheduler_EDF_SMP_Node        *scheduled;
  uint32_t                       rqi;

  (void) victim_base;
  self = _Scheduler_EDF_SMP_Get_self( context );
  scheduled = _Scheduler_EDF_SMP_Node_downcast( scheduled_base );
  rqi = scheduled->ready_queue_index;

  if ( rqi != 0 ) {
    Scheduler_EDF_SMP_Ready_queue *ready_queue;
    Per_CPU_Control               *desired_cpu;

    ready_queue = &self->Ready[ rqi ];

    if ( !_Chain_Is_node_off_chain( &ready_queue->Node ) ) {
      _Chain_Extract_unprotected( &ready_queue->Node );
      _Chain_Set_off_chain( &ready_queue->Node );
    }

    desired_cpu = _Per_CPU_Get_by_index( rqi - 1 );

    if ( victim_cpu != desired_cpu ) {
      Scheduler_EDF_SMP_Node *node;

      node = _Scheduler_EDF_SMP_Get_scheduled( self, rqi );
      _Assert( node->ready_queue_index == 0 );
      _Scheduler_EDF_SMP_Set_scheduled( self, node, victim_cpu );
      _Scheduler_SMP_Allocate_processor_exact(
        context,
        &node->Base.Base,
        NULL,
        victim_cpu
      );
      victim_cpu = desired_cpu;
    }
  }

  _Scheduler_EDF_SMP_Set_scheduled( self, scheduled, victim_cpu );
  _Scheduler_SMP_Allocate_processor_exact(
    context,
    &scheduled->Base.Base,
    NULL,
    victim_cpu
  );
}

void _Scheduler_EDF_SMP_Block(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Block(
    context,
    thread,
    node,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Get_highest_ready,
    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor
  );
}

static inline bool _Scheduler_EDF_SMP_Enqueue_ordered(
  Scheduler_Context    *context,
  Scheduler_Node       *node,
  Chain_Node_order      order,
  Scheduler_SMP_Insert  insert_ready,
  Scheduler_SMP_Insert  insert_scheduled
)
{
  return _Scheduler_SMP_Enqueue_ordered(
    context,
    node,
    order,
    insert_ready,
    insert_scheduled,
    _Scheduler_EDF_SMP_Move_from_scheduled_to_ready,
    _Scheduler_EDF_SMP_Get_lowest_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor
  );
}

static inline bool _Scheduler_EDF_SMP_Enqueue_lifo(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  return _Scheduler_EDF_SMP_Enqueue_ordered(
    context,
    node,
    _Scheduler_SMP_Insert_priority_lifo_order,
    _Scheduler_EDF_SMP_Insert_ready_lifo,
    _Scheduler_SMP_Insert_scheduled_lifo
  );
}

static inline bool _Scheduler_EDF_SMP_Enqueue_fifo(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  return _Scheduler_EDF_SMP_Enqueue_ordered(
    context,
    node,
    _Scheduler_SMP_Insert_priority_fifo_order,
    _Scheduler_EDF_SMP_Insert_ready_fifo,
    _Scheduler_SMP_Insert_scheduled_fifo
  );
}

static inline bool _Scheduler_EDF_SMP_Enqueue_scheduled_ordered(
  Scheduler_Context    *context,
  Scheduler_Node       *node,
  Chain_Node_order      order,
  Scheduler_SMP_Insert  insert_ready,
  Scheduler_SMP_Insert  insert_scheduled
)
{
  return _Scheduler_SMP_Enqueue_scheduled_ordered(
    context,
    node,
    order,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Get_highest_ready,
    insert_ready,
    insert_scheduled,
    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor
  );
}

static inline bool _Scheduler_EDF_SMP_Enqueue_scheduled_lifo(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  return _Scheduler_EDF_SMP_Enqueue_scheduled_ordered(
    context,
    node,
    _Scheduler_SMP_Insert_priority_lifo_order,
    _Scheduler_EDF_SMP_Insert_ready_lifo,
    _Scheduler_SMP_Insert_scheduled_lifo
  );
}

static inline bool _Scheduler_EDF_SMP_Enqueue_scheduled_fifo(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  return _Scheduler_EDF_SMP_Enqueue_scheduled_ordered(
    context,
    node,
    _Scheduler_SMP_Insert_priority_fifo_order,
    _Scheduler_EDF_SMP_Insert_ready_fifo,
    _Scheduler_SMP_Insert_scheduled_fifo
  );
}

void _Scheduler_EDF_SMP_Unblock(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Unblock(
    context,
    thread,
    node,
    _Scheduler_EDF_SMP_Do_update,
    _Scheduler_EDF_SMP_Enqueue_fifo
  );
}

static inline bool _Scheduler_EDF_SMP_Do_ask_for_help(
  Scheduler_Context *context,
  Thread_Control    *the_thread,
  Scheduler_Node    *node
)
{
  return _Scheduler_SMP_Ask_for_help(
    context,
    the_thread,
    node,
    _Scheduler_SMP_Insert_priority_lifo_order,
    _Scheduler_EDF_SMP_Insert_ready_lifo,
    _Scheduler_SMP_Insert_scheduled_lifo,
    _Scheduler_EDF_SMP_Move_from_scheduled_to_ready,
    _Scheduler_EDF_SMP_Get_lowest_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor
  );
}

void _Scheduler_EDF_SMP_Update_priority(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Update_priority(
    context,
    thread,
    node,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Do_update,
    _Scheduler_EDF_SMP_Enqueue_fifo,
    _Scheduler_EDF_SMP_Enqueue_lifo,
    _Scheduler_EDF_SMP_Enqueue_scheduled_fifo,
    _Scheduler_EDF_SMP_Enqueue_scheduled_lifo,
    _Scheduler_EDF_SMP_Do_ask_for_help
  );
}

bool _Scheduler_EDF_SMP_Ask_for_help(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Scheduler_EDF_SMP_Do_ask_for_help( context, the_thread, node );
}

void _Scheduler_EDF_SMP_Reconsider_help_request(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Reconsider_help_request(
    context,
    the_thread,
    node,
    _Scheduler_EDF_SMP_Extract_from_ready
  );
}

void _Scheduler_EDF_SMP_Withdraw_node(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  Thread_Scheduler_state   next_state
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Withdraw_node(
    context,
    the_thread,
    node,
    next_state,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Get_highest_ready,
    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor
  );
}

static inline void _Scheduler_EDF_SMP_Register_idle(
  Scheduler_Context *context,
  Scheduler_Node    *idle_base,
  Per_CPU_Control   *cpu
)
{
  Scheduler_EDF_SMP_Context *self;
  Scheduler_EDF_SMP_Node    *idle;

  self = _Scheduler_EDF_SMP_Get_self( context );
  idle = _Scheduler_EDF_SMP_Node_downcast( idle_base );
  _Scheduler_EDF_SMP_Set_scheduled( self, idle, cpu );
}

void _Scheduler_EDF_SMP_Add_processor(
  const Scheduler_Control *scheduler,
  Thread_Control          *idle
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Add_processor(
    context,
    idle,
    _Scheduler_EDF_SMP_Has_ready,
    _Scheduler_EDF_SMP_Enqueue_scheduled_fifo,
    _Scheduler_EDF_SMP_Register_idle
  );
}

Thread_Control *_Scheduler_EDF_SMP_Remove_processor(
  const Scheduler_Control *scheduler,
  Per_CPU_Control         *cpu
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Scheduler_SMP_Remove_processor(
    context,
    cpu,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Enqueue_fifo
  );
}

void _Scheduler_EDF_SMP_Yield(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Yield(
    context,
    thread,
    node,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Enqueue_fifo,
    _Scheduler_EDF_SMP_Enqueue_scheduled_fifo
  );
}

static inline void _Scheduler_EDF_SMP_Do_set_affinity(
  Scheduler_Context *context,
  Scheduler_Node    *node_base,
  void              *arg
)
{
  Scheduler_EDF_SMP_Node *node;
  const uint32_t         *rqi;

  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
  rqi = arg;
  node->ready_queue_index = *rqi;
}

void _Scheduler_EDF_SMP_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control          *idle,
  Per_CPU_Control         *cpu
)
{
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Do_start_idle(
    context,
    idle,
    cpu,
    _Scheduler_EDF_SMP_Register_idle
  );
}

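/*
 * Explanatory note: if the affinity set covers all online processors, the
 * thread is unrestricted and uses ready queue index zero.  Otherwise it is
 * placed in the affine ready queue of the highest-indexed processor of the
 * affinity set owned by this scheduler; _Processor_mask_Find_last_set()
 * yields a one-based index here, which matches the Ready[] layout (compare
 * the rqi - 1 in _Scheduler_EDF_SMP_Allocate_processor()).
 */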
bool _Scheduler_EDF_SMP_Set_affinity(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node,
  const Processor_mask    *affinity
)
{
  Scheduler_Context *context;
  Processor_mask     a;
  Processor_mask     b;
  uint32_t           rqi;

  context = _Scheduler_Get_context( scheduler );
  _Processor_mask_And( &a, &context->Processors, affinity );

  if ( _Processor_mask_Count( &a ) == 0 ) {
    return false;
  }

  _Processor_mask_And( &b, &_SMP_Online_processors, affinity );

  if ( _Processor_mask_Count( &b ) == _SMP_Processor_count ) {
    rqi = 0;
  } else {
    rqi = _Processor_mask_Find_last_set( &a );
  }

  _Scheduler_SMP_Set_affinity(
    context,
    thread,
    node,
    &rqi,
    _Scheduler_EDF_SMP_Do_set_affinity,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Get_highest_ready,
    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
    _Scheduler_EDF_SMP_Enqueue_fifo,
    _Scheduler_EDF_SMP_Allocate_processor
  );

  return true;
}