source: rtems/cpukit/score/src/scheduleredfsmp.c @ 34487537

Last change on this file: 34487537, checked in by Sebastian Huber <sebastian.huber@…> on 07/04/17 at 07:57:30

score: Add simple affinity support to EDF SMP

Update #3059.
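
With this change, a thread's affinity is either the set of all online processors or, effectively, a single processor: besides the general ready queue, each processor gets a dedicated affine ready queue (see _Scheduler_EDF_SMP_Set_affinity() below, which reduces any other affinity set to its last set processor).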

/**
 * @file
 *
 * @brief EDF SMP Scheduler Implementation
 *
 * @ingroup ScoreSchedulerSMPEDF
 */

/*
 * Copyright (c) 2017 embedded brains GmbH.
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
  #include "config.h"
#endif

#include <rtems/score/scheduleredfsmp.h>
#include <rtems/score/schedulersmpimpl.h>

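/*
 * Note on the ready queue layout (derived from the code below): Ready[ 0 ]
 * holds the ready threads without a processor affinity, while
 * Ready[ i + 1 ] holds the ready threads pinned to processor i.  A ready
 * queue index (rqi) of zero therefore means "no affinity".
 */
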
static inline Scheduler_EDF_SMP_Context *
_Scheduler_EDF_SMP_Get_context( const Scheduler_Control *scheduler )
{
  return (Scheduler_EDF_SMP_Context *) _Scheduler_Get_context( scheduler );
}

static inline Scheduler_EDF_SMP_Context *
_Scheduler_EDF_SMP_Get_self( Scheduler_Context *context )
{
  return (Scheduler_EDF_SMP_Context *) context;
}

static inline Scheduler_EDF_SMP_Node *
_Scheduler_EDF_SMP_Node_downcast( Scheduler_Node *node )
{
  return (Scheduler_EDF_SMP_Node *) node;
}

static inline bool _Scheduler_EDF_SMP_Less(
  const void        *left,
  const RBTree_Node *right
)
{
  const Priority_Control   *the_left;
  const Scheduler_SMP_Node *the_right;
  Priority_Control          prio_left;
  Priority_Control          prio_right;

  the_left = left;
  the_right = RTEMS_CONTAINER_OF( right, Scheduler_SMP_Node, Base.Node.RBTree );

  prio_left = *the_left;
  prio_right = the_right->priority;

  return prio_left < prio_right;
}

static inline bool _Scheduler_EDF_SMP_Less_or_equal(
  const void        *left,
  const RBTree_Node *right
)
{
  const Priority_Control   *the_left;
  const Scheduler_SMP_Node *the_right;
  Priority_Control          prio_left;
  Priority_Control          prio_right;

  the_left = left;
  the_right = RTEMS_CONTAINER_OF( right, Scheduler_SMP_Node, Base.Node.RBTree );

  prio_left = *the_left;
  prio_right = the_right->priority;

  return prio_left <= prio_right;
}

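/*
 * The two comparators above differ only in the treatment of equal
 * priorities: _Scheduler_EDF_SMP_Less() places a new node behind nodes of
 * equal priority (FIFO insert), whereas _Scheduler_EDF_SMP_Less_or_equal()
 * places it in front of them (LIFO insert).
 */
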
void _Scheduler_EDF_SMP_Initialize( const Scheduler_Control *scheduler )
{
  Scheduler_EDF_SMP_Context *self =
    _Scheduler_EDF_SMP_Get_context( scheduler );

  _Scheduler_SMP_Initialize( &self->Base );
  _Chain_Initialize_empty( &self->Affine_queues );
  /* The ready queues are zero initialized and thus empty */
}

void _Scheduler_EDF_SMP_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_SMP_Node *smp_node;

  smp_node = _Scheduler_SMP_Node_downcast( node );
  _Scheduler_SMP_Node_initialize( scheduler, smp_node, the_thread, priority );
}

static inline void _Scheduler_EDF_SMP_Do_update(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Priority_Control   new_priority
)
{
  Scheduler_SMP_Node *smp_node;

  (void) context;

  smp_node = _Scheduler_SMP_Node_downcast( node );
  _Scheduler_SMP_Node_update_priority( smp_node, new_priority );
}

static inline bool _Scheduler_EDF_SMP_Has_ready( Scheduler_Context *context )
{
  Scheduler_EDF_SMP_Context *self = _Scheduler_EDF_SMP_Get_self( context );

  return !_RBTree_Is_empty( &self->Ready[ 0 ].Queue );
}

static inline bool _Scheduler_EDF_SMP_Overall_less(
  const Scheduler_EDF_SMP_Node *left,
  const Scheduler_EDF_SMP_Node *right
)
{
  Priority_Control lp;
  Priority_Control rp;

  lp = left->Base.priority;
  rp = right->Base.priority;

  return lp < rp || ( lp == rp && left->generation < right->generation );
}

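/*
 * The generation numbers make the order across distinct ready queues
 * unambiguous: among nodes of equal priority, the node with the smaller
 * generation wins.  FIFO inserts use ascending and LIFO inserts descending
 * generations (see _Scheduler_EDF_SMP_Insert_ready()).
 */
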
static inline Scheduler_EDF_SMP_Node *
_Scheduler_EDF_SMP_Challenge_highest_ready(
  Scheduler_EDF_SMP_Context *self,
  Scheduler_EDF_SMP_Node    *highest_ready,
  RBTree_Control            *ready_queue
)
{
  Scheduler_EDF_SMP_Node *other;

  other = (Scheduler_EDF_SMP_Node *) _RBTree_Minimum( ready_queue );
  _Assert( other != NULL );

  if ( _Scheduler_EDF_SMP_Overall_less( other, highest_ready ) ) {
    return other;
  }

  return highest_ready;
}

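/*
 * The Affine_queues chain contains exactly the non-empty affine ready
 * queues whose processor currently executes a thread without affinity
 * (maintained by _Scheduler_EDF_SMP_Insert_ready(),
 * _Scheduler_EDF_SMP_Extract_from_ready(), and
 * _Scheduler_EDF_SMP_Allocate_processor()).  Only these queues may provide
 * a better candidate than the general ready queue, so the search below is
 * limited to them and to the queue of the filter node.
 */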
static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_highest_ready(
  Scheduler_Context *context,
  Scheduler_Node    *filter
)
{
  Scheduler_EDF_SMP_Context *self;
  Scheduler_EDF_SMP_Node    *highest_ready;
  Scheduler_EDF_SMP_Node    *node;
  uint32_t                   rqi;
  const Chain_Node          *tail;
  Chain_Node                *next;

  self = _Scheduler_EDF_SMP_Get_self( context );
  highest_ready = (Scheduler_EDF_SMP_Node *)
    _RBTree_Minimum( &self->Ready[ 0 ].Queue );
  _Assert( highest_ready != NULL );

  /*
   * The filter node is a scheduled node which is no longer on the scheduled
   * chain.  In case this is an affine thread, then we have to check the
   * corresponding affine ready queue.
   */

  node = (Scheduler_EDF_SMP_Node *) filter;
  rqi = node->ready_queue_index;

  if ( rqi != 0 && !_RBTree_Is_empty( &self->Ready[ rqi ].Queue ) ) {
    highest_ready = _Scheduler_EDF_SMP_Challenge_highest_ready(
      self,
      highest_ready,
      &self->Ready[ rqi ].Queue
    );
  }

  tail = _Chain_Immutable_tail( &self->Affine_queues );
  next = _Chain_First( &self->Affine_queues );

  while ( next != tail ) {
    Scheduler_EDF_SMP_Ready_queue *ready_queue;

    ready_queue = (Scheduler_EDF_SMP_Ready_queue *) next;
    highest_ready = _Scheduler_EDF_SMP_Challenge_highest_ready(
      self,
      highest_ready,
      &ready_queue->Queue
    );

    next = _Chain_Next( next );
  }

  return &highest_ready->Base.Base;
}

static inline void _Scheduler_EDF_SMP_Set_scheduled(
  Scheduler_EDF_SMP_Context *self,
  Scheduler_EDF_SMP_Node    *scheduled,
  const Per_CPU_Control     *cpu
)
{
  self->Ready[ _Per_CPU_Get_index( cpu ) + 1 ].scheduled = scheduled;
}

static inline Scheduler_EDF_SMP_Node *_Scheduler_EDF_SMP_Get_scheduled(
  const Scheduler_EDF_SMP_Context *self,
  uint32_t                         rqi
)
{
  return self->Ready[ rqi ].scheduled;
}

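/*
 * For a thread pinned to a processor, the only scheduled node it may
 * preempt is the one executing on exactly that processor.  If that node is
 * itself affine, it must belong to the same ready queue and is returned
 * directly; otherwise the generic lowest scheduled search applies.
 */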
static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *filter_base,
  Chain_Node_order   order
)
{
  Scheduler_EDF_SMP_Node *filter;
  uint32_t                rqi;

  filter = _Scheduler_EDF_SMP_Node_downcast( filter_base );
  rqi = filter->ready_queue_index;

  if ( rqi != 0 ) {
    Scheduler_EDF_SMP_Context *self;
    Scheduler_EDF_SMP_Node    *node;

    self = _Scheduler_EDF_SMP_Get_self( context );
    node = _Scheduler_EDF_SMP_Get_scheduled( self, rqi );

    if ( node->ready_queue_index > 0 ) {
      _Assert( node->ready_queue_index == rqi );
      return &node->Base.Base;
    }
  }

  return _Scheduler_SMP_Get_lowest_scheduled( context, filter_base, order );
}

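/*
 * The generation_index selects one of two counters: index 0 counts upwards
 * and is used by FIFO inserts (increment 1), index 1 counts downwards and
 * is used by LIFO inserts (increment -1).  Together with the comparators
 * this yields FIFO respectively LIFO ordering among equal priorities.
 */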
static inline void _Scheduler_EDF_SMP_Insert_ready(
  Scheduler_Context *context,
  Scheduler_Node    *node_base,
  size_t             generation_index,
  int                increment,
  bool            ( *less )( const void *, const RBTree_Node * )
)
{
  Scheduler_EDF_SMP_Context     *self;
  Scheduler_EDF_SMP_Node        *node;
  uint32_t                       rqi;
  Scheduler_EDF_SMP_Ready_queue *ready_queue;
  int64_t                        generation;

  self = _Scheduler_EDF_SMP_Get_self( context );
  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
  rqi = node->ready_queue_index;
  ready_queue = &self->Ready[ rqi ];

  generation = self->generations[ generation_index ];
  node->generation = generation;
  self->generations[ generation_index ] = generation + increment;

  _RBTree_Initialize_node( &node->Base.Base.Node.RBTree );
  _RBTree_Insert_inline(
    &ready_queue->Queue,
    &node->Base.Base.Node.RBTree,
    &node->Base.priority,
    less
  );

  if ( rqi != 0 && _Chain_Is_node_off_chain( &ready_queue->Node ) ) {
    Scheduler_EDF_SMP_Node *scheduled;

    scheduled = _Scheduler_EDF_SMP_Get_scheduled( self, rqi );

    if ( scheduled->ready_queue_index == 0 ) {
      _Chain_Append_unprotected( &self->Affine_queues, &ready_queue->Node );
    }
  }
}

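/*
 * Extracting the last node of an affine ready queue takes the queue off
 * the Affine_queues chain again, which preserves the invariant used by
 * _Scheduler_EDF_SMP_Get_highest_ready().
 */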
static inline void _Scheduler_EDF_SMP_Extract_from_ready(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_extract
)
{
  Scheduler_EDF_SMP_Context     *self;
  Scheduler_EDF_SMP_Node        *node;
  uint32_t                       rqi;
  Scheduler_EDF_SMP_Ready_queue *ready_queue;

  self = _Scheduler_EDF_SMP_Get_self( context );
  node = _Scheduler_EDF_SMP_Node_downcast( node_to_extract );
  rqi = node->ready_queue_index;
  ready_queue = &self->Ready[ rqi ];

  _RBTree_Extract( &ready_queue->Queue, &node->Base.Base.Node.RBTree );
  _Chain_Initialize_node( &node->Base.Base.Node.Chain );

  if (
    rqi != 0
      && _RBTree_Is_empty( &ready_queue->Queue )
      && !_Chain_Is_node_off_chain( &ready_queue->Node )
  ) {
    _Chain_Extract_unprotected( &ready_queue->Node );
    _Chain_Set_off_chain( &ready_queue->Node );
  }
}

static inline void _Scheduler_EDF_SMP_Move_from_scheduled_to_ready(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled_to_ready
)
{
  _Chain_Extract_unprotected( &scheduled_to_ready->Node.Chain );
  _Scheduler_EDF_SMP_Insert_ready(
    context,
    scheduled_to_ready,
    0,
    1,
    _Scheduler_EDF_SMP_Less
  );
}

static inline void _Scheduler_EDF_SMP_Move_from_ready_to_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *ready_to_scheduled
)
{
  _Scheduler_EDF_SMP_Extract_from_ready( context, ready_to_scheduled );
  _Scheduler_SMP_Insert_scheduled_fifo( context, ready_to_scheduled );
}

static inline void _Scheduler_EDF_SMP_Insert_ready_lifo(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert
)
{
  _Scheduler_EDF_SMP_Insert_ready(
    context,
    node_to_insert,
    1,
    -1,
    _Scheduler_EDF_SMP_Less_or_equal
  );
}

static inline void _Scheduler_EDF_SMP_Insert_ready_fifo(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert
)
{
  _Scheduler_EDF_SMP_Insert_ready(
    context,
    node_to_insert,
    0,
    1,
    _Scheduler_EDF_SMP_Less
  );
}

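/*
 * If the scheduled thread has an affinity, it must run on its desired
 * processor.  In case the victim processor differs, the thread currently
 * scheduled on the desired processor (which is non-affine, see the
 * assertion below) is moved to the victim processor first, and the affine
 * thread then takes its desired processor.
 */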
static inline void _Scheduler_EDF_SMP_Allocate_processor(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled_base,
  Scheduler_Node    *victim_base,
  Per_CPU_Control   *victim_cpu
)
{
  Scheduler_EDF_SMP_Context     *self;
  Scheduler_EDF_SMP_Node        *scheduled;
  uint32_t                       rqi;

  (void) victim_base;
  self = _Scheduler_EDF_SMP_Get_self( context );
  scheduled = _Scheduler_EDF_SMP_Node_downcast( scheduled_base );
  rqi = scheduled->ready_queue_index;

  if ( rqi != 0 ) {
    Scheduler_EDF_SMP_Ready_queue *ready_queue;
    Per_CPU_Control               *desired_cpu;

    ready_queue = &self->Ready[ rqi ];

    if ( !_Chain_Is_node_off_chain( &ready_queue->Node ) ) {
      _Chain_Extract_unprotected( &ready_queue->Node );
      _Chain_Set_off_chain( &ready_queue->Node );
    }

    desired_cpu = _Per_CPU_Get_by_index( rqi - 1 );

    if ( victim_cpu != desired_cpu ) {
      Scheduler_EDF_SMP_Node *node;

      node = _Scheduler_EDF_SMP_Get_scheduled( self, rqi );
      _Assert( node->ready_queue_index == 0 );
      _Scheduler_EDF_SMP_Set_scheduled( self, node, victim_cpu );
      _Scheduler_SMP_Allocate_processor_exact(
        context,
        &node->Base.Base,
        NULL,
        victim_cpu
      );
      victim_cpu = desired_cpu;
    }
  }

  _Scheduler_EDF_SMP_Set_scheduled( self, scheduled, victim_cpu );
  _Scheduler_SMP_Allocate_processor_exact(
    context,
    &scheduled->Base.Base,
    NULL,
    victim_cpu
  );
}

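/*
 * The operations below mostly delegate to the generic SMP scheduler
 * framework in <rtems/score/schedulersmpimpl.h>, passing the EDF specific
 * hooks defined above.
 */
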
void _Scheduler_EDF_SMP_Block(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Block(
    context,
    thread,
    node,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Get_highest_ready,
    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor
  );
}

static inline bool _Scheduler_EDF_SMP_Enqueue_ordered(
  Scheduler_Context    *context,
  Scheduler_Node       *node,
  Chain_Node_order      order,
  Scheduler_SMP_Insert  insert_ready,
  Scheduler_SMP_Insert  insert_scheduled
)
{
  return _Scheduler_SMP_Enqueue_ordered(
    context,
    node,
    order,
    insert_ready,
    insert_scheduled,
    _Scheduler_EDF_SMP_Move_from_scheduled_to_ready,
    _Scheduler_EDF_SMP_Get_lowest_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor
  );
}

static inline bool _Scheduler_EDF_SMP_Enqueue_lifo(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  return _Scheduler_EDF_SMP_Enqueue_ordered(
    context,
    node,
    _Scheduler_SMP_Insert_priority_lifo_order,
    _Scheduler_EDF_SMP_Insert_ready_lifo,
    _Scheduler_SMP_Insert_scheduled_lifo
  );
}

static inline bool _Scheduler_EDF_SMP_Enqueue_fifo(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  return _Scheduler_EDF_SMP_Enqueue_ordered(
    context,
    node,
    _Scheduler_SMP_Insert_priority_fifo_order,
    _Scheduler_EDF_SMP_Insert_ready_fifo,
    _Scheduler_SMP_Insert_scheduled_fifo
  );
}

static inline bool _Scheduler_EDF_SMP_Enqueue_scheduled_ordered(
  Scheduler_Context    *context,
  Scheduler_Node       *node,
  Chain_Node_order      order,
  Scheduler_SMP_Insert  insert_ready,
  Scheduler_SMP_Insert  insert_scheduled
)
{
  return _Scheduler_SMP_Enqueue_scheduled_ordered(
    context,
    node,
    order,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Get_highest_ready,
    insert_ready,
    insert_scheduled,
    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor
  );
}

static inline bool _Scheduler_EDF_SMP_Enqueue_scheduled_lifo(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  return _Scheduler_EDF_SMP_Enqueue_scheduled_ordered(
    context,
    node,
    _Scheduler_SMP_Insert_priority_lifo_order,
    _Scheduler_EDF_SMP_Insert_ready_lifo,
    _Scheduler_SMP_Insert_scheduled_lifo
  );
}

static inline bool _Scheduler_EDF_SMP_Enqueue_scheduled_fifo(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  return _Scheduler_EDF_SMP_Enqueue_scheduled_ordered(
    context,
    node,
    _Scheduler_SMP_Insert_priority_fifo_order,
    _Scheduler_EDF_SMP_Insert_ready_fifo,
    _Scheduler_SMP_Insert_scheduled_fifo
  );
}

void _Scheduler_EDF_SMP_Unblock(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Unblock(
    context,
    thread,
    node,
    _Scheduler_EDF_SMP_Do_update,
    _Scheduler_EDF_SMP_Enqueue_fifo
  );
}

static inline bool _Scheduler_EDF_SMP_Do_ask_for_help(
  Scheduler_Context *context,
  Thread_Control    *the_thread,
  Scheduler_Node    *node
)
{
  return _Scheduler_SMP_Ask_for_help(
    context,
    the_thread,
    node,
    _Scheduler_SMP_Insert_priority_lifo_order,
    _Scheduler_EDF_SMP_Insert_ready_lifo,
    _Scheduler_SMP_Insert_scheduled_lifo,
    _Scheduler_EDF_SMP_Move_from_scheduled_to_ready,
    _Scheduler_EDF_SMP_Get_lowest_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor
  );
}

void _Scheduler_EDF_SMP_Update_priority(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Update_priority(
    context,
    thread,
    node,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Do_update,
    _Scheduler_EDF_SMP_Enqueue_fifo,
    _Scheduler_EDF_SMP_Enqueue_lifo,
    _Scheduler_EDF_SMP_Enqueue_scheduled_fifo,
    _Scheduler_EDF_SMP_Enqueue_scheduled_lifo,
    _Scheduler_EDF_SMP_Do_ask_for_help
  );
}

bool _Scheduler_EDF_SMP_Ask_for_help(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Scheduler_EDF_SMP_Do_ask_for_help( context, the_thread, node );
}

void _Scheduler_EDF_SMP_Reconsider_help_request(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Reconsider_help_request(
    context,
    the_thread,
    node,
    _Scheduler_EDF_SMP_Extract_from_ready
  );
}

void _Scheduler_EDF_SMP_Withdraw_node(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  Thread_Scheduler_state   next_state
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Withdraw_node(
    context,
    the_thread,
    node,
    next_state,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Get_highest_ready,
    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor
  );
}

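/*
 * An idle thread is registered as the scheduled node of its processor's
 * affine slot, so that _Scheduler_EDF_SMP_Get_scheduled() is well defined
 * from the start.
 */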
static inline void _Scheduler_EDF_SMP_Register_idle(
  Scheduler_Context *context,
  Scheduler_Node    *idle_base,
  Per_CPU_Control   *cpu
)
{
  Scheduler_EDF_SMP_Context *self;
  Scheduler_EDF_SMP_Node    *idle;

  self = _Scheduler_EDF_SMP_Get_self( context );
  idle = _Scheduler_EDF_SMP_Node_downcast( idle_base );
  _Scheduler_EDF_SMP_Set_scheduled( self, idle, cpu );
}

void _Scheduler_EDF_SMP_Add_processor(
  const Scheduler_Control *scheduler,
  Thread_Control          *idle
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Add_processor(
    context,
    idle,
    _Scheduler_EDF_SMP_Has_ready,
    _Scheduler_EDF_SMP_Enqueue_scheduled_fifo,
    _Scheduler_EDF_SMP_Register_idle
  );
}

Thread_Control *_Scheduler_EDF_SMP_Remove_processor(
  const Scheduler_Control *scheduler,
  Per_CPU_Control         *cpu
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Scheduler_SMP_Remove_processor(
    context,
    cpu,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Enqueue_fifo
  );
}

void _Scheduler_EDF_SMP_Yield(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Yield(
    context,
    thread,
    node,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Enqueue_fifo,
    _Scheduler_EDF_SMP_Enqueue_scheduled_fifo
  );
}

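/*
 * Helper for _Scheduler_EDF_SMP_Set_affinity(): it stores the ready queue
 * index computed there on the scheduler node.
 */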
static inline void _Scheduler_EDF_SMP_Do_set_affinity(
  Scheduler_Context *context,
  Scheduler_Node    *node_base,
  void              *arg
)
{
  Scheduler_EDF_SMP_Node *node;
  const uint32_t         *rqi;

  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
  rqi = arg;
  node->ready_queue_index = *rqi;
}

void _Scheduler_EDF_SMP_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control          *idle,
  Per_CPU_Control         *cpu
)
{
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Do_start_idle(
    context,
    idle,
    cpu,
    _Scheduler_EDF_SMP_Register_idle
  );
}

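/*
 * Map the requested affinity set to a ready queue index.  For illustration
 * (assuming a four processor system fully owned by this scheduler and the
 * one-based result of _Processor_mask_Find_last_set()):
 *
 *   affinity = { 0, 1, 2, 3 }  ->  rqi = 0  (no affinity, general queue)
 *   affinity = { 2 }           ->  rqi = 3  (pinned to processor 2)
 *
 * Affinity sets other than "all online processors" are reduced to the last
 * set processor in the intersection with the scheduler's processors; this
 * is the "simple" part of the simple affinity support.
 */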
bool _Scheduler_EDF_SMP_Set_affinity(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node,
  const Processor_mask    *affinity
)
{
  Scheduler_Context *context;
  Processor_mask     a;
  Processor_mask     b;
  uint32_t           rqi;

  context = _Scheduler_Get_context( scheduler );
  _Processor_mask_And( &a, &context->Processors, affinity );

  if ( _Processor_mask_Count( &a ) == 0 ) {
    return false;
  }

  _Processor_mask_And( &b, &_SMP_Online_processors, affinity );

  if ( _Processor_mask_Count( &b ) == _SMP_Processor_count ) {
    rqi = 0;
  } else {
    rqi = _Processor_mask_Find_last_set( &a );
  }

  _Scheduler_SMP_Set_affinity(
    context,
    thread,
    node,
    &rqi,
    _Scheduler_EDF_SMP_Do_set_affinity,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Get_highest_ready,
    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
    _Scheduler_EDF_SMP_Enqueue_fifo,
    _Scheduler_EDF_SMP_Allocate_processor
  );

  return true;
}