source: rtems/cpukit/score/src/scheduleredfsmp.c @ 21275b58

Last change on this file since 21275b58 was 7097962, checked in by Sebastian Huber <sebastian.huber@…>, on 08/29/18 at 07:43:44

score: Add thread pin/unpin support

Add support to temporarily pin a thread to its current processor. This
may be used to access per-processor data structures in critical sections
with thread dispatching enabled, e.g. a pinned thread is allowed to
block.

Update #3508.
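
As a usage sketch (not part of this file): pinning is intended to be performed by the executing thread while thread dispatching is disabled. The helper names and signatures below are assumptions based on this commit; `_Thread_Pin()` is assumed to take the executing thread and `_Thread_Unpin()` additionally the current `Per_CPU_Control`.

    #include <rtems/score/threadimpl.h>

    static void access_per_cpu_data_and_maybe_block( void )
    {
      Per_CPU_Control *cpu_self;

      /* Pin the executing thread while thread dispatching is disabled */
      cpu_self = _Thread_Dispatch_disable();
      _Thread_Pin( _Per_CPU_Get_executing( cpu_self ) );
      _Thread_Dispatch_enable( cpu_self );

      /*
       * Thread dispatching is enabled again, yet the thread stays on this
       * processor, so per-processor data structures may be used even
       * across blocking operations.
       */

      cpu_self = _Thread_Dispatch_disable();
      _Thread_Unpin( _Per_CPU_Get_executing( cpu_self ), cpu_self );
      _Thread_Dispatch_enable( cpu_self );
    }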

/**
 * @file
 *
 * @brief EDF SMP Scheduler Implementation
 *
 * @ingroup ScoreSchedulerSMPEDF
 */

/*
 * Copyright (c) 2017 embedded brains GmbH.
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
  #include "config.h"
#endif

#include <rtems/score/scheduleredfsmp.h>
#include <rtems/score/schedulersmpimpl.h>

static inline Scheduler_EDF_SMP_Context *
_Scheduler_EDF_SMP_Get_context( const Scheduler_Control *scheduler )
{
  return (Scheduler_EDF_SMP_Context *) _Scheduler_Get_context( scheduler );
}

static inline Scheduler_EDF_SMP_Context *
_Scheduler_EDF_SMP_Get_self( Scheduler_Context *context )
{
  return (Scheduler_EDF_SMP_Context *) context;
}

static inline Scheduler_EDF_SMP_Node *
_Scheduler_EDF_SMP_Node_downcast( Scheduler_Node *node )
{
  return (Scheduler_EDF_SMP_Node *) node;
}

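/*
 * Comparator for _RBTree_Insert_inline(): the left argument is a pointer to
 * the insert priority supplied by the caller, the right argument is a node
 * already in the ready queue, mapped back to its containing
 * Scheduler_SMP_Node via RTEMS_CONTAINER_OF().  The ready queues are thus
 * ordered by priority, which for this EDF scheduler encodes the deadline.
 */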
static inline bool _Scheduler_EDF_SMP_Priority_less_equal(
  const void        *left,
  const RBTree_Node *right
)
{
  const Priority_Control   *the_left;
  const Scheduler_SMP_Node *the_right;
  Priority_Control          prio_left;
  Priority_Control          prio_right;

  the_left = left;
  the_right = RTEMS_CONTAINER_OF( right, Scheduler_SMP_Node, Base.Node.RBTree );

  prio_left = *the_left;
  prio_right = the_right->priority;

  return prio_left <= prio_right;
}

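/*
 * Ready queue layout: Ready[ 0 ] is the queue for threads which may run on
 * every processor of this scheduler instance, Ready[ i + 1 ] is the queue
 * for threads with an affinity (or pinning) to processor i.  The
 * Affine_queues chain collects the non-empty affine ready queues whose
 * processor currently executes a thread without affinity; these are, in
 * essence, the only affine queues which can supply a better candidate to
 * _Scheduler_EDF_SMP_Get_highest_ready().  See
 * _Scheduler_EDF_SMP_Insert_ready() and
 * _Scheduler_EDF_SMP_Extract_from_ready() for the chain maintenance.
 */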
void _Scheduler_EDF_SMP_Initialize( const Scheduler_Control *scheduler )
{
  Scheduler_EDF_SMP_Context *self =
    _Scheduler_EDF_SMP_Get_context( scheduler );

  _Scheduler_SMP_Initialize( &self->Base );
  _Chain_Initialize_empty( &self->Affine_queues );
  /* The ready queues are zero initialized and thus empty */
}

void _Scheduler_EDF_SMP_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_SMP_Node *smp_node;

  smp_node = _Scheduler_SMP_Node_downcast( node );
  _Scheduler_SMP_Node_initialize( scheduler, smp_node, the_thread, priority );
}

static inline void _Scheduler_EDF_SMP_Do_update(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Priority_Control   new_priority
)
{
  Scheduler_SMP_Node *smp_node;

  (void) context;

  smp_node = _Scheduler_SMP_Node_downcast( node );
  _Scheduler_SMP_Node_update_priority( smp_node, new_priority );
}

static inline bool _Scheduler_EDF_SMP_Has_ready( Scheduler_Context *context )
{
  Scheduler_EDF_SMP_Context *self = _Scheduler_EDF_SMP_Get_self( context );

  return !_RBTree_Is_empty( &self->Ready[ 0 ].Queue );
}

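/*
 * Total order used to compare candidates from different ready queues: the
 * smaller priority value wins (for EDF, the earlier deadline); ties are
 * broken by the insertion generation, so that among appended nodes of equal
 * priority the oldest one wins, while prepended nodes order ahead of them,
 * see _Scheduler_EDF_SMP_Insert_ready().
 */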
static inline bool _Scheduler_EDF_SMP_Overall_less(
  const Scheduler_EDF_SMP_Node *left,
  const Scheduler_EDF_SMP_Node *right
)
{
  Priority_Control lp;
  Priority_Control rp;

  lp = left->Base.priority;
  rp = right->Base.priority;

  return lp < rp || ( lp == rp && left->generation < right->generation );
}

static inline Scheduler_EDF_SMP_Node *
_Scheduler_EDF_SMP_Challenge_highest_ready(
  Scheduler_EDF_SMP_Context *self,
  Scheduler_EDF_SMP_Node    *highest_ready,
  RBTree_Control            *ready_queue
)
{
  Scheduler_EDF_SMP_Node *other;

  other = (Scheduler_EDF_SMP_Node *) _RBTree_Minimum( ready_queue );
  _Assert( other != NULL );

  if ( _Scheduler_EDF_SMP_Overall_less( other, highest_ready ) ) {
    return other;
  }

  return highest_ready;
}

static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_highest_ready(
  Scheduler_Context *context,
  Scheduler_Node    *filter
)
{
  Scheduler_EDF_SMP_Context *self;
  Scheduler_EDF_SMP_Node    *highest_ready;
  Scheduler_EDF_SMP_Node    *node;
  uint8_t                    rqi;
  const Chain_Node          *tail;
  Chain_Node                *next;

  self = _Scheduler_EDF_SMP_Get_self( context );
  highest_ready = (Scheduler_EDF_SMP_Node *)
    _RBTree_Minimum( &self->Ready[ 0 ].Queue );
  _Assert( highest_ready != NULL );

  /*
   * The filter node is a scheduled node which is no longer on the scheduled
   * chain.  In case this is an affine thread, then we have to check the
   * corresponding affine ready queue.
   */

  node = (Scheduler_EDF_SMP_Node *) filter;
  rqi = node->ready_queue_index;

  if ( rqi != 0 && !_RBTree_Is_empty( &self->Ready[ rqi ].Queue ) ) {
    highest_ready = _Scheduler_EDF_SMP_Challenge_highest_ready(
      self,
      highest_ready,
      &self->Ready[ rqi ].Queue
    );
  }

  tail = _Chain_Immutable_tail( &self->Affine_queues );
  next = _Chain_First( &self->Affine_queues );

  while ( next != tail ) {
    Scheduler_EDF_SMP_Ready_queue *ready_queue;

    ready_queue = (Scheduler_EDF_SMP_Ready_queue *) next;
    highest_ready = _Scheduler_EDF_SMP_Challenge_highest_ready(
      self,
      highest_ready,
      &ready_queue->Queue
    );

    next = _Chain_Next( next );
  }

  return &highest_ready->Base.Base;
}

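/*
 * Each processor owned by this scheduler has an associated scheduled node,
 * stored at ready queue index _Per_CPU_Get_index( cpu ) + 1.  This is the
 * same processor-to-index mapping used for the affine ready queues.
 */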
static inline void _Scheduler_EDF_SMP_Set_scheduled(
  Scheduler_EDF_SMP_Context *self,
  Scheduler_EDF_SMP_Node    *scheduled,
  const Per_CPU_Control     *cpu
)
{
  self->Ready[ _Per_CPU_Get_index( cpu ) + 1 ].scheduled = scheduled;
}

static inline Scheduler_EDF_SMP_Node *_Scheduler_EDF_SMP_Get_scheduled(
  const Scheduler_EDF_SMP_Context *self,
  uint8_t                          rqi
)
{
  return self->Ready[ rqi ].scheduled;
}

static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *filter_base
)
{
  Scheduler_EDF_SMP_Node *filter;
  uint8_t                 rqi;

  filter = _Scheduler_EDF_SMP_Node_downcast( filter_base );
  rqi = filter->ready_queue_index;

  if ( rqi != 0 ) {
    Scheduler_EDF_SMP_Context *self;
    Scheduler_EDF_SMP_Node    *node;

    self = _Scheduler_EDF_SMP_Get_self( context );
    node = _Scheduler_EDF_SMP_Get_scheduled( self, rqi );

    if ( node->ready_queue_index > 0 ) {
      _Assert( node->ready_queue_index == rqi );
      return &node->Base.Base;
    }
  }

  return _Scheduler_SMP_Get_lowest_scheduled( context, filter_base );
}

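/*
 * Two generation counters are used: generations[ 1 ] counts upwards for
 * appends, generations[ 0 ] counts downwards for prepends.  Since
 * generation_index is either 0 or 1, ( generation_index << 1 ) - 1 yields
 * the increment -1 or +1.  Starting from the zero-initialized context,
 * consecutive appends therefore receive the generations 0, 1, 2, and
 * consecutive prepends 0, -1, -2, which is exactly the tie-break order
 * consumed by _Scheduler_EDF_SMP_Overall_less().
 */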
static inline void _Scheduler_EDF_SMP_Insert_ready(
  Scheduler_Context *context,
  Scheduler_Node    *node_base,
  Priority_Control   insert_priority
)
{
  Scheduler_EDF_SMP_Context     *self;
  Scheduler_EDF_SMP_Node        *node;
  uint8_t                        rqi;
  Scheduler_EDF_SMP_Ready_queue *ready_queue;
  int                            generation_index;
  int                            increment;
  int64_t                        generation;

  self = _Scheduler_EDF_SMP_Get_self( context );
  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
  rqi = node->ready_queue_index;
  generation_index = SCHEDULER_PRIORITY_IS_APPEND( insert_priority );
  increment = ( generation_index << 1 ) - 1;
  ready_queue = &self->Ready[ rqi ];

  generation = self->generations[ generation_index ];
  node->generation = generation;
  self->generations[ generation_index ] = generation + increment;

  _RBTree_Initialize_node( &node->Base.Base.Node.RBTree );
  _RBTree_Insert_inline(
    &ready_queue->Queue,
    &node->Base.Base.Node.RBTree,
    &insert_priority,
    _Scheduler_EDF_SMP_Priority_less_equal
  );

  if ( rqi != 0 && _Chain_Is_node_off_chain( &ready_queue->Node ) ) {
    Scheduler_EDF_SMP_Node *scheduled;

    scheduled = _Scheduler_EDF_SMP_Get_scheduled( self, rqi );

    if ( scheduled->ready_queue_index == 0 ) {
      _Chain_Append_unprotected( &self->Affine_queues, &ready_queue->Node );
    }
  }
}

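/*
 * When a node leaves the scheduled chain, threads which are still ready in
 * its affine queue must become visible to
 * _Scheduler_EDF_SMP_Get_highest_ready() again, so the non-empty queue is
 * put back on the Affine_queues chain.
 */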
static inline void _Scheduler_EDF_SMP_Extract_from_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_extract
)
{
  Scheduler_EDF_SMP_Context     *self;
  Scheduler_EDF_SMP_Node        *node;
  uint8_t                        rqi;
  Scheduler_EDF_SMP_Ready_queue *ready_queue;

  self = _Scheduler_EDF_SMP_Get_self( context );
  node = _Scheduler_EDF_SMP_Node_downcast( node_to_extract );

  _Scheduler_SMP_Extract_from_scheduled( &self->Base.Base, &node->Base.Base );

  rqi = node->ready_queue_index;
  ready_queue = &self->Ready[ rqi ];

  if ( rqi != 0 && !_RBTree_Is_empty( &ready_queue->Queue ) ) {
    _Chain_Append_unprotected( &self->Affine_queues, &ready_queue->Node );
  }
}

static inline void _Scheduler_EDF_SMP_Extract_from_ready(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_extract
)
{
  Scheduler_EDF_SMP_Context     *self;
  Scheduler_EDF_SMP_Node        *node;
  uint8_t                        rqi;
  Scheduler_EDF_SMP_Ready_queue *ready_queue;

  self = _Scheduler_EDF_SMP_Get_self( context );
  node = _Scheduler_EDF_SMP_Node_downcast( node_to_extract );
  rqi = node->ready_queue_index;
  ready_queue = &self->Ready[ rqi ];

  _RBTree_Extract( &ready_queue->Queue, &node->Base.Base.Node.RBTree );
  _Chain_Initialize_node( &node->Base.Base.Node.Chain );

  if (
    rqi != 0
      && _RBTree_Is_empty( &ready_queue->Queue )
      && !_Chain_Is_node_off_chain( &ready_queue->Node )
  ) {
    _Chain_Extract_unprotected( &ready_queue->Node );
    _Chain_Set_off_chain( &ready_queue->Node );
  }
}

static inline void _Scheduler_EDF_SMP_Move_from_scheduled_to_ready(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled_to_ready
)
{
  Priority_Control insert_priority;

  _Scheduler_SMP_Extract_from_scheduled( context, scheduled_to_ready );
  insert_priority = _Scheduler_SMP_Node_priority( scheduled_to_ready );
  _Scheduler_EDF_SMP_Insert_ready(
    context,
    scheduled_to_ready,
    insert_priority
  );
}

static inline void _Scheduler_EDF_SMP_Move_from_ready_to_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *ready_to_scheduled
)
{
  Priority_Control insert_priority;

  _Scheduler_EDF_SMP_Extract_from_ready( context, ready_to_scheduled );
  insert_priority = _Scheduler_SMP_Node_priority( ready_to_scheduled );
  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
  _Scheduler_SMP_Insert_scheduled(
    context,
    ready_to_scheduled,
    insert_priority
  );
}

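/*
 * When an affine node must run on a processor other than the victim
 * processor, the two processors swap their scheduled nodes: the node
 * currently scheduled on the desired processor (which has no affinity, see
 * the _Assert() below) moves to the victim processor, and the affine node
 * is then allocated its desired processor.
 */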
static inline void _Scheduler_EDF_SMP_Allocate_processor(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled_base,
  Scheduler_Node    *victim_base,
  Per_CPU_Control   *victim_cpu
)
{
  Scheduler_EDF_SMP_Context     *self;
  Scheduler_EDF_SMP_Node        *scheduled;
  uint8_t                        rqi;

  (void) victim_base;
  self = _Scheduler_EDF_SMP_Get_self( context );
  scheduled = _Scheduler_EDF_SMP_Node_downcast( scheduled_base );
  rqi = scheduled->ready_queue_index;

  if ( rqi != 0 ) {
    Scheduler_EDF_SMP_Ready_queue *ready_queue;
    Per_CPU_Control               *desired_cpu;

    ready_queue = &self->Ready[ rqi ];

    if ( !_Chain_Is_node_off_chain( &ready_queue->Node ) ) {
      _Chain_Extract_unprotected( &ready_queue->Node );
      _Chain_Set_off_chain( &ready_queue->Node );
    }

    desired_cpu = _Per_CPU_Get_by_index( rqi - 1 );

    if ( victim_cpu != desired_cpu ) {
      Scheduler_EDF_SMP_Node *node;

      node = _Scheduler_EDF_SMP_Get_scheduled( self, rqi );
      _Assert( node->ready_queue_index == 0 );
      _Scheduler_EDF_SMP_Set_scheduled( self, node, victim_cpu );
      _Scheduler_SMP_Allocate_processor_exact(
        context,
        &node->Base.Base,
        NULL,
        victim_cpu
      );
      victim_cpu = desired_cpu;
    }
  }

  _Scheduler_EDF_SMP_Set_scheduled( self, scheduled, victim_cpu );
  _Scheduler_SMP_Allocate_processor_exact(
    context,
    &scheduled->Base.Base,
    NULL,
    victim_cpu
  );
}

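/*
 * The operations below are thin wrappers around the generic SMP scheduler
 * framework in <rtems/score/schedulersmpimpl.h>.  The framework functions
 * are static inline and receive the EDF-specific behaviour through the
 * function pointer arguments, so the compiler can specialize them for this
 * scheduler and avoid indirect calls.
 */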
void _Scheduler_EDF_SMP_Block(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Block(
    context,
    thread,
    node,
    _Scheduler_EDF_SMP_Extract_from_scheduled,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Get_highest_ready,
    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor
  );
}

static inline bool _Scheduler_EDF_SMP_Enqueue(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Priority_Control   insert_priority
)
{
  return _Scheduler_SMP_Enqueue(
    context,
    node,
    insert_priority,
    _Scheduler_SMP_Priority_less_equal,
    _Scheduler_EDF_SMP_Insert_ready,
    _Scheduler_SMP_Insert_scheduled,
    _Scheduler_EDF_SMP_Move_from_scheduled_to_ready,
    _Scheduler_EDF_SMP_Get_lowest_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor
  );
}

static inline bool _Scheduler_EDF_SMP_Enqueue_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Priority_Control   insert_priority
)
{
  return _Scheduler_SMP_Enqueue_scheduled(
    context,
    node,
    insert_priority,
    _Scheduler_SMP_Priority_less_equal,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Get_highest_ready,
    _Scheduler_EDF_SMP_Insert_ready,
    _Scheduler_SMP_Insert_scheduled,
    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor
  );
}

void _Scheduler_EDF_SMP_Unblock(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Unblock(
    context,
    thread,
    node,
    _Scheduler_EDF_SMP_Do_update,
    _Scheduler_EDF_SMP_Enqueue
  );
}

static inline bool _Scheduler_EDF_SMP_Do_ask_for_help(
  Scheduler_Context *context,
  Thread_Control    *the_thread,
  Scheduler_Node    *node
)
{
  return _Scheduler_SMP_Ask_for_help(
    context,
    the_thread,
    node,
    _Scheduler_SMP_Priority_less_equal,
    _Scheduler_EDF_SMP_Insert_ready,
    _Scheduler_SMP_Insert_scheduled,
    _Scheduler_EDF_SMP_Move_from_scheduled_to_ready,
    _Scheduler_EDF_SMP_Get_lowest_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor
  );
}

void _Scheduler_EDF_SMP_Update_priority(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Update_priority(
    context,
    thread,
    node,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Do_update,
    _Scheduler_EDF_SMP_Enqueue,
    _Scheduler_EDF_SMP_Enqueue_scheduled,
    _Scheduler_EDF_SMP_Do_ask_for_help
  );
}

bool _Scheduler_EDF_SMP_Ask_for_help(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Scheduler_EDF_SMP_Do_ask_for_help( context, the_thread, node );
}

void _Scheduler_EDF_SMP_Reconsider_help_request(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Reconsider_help_request(
    context,
    the_thread,
    node,
    _Scheduler_EDF_SMP_Extract_from_ready
  );
}

void _Scheduler_EDF_SMP_Withdraw_node(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  Thread_Scheduler_state   next_state
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Withdraw_node(
    context,
    the_thread,
    node,
    next_state,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Get_highest_ready,
    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
    _Scheduler_EDF_SMP_Allocate_processor
  );
}

static inline void _Scheduler_EDF_SMP_Register_idle(
  Scheduler_Context *context,
  Scheduler_Node    *idle_base,
  Per_CPU_Control   *cpu
)
{
  Scheduler_EDF_SMP_Context *self;
  Scheduler_EDF_SMP_Node    *idle;

  self = _Scheduler_EDF_SMP_Get_self( context );
  idle = _Scheduler_EDF_SMP_Node_downcast( idle_base );
  _Scheduler_EDF_SMP_Set_scheduled( self, idle, cpu );
}

void _Scheduler_EDF_SMP_Add_processor(
  const Scheduler_Control *scheduler,
  Thread_Control          *idle
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Add_processor(
    context,
    idle,
    _Scheduler_EDF_SMP_Has_ready,
    _Scheduler_EDF_SMP_Enqueue_scheduled,
    _Scheduler_EDF_SMP_Register_idle
  );
}

Thread_Control *_Scheduler_EDF_SMP_Remove_processor(
  const Scheduler_Control *scheduler,
  Per_CPU_Control         *cpu
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Scheduler_SMP_Remove_processor(
    context,
    cpu,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Enqueue
  );
}

void _Scheduler_EDF_SMP_Yield(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Yield(
    context,
    thread,
    node,
    _Scheduler_EDF_SMP_Extract_from_ready,
    _Scheduler_EDF_SMP_Enqueue,
    _Scheduler_EDF_SMP_Enqueue_scheduled
  );
}

static inline void _Scheduler_EDF_SMP_Do_set_affinity(
  Scheduler_Context *context,
  Scheduler_Node    *node_base,
  void              *arg
)
{
  Scheduler_EDF_SMP_Node *node;
  const uint8_t          *rqi;

  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
  rqi = arg;
  node->ready_queue_index = *rqi;
}

void _Scheduler_EDF_SMP_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control          *idle,
  Per_CPU_Control         *cpu
)
{
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Do_start_idle(
    context,
    idle,
    cpu,
    _Scheduler_EDF_SMP_Register_idle
  );
}

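/*
 * Pinning redirects the node to the affine ready queue of its current
 * processor (processor index plus one, consistent with the mapping above).
 * Pin and unpin are only performed while the node is blocked, hence the
 * _Assert() on the node state.  While pinned, pinning_ready_queue_index is
 * non-zero and an affinity change merely records the new
 * affinity_ready_queue_index, which takes effect again on unpin; see
 * _Scheduler_EDF_SMP_Set_affinity().
 */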
void _Scheduler_EDF_SMP_Pin(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node_base,
  struct Per_CPU_Control  *cpu
)
{
  Scheduler_EDF_SMP_Node *node;
  uint8_t                 rqi;

  (void) scheduler;
  (void) thread;
  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
  rqi = (uint8_t) _Per_CPU_Get_index( cpu ) + 1;

  _Assert(
    _Scheduler_SMP_Node_state( &node->Base.Base ) == SCHEDULER_SMP_NODE_BLOCKED
  );

  node->ready_queue_index = rqi;
  node->pinning_ready_queue_index = rqi;
}

void _Scheduler_EDF_SMP_Unpin(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node_base,
  struct Per_CPU_Control  *cpu
)
{
  Scheduler_EDF_SMP_Node *node;

  (void) scheduler;
  (void) thread;
  (void) cpu;
  node = _Scheduler_EDF_SMP_Node_downcast( node_base );

  _Assert(
    _Scheduler_SMP_Node_state( &node->Base.Base ) == SCHEDULER_SMP_NODE_BLOCKED
  );

  node->ready_queue_index = node->affinity_ready_queue_index;
  node->pinning_ready_queue_index = 0;
}

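/*
 * An affinity set is accepted if it contains at least one processor owned
 * by this scheduler instance.  An affinity to all online processors maps to
 * ready queue index 0 (no affinity); any other set collapses, in effect, to
 * a single processor, namely the last one set in the intersection.  For a
 * pinned node only the stored affinity index is updated; the actual ready
 * queue index is restored on unpin.
 */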
bool _Scheduler_EDF_SMP_Set_affinity(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node_base,
  const Processor_mask    *affinity
)
{
  Scheduler_Context      *context;
  Scheduler_EDF_SMP_Node *node;
  Processor_mask          local_affinity;
  uint8_t                 rqi;

  context = _Scheduler_Get_context( scheduler );
  _Processor_mask_And( &local_affinity, &context->Processors, affinity );

  if ( _Processor_mask_Is_zero( &local_affinity ) ) {
    return false;
  }

  if ( _Processor_mask_Is_equal( affinity, &_SMP_Online_processors ) ) {
    rqi = 0;
  } else {
    rqi = _Processor_mask_Find_last_set( &local_affinity );
  }

  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
  node->affinity_ready_queue_index = rqi;

  if ( node->pinning_ready_queue_index == 0 ) {
    _Scheduler_SMP_Set_affinity(
      context,
      thread,
      node_base,
      &rqi,
      _Scheduler_EDF_SMP_Do_set_affinity,
      _Scheduler_EDF_SMP_Extract_from_ready,
      _Scheduler_EDF_SMP_Get_highest_ready,
      _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
      _Scheduler_EDF_SMP_Enqueue,
      _Scheduler_EDF_SMP_Allocate_processor
    );
  }

  return true;
}