source: rtems/cpukit/score/src/schedulerpriorityaffinitysmp.c @ b2dbb634

Last change on this file since b2dbb634 was b2dbb634, checked in by Sebastian Huber <sebastian.huber@…>, on 10/10/17 at 09:36:23

score: Remove CPU_set_Control

Use Processor_mask instead.

Update #2514.

/**
 * @file
 *
 * @brief Deterministic Priority Affinity SMP Scheduler Implementation
 *
 * @ingroup ScoreSchedulerPriorityAffinitySMP
 */

/*
 *  COPYRIGHT (c) 2014.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
  #include "config.h"
#endif

#include <rtems/score/schedulerpriorityaffinitysmp.h>
#include <rtems/score/schedulerpriorityimpl.h>
#include <rtems/score/schedulersmpimpl.h>
#include <rtems/score/schedulerprioritysmpimpl.h>
#include <rtems/score/priority.h>

/*
 * The following methods were initially static in schedulerprioritysmp.c.
 * They are now public so that this scheduler can share them:
 *
 *  + _Scheduler_priority_SMP_Get_self
 *  + _Scheduler_priority_SMP_Insert_ready_fifo
 *  + _Scheduler_priority_SMP_Insert_ready_lifo
 *  + _Scheduler_priority_SMP_Thread_get_node
 *  + _Scheduler_priority_SMP_Move_from_scheduled_to_ready
 *  + _Scheduler_priority_SMP_Move_from_ready_to_scheduled
 *  + _Scheduler_priority_SMP_Extract_from_ready
 *  + _Scheduler_priority_SMP_Do_update
 */

static bool _Scheduler_priority_affinity_SMP_Insert_priority_lifo_order(
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  return next != NULL
    && _Scheduler_SMP_Insert_priority_lifo_order( to_insert, next );
}

static bool _Scheduler_priority_affinity_SMP_Insert_priority_fifo_order(
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  return next != NULL
    && _Scheduler_SMP_Insert_priority_fifo_order( to_insert, next );
}

static Scheduler_priority_affinity_SMP_Node *
_Scheduler_priority_affinity_SMP_Node_downcast(
  Scheduler_Node *node
)
{
  return (Scheduler_priority_affinity_SMP_Node *) node;
}

/*
 * This method initializes the scheduler node of this scheduler
 * instance for a thread.
 */
void _Scheduler_priority_affinity_SMP_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_priority_affinity_SMP_Node *the_node;

  _Scheduler_priority_SMP_Node_initialize( scheduler, node, the_thread, priority );

  /*
   *  All we add is affinity information to the basic SMP node.
   */
  the_node = _Scheduler_priority_affinity_SMP_Node_downcast( node );
  _Processor_mask_To_cpu_set_t(
    _SMP_Get_online_processors(),
    sizeof( the_node->affinity ),
    &the_node->affinity
  );
}
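
/*
 * Illustration: the affinity member is a POSIX-style cpu_set_t, so the
 * membership tests performed by the routines below reduce to the standard
 * <sched.h> macros.  A minimal, self-contained sketch of such a test (the
 * helper name can_execute_on is illustrative only, not part of this
 * scheduler):
 *
 *   #include <sched.h>
 *   #include <stdbool.h>
 *
 *   static bool can_execute_on( cpu_set_t *affinity, int cpu_index )
 *   {
 *     return CPU_ISSET( cpu_index, affinity ) != 0;
 *   }
 *
 * By default a node may execute on every online processor; the set is
 * narrowed later through _Scheduler_priority_affinity_SMP_Set_affinity().
 */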

/*
 * This method is unique to this scheduler because it takes into
 * account affinity as it determines the highest ready thread.
 * Since this is used to pick a new thread to replace the victim,
 * the highest ready thread must have affinity such that it can
 * be executed on the victim's processor.
 */
static Scheduler_Node *_Scheduler_priority_affinity_SMP_Get_highest_ready(
  Scheduler_Context *context,
  Scheduler_Node    *victim
)
{
  Scheduler_priority_SMP_Context       *self =
    _Scheduler_priority_SMP_Get_self( context );
  Priority_Control                      index;
  Scheduler_Node                       *highest = NULL;
  Thread_Control                       *victim_thread;
  uint32_t                              victim_cpu_index;
  Scheduler_priority_affinity_SMP_Node *node;

  /*
   * A NULL victim indicates that the caller only needs the globally
   * highest priority ready node, for example to check whether
   * migrations are necessary, so affinity is not taken into account.
   */
  if ( victim == NULL ) {
    node = (Scheduler_priority_affinity_SMP_Node *)
      _Scheduler_priority_Ready_queue_first(
        &self->Bit_map,
        &self->Ready[ 0 ]
      );

    return &node->Base.Base.Base;
  }

  victim_thread = _Scheduler_Node_get_owner( victim );
  victim_cpu_index = _Per_CPU_Get_index( _Thread_Get_CPU( victim_thread ) );

  /**
   * @todo The deterministic priority scheduler structure is optimized
   * for insertion, extraction, and finding the highest priority
   * thread. Scanning the list of ready threads is not a purpose
   * for which it was optimized. There are optimizations to be
   * made in this loop.
   *
   * + by checking the major bit, we could potentially skip entire
   *   groups of 16.
   *
   * When using this scheduler as implemented, the application's
   * choice of numeric priorities and their distribution can have
   * an impact on performance.
   */
  for ( index = _Priority_bit_map_Get_highest( &self->Bit_map ) ;
        index <= PRIORITY_MAXIMUM;
        index++ )
  {
    Chain_Control   *chain =  &self->Ready[index];
    Chain_Node      *chain_node;
    for ( chain_node = _Chain_First( chain );
          chain_node != _Chain_Immutable_tail( chain ) ;
          chain_node = _Chain_Next( chain_node ) )
    {
      node = (Scheduler_priority_affinity_SMP_Node *) chain_node;

      /*
       * Can this thread run on this CPU?
       */
      if ( CPU_ISSET( (int) victim_cpu_index, &node->affinity ) ) {
        highest = &node->Base.Base.Base;
        break;
      }
    }
    if ( highest )
      break;
  }

  _Assert( highest != NULL );

  return highest;
}
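
/*
 * Worked example (illustrative values): assume the ready set contains T7
 * with priority 7 and affinity { 1 } and T8 with priority 8 and affinity
 * { 0, 1 }.  For a victim executing on processor 0, the scan above skips
 * T7, because it cannot execute on processor 0, and returns T8 even
 * though T7 is of higher importance.
 */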

/*
 * This method is very similar to _Scheduler_priority_SMP_Block()
 * but has the difference that it invokes this scheduler's
 * get_highest_ready() support method.
 */
void _Scheduler_priority_affinity_SMP_Block(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Block(
    context,
    thread,
    node,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Get_highest_ready,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_exact
  );

  /*
   * Since this removed a single thread from the scheduled set
   * and selected the most appropriate thread from the ready
   * set to replace it, there should be no need for thread
   * migrations.
   */
}

/*
 * This method is unique to this scheduler because it must take into
 * account affinity as it searches for the lowest priority scheduled
 * thread.  It ignores scheduled threads executing on processors for
 * which the filter thread does not have affinity, since they cannot
 * be replaced by the filter thread.
 */
static Scheduler_Node *_Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *filter_base,
  Chain_Node_order   order
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Scheduler_Node *lowest_scheduled = NULL;
  Chain_Control   *scheduled = &self->Scheduled;
  Chain_Node      *chain_node;
  Scheduler_priority_affinity_SMP_Node *filter =
    _Scheduler_priority_affinity_SMP_Node_downcast( filter_base );

  for ( chain_node = _Chain_Last( scheduled );
        chain_node != _Chain_Immutable_head( scheduled ) ;
        chain_node = _Chain_Previous( chain_node ) ) {
    Scheduler_priority_affinity_SMP_Node *node;
    Thread_Control                       *thread;
    uint32_t                              cpu_index;

    node = (Scheduler_priority_affinity_SMP_Node *) chain_node;

    /*
     * Once we reach a scheduled thread of higher importance than the
     * filter thread, no thread of equal or lower importance remains,
     * so the filter thread cannot be scheduled to execute.
     */
    if (
      (*order)(
        &node->Base.Base.Base.Node.Chain,
        &filter->Base.Base.Base.Node.Chain
      )
    ) {
      break;
    }

    /* cpu_index is the processor number the thread is executing on */
    thread = _Scheduler_Node_get_owner( &node->Base.Base.Base );
    cpu_index = _Per_CPU_Get_index( _Thread_Get_CPU( thread ) );

    if ( CPU_ISSET( (int) cpu_index, &filter->affinity ) ) {
      lowest_scheduled = &node->Base.Base.Base;
      break;
    }

  }

  return lowest_scheduled;
}
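
/*
 * Worked example (illustrative values): assume the scheduled set contains
 * T2 with priority 2 executing on processor 0 and T9 with priority 9
 * executing on processor 1, and that the filter thread has priority 6 and
 * affinity { 0 }.  The scan above starts with T9, which is of lower
 * importance than the filter thread but executes on a processor outside
 * the filter's affinity set, so it is skipped.  T2 is of higher importance
 * than the filter thread, so the scan stops and NULL is returned: the
 * filter thread cannot be scheduled at this time.
 */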

/*
 * This method is unique to this scheduler because it must pass
 * _Scheduler_priority_affinity_SMP_Get_lowest_scheduled into
 * _Scheduler_SMP_Enqueue_ordered.
 */
static bool _Scheduler_priority_affinity_SMP_Enqueue_fifo(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  return _Scheduler_SMP_Enqueue_ordered(
    context,
    node,
    _Scheduler_priority_affinity_SMP_Insert_priority_fifo_order,
    _Scheduler_priority_SMP_Insert_ready_fifo,
    _Scheduler_SMP_Insert_scheduled_fifo,
    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    _Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
    _Scheduler_SMP_Allocate_processor_exact
  );
}

/*
 * This method is invoked at the end of certain scheduling operations
 * to check whether the highest priority ready thread can still be
 * scheduled to execute.  When we schedule with affinity, there is the
 * possibility that we need to migrate a thread to another core to
 * ensure that the highest priority ready threads are in fact scheduled.
 */
static void _Scheduler_priority_affinity_SMP_Check_for_migrations(
  Scheduler_Context *context
)
{
  Scheduler_priority_SMP_Context *self;
  Scheduler_Node                 *lowest_scheduled;
  Scheduler_Node                 *highest_ready;

  self = _Scheduler_priority_SMP_Get_self( context );

  while (1) {
    if ( _Priority_bit_map_Is_empty( &self->Bit_map ) ) {
      /* Nothing to do */
      break;
    }

    highest_ready =
      _Scheduler_priority_affinity_SMP_Get_highest_ready( context, NULL );

    lowest_scheduled =
      _Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
        context,
        highest_ready,
        _Scheduler_SMP_Insert_priority_lifo_order
      );

    /*
     * The highest ready thread was found without considering affinity.
     * Now its affinity must be considered in order to place it.  If no
     * scheduled thread can be displaced within that affinity set, then
     * all the highest priority threads possible are already in the
     * scheduled set and we are done.
     */
    if ( lowest_scheduled == NULL )
      break;

    /*
     * Otherwise we found a scheduled thread of lower importance than the
     * highest ready thread, so swap them: the ready thread moves into the
     * scheduled set and the displaced thread becomes ready.
     */
    _Scheduler_priority_SMP_Extract_from_ready( context, highest_ready );
    _Scheduler_SMP_Enqueue_to_scheduled(
      context,
      highest_ready,
      lowest_scheduled,
      _Scheduler_SMP_Insert_scheduled_fifo,
      _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
      _Scheduler_SMP_Allocate_processor_exact
    );
  }
}

/*
 * This is the public scheduler specific Unblock operation.
 */
void _Scheduler_priority_affinity_SMP_Unblock(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Unblock(
    context,
    thread,
    node,
    _Scheduler_priority_SMP_Do_update,
    _Scheduler_priority_affinity_SMP_Enqueue_fifo
  );

  /*
   * Perform any thread migrations that are needed due to these changes.
   */
  _Scheduler_priority_affinity_SMP_Check_for_migrations( context );
}

/*
 *  This is unique to this scheduler because it passes the scheduler-specific
 *  get_lowest_scheduled helper to _Scheduler_SMP_Enqueue_ordered.
 */
static bool _Scheduler_priority_affinity_SMP_Enqueue_ordered(
  Scheduler_Context     *context,
  Scheduler_Node        *node,
  Chain_Node_order       order,
  Scheduler_SMP_Insert   insert_ready,
  Scheduler_SMP_Insert   insert_scheduled
)
{
  return _Scheduler_SMP_Enqueue_ordered(
    context,
    node,
    order,
    insert_ready,
    insert_scheduled,
    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    _Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
    _Scheduler_SMP_Allocate_processor_exact
  );
}

/*
 *  This is unique to this scheduler because it is on the path
 *  to _Scheduler_priority_affinity_SMP_Enqueue_ordered() which
 *  invokes the scheduler-unique get_lowest_scheduled helper.
 */
static bool _Scheduler_priority_affinity_SMP_Enqueue_lifo(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  return _Scheduler_priority_affinity_SMP_Enqueue_ordered(
    context,
    node,
    _Scheduler_priority_affinity_SMP_Insert_priority_lifo_order,
    _Scheduler_priority_SMP_Insert_ready_lifo,
    _Scheduler_SMP_Insert_scheduled_lifo
  );
}

/*
 * This method is unique to this scheduler because it must
 * invoke _Scheduler_SMP_Enqueue_scheduled_ordered() with
 * this scheduler's get_highest_ready() helper.
 */
static bool _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
  Scheduler_Context    *context,
  Scheduler_Node       *node,
  Chain_Node_order      order,
  Scheduler_SMP_Insert  insert_ready,
  Scheduler_SMP_Insert  insert_scheduled
)
{
  return _Scheduler_SMP_Enqueue_scheduled_ordered(
    context,
    node,
    order,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Get_highest_ready,
    insert_ready,
    insert_scheduled,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_exact
  );
}

/*
 *  This is unique to this scheduler because it is on the path
 *  to _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered() which
 *  invokes the scheduler-unique get_highest_ready helper.
 */
static bool _Scheduler_priority_affinity_SMP_Enqueue_scheduled_lifo(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  return _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
    context,
    node,
    _Scheduler_SMP_Insert_priority_lifo_order,
    _Scheduler_priority_SMP_Insert_ready_lifo,
    _Scheduler_SMP_Insert_scheduled_lifo
  );
}

/*
 *  This is unique to this scheduler because it is on the path
 *  to _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered() which
 *  invokes the scheduler-unique get_highest_ready helper.
 */
static bool _Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  return _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
    context,
    node,
    _Scheduler_SMP_Insert_priority_fifo_order,
    _Scheduler_priority_SMP_Insert_ready_fifo,
    _Scheduler_SMP_Insert_scheduled_fifo
  );
}

static bool _Scheduler_priority_affinity_SMP_Do_ask_for_help(
  Scheduler_Context *context,
  Thread_Control    *the_thread,
  Scheduler_Node    *node
)
{
  return _Scheduler_SMP_Ask_for_help(
    context,
    the_thread,
    node,
    _Scheduler_SMP_Insert_priority_lifo_order,
    _Scheduler_priority_SMP_Insert_ready_lifo,
    _Scheduler_SMP_Insert_scheduled_lifo,
    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    _Scheduler_SMP_Get_lowest_scheduled,
    _Scheduler_SMP_Allocate_processor_lazy
  );
}

void _Scheduler_priority_affinity_SMP_Update_priority(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Update_priority(
    context,
    thread,
    node,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_SMP_Do_update,
    _Scheduler_priority_affinity_SMP_Enqueue_fifo,
    _Scheduler_priority_affinity_SMP_Enqueue_lifo,
    _Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo,
    _Scheduler_priority_affinity_SMP_Enqueue_scheduled_lifo,
    _Scheduler_priority_affinity_SMP_Do_ask_for_help
  );

  /*
   * Perform any thread migrations that are needed due to these changes.
   */
  _Scheduler_priority_affinity_SMP_Check_for_migrations( context );
}

bool _Scheduler_priority_affinity_SMP_Ask_for_help(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Scheduler_priority_affinity_SMP_Do_ask_for_help( context, the_thread, node );
}

void _Scheduler_priority_affinity_SMP_Reconsider_help_request(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Reconsider_help_request(
    context,
    the_thread,
    node,
    _Scheduler_priority_SMP_Extract_from_ready
  );
}

void _Scheduler_priority_affinity_SMP_Withdraw_node(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  Thread_Scheduler_state   next_state
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Withdraw_node(
    context,
    the_thread,
    node,
    next_state,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Get_highest_ready,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_lazy
  );
}

void _Scheduler_priority_affinity_SMP_Add_processor(
  const Scheduler_Control *scheduler,
  Thread_Control          *idle
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Add_processor(
    context,
    idle,
    _Scheduler_priority_SMP_Has_ready,
    _Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo,
    _Scheduler_SMP_Do_nothing_register_idle
  );
}

Thread_Control *_Scheduler_priority_affinity_SMP_Remove_processor(
  const Scheduler_Control *scheduler,
  Per_CPU_Control         *cpu
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Scheduler_SMP_Remove_processor(
    context,
    cpu,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Enqueue_fifo
  );
}

bool _Scheduler_priority_affinity_SMP_Set_affinity(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node_base,
  const Processor_mask    *affinity
)
{
  Scheduler_Context                    *context;
  Scheduler_priority_affinity_SMP_Node *node;
  States_Control                        current_state;
  Processor_mask                        my_affinity;
  cpu_set_t                             cpuset;

  context = _Scheduler_Get_context( scheduler );
  _Processor_mask_And( &my_affinity, &context->Processors, affinity );

  if ( _Processor_mask_Count( &my_affinity ) == 0 ) {
    return false;
  }

  _Processor_mask_To_cpu_set_t( &my_affinity, sizeof( cpuset ), &cpuset );
  node = _Scheduler_priority_affinity_SMP_Node_downcast( node_base );

  /*
   * The old and new sets are the same, so there is no point in
   * doing anything.
   */
  if ( CPU_EQUAL( &cpuset, &node->affinity ) )
    return true;

  current_state = thread->current_state;

  if ( _States_Is_ready( current_state ) ) {
    _Scheduler_priority_affinity_SMP_Block( scheduler, thread, &node->Base.Base.Base );
  }

  CPU_COPY( &cpuset, &node->affinity );

  if ( _States_Is_ready( current_state ) ) {
    /*
     * FIXME: Do not ignore threads in need of help.
     */
    (void) _Scheduler_priority_affinity_SMP_Unblock( scheduler, thread, &node->Base.Base.Base );
  }

  return true;
}
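
/*
 * Usage sketch (illustration only, under the assumption that the Classic
 * API is used): an application typically reaches this operation through
 * the rtems_task_set_affinity() directive.  For example, restricting the
 * calling task to processor 1:
 *
 *   #include <rtems.h>
 *   #include <sched.h>
 *
 *   cpu_set_t cpuset;
 *
 *   CPU_ZERO( &cpuset );
 *   CPU_SET( 1, &cpuset );
 *   (void) rtems_task_set_affinity( RTEMS_SELF, sizeof( cpuset ), &cpuset );
 */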