source: rtems/cpukit/score/src/schedulerpriorityaffinitysmp.c @ a3ad4af

/**
 * @file
 *
 * @brief Deterministic Priority Affinity SMP Scheduler Implementation
 *
 * @ingroup ScoreSchedulerPriorityAffinitySMP
 */

/*
 *  COPYRIGHT (c) 2014.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
  #include "config.h"
#endif

#include <rtems/score/schedulerpriorityaffinitysmp.h>
#include <rtems/score/schedulerpriorityimpl.h>
#include <rtems/score/schedulersmpimpl.h>
#include <rtems/score/schedulerprioritysmpimpl.h>
#include <rtems/score/priority.h>

/*
 * The following methods were initially static in schedulerprioritysmp.c.
 * They are now public so they can be shared with this scheduler:
 *
 *  + _Scheduler_priority_SMP_Get_self
 *  + _Scheduler_priority_SMP_Insert_ready_fifo
 *  + _Scheduler_priority_SMP_Insert_ready_lifo
 *  + _Scheduler_priority_SMP_Thread_get_node
 *  + _Scheduler_priority_SMP_Move_from_scheduled_to_ready
 *  + _Scheduler_priority_SMP_Move_from_ready_to_scheduled
 *  + _Scheduler_priority_SMP_Extract_from_ready
 *  + _Scheduler_priority_SMP_Do_update
 */

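/*
 * The following two order predicates return false if the next node is NULL
 * and otherwise defer to the generic SMP priority order checks.
 */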
static bool _Scheduler_priority_affinity_SMP_Insert_priority_lifo_order(
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  return next != NULL
    && _Scheduler_SMP_Insert_priority_lifo_order( to_insert, next );
}

static bool _Scheduler_priority_affinity_SMP_Insert_priority_fifo_order(
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  return next != NULL
    && _Scheduler_SMP_Insert_priority_fifo_order( to_insert, next );
}

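/*
 * This helper casts a generic scheduler node to this scheduler's
 * affinity-aware node type.
 */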
static Scheduler_priority_affinity_SMP_Node *
_Scheduler_priority_affinity_SMP_Node_downcast(
  Scheduler_Node *node
)
{
  return (Scheduler_priority_affinity_SMP_Node *) node;
}

/*
 * This method initializes the per-thread scheduler node for
 * this scheduler instance.
 */
void _Scheduler_priority_affinity_SMP_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_priority_affinity_SMP_Node *the_node;

  _Scheduler_priority_SMP_Node_initialize( scheduler, node, the_thread, priority );

  /*
   *  All we add is affinity information to the basic SMP node.
   */
  the_node = _Scheduler_priority_affinity_SMP_Node_downcast( node );
  the_node->Affinity     = *_CPU_set_Default();
  the_node->Affinity.set = &the_node->Affinity.preallocated;
}

/*
 * This method is unique to this scheduler because it takes into
 * account affinity as it determines the highest ready thread.
 * Since this is used to pick a new thread to replace the victim,
 * the highest ready thread must have affinity such that it can
 * be executed on the victim's processor.
 */
static Scheduler_Node *_Scheduler_priority_affinity_SMP_Get_highest_ready(
  Scheduler_Context *context,
  Scheduler_Node    *victim
)
{
  Scheduler_priority_SMP_Context       *self =
    _Scheduler_priority_SMP_Get_self( context );
  Priority_Control                      index;
  Scheduler_Node                       *highest = NULL;
  Thread_Control                       *victim_thread;
  uint32_t                              victim_cpu_index;
  Scheduler_priority_affinity_SMP_Node *node;

  /*
   * A NULL victim is passed when the caller only needs the overall highest
   * priority ready node, for example when checking whether migrations are
   * needed.
   */
  if ( victim == NULL ) {
    node = (Scheduler_priority_affinity_SMP_Node *)
      _Scheduler_priority_Ready_queue_first(
        &self->Bit_map,
        &self->Ready[ 0 ]
      );

    return &node->Base.Base.Base;
  }

  victim_thread = _Scheduler_Node_get_owner( victim );
  victim_cpu_index = _Per_CPU_Get_index( _Thread_Get_CPU( victim_thread ) );

  /**
   * @todo The deterministic priority scheduler structure is optimized
   * for insertion, extraction, and finding the highest priority
   * thread. Scanning the list of ready threads is not a purpose
   * for which it was optimized. There are optimizations to be
   * made in this loop.
   *
   * + by checking the major bit, we could potentially skip entire
   *   groups of 16.
   *
   * When using this scheduler as implemented, the application's
   * choice of numeric priorities and their distribution can have
   * an impact on performance.
   */
  for ( index = _Priority_bit_map_Get_highest( &self->Bit_map ) ;
        index <= PRIORITY_MAXIMUM;
        index++ )
  {
    Chain_Control   *chain =  &self->Ready[index];
    Chain_Node      *chain_node;
    for ( chain_node = _Chain_First( chain );
          chain_node != _Chain_Immutable_tail( chain ) ;
          chain_node = _Chain_Next( chain_node ) )
    {
      node = (Scheduler_priority_affinity_SMP_Node *) chain_node;

      /*
       * Can this thread run on this CPU?
       */
      if ( CPU_ISSET( (int) victim_cpu_index, node->Affinity.set ) ) {
        highest = &node->Base.Base.Base;
        break;
      }
    }
    if ( highest )
      break;
  }

  _Assert( highest != NULL );

  return highest;
}

/*
 * This method is very similar to _Scheduler_priority_SMP_Block()
 * but differs in that it invokes this scheduler's
 * get_highest_ready() support method.
 */
void _Scheduler_priority_affinity_SMP_Block(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Block(
    context,
    thread,
    node,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Get_highest_ready,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_exact
  );

  /*
   * Since this removed a single thread from the scheduled set
   * and selected the most appropriate thread from the ready
   * set to replace it, there should be no need for thread
   * migrations.
   */
}

/*
 * This method is unique to this scheduler because it must take into
 * account affinity as it searches for the lowest priority scheduled
 * thread. It ignores those which cannot be replaced by the filter
 * thread because the filter thread does not have affinity for the
 * processor on which the potential victim is executing.
 */
static Scheduler_Node * _Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *filter_base,
  Chain_Node_order   order
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Scheduler_Node *lowest_scheduled = NULL;
  Chain_Control   *scheduled = &self->Scheduled;
  Chain_Node      *chain_node;
  Scheduler_priority_affinity_SMP_Node *filter =
    _Scheduler_priority_affinity_SMP_Node_downcast( filter_base );

  for ( chain_node = _Chain_Last( scheduled );
        chain_node != _Chain_Immutable_head( scheduled ) ;
        chain_node = _Chain_Previous( chain_node ) ) {
    Scheduler_priority_affinity_SMP_Node *node;
    Thread_Control                       *thread;
    uint32_t                              cpu_index;

    node = (Scheduler_priority_affinity_SMP_Node *) chain_node;

    /*
     * If we did not find a thread of equal or lower importance than the
     * filter thread, then we cannot schedule the filter thread to execute.
     */
    if (
      (*order)(
        &node->Base.Base.Base.Node.Chain,
        &filter->Base.Base.Base.Node.Chain
      )
    ) {
      break;
    }

    /* cpu_index is the processor number the thread is executing on */
    thread = _Scheduler_Node_get_owner( &node->Base.Base.Base );
    cpu_index = _Per_CPU_Get_index( _Thread_Get_CPU( thread ) );

    if ( CPU_ISSET( (int) cpu_index, filter->Affinity.set ) ) {
      lowest_scheduled = &node->Base.Base.Base;
      break;
    }

  }

  return lowest_scheduled;
}

/*
 * This method is unique to this scheduler because it must pass
 * _Scheduler_priority_affinity_SMP_Get_lowest_scheduled into
 * _Scheduler_SMP_Enqueue_ordered.
 */
static bool _Scheduler_priority_affinity_SMP_Enqueue_fifo(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  return _Scheduler_SMP_Enqueue_ordered(
    context,
    node,
    _Scheduler_priority_affinity_SMP_Insert_priority_fifo_order,
    _Scheduler_priority_SMP_Insert_ready_fifo,
    _Scheduler_SMP_Insert_scheduled_fifo,
    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    _Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
    _Scheduler_SMP_Allocate_processor_exact
  );
}

/*
 * This method is invoked at the end of certain scheduling operations
 * to ensure that the highest priority ready threads can be scheduled
 * to execute. When we schedule with affinity, there is the possibility
 * that we need to migrate a thread to another core to ensure that the
 * highest priority ready threads are in fact scheduled.
 */
static void _Scheduler_priority_affinity_SMP_Check_for_migrations(
  Scheduler_Context *context
)
{
  Scheduler_priority_SMP_Context *self;
  Scheduler_Node                 *lowest_scheduled;
  Scheduler_Node                 *highest_ready;

  self = _Scheduler_priority_SMP_Get_self( context );

  while (1) {
    if ( _Priority_bit_map_Is_empty( &self->Bit_map ) ) {
      /* Nothing to do */
      break;
    }

    highest_ready =
      _Scheduler_priority_affinity_SMP_Get_highest_ready( context, NULL );

    lowest_scheduled =
      _Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
        context,
        highest_ready,
        _Scheduler_SMP_Insert_priority_lifo_order
      );

    /*
     * If we can't find a thread to displace from the scheduled set,
     * then we have placed all the highest priority threads possible
     * in the scheduled set.
     *
     * We found the absolute highest priority thread without
     * considering affinity. But now we have to consider that thread's
     * affinity as we look to place it.
     */
    if ( lowest_scheduled == NULL )
      break;

    /*
     * But if we found a thread which is lower priority than one
     * in the ready set, then we need to swap them out.
     */

    _Scheduler_priority_SMP_Extract_from_ready( context, highest_ready );
    _Scheduler_SMP_Enqueue_to_scheduled(
      context,
      highest_ready,
      lowest_scheduled,
      _Scheduler_SMP_Insert_scheduled_fifo,
      _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
      _Scheduler_SMP_Allocate_processor_exact
    );
  }
}

/*
 * This is the public scheduler-specific Unblock operation.
 */
void _Scheduler_priority_affinity_SMP_Unblock(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Unblock(
    context,
    thread,
    node,
    _Scheduler_priority_SMP_Do_update,
    _Scheduler_priority_affinity_SMP_Enqueue_fifo
  );

  /*
   * Perform any thread migrations that are needed due to these changes.
   */
  _Scheduler_priority_affinity_SMP_Check_for_migrations( context );
}

/*
 *  This is unique to this scheduler because it passes the scheduler-specific
 *  get_lowest_scheduled helper to _Scheduler_SMP_Enqueue_ordered.
 */
static bool _Scheduler_priority_affinity_SMP_Enqueue_ordered(
  Scheduler_Context     *context,
  Scheduler_Node        *node,
  Chain_Node_order       order,
  Scheduler_SMP_Insert   insert_ready,
  Scheduler_SMP_Insert   insert_scheduled
)
{
  return _Scheduler_SMP_Enqueue_ordered(
    context,
    node,
    order,
    insert_ready,
    insert_scheduled,
    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    _Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
    _Scheduler_SMP_Allocate_processor_exact
  );
}

/*
 *  This is unique to this scheduler because it is on the path
 *  to _Scheduler_priority_affinity_SMP_Enqueue_ordered() which
 *  invokes a scheduler-unique get_lowest_scheduled helper.
 */
static bool _Scheduler_priority_affinity_SMP_Enqueue_lifo(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  return _Scheduler_priority_affinity_SMP_Enqueue_ordered(
    context,
    node,
    _Scheduler_priority_affinity_SMP_Insert_priority_lifo_order,
    _Scheduler_priority_SMP_Insert_ready_lifo,
    _Scheduler_SMP_Insert_scheduled_lifo
  );
}

/*
 * This method is unique to this scheduler because it must
 * invoke _Scheduler_SMP_Enqueue_scheduled_ordered() with
 * this scheduler's get_highest_ready() helper.
 */
static bool _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
  Scheduler_Context    *context,
  Scheduler_Node       *node,
  Chain_Node_order      order,
  Scheduler_SMP_Insert  insert_ready,
  Scheduler_SMP_Insert  insert_scheduled
)
{
  return _Scheduler_SMP_Enqueue_scheduled_ordered(
    context,
    node,
    order,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Get_highest_ready,
    insert_ready,
    insert_scheduled,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_exact
  );
}

/*
 *  This is unique to this scheduler because it is on the path
 *  to _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered() which
 *  invokes a scheduler-unique get_highest_ready helper.
 */
static bool _Scheduler_priority_affinity_SMP_Enqueue_scheduled_lifo(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  return _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
    context,
    node,
    _Scheduler_SMP_Insert_priority_lifo_order,
    _Scheduler_priority_SMP_Insert_ready_lifo,
    _Scheduler_SMP_Insert_scheduled_lifo
  );
}

/*
 *  This is unique to this scheduler because it is on the path
 *  to _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered() which
 *  invokes a scheduler-unique get_highest_ready helper.
 */
static bool _Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  return _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
    context,
    node,
    _Scheduler_SMP_Insert_priority_fifo_order,
    _Scheduler_priority_SMP_Insert_ready_fifo,
    _Scheduler_SMP_Insert_scheduled_fifo
  );
}

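/*
 * This method forwards an ask for help request to the generic SMP support
 * using this scheduler's ready queue operations. Note that it uses the
 * generic get_lowest_scheduled helper and lazy processor allocation.
 */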
static bool _Scheduler_priority_affinity_SMP_Do_ask_for_help(
  Scheduler_Context *context,
  Thread_Control    *the_thread,
  Scheduler_Node    *node
)
{
  return _Scheduler_SMP_Ask_for_help(
    context,
    the_thread,
    node,
    _Scheduler_SMP_Insert_priority_lifo_order,
    _Scheduler_priority_SMP_Insert_ready_lifo,
    _Scheduler_SMP_Insert_scheduled_lifo,
    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    _Scheduler_SMP_Get_lowest_scheduled,
    _Scheduler_SMP_Allocate_processor_lazy
  );
}

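/*
 * This is the public scheduler-specific Update priority operation. After
 * the generic priority update, it performs any thread migrations required
 * by the new priority.
 */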
void _Scheduler_priority_affinity_SMP_Update_priority(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Update_priority(
    context,
    thread,
    node,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_SMP_Do_update,
    _Scheduler_priority_affinity_SMP_Enqueue_fifo,
    _Scheduler_priority_affinity_SMP_Enqueue_lifo,
    _Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo,
    _Scheduler_priority_affinity_SMP_Enqueue_scheduled_lifo,
    _Scheduler_priority_affinity_SMP_Do_ask_for_help
  );

  /*
   * Perform any thread migrations that are needed due to these changes.
   */
  _Scheduler_priority_affinity_SMP_Check_for_migrations( context );
}

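/*
 * This is the public scheduler-specific Ask for help operation.
 */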
bool _Scheduler_priority_affinity_SMP_Ask_for_help(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Scheduler_priority_affinity_SMP_Do_ask_for_help( context, the_thread, node );
}

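/*
 * This is the public scheduler-specific Reconsider help request operation.
 */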
void _Scheduler_priority_affinity_SMP_Reconsider_help_request(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Reconsider_help_request(
    context,
    the_thread,
    node,
    _Scheduler_priority_SMP_Extract_from_ready
  );
}

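/*
 * This is the public scheduler-specific Withdraw node operation. It uses
 * this scheduler's get_highest_ready() helper to select a replacement for
 * the withdrawn node.
 */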
void _Scheduler_priority_affinity_SMP_Withdraw_node(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  Thread_Scheduler_state   next_state
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Withdraw_node(
    context,
    the_thread,
    node,
    next_state,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Get_highest_ready,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_lazy
  );
}

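/*
 * This is the public scheduler-specific Add processor operation.
 */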
void _Scheduler_priority_affinity_SMP_Add_processor(
  const Scheduler_Control *scheduler,
  Thread_Control          *idle
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Add_processor(
    context,
    idle,
    _Scheduler_priority_SMP_Has_ready,
    _Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo,
    _Scheduler_SMP_Do_nothing_register_idle
  );
}

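/*
 * This is the public scheduler-specific Remove processor operation.
 */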
Thread_Control *_Scheduler_priority_affinity_SMP_Remove_processor(
  const Scheduler_Control *scheduler,
  Per_CPU_Control         *cpu
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Scheduler_SMP_Remove_processor(
    context,
    cpu,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Enqueue_fifo
  );
}

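/*
 * This is the public scheduler-specific Set affinity operation. It rejects
 * affinity sets which do not intersect the processors owned by this
 * scheduler instance.
 */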
bool _Scheduler_priority_affinity_SMP_Set_affinity(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Scheduler_Node          *node_base,
  const Processor_mask    *affinity
)
{
  Scheduler_Context                    *context;
  Scheduler_priority_affinity_SMP_Node *node;
  States_Control                        current_state;
  Processor_mask                        my_affinity;
  cpu_set_t                             cpuset;

  context = _Scheduler_Get_context( scheduler );
  _Processor_mask_And( &my_affinity, &context->Processors, affinity );

  if ( _Processor_mask_Count( &my_affinity ) == 0 ) {
    return false;
  }

  _Processor_mask_To_cpu_set_t( &my_affinity, sizeof( cpuset ), &cpuset );
  node = _Scheduler_priority_affinity_SMP_Node_downcast( node_base );

  /*
   * If the old and new sets are the same, there is no point in
   * doing anything.
   */
  if ( CPU_EQUAL( &cpuset, node->Affinity.set ) )
    return true;

  current_state = thread->current_state;

  if ( _States_Is_ready( current_state ) ) {
    _Scheduler_priority_affinity_SMP_Block( scheduler, thread, &node->Base.Base.Base );
  }

  CPU_COPY( &cpuset, node->Affinity.set );

  if ( _States_Is_ready( current_state ) ) {
    /*
     * FIXME: Do not ignore threads in need of help.
     */
    (void) _Scheduler_priority_affinity_SMP_Unblock( scheduler, thread, &node->Base.Base.Base );
  }

  return true;
}