source: rtems/cpukit/score/src/schedulerpriorityaffinitysmp.c @ f0c8501

Last change on this file was f0c8501, checked in by Sebastian Huber <sebastian.huber@…>, on 07/11/14 at 07:47:05

score: Scheduler helping proto for affinity sched

The priority affinity scheduler has the nice property that it can
produce more than one scheduled to ready state change in one operation.
Each scheduled to ready state change may lead to one thread in need for
help. Since it is currently only possible to return at most one thread
in need for help, we have a problem here.

A solution might be to move the check for migrations into the ask for
help mechanism.

/**
 * @file
 *
 * @brief Deterministic Priority Affinity SMP Scheduler Implementation
 *
 * @ingroup ScoreSchedulerPriorityAffinitySMP
 */

/*
 *  COPYRIGHT (c) 2014.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
  #include "config.h"
#endif

#include <rtems/score/schedulerpriorityaffinitysmp.h>
#include <rtems/score/schedulerpriorityimpl.h>
#include <rtems/score/schedulersmpimpl.h>
#include <rtems/score/schedulerprioritysmpimpl.h>
#include <rtems/score/wkspace.h>
#include <rtems/score/cpusetimpl.h>

#include <rtems/score/priority.h>

/*
 * The following methods, which were initially static in
 * schedulerprioritysmp.c, are shared with this scheduler. They have been
 * made public so they can be reused here.
 *
 *  + _Scheduler_priority_SMP_Get_self
 *  + _Scheduler_priority_SMP_Insert_ready_fifo
 *  + _Scheduler_priority_SMP_Insert_ready_lifo
 *  + _Scheduler_priority_SMP_Thread_get_node
 *  + _Scheduler_priority_SMP_Move_from_scheduled_to_ready
 *  + _Scheduler_priority_SMP_Move_from_ready_to_scheduled
 *  + _Scheduler_priority_SMP_Extract_from_ready
 *  + _Scheduler_priority_SMP_Do_update
 */

static bool _Scheduler_priority_affinity_SMP_Insert_priority_lifo_order(
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  return next != NULL
    && _Scheduler_SMP_Insert_priority_lifo_order( to_insert, next );
}

static bool _Scheduler_priority_affinity_SMP_Insert_priority_fifo_order(
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  return next != NULL
    && _Scheduler_SMP_Insert_priority_fifo_order( to_insert, next );
}

static Scheduler_priority_affinity_SMP_Node *
_Scheduler_priority_affinity_SMP_Thread_get_own_node(
  Thread_Control *thread
)
{
  return (Scheduler_priority_affinity_SMP_Node *)
    _Scheduler_Thread_get_own_node( thread );
}

/*
 * This method returns the scheduler node for the specified thread
 * as a scheduler specific type.
 */
static Scheduler_priority_affinity_SMP_Node *
_Scheduler_priority_affinity_SMP_Thread_get_node(
  Thread_Control *thread
)
{
  return (Scheduler_priority_affinity_SMP_Node *)
    _Scheduler_Thread_get_node( thread );
}

static Scheduler_priority_affinity_SMP_Node *
_Scheduler_priority_affinity_SMP_Node_downcast(
  Scheduler_Node *node
)
{
  return (Scheduler_priority_affinity_SMP_Node *) node;
}

/*
 * This method initializes the per-thread scheduler node information
 * for this scheduler instance.
 */
void _Scheduler_priority_affinity_SMP_Node_initialize(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread
)
{
  Scheduler_priority_affinity_SMP_Node *node =
    _Scheduler_priority_affinity_SMP_Thread_get_own_node( thread );

  (void) scheduler;

  _Scheduler_SMP_Node_initialize( &node->Base.Base, thread );

  /*
   *  All we add is affinity information to the basic SMP node.
   */
  node->Affinity     = *_CPU_set_Default();
  node->Affinity.set = &node->Affinity.preallocated;
}

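/*
 * Illustrative usage (a sketch, not part of the scheduler itself): a newly
 * initialized node carries the default affinity set, so the thread may run
 * on any processor. An application would typically narrow this through the
 * Classic API, assuming the usual task headers are included and `id` names
 * an existing task:
 *
 *   cpu_set_t cpuset;
 *
 *   CPU_ZERO( &cpuset );
 *   CPU_SET( 1, &cpuset );   // allow processor 1 only
 *   (void) rtems_task_set_affinity( id, sizeof( cpuset ), &cpuset );
 *
 * Such a request eventually reaches
 * _Scheduler_priority_affinity_SMP_Set_affinity() below.
 */
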
/*
 * This method is slightly different from
 * _Scheduler_SMP_Allocate_processor_lazy() in that it does what it is asked to
 * do. _Scheduler_SMP_Allocate_processor_lazy() attempts to prevent migrations
 * but does not take into account affinity.
 */
static inline void _Scheduler_SMP_Allocate_processor_exact(
  Scheduler_Context *context,
  Thread_Control    *scheduled_thread,
  Thread_Control    *victim_thread
)
{
  Per_CPU_Control *victim_cpu = _Thread_Get_CPU( victim_thread );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();

  (void) context;

  _Thread_Set_CPU( scheduled_thread, victim_cpu );
  _Thread_Dispatch_update_heir( cpu_self, victim_cpu, scheduled_thread );
}

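/*
 * For example (a sketch of the intended behavior, not additional code): if
 * the victim thread currently executes on processor 2, the exact variant
 * assigns the scheduled thread to processor 2 unconditionally, because the
 * caller has already verified the affinity constraints. As noted above, the
 * lazy variant may instead choose an assignment that avoids a migration,
 * which could violate the affinity the caller just checked.
 */
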
/*
 * This method is unique to this scheduler because it takes into
 * account affinity as it determines the highest ready thread.
 * Since this is used to pick a new thread to replace the victim,
 * the highest ready thread must have affinity such that it can
 * be executed on the victim's processor.
 */
static Scheduler_Node *_Scheduler_priority_affinity_SMP_Get_highest_ready(
  Scheduler_Context *context,
  Scheduler_Node    *victim
)
{
  Scheduler_priority_SMP_Context       *self =
    _Scheduler_priority_SMP_Get_self( context );
  Priority_Control                      index;
  Scheduler_Node                       *highest = NULL;
  Thread_Control                       *victim_thread;
  uint32_t                              victim_cpu_index;
  Scheduler_priority_affinity_SMP_Node *node;

  /*
   * A NULL victim indicates that the caller only wants the overall highest
   * priority ready node, for example when checking whether migrations are
   * needed.
   */
  if ( victim == NULL ) {
    node = (Scheduler_priority_affinity_SMP_Node *)
      _Scheduler_priority_Ready_queue_first(
        &self->Bit_map,
        &self->Ready[ 0 ]
      );

    return &node->Base.Base.Base;
  }

  victim_thread = _Scheduler_Node_get_owner( victim );
  victim_cpu_index = _Per_CPU_Get_index( _Thread_Get_CPU( victim_thread ) );

  /**
   * @todo The deterministic priority scheduler structure is optimized
   * for insertion, extraction, and finding the highest priority
   * thread. Scanning the list of ready threads is not a purpose
   * for which it was optimized. There are optimizations to be
   * made in this loop.
   *
   * + by checking the major bit, we could potentially skip entire
   *   groups of 16.
   *
   * When using this scheduler as implemented, the application's
   * choice of numeric priorities and their distribution can have
   * an impact on performance.
   */
  for ( index = _Priority_bit_map_Get_highest( &self->Bit_map ) ;
        index <= PRIORITY_MAXIMUM;
        index++ )
  {
    Chain_Control   *chain =  &self->Ready[index];
    Chain_Node      *chain_node;
    for ( chain_node = _Chain_First( chain );
          chain_node != _Chain_Immutable_tail( chain ) ;
          chain_node = _Chain_Next( chain_node ) )
    {
      node = (Scheduler_priority_affinity_SMP_Node *) chain_node;

      /*
       * Can this thread run on this CPU?
       */
      if ( CPU_ISSET( (int) victim_cpu_index, node->Affinity.set ) ) {
        highest = &node->Base.Base.Base;
        break;
      }
    }
    if ( highest )
      break;
  }

  _Assert( highest != NULL );

  return highest;
}

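/*
 * A worked example of the scan above (illustrative only): assume the victim
 * executes on processor 0 and the ready set holds thread A at priority 4
 * with affinity {1} and thread B at priority 5 with affinity {0, 1}. The
 * outer loop starts at priority 4, but A fails the CPU_ISSET() check for
 * processor 0, so the scan continues at priority 5 and returns B, the
 * highest priority ready thread that can actually run on the victim's
 * processor.
 */
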
/*
 * This method is very similar to _Scheduler_priority_SMP_Block
 * but has the difference that it invokes this scheduler's
 * get_highest_ready() support method.
 */
void _Scheduler_priority_affinity_SMP_Block(
  const Scheduler_Control *scheduler,
  Thread_Control *thread
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Block(
    context,
    thread,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Get_highest_ready,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_exact
  );

  /*
   * Since this removed a single thread from the scheduled set
   * and selected the most appropriate thread from the ready
   * set to replace it, there should be no need for thread
   * migrations.
   */
}

/*
 * This method is unique to this scheduler because it must take into
 * account affinity as it searches for the lowest priority scheduled
 * thread. It ignores those which cannot be replaced by the filter
 * thread because the filter thread does not have affinity for the
 * processor on which the potential victim is executing.
 */
static Scheduler_Node * _Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *filter_base,
  Chain_Node_order   order
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Scheduler_Node *lowest_scheduled = NULL;
  Chain_Control   *scheduled = &self->Scheduled;
  Chain_Node      *chain_node;
  Scheduler_priority_affinity_SMP_Node *filter =
    _Scheduler_priority_affinity_SMP_Node_downcast( filter_base );

  for ( chain_node = _Chain_Last( scheduled );
        chain_node != _Chain_Immutable_head( scheduled ) ;
        chain_node = _Chain_Previous( chain_node ) ) {
    Scheduler_priority_affinity_SMP_Node *node;
    Thread_Control                       *thread;
    uint32_t                              cpu_index;

    node = (Scheduler_priority_affinity_SMP_Node *) chain_node;

    /*
     * If we did not find a thread of equal or lower importance than the
     * filter thread, then we cannot schedule the filter thread to execute.
     */
    if ( (*order)( &node->Base.Base.Base.Node, &filter->Base.Base.Base.Node ) )
      break;

    /* cpu_index is the processor number the thread is executing on */
    thread = _Scheduler_Node_get_owner( &node->Base.Base.Base );
    cpu_index = _Per_CPU_Get_index( _Thread_Get_CPU( thread ) );

    if ( CPU_ISSET( (int) cpu_index, filter->Affinity.set ) ) {
      lowest_scheduled = &node->Base.Base.Base;
      break;
    }

  }

  return lowest_scheduled;
}

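/*
 * Illustration of the backwards scan above (not additional code): if the
 * filter thread has affinity {0} and the least important scheduled thread
 * happens to run on processor 1, that thread is skipped even though it has
 * the lowest priority. The next candidate on processor 0 is chosen instead,
 * provided the order test still says it is of equal or lower importance
 * than the filter thread.
 */
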
/*
 * This method is unique to this scheduler because it must pass
 * _Scheduler_priority_affinity_SMP_Get_lowest_scheduled into
 * _Scheduler_SMP_Enqueue_ordered.
 */
static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_fifo(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Thread_Control    *needs_help
)
{
  return _Scheduler_SMP_Enqueue_ordered(
    context,
    node,
    needs_help,
    _Scheduler_priority_affinity_SMP_Insert_priority_fifo_order,
    _Scheduler_priority_SMP_Insert_ready_fifo,
    _Scheduler_SMP_Insert_scheduled_fifo,
    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    _Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
    _Scheduler_SMP_Allocate_processor_exact
  );
}

/*
 * This method is invoked at the end of certain scheduling operations
 * to ensure that the highest priority ready thread can be scheduled
 * to execute. When we schedule with affinity, there is the possibility
 * that we need to migrate a thread to another core to ensure that the
 * highest priority ready threads are in fact scheduled.
 */
static void _Scheduler_priority_affinity_SMP_Check_for_migrations(
  Scheduler_Context *context
)
{
  Scheduler_Node        *lowest_scheduled;
  Scheduler_Node        *highest_ready;

  while (1) {
    highest_ready =
      _Scheduler_priority_affinity_SMP_Get_highest_ready( context, NULL );

    lowest_scheduled =
      _Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
        context,
        highest_ready,
        _Scheduler_SMP_Insert_priority_lifo_order
      );

    /*
     * If we can't find a thread to displace from the scheduled set,
     * then we have placed all the highest priority threads possible
     * in the scheduled set.
     *
     * We found the absolute highest priority thread without
     * considering affinity. But now we have to consider that thread's
     * affinity as we look to place it.
     */
    if ( lowest_scheduled == NULL )
      break;

    /*
     * FIXME: Do not consider threads using the scheduler helping protocol
     * since this could produce more than one thread in need for help in one
     * operation which is currently not possible.
     */
    if ( lowest_scheduled->help_state != SCHEDULER_HELP_YOURSELF )
      break;

    /*
     * But if we found a thread which is lower priority than one
     * in the ready set, then we need to swap them out.
     */

    _Scheduler_SMP_Node_change_state(
      _Scheduler_SMP_Node_downcast( lowest_scheduled ),
      SCHEDULER_SMP_NODE_READY
    );
    _Scheduler_Thread_change_state(
      _Scheduler_Node_get_user( lowest_scheduled ),
      THREAD_SCHEDULER_READY
    );

    _Scheduler_SMP_Allocate_processor(
      context,
      highest_ready,
      lowest_scheduled,
      _Scheduler_SMP_Allocate_processor_exact
    );

    _Scheduler_priority_SMP_Move_from_ready_to_scheduled(
      context,
      highest_ready
    );

    _Scheduler_priority_SMP_Move_from_scheduled_to_ready(
      context,
      lowest_scheduled
    );
  }
}

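/*
 * Example of a migration performed by the loop above (illustrative only):
 * on two processors, thread W (priority 9) executes on processor 0 and
 * thread X (priority 5) on processor 1, both with full affinity. Thread Z
 * (priority 4, affinity {1}) unblocks: the enqueue displaces X from
 * processor 1, leaving X ready while the less important W is still
 * scheduled. The loop then finds X as the highest ready thread, selects W
 * as the lowest scheduled thread on a processor in X's affinity set, and
 * swaps them, so both Z and X end up scheduled. The next iteration finds no
 * further displacement and terminates.
 */
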
/*
 * This is the public scheduler specific Unblock operation.
 */
Thread_Control *_Scheduler_priority_affinity_SMP_Unblock(
  const Scheduler_Control *scheduler,
  Thread_Control *thread
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
  Thread_Control    *needs_help;

  needs_help = _Scheduler_SMP_Unblock(
    context,
    thread,
    _Scheduler_priority_affinity_SMP_Enqueue_fifo
  );

  /*
   * Perform any thread migrations that are needed due to these changes.
   */
  _Scheduler_priority_affinity_SMP_Check_for_migrations( context );

  return needs_help;
}

/*
 *  This is unique to this scheduler because it passes the scheduler specific
 *  get_lowest_scheduled helper to _Scheduler_SMP_Enqueue_ordered.
 */
static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_ordered(
  Scheduler_Context     *context,
  Scheduler_Node        *node,
  Thread_Control        *needs_help,
  Chain_Node_order       order,
  Scheduler_SMP_Insert   insert_ready,
  Scheduler_SMP_Insert   insert_scheduled
)
{
  return _Scheduler_SMP_Enqueue_ordered(
    context,
    node,
    needs_help,
    order,
    insert_ready,
    insert_scheduled,
    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    _Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
    _Scheduler_SMP_Allocate_processor_exact
  );
}

/*
 *  This is unique to this scheduler because it is on the path
 *  to _Scheduler_priority_affinity_SMP_Enqueue_ordered() which
 *  invokes a scheduler unique get_lowest_scheduled helper.
 */
static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_lifo(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Thread_Control    *needs_help
)
{
  return _Scheduler_priority_affinity_SMP_Enqueue_ordered(
    context,
    node,
    needs_help,
    _Scheduler_priority_affinity_SMP_Insert_priority_lifo_order,
    _Scheduler_priority_SMP_Insert_ready_lifo,
    _Scheduler_SMP_Insert_scheduled_lifo
  );
}

/*
 * This method is unique to this scheduler because it must
 * invoke _Scheduler_SMP_Enqueue_scheduled_ordered() with
 * this scheduler's get_highest_ready() helper.
 */
static Thread_Control *
_Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
  Scheduler_Context    *context,
  Scheduler_Node       *node,
  Chain_Node_order      order,
  Scheduler_SMP_Insert  insert_ready,
  Scheduler_SMP_Insert  insert_scheduled
)
{
  return _Scheduler_SMP_Enqueue_scheduled_ordered(
    context,
    node,
    order,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Get_highest_ready,
    insert_ready,
    insert_scheduled,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_exact
  );
}

/*
 *  This is unique to this scheduler because it is on the path
 *  to _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered() which
 *  invokes a scheduler unique get_highest_ready helper.
 */
static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_scheduled_lifo(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  return _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
    context,
    node,
    _Scheduler_SMP_Insert_priority_lifo_order,
    _Scheduler_priority_SMP_Insert_ready_lifo,
    _Scheduler_SMP_Insert_scheduled_lifo
  );
}

/*
 *  This is unique to this scheduler because it is on the path
 *  to _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered() which
 *  invokes a scheduler unique get_highest_ready helper.
 */
static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  return _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
    context,
    node,
    _Scheduler_SMP_Insert_priority_fifo_order,
    _Scheduler_priority_SMP_Insert_ready_fifo,
    _Scheduler_SMP_Insert_scheduled_fifo
  );
}

/*
 * This is the public scheduler specific Change Priority operation.
 */
Thread_Control *_Scheduler_priority_affinity_SMP_Change_priority(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Priority_Control         new_priority,
  bool                     prepend_it
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
  Thread_Control    *displaced;

  displaced = _Scheduler_SMP_Change_priority(
    context,
    thread,
    new_priority,
    prepend_it,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_SMP_Do_update,
    _Scheduler_priority_affinity_SMP_Enqueue_fifo,
    _Scheduler_priority_affinity_SMP_Enqueue_lifo,
    _Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo,
    _Scheduler_priority_affinity_SMP_Enqueue_scheduled_lifo
  );

  /*
   * Perform any thread migrations that are needed due to these changes.
   */
  _Scheduler_priority_affinity_SMP_Check_for_migrations( context );

  return displaced;
}

Thread_Control *_Scheduler_priority_affinity_SMP_Ask_for_help(
  const Scheduler_Control *scheduler,
  Thread_Control          *offers_help,
  Thread_Control          *needs_help
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  needs_help = _Scheduler_SMP_Ask_for_help(
    context,
    offers_help,
    needs_help,
    _Scheduler_priority_affinity_SMP_Enqueue_fifo
  );

  _Scheduler_priority_affinity_SMP_Check_for_migrations( context );

  return needs_help;
}

/*
 * This is the public scheduler specific Get Affinity operation.
 */
bool _Scheduler_priority_affinity_SMP_Get_affinity(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  Scheduler_priority_affinity_SMP_Node *node =
    _Scheduler_priority_affinity_SMP_Thread_get_node(thread);

  (void) scheduler;

  if ( node->Affinity.setsize != cpusetsize ) {
    return false;
  }

  CPU_COPY( cpuset, node->Affinity.set );
  return true;
}

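/*
 * Illustrative caller (a sketch, not part of this file): the Classic API
 * reaches this operation through rtems_task_get_affinity(), e.g.
 *
 *   cpu_set_t cpuset;
 *
 *   if ( rtems_task_get_affinity( id, sizeof( cpuset ), &cpuset )
 *          == RTEMS_SUCCESSFUL ) {
 *     // cpuset now mirrors the task's stored affinity set
 *   }
 *
 * The size check above is why the caller must pass a set of exactly the
 * size this scheduler stores.
 */
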
bool _Scheduler_priority_affinity_SMP_Set_affinity(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  size_t                   cpusetsize,
  const cpu_set_t         *cpuset
)
{
  Scheduler_priority_affinity_SMP_Node *node =
    _Scheduler_priority_affinity_SMP_Thread_get_node(thread);

  (void) scheduler;

  /*
   * Validate that the cpuset meets basic requirements.
   */
  if ( !_CPU_set_Is_valid( cpuset, cpusetsize ) ) {
    return false;
  }

  /*
   * If the old and new sets are the same, there is no point in
   * doing anything.
   */
  if ( CPU_EQUAL_S( cpusetsize, cpuset, node->Affinity.set ) )
    return true;

  _Thread_Set_state( thread, STATES_MIGRATING );
    CPU_COPY( node->Affinity.set, cpuset );
  _Thread_Clear_state( thread, STATES_MIGRATING );

  return true;
}
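
/*
 * Note on the sequence above (explanatory, not additional code): bracketing
 * the CPU_COPY() with setting and clearing STATES_MIGRATING temporarily
 * blocks and then unblocks the thread, so the block and unblock operations
 * of this scheduler re-evaluate where the thread may execute under its new
 * affinity set. Applications typically trigger this path through
 * rtems_task_set_affinity(), as sketched near the node initialization above.
 */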