source: rtems/cpukit/score/src/schedulerpriorityaffinitysmp.c @ 238629f

Last change on this file since 238629f was 238629f, checked in by Joel Sherrill <joel.sherrill@…>, on 05/19/14 at 20:26:55

Add SMP Priority Scheduler with Affinity

This scheduler attempts to account for needed thread migrations caused
as a side-effect of a thread state, affinity, or priority change operation.

This scheduler has its own allocate_processor handler named
_Scheduler_SMP_Allocate_processor_exact() because
_Scheduler_SMP_Allocate_processor() attempts to prevent an executing
thread from moving off its current CPU without considering affinity.
Without this, the scheduler makes all the right decisions and then
they are discarded at the end.

==Side Effects of Adding This Scheduler==

+ Added a Thread_Control * parameter to the Scheduler_SMP_Get_highest_ready
type so methods looking for the highest ready thread can filter by the
processor on which the blocking thread resides. This allows affinity to be
considered. Simple Priority SMP and Priority SMP ignore this parameter.
(See the sketch after this list.)

+ Added get_lowest_scheduled argument to _Scheduler_SMP_Enqueue_ordered().

+ Added allocate_processor argument to the following methods:

  • _Scheduler_SMP_Block()
  • _Scheduler_SMP_Enqueue_ordered()
  • _Scheduler_SMP_Enqueue_scheduled_ordered()

+ schedulerprioritysmpimpl.h is a new file with prototypes for methods
which were formerly static in schedulerprioritysmp.c but now need to
be public to be shared with this scheduler.
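
The sketch below is an illustrative reconstruction of the reworked
get_highest_ready hook type, not text copied from schedulersmpimpl.h;
the exact typedef shape is an assumption, though the parameter list
mirrors _Scheduler_priority_affinity_SMP_Get_highest_ready() in the
file below:

  /* Illustrative sketch only: the victim argument may be NULL.
   * Affinity-aware schedulers use it to filter candidates by the
   * processor on which that thread resides; Simple Priority SMP
   * and Priority SMP ignore it.
   */
  typedef Thread_Control *( *Scheduler_SMP_Get_highest_ready )(
    Scheduler_Context *context,
    Thread_Control    *victim
  );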

NOTE:

_Scheduler_SMP_Get_lowest_ready() appears to have a path which would
allow it to return NULL. Previously, _Scheduler_SMP_Enqueue_ordered()
would have asserted on it. If it cannot return NULL,
_Scheduler_SMP_Get_lowest_ready() should have an assertion.
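
A minimal sketch of the suggested guard (the placement and the local
variable name here are hypothetical):

  /* hypothetical: at the end of _Scheduler_SMP_Get_lowest_ready() */
  _Assert( lowest_ready != NULL );
  return lowest_ready;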

/**
 * @file
 *
 * @brief Deterministic Priority Affinity SMP Scheduler Implementation
 *
 * @ingroup ScoreSchedulerPriorityAffinitySMP
 */

/*
 *  COPYRIGHT (c) 2014.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
  #include "config.h"
#endif

#include <rtems/score/schedulerpriorityaffinitysmp.h>
#include <rtems/score/schedulerpriorityimpl.h>
#include <rtems/score/schedulersmpimpl.h>
#include <rtems/score/schedulerprioritysmpimpl.h>
#include <rtems/score/wkspace.h>
#include <rtems/score/cpusetimpl.h>

#include <rtems/score/priority.h>

/*
 * The following methods were initially static in schedulerprioritysmp.c.
 * They are now public so they can be shared with this scheduler.
 *
 *  + _Scheduler_priority_SMP_Get_self
 *  + _Scheduler_priority_SMP_Insert_ready_fifo
 *  + _Scheduler_priority_SMP_Insert_ready_lifo
 *  + _Scheduler_priority_SMP_Node_get
 *  + _Scheduler_priority_SMP_Move_from_scheduled_to_ready
 *  + _Scheduler_priority_SMP_Move_from_ready_to_scheduled
 *  + _Scheduler_priority_SMP_Extract_from_ready
 *  + _Scheduler_priority_SMP_Do_update
 */

/*
 * This method returns the scheduler node for the specified thread
 * as a scheduler specific type.
 */
static Scheduler_priority_affinity_SMP_Node *
_Scheduler_priority_affinity_SMP_Node_get(
  Thread_Control *thread
)
{
  return (Scheduler_priority_affinity_SMP_Node *) _Scheduler_Node_get( thread );
}

/*
 * This method initializes the per-thread scheduler node information
 * for this scheduler instance.
 */
void _Scheduler_priority_affinity_SMP_Node_initialize(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread
)
{
  Scheduler_SMP_Node *smp_node = _Scheduler_SMP_Node_get( thread );

  Scheduler_priority_affinity_SMP_Node *node =
    _Scheduler_priority_affinity_SMP_Node_get( thread );

  (void) scheduler;

  /*
   *  All we add is affinity information to the basic SMP node.
   */
  _Scheduler_SMP_Node_initialize( smp_node );

  node->Affinity     = *_CPU_set_Default();
  node->Affinity.set = &node->Affinity.preallocated;
}

/*
 * This method is slightly different from _Scheduler_SMP_Allocate_processor()
 * in that it does what it is asked to do. _Scheduler_SMP_Allocate_processor()
 * attempts to prevent migrations but does not take affinity into account.
 */
static inline void _Scheduler_SMP_Allocate_processor_exact(
   Scheduler_SMP_Context *self,
   Thread_Control        *scheduled,
   Thread_Control        *victim
)
{
   Scheduler_SMP_Node *scheduled_node = _Scheduler_SMP_Node_get( scheduled );
   Per_CPU_Control    *cpu_of_victim = _Thread_Get_CPU( victim );
   Per_CPU_Control    *cpu_self = _Per_CPU_Get();

   (void) self;

   _Scheduler_SMP_Node_change_state(
     scheduled_node,
     SCHEDULER_SMP_NODE_SCHEDULED
   );

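   /*
    * Unconditionally hand the victim's processor to the scheduled
    * thread. Unlike _Scheduler_SMP_Allocate_processor(), no attempt
    * is made to keep the executing thread on its current processor,
    * so affinity-driven decisions are honored exactly.
    */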
   _Thread_Set_CPU( scheduled, cpu_of_victim );
   _Scheduler_SMP_Update_heir( cpu_self, cpu_of_victim, scheduled );
}

/*
 * This method is unique to this scheduler because it takes into
 * account affinity as it determines the highest ready thread.
 * Since this is used to pick a new thread to replace the victim,
 * the highest ready thread must have affinity such that it can
 * be executed on the victim's processor.
 */
static Thread_Control *_Scheduler_priority_affinity_SMP_Get_highest_ready(
  Scheduler_Context *context,
  Thread_Control    *victim
)
{
  Scheduler_priority_SMP_Context *self =
    _Scheduler_priority_SMP_Get_self( context );
  Priority_Control                index;
  Thread_Control                 *highest = NULL;
  int                             victim_cpu;

  /*
   * A NULL victim indicates that the caller wants the highest priority
   * ready thread without any affinity filtering. This occurs when
   * checking whether reevaluations are needed.
   */
  if ( victim == NULL ) {
    return _Scheduler_priority_Ready_queue_first(
        &self->Bit_map,
        &self->Ready[ 0 ]
      );
  }

  victim_cpu = _Per_CPU_Get_index( _Thread_Get_CPU( victim ) );

  /**
   * @todo The deterministic priority scheduler structure is optimized
   * for insertion, extraction, and finding the highest priority
   * thread. Scanning the list of ready threads is not a purpose
   * for which it was optimized. There are optimizations to be
   * made in this loop.
   *
   * + by checking the major bit, we could potentially skip entire
   *   groups of 16.
   *
   * When using this scheduler as implemented, the application's
   * choice of numeric priorities and their distribution can have
   * an impact on performance.
   */
  for ( index = _Priority_bit_map_Get_highest( &self->Bit_map ) ;
        index <= PRIORITY_MAXIMUM;
        index++ )
  {
    Chain_Control   *chain = &self->Ready[index];
    Chain_Node      *chain_node;
    for ( chain_node = _Chain_First( chain );
          chain_node != _Chain_Immutable_tail( chain ) ;
          chain_node = _Chain_Next( chain_node ) )
    {
      Thread_Control                       *thread;
      Scheduler_priority_affinity_SMP_Node *node;

      thread = (Thread_Control *) chain_node;
      node = _Scheduler_priority_affinity_SMP_Node_get( thread );

      /*
       * Can this thread run on this CPU?
       */
      if ( CPU_ISSET( victim_cpu, node->Affinity.set ) ) {
        highest = thread;
        break;
      }
    }
    if ( highest )
      break;
  }

  _Assert( highest != NULL );

  return highest;
}

/*
 * This method is very similar to _Scheduler_priority_SMP_Block()
 * but has the difference that it invokes this scheduler's
 * get_highest_ready() support method.
 */
void _Scheduler_priority_affinity_SMP_Block(
  const Scheduler_Control *scheduler,
  Thread_Control *thread
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Block(
    context,
    thread,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Get_highest_ready,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_exact
  );

  /*
   * Since this removed a single thread from the scheduled set
   * and selected the most appropriate thread from the ready
   * set to replace it, there should be no need for thread
   * migrations.
   */
}

/*
 * This method is unique to this scheduler because it must take
 * affinity into account as it searches for the lowest priority
 * scheduled thread. It ignores scheduled threads executing on a
 * processor for which the filter thread does not have affinity,
 * since the filter thread could not replace them there.
 */
static Thread_Control *_Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Thread_Control    *filter,
  Chain_Node_order   order
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Thread_Control  *lowest_scheduled = NULL;
  Thread_Control  *thread = NULL;
  Chain_Control   *scheduled = &self->Scheduled;
  Scheduler_priority_affinity_SMP_Node *node =
    _Scheduler_priority_affinity_SMP_Node_get( filter );

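  /*
   * The scheduled chain is maintained in priority order, so walking
   * backwards from the tail visits the lowest priority scheduled
   * threads first.
   */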
  for ( thread = (Thread_Control *) _Chain_Last( scheduled );
        (Chain_Node *) thread != _Chain_Immutable_head( scheduled ) ;
        thread = (Thread_Control *) _Chain_Previous( &thread->Object.Node ) ) {
    int   cpu_index;

    /*
     * If we have reached a thread of higher importance than the
     * filter thread, then there is no scheduled thread the filter
     * thread can displace, and it cannot be scheduled to execute.
     */
    if ( (*order)(&thread->Object.Node, &filter->Object.Node) )
      break;

    /* cpu_index is the processor number on which thread is executing */
    cpu_index = _Per_CPU_Get_index( _Thread_Get_CPU( thread ) );

    if ( CPU_ISSET( cpu_index, node->Affinity.set ) ) {
      lowest_scheduled = thread;
      break;
    }
  }

  return lowest_scheduled;
}

/*
 * This method is unique to this scheduler because it must pass
 * _Scheduler_priority_affinity_SMP_Get_lowest_scheduled into
 * _Scheduler_SMP_Enqueue_ordered.
 */
static void _Scheduler_priority_affinity_SMP_Enqueue_fifo(
  Scheduler_Context *context,
  Thread_Control *thread
)
{
  _Scheduler_SMP_Enqueue_ordered(
    context,
    thread,
    _Scheduler_simple_Insert_priority_fifo_order,
    _Scheduler_priority_SMP_Insert_ready_fifo,
    _Scheduler_SMP_Insert_scheduled_fifo,
    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    _Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
    _Scheduler_SMP_Allocate_processor_exact
  );
}

/*
 * This method is invoked at the end of certain scheduling operations
 * to ensure that the highest priority ready thread is scheduled to
 * execute. When we schedule with affinity, there is the possibility
 * that we need to migrate a thread to another core to ensure that the
 * highest priority ready threads are in fact scheduled.
 */
static void _Scheduler_priority_affinity_SMP_Check_for_migrations(
  Scheduler_Context *context
)
{
  Thread_Control        *lowest_scheduled;
  Thread_Control        *highest_ready;
  Scheduler_SMP_Node    *lowest_scheduled_node;
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

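  /*
   * Each iteration migrates at most one thread. The loop terminates
   * when no scheduled thread can be displaced by a higher priority
   * ready thread with affinity for its processor.
   */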
  while (1) {
    highest_ready =
      _Scheduler_priority_affinity_SMP_Get_highest_ready( context, NULL );
    lowest_scheduled = _Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
      context,
      highest_ready,
      _Scheduler_simple_Insert_priority_lifo_order
    );

    /*
     * If we can't find a thread to displace from the scheduled set,
     * then we have placed all the highest priority threads possible
     * in the scheduled set.
     *
     * We found the absolute highest priority thread without
     * considering affinity. But now we have to consider that thread's
     * affinity as we look to place it.
     */
    if ( lowest_scheduled == NULL )
      break;

    /*
     * But if we found a thread which is lower priority than one
     * in the ready set, then we need to swap them out.
     */
    lowest_scheduled_node = _Scheduler_SMP_Node_get( lowest_scheduled );

    _Scheduler_SMP_Node_change_state(
      lowest_scheduled_node,
      SCHEDULER_SMP_NODE_READY
    );

    _Scheduler_SMP_Allocate_processor_exact(
      self,
      highest_ready,
      lowest_scheduled
    );

    _Scheduler_priority_SMP_Move_from_ready_to_scheduled(
      context,
      highest_ready
    );

    _Scheduler_priority_SMP_Move_from_scheduled_to_ready(
      &self->Base,
      lowest_scheduled
    );
  }
}

/*
 * This is the public scheduler specific Unblock operation.
 */
void _Scheduler_priority_affinity_SMP_Unblock(
  const Scheduler_Control *scheduler,
  Thread_Control *thread
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Unblock(
    context,
    thread,
    _Scheduler_priority_affinity_SMP_Enqueue_fifo
  );

  /*
   * Perform any thread migrations that are needed due to these changes.
   */
  _Scheduler_priority_affinity_SMP_Check_for_migrations( context );
}

/*
 *  This is unique to this scheduler because it passes the scheduler
 *  specific get_lowest_scheduled helper to _Scheduler_SMP_Enqueue_ordered.
 */
static void _Scheduler_priority_affinity_SMP_Enqueue_ordered(
  Scheduler_Context     *context,
  Thread_Control        *thread,
  Chain_Node_order       order,
  Scheduler_SMP_Insert   insert_ready,
  Scheduler_SMP_Insert   insert_scheduled
)
{
  _Scheduler_SMP_Enqueue_ordered(
    context,
    thread,
    order,
    insert_ready,
    insert_scheduled,
    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    _Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
    _Scheduler_SMP_Allocate_processor_exact
  );
}

/*
 *  This is unique to this scheduler because it is on the path
 *  to _Scheduler_priority_affinity_SMP_Enqueue_ordered() which
 *  invokes a scheduler unique get_lowest_scheduled helper.
 */
static void _Scheduler_priority_affinity_SMP_Enqueue_lifo(
  Scheduler_Context *context,
  Thread_Control *thread
)
{
  _Scheduler_priority_affinity_SMP_Enqueue_ordered(
    context,
    thread,
    _Scheduler_simple_Insert_priority_lifo_order,
    _Scheduler_priority_SMP_Insert_ready_lifo,
    _Scheduler_SMP_Insert_scheduled_lifo
  );
}

/*
 * This method is unique to this scheduler because it must
 * invoke _Scheduler_SMP_Enqueue_scheduled_ordered() with
 * this scheduler's get_highest_ready() helper.
 */
static void _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
  Scheduler_Context *context,
  Thread_Control *thread,
  Chain_Node_order order,
  Scheduler_SMP_Insert insert_ready,
  Scheduler_SMP_Insert insert_scheduled
)
{
  _Scheduler_SMP_Enqueue_scheduled_ordered(
    context,
    thread,
    order,
    _Scheduler_priority_affinity_SMP_Get_highest_ready,
    insert_ready,
    insert_scheduled,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_exact
  );
}

/*
 *  This is unique to this scheduler because it is on the path
 *  to _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered() which
 *  invokes a scheduler unique get_highest_ready helper.
 */
static void _Scheduler_priority_affinity_SMP_Enqueue_scheduled_lifo(
  Scheduler_Context *context,
  Thread_Control *thread
)
{
  _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
    context,
    thread,
    _Scheduler_simple_Insert_priority_lifo_order,
    _Scheduler_priority_SMP_Insert_ready_lifo,
    _Scheduler_SMP_Insert_scheduled_lifo
  );
}

/*
 *  This is unique to this scheduler because it is on the path
 *  to _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered() which
 *  invokes a scheduler unique get_highest_ready helper.
 */
static void _Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo(
  Scheduler_Context *context,
  Thread_Control *thread
)
{
  _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
    context,
    thread,
    _Scheduler_simple_Insert_priority_fifo_order,
    _Scheduler_priority_SMP_Insert_ready_fifo,
    _Scheduler_SMP_Insert_scheduled_fifo
  );
}

/*
 * This is the public scheduler specific Change Priority operation.
 */
void _Scheduler_priority_affinity_SMP_Change_priority(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Priority_Control         new_priority,
  bool                     prepend_it
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Change_priority(
    context,
    thread,
    new_priority,
    prepend_it,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_SMP_Do_update,
    _Scheduler_priority_affinity_SMP_Enqueue_fifo,
    _Scheduler_priority_affinity_SMP_Enqueue_lifo,
    _Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo,
    _Scheduler_priority_affinity_SMP_Enqueue_scheduled_lifo
  );

  /*
   * Perform any thread migrations that are needed due to these changes.
   */
  _Scheduler_priority_affinity_SMP_Check_for_migrations( context );
}

/*
 * This is the public scheduler specific Get Affinity operation.
 */
bool _Scheduler_priority_affinity_SMP_Get_affinity(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  Scheduler_priority_affinity_SMP_Node *node =
    _Scheduler_priority_affinity_SMP_Node_get(thread);

  (void) scheduler;

  if ( node->Affinity.setsize != cpusetsize ) {
    return false;
  }

  CPU_COPY( cpuset, node->Affinity.set );
  return true;
}

bool _Scheduler_priority_affinity_SMP_Set_affinity(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  size_t                   cpusetsize,
  const cpu_set_t         *cpuset
)
{
  Scheduler_priority_affinity_SMP_Node *node =
    _Scheduler_priority_affinity_SMP_Node_get(thread);

  (void) scheduler;

  /*
   * Validate that the cpuset meets basic requirements.
   */
  if ( !_CPU_set_Is_valid( cpuset, cpusetsize ) ) {
    return false;
  }

  /*
   * If the old and new sets are the same, there is no point in
   * doing anything.
   */
  if ( CPU_EQUAL_S( cpusetsize, cpuset, node->Affinity.set ) )
    return true;

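  /*
   * Marking the thread as migrating blocks it while the affinity set
   * is updated; clearing the state unblocks it, which causes the
   * scheduler to re-evaluate its placement under the new affinity.
   */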
  _Thread_Set_state( thread, STATES_MIGRATING );
    CPU_COPY( node->Affinity.set, cpuset );
  _Thread_Clear_state( thread, STATES_MIGRATING );

  return true;
}