source: rtems/cpukit/score/src/schedulerpriorityaffinitysmp.c @ 9bfad8c

Last change on this file since 9bfad8c was 9bfad8c, checked in by Sebastian Huber <sebastian.huber@…>, on 06/08/16 at 20:22:46

score: Add thread priority to scheduler nodes

The thread priority is manifest in two independent areas. One area is
the user visible thread priority along with a potential thread queue.
The other is the scheduler. Currently, a thread priority update via
_Thread_Change_priority() first updates the user visible thread priority
and the thread queue, then the scheduler is notified if necessary. The
priority is passed to the scheduler via a local variable. A generation
counter ensures that the scheduler discards out-of-date priorities.

This use of a local variable ties the update in these two areas close
together. For later enhancements and the OMIP locking protocol
implementation we need more flexibility. Add a thread priority
information block to Scheduler_Node and synchronize priority value
updates via a sequence lock on SMP configurations.

Update #2556.
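As a rough illustration of the sequence lock approach described in this commit message, the sketch below shows a writer publishing a new priority value and a reader retrying until it observes a consistent value. The type and function names are hypothetical, the memory barriers a real SMP implementation needs are omitted, and this is not the actual RTEMS data structure.

#include <stdint.h>

typedef struct {
  unsigned int sequence; /* even: stable, odd: update in progress */
  uint64_t     priority; /* value protected by the sequence lock */
} Priority_seqlock_sketch;

static void priority_write( Priority_seqlock_sketch *p, uint64_t new_priority )
{
  ++p->sequence;              /* becomes odd: readers will retry */
  p->priority = new_priority;
  ++p->sequence;              /* becomes even again: value is stable */
}

static uint64_t priority_read( const Priority_seqlock_sketch *p )
{
  unsigned int seq;
  uint64_t     value;

  do {
    seq   = p->sequence;
    value = p->priority;
  } while ( ( seq & 1 ) != 0 || seq != p->sequence );

  return value;
}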

/**
 * @file
 *
 * @brief Deterministic Priority Affinity SMP Scheduler Implementation
 *
 * @ingroup ScoreSchedulerPriorityAffinitySMP
 */

/*
 *  COPYRIGHT (c) 2014.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
  #include "config.h"
#endif

#include <rtems/score/schedulerpriorityaffinitysmp.h>
#include <rtems/score/schedulerpriorityimpl.h>
#include <rtems/score/schedulersmpimpl.h>
#include <rtems/score/schedulerprioritysmpimpl.h>
#include <rtems/score/wkspace.h>
#include <rtems/score/cpusetimpl.h>

#include <rtems/score/priority.h>

/*
 * The following methods were initially static in schedulerprioritysmp.c.
 * They are now public so that they can be shared with this scheduler.
 *
 *  + _Scheduler_priority_SMP_Get_self
 *  + _Scheduler_priority_SMP_Insert_ready_fifo
 *  + _Scheduler_priority_SMP_Insert_ready_lifo
 *  + _Scheduler_priority_SMP_Thread_get_node
 *  + _Scheduler_priority_SMP_Move_from_scheduled_to_ready
 *  + _Scheduler_priority_SMP_Move_from_ready_to_scheduled
 *  + _Scheduler_priority_SMP_Extract_from_ready
 *  + _Scheduler_priority_SMP_Do_update
 */

static bool _Scheduler_priority_affinity_SMP_Insert_priority_lifo_order(
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  return next != NULL
    && _Scheduler_SMP_Insert_priority_lifo_order( to_insert, next );
}

static bool _Scheduler_priority_affinity_SMP_Insert_priority_fifo_order(
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  return next != NULL
    && _Scheduler_SMP_Insert_priority_fifo_order( to_insert, next );
}

static Scheduler_priority_affinity_SMP_Node *
_Scheduler_priority_affinity_SMP_Thread_get_own_node(
  Thread_Control *thread
)
{
  return (Scheduler_priority_affinity_SMP_Node *)
    _Scheduler_Thread_get_own_node( thread );
}

/*
 * This method returns the scheduler node for the specified thread
 * as a scheduler specific type.
 */
static Scheduler_priority_affinity_SMP_Node *
_Scheduler_priority_affinity_SMP_Thread_get_node(
  Thread_Control *thread
)
{
  return (Scheduler_priority_affinity_SMP_Node *)
    _Scheduler_Thread_get_node( thread );
}

static Scheduler_priority_affinity_SMP_Node *
_Scheduler_priority_affinity_SMP_Node_downcast(
  Scheduler_Node *node
)
{
  return (Scheduler_priority_affinity_SMP_Node *) node;
}

/*
 * This method initializes the scheduler node of the specified thread
 * for this scheduler instance.
 */
void _Scheduler_priority_affinity_SMP_Node_initialize(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_priority_affinity_SMP_Node *node =
    _Scheduler_priority_affinity_SMP_Thread_get_own_node( the_thread );

  _Scheduler_priority_SMP_Node_initialize( scheduler, the_thread, priority );

  /*
   *  All we add is affinity information to the basic SMP node.
   */
  node->Affinity     = *_CPU_set_Default();
  node->Affinity.set = &node->Affinity.preallocated;
}

/*
 * This method is slightly different from
 * _Scheduler_SMP_Allocate_processor_lazy() in that it does what it is asked to
 * do. _Scheduler_SMP_Allocate_processor_lazy() attempts to prevent migrations
 * but does not take into account affinity.
 */
static inline void _Scheduler_SMP_Allocate_processor_exact(
  Scheduler_Context *context,
  Thread_Control    *scheduled_thread,
  Thread_Control    *victim_thread
)
{
  Per_CPU_Control *victim_cpu = _Thread_Get_CPU( victim_thread );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();

  (void) context;

  _Thread_Set_CPU( scheduled_thread, victim_cpu );
  _Thread_Dispatch_update_heir( cpu_self, victim_cpu, scheduled_thread );
}

/*
 * This method is unique to this scheduler because it takes into
 * account affinity as it determines the highest ready thread.
 * Since this is used to pick a new thread to replace the victim,
 * the highest ready thread must have affinity such that it can
 * be executed on the victim's processor.
 */
static Scheduler_Node *_Scheduler_priority_affinity_SMP_Get_highest_ready(
  Scheduler_Context *context,
  Scheduler_Node    *victim
)
{
  Scheduler_priority_SMP_Context       *self =
    _Scheduler_priority_SMP_Get_self( context );
  Priority_Control                      index;
  Scheduler_Node                       *highest = NULL;
  Thread_Control                       *victim_thread;
  uint32_t                              victim_cpu_index;
  Scheduler_priority_affinity_SMP_Node *node;

  /*
   * A NULL victim is used when the caller only needs the overall highest
   * priority ready node, e.g. when checking whether migrations are needed.
   */
  if ( victim == NULL ) {
    node = (Scheduler_priority_affinity_SMP_Node *)
      _Scheduler_priority_Ready_queue_first(
        &self->Bit_map,
        &self->Ready[ 0 ]
      );

    return &node->Base.Base.Base;
  }

  victim_thread = _Scheduler_Node_get_owner( victim );
  victim_cpu_index = _Per_CPU_Get_index( _Thread_Get_CPU( victim_thread ) );

  /**
   * @todo The deterministic priority scheduler structure is optimized
   * for insertion, extraction, and finding the highest priority
   * thread. Scanning the list of ready threads is not a purpose
   * for which it was optimized. There are optimizations to be
   * made in this loop.
   *
   * + by checking the major bit, we could potentially skip entire
   *   groups of 16.
   *
   * When using this scheduler as implemented, the application's
   * choice of numeric priorities and their distribution can have
   * an impact on performance.
   */
  for ( index = _Priority_bit_map_Get_highest( &self->Bit_map ) ;
        index <= PRIORITY_MAXIMUM;
        index++ )
  {
    Chain_Control   *chain =  &self->Ready[index];
    Chain_Node      *chain_node;
    for ( chain_node = _Chain_First( chain );
          chain_node != _Chain_Immutable_tail( chain ) ;
          chain_node = _Chain_Next( chain_node ) )
    {
      node = (Scheduler_priority_affinity_SMP_Node *) chain_node;

      /*
       * Can this thread run on this CPU?
       */
      if ( CPU_ISSET( (int) victim_cpu_index, node->Affinity.set ) ) {
        highest = &node->Base.Base.Base;
        break;
      }
    }
    if ( highest )
      break;
  }

  _Assert( highest != NULL );

  return highest;
}

/*
 * This method is very similar to _Scheduler_priority_SMP_Block()
 * but has the difference that it invokes this scheduler's
 * get_highest_ready() support method.
 */
void _Scheduler_priority_affinity_SMP_Block(
  const Scheduler_Control *scheduler,
  Thread_Control *thread
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Block(
    context,
    thread,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Get_highest_ready,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_exact
  );

  /*
   * Since this removed a single thread from the scheduled set
   * and selected the most appropriate thread from the ready
   * set to replace it, there should be no need for thread
   * migrations.
   */
}

/*
 * This method is unique to this scheduler because it must take into
 * account affinity as it searches for the lowest priority scheduled
 * thread. It ignores those threads which cannot be replaced by the
 * filter thread because the filter thread does not have affinity
 * for the processor on which the potential victim is executing.
 */
static Scheduler_Node * _Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *filter_base,
  Chain_Node_order   order
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Scheduler_Node *lowest_scheduled = NULL;
  Chain_Control   *scheduled = &self->Scheduled;
  Chain_Node      *chain_node;
  Scheduler_priority_affinity_SMP_Node *filter =
    _Scheduler_priority_affinity_SMP_Node_downcast( filter_base );

  for ( chain_node = _Chain_Last( scheduled );
        chain_node != _Chain_Immutable_head( scheduled ) ;
        chain_node = _Chain_Previous( chain_node ) ) {
    Scheduler_priority_affinity_SMP_Node *node;
    Thread_Control                       *thread;
    uint32_t                              cpu_index;

    node = (Scheduler_priority_affinity_SMP_Node *) chain_node;

    /*
     * If this thread is not of equal or lower importance than the filter
     * thread, then we can't schedule the filter thread to execute, so
     * stop searching.
     */
    if ( (*order)( &node->Base.Base.Base.Node, &filter->Base.Base.Base.Node ) )
      break;

    /* cpu_index is the processor number the thread is executing on */
    thread = _Scheduler_Node_get_owner( &node->Base.Base.Base );
    cpu_index = _Per_CPU_Get_index( _Thread_Get_CPU( thread ) );

    if ( CPU_ISSET( (int) cpu_index, filter->Affinity.set ) ) {
      lowest_scheduled = &node->Base.Base.Base;
      break;
    }

  }

  return lowest_scheduled;
}

/*
 * This method is unique to this scheduler because it must pass
 * _Scheduler_priority_affinity_SMP_Get_lowest_scheduled into
 * _Scheduler_SMP_Enqueue_ordered.
 */
static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_fifo(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Thread_Control    *needs_help
)
{
  return _Scheduler_SMP_Enqueue_ordered(
    context,
    node,
    needs_help,
    _Scheduler_priority_affinity_SMP_Insert_priority_fifo_order,
    _Scheduler_priority_SMP_Insert_ready_fifo,
    _Scheduler_SMP_Insert_scheduled_fifo,
    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    _Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
    _Scheduler_SMP_Allocate_processor_exact
  );
}

/*
 * This method is invoked at the end of certain scheduling operations
 * to ensure that the highest priority ready threads are in fact
 * scheduled to execute. When we schedule with affinity, there is the
 * possibility that we need to migrate a thread to another core to
 * achieve this.
 */
static void _Scheduler_priority_affinity_SMP_Check_for_migrations(
  Scheduler_Context *context
)
{
  Scheduler_Node        *lowest_scheduled;
  Scheduler_Node        *highest_ready;

  while (1) {
    highest_ready =
      _Scheduler_priority_affinity_SMP_Get_highest_ready( context, NULL );

    lowest_scheduled =
      _Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
        context,
        highest_ready,
        _Scheduler_SMP_Insert_priority_lifo_order
      );

    /*
     * If we can't find a thread to displace from the scheduled set,
     * then we have placed all the highest priority threads possible
     * in the scheduled set.
     *
     * We found the absolute highest priority thread without
     * considering affinity. But now we have to consider that thread's
     * affinity as we look to place it.
     */
    if ( lowest_scheduled == NULL )
      break;

    /*
     * FIXME: Do not consider threads using the scheduler helping protocol
     * since this could produce more than one thread in need for help in one
     * operation which is currently not possible.
     */
    if ( lowest_scheduled->help_state != SCHEDULER_HELP_YOURSELF )
      break;

    /*
     * But if we found a thread which is lower priority than one
     * in the ready set, then we need to swap them out.
     */

    _Scheduler_SMP_Node_change_state(
      _Scheduler_SMP_Node_downcast( lowest_scheduled ),
      SCHEDULER_SMP_NODE_READY
    );
    _Scheduler_Thread_change_state(
      _Scheduler_Node_get_user( lowest_scheduled ),
      THREAD_SCHEDULER_READY
    );

    _Scheduler_SMP_Allocate_processor(
      context,
      highest_ready,
      lowest_scheduled,
      _Scheduler_SMP_Allocate_processor_exact
    );

    _Scheduler_priority_SMP_Move_from_ready_to_scheduled(
      context,
      highest_ready
    );

    _Scheduler_priority_SMP_Move_from_scheduled_to_ready(
      context,
      lowest_scheduled
    );
  }
}

/*
 * This is the public scheduler specific Unblock operation.
 */
Thread_Control *_Scheduler_priority_affinity_SMP_Unblock(
  const Scheduler_Control *scheduler,
  Thread_Control *thread
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
  Thread_Control    *needs_help;

  needs_help = _Scheduler_SMP_Unblock(
    context,
    thread,
    _Scheduler_priority_SMP_Do_update,
    _Scheduler_priority_affinity_SMP_Enqueue_fifo
  );

  /*
   * Perform any thread migrations that are needed due to these changes.
   */
  _Scheduler_priority_affinity_SMP_Check_for_migrations( context );

  return needs_help;
}

/*
 *  This is unique to this scheduler because it passes scheduler specific
 *  get_lowest_scheduled helper to _Scheduler_SMP_Enqueue_ordered.
 */
static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_ordered(
  Scheduler_Context     *context,
  Scheduler_Node        *node,
  Thread_Control        *needs_help,
  Chain_Node_order       order,
  Scheduler_SMP_Insert   insert_ready,
  Scheduler_SMP_Insert   insert_scheduled
)
{
  return _Scheduler_SMP_Enqueue_ordered(
    context,
    node,
    needs_help,
    order,
    insert_ready,
    insert_scheduled,
    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    _Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
    _Scheduler_SMP_Allocate_processor_exact
  );
}

/*
 *  This is unique to this scheduler because it is on the path
 *  to _Scheduler_priority_affinity_SMP_Enqueue_ordered() which
 *  invokes a scheduler unique get_lowest_scheduled helper.
 */
static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_lifo(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Thread_Control    *needs_help
)
{
  return _Scheduler_priority_affinity_SMP_Enqueue_ordered(
    context,
    node,
    needs_help,
    _Scheduler_priority_affinity_SMP_Insert_priority_lifo_order,
    _Scheduler_priority_SMP_Insert_ready_lifo,
    _Scheduler_SMP_Insert_scheduled_lifo
  );
}

/*
 * This method is unique to this scheduler because it must
 * invoke _Scheduler_SMP_Enqueue_scheduled_ordered() with
 * this scheduler's get_highest_ready() helper.
 */
static Thread_Control *
_Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
  Scheduler_Context    *context,
  Scheduler_Node       *node,
  Chain_Node_order      order,
  Scheduler_SMP_Insert  insert_ready,
  Scheduler_SMP_Insert  insert_scheduled
)
{
  return _Scheduler_SMP_Enqueue_scheduled_ordered(
    context,
    node,
    order,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Get_highest_ready,
    insert_ready,
    insert_scheduled,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_exact
  );
}

/*
 *  This is unique to this scheduler because it is on the path
 *  to _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered() which
 *  invokes a scheduler unique get_highest_ready helper.
 */
static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_scheduled_lifo(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  return _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
    context,
    node,
    _Scheduler_SMP_Insert_priority_lifo_order,
    _Scheduler_priority_SMP_Insert_ready_lifo,
    _Scheduler_SMP_Insert_scheduled_lifo
  );
}

/*
 *  This is unique to this scheduler because it is on the path
 *  to _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered() which
 *  invokes a scheduler unique get_highest_ready helper.
 */
static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  return _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
    context,
    node,
    _Scheduler_SMP_Insert_priority_fifo_order,
    _Scheduler_priority_SMP_Insert_ready_fifo,
    _Scheduler_SMP_Insert_scheduled_fifo
  );
}

/*
 * This is the public scheduler specific Update Priority operation.
 */
Thread_Control *_Scheduler_priority_affinity_SMP_Update_priority(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
  Thread_Control    *displaced;

  displaced = _Scheduler_SMP_Update_priority(
    context,
    thread,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_SMP_Do_update,
    _Scheduler_priority_affinity_SMP_Enqueue_fifo,
    _Scheduler_priority_affinity_SMP_Enqueue_lifo,
    _Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo,
    _Scheduler_priority_affinity_SMP_Enqueue_scheduled_lifo
  );

  /*
   * Perform any thread migrations that are needed due to these changes.
   */
  _Scheduler_priority_affinity_SMP_Check_for_migrations( context );

  return displaced;
}

Thread_Control *_Scheduler_priority_affinity_SMP_Ask_for_help(
  const Scheduler_Control *scheduler,
  Thread_Control          *offers_help,
  Thread_Control          *needs_help
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  needs_help = _Scheduler_SMP_Ask_for_help(
    context,
    offers_help,
    needs_help,
    _Scheduler_priority_affinity_SMP_Enqueue_fifo
  );

  _Scheduler_priority_affinity_SMP_Check_for_migrations( context );

  return needs_help;
}

/*
 * This is the public scheduler specific Get Affinity operation.
 */
bool _Scheduler_priority_affinity_SMP_Get_affinity(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  Scheduler_priority_affinity_SMP_Node *node =
    _Scheduler_priority_affinity_SMP_Thread_get_node(thread);

  (void) scheduler;

  if ( node->Affinity.setsize != cpusetsize ) {
    return false;
  }

  CPU_COPY( cpuset, node->Affinity.set );
  return true;
}

bool _Scheduler_priority_affinity_SMP_Set_affinity(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  size_t                   cpusetsize,
  const cpu_set_t         *cpuset
)
{
  Scheduler_priority_affinity_SMP_Node *node;
  States_Control                        current_state;

  /*
   * Validate that the cpuset meets basic requirements.
   */
  if ( !_CPU_set_Is_valid( cpuset, cpusetsize ) ) {
    return false;
  }

  node = _Scheduler_priority_affinity_SMP_Thread_get_node( thread );

  /*
   * If the old and new affinity sets are the same, there is no point
   * in doing anything.
   */
  if ( CPU_EQUAL_S( cpusetsize, cpuset, node->Affinity.set ) )
    return true;

  current_state = thread->current_state;

  if ( _States_Is_ready( current_state ) ) {
    _Scheduler_priority_affinity_SMP_Block( scheduler, thread );
  }

  CPU_COPY( node->Affinity.set, cpuset );

  if ( _States_Is_ready( current_state ) ) {
    /*
     * FIXME: Do not ignore threads in need for help.
     */
    (void) _Scheduler_priority_affinity_SMP_Unblock( scheduler, thread );
  }

  return true;
}
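
For context, the affinity operations above are normally reached through the Classic API task affinity directives rather than called directly. A minimal usage sketch follows, assuming an SMP configuration with at least two processors and omitting error handling; the directive and macro names are from the RTEMS Classic API, while the helper function name is only illustrative.

#include <rtems.h>

/* Restrict the calling task to processor 1 (illustrative sketch). */
static void pin_self_to_cpu_one( void )
{
  cpu_set_t cpuset;

  CPU_ZERO( &cpuset );
  CPU_SET( 1, &cpuset );

  (void) rtems_task_set_affinity( RTEMS_SELF, sizeof( cpuset ), &cpuset );
}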