source: rtems/cpukit/score/src/schedulerpriorityaffinitysmp.c @ 8568341

Last change on this file since 8568341 was 8568341, checked in by Sebastian Huber <sebastian.huber@…>, on 06/11/14 at 12:31:03

score: Need for help indicator for scheduler ops

Return a thread in need of help from the following scheduler operations:

  • unblock,
  • change priority, and
  • yield.

A thread in need of help is a thread that encounters a scheduler state
change from scheduled to ready, or a thread that cannot be scheduled in
an unblock operation. Such a thread can ask the threads that depend on
resources it owns for help.
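
As a hedged illustration (not part of this change), a caller of the
unblock operation might consume the returned thread roughly as follows;
the _Scheduler_Ask_for_help() name is an assumption for this sketch only:

  #include <rtems/score/schedulerimpl.h>

  static void example_unblock_and_help(
    const Scheduler_Control *scheduler,
    Thread_Control          *the_thread
  )
  {
    Thread_Control *needs_help =
      ( *scheduler->Operations.unblock )( scheduler, the_thread );

    if ( needs_help != NULL ) {
      /* Assumed helper: ask the threads that depend on resources owned
         by needs_help for help. */
      _Scheduler_Ask_for_help( needs_help );
    }
  }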

/**
 * @file
 *
 * @brief Deterministic Priority Affinity SMP Scheduler Implementation
 *
 * @ingroup ScoreSchedulerPriorityAffinitySMP
 */

/*
 *  COPYRIGHT (c) 2014.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
  #include "config.h"
#endif

#include <rtems/score/schedulerpriorityaffinitysmp.h>
#include <rtems/score/schedulerpriorityimpl.h>
#include <rtems/score/schedulersmpimpl.h>
#include <rtems/score/schedulerprioritysmpimpl.h>
#include <rtems/score/wkspace.h>
#include <rtems/score/cpusetimpl.h>

#include <rtems/score/priority.h>

/*
 * The following methods, which initially were static in
 * schedulerprioritysmp.c, are now public so they can be shared with
 * this scheduler:
 *
 *  + _Scheduler_priority_SMP_Get_self
 *  + _Scheduler_priority_SMP_Insert_ready_fifo
 *  + _Scheduler_priority_SMP_Insert_ready_lifo
 *  + _Scheduler_priority_SMP_Thread_get_node
 *  + _Scheduler_priority_SMP_Move_from_scheduled_to_ready
 *  + _Scheduler_priority_SMP_Move_from_ready_to_scheduled
 *  + _Scheduler_priority_SMP_Extract_from_ready
 *  + _Scheduler_priority_SMP_Do_update
 */

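/*
 * Unlike the plain _Scheduler_SMP_Insert_priority_*_order() helpers,
 * the two wrappers below tolerate a NULL next node.  This matters
 * because _Scheduler_priority_affinity_SMP_Get_lowest_scheduled()
 * further down may find no scheduled node that the enqueued thread is
 * allowed to displace, and the generic enqueue path still applies the
 * order function to that result.
 */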
static bool _Scheduler_priority_affinity_SMP_Insert_priority_lifo_order(
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  return next != NULL
    && _Scheduler_SMP_Insert_priority_lifo_order( to_insert, next );
}

static bool _Scheduler_priority_affinity_SMP_Insert_priority_fifo_order(
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  return next != NULL
    && _Scheduler_SMP_Insert_priority_fifo_order( to_insert, next );
}

/*
 * This method returns the scheduler node for the specified thread
 * as a scheduler specific type.
 */
static Scheduler_priority_affinity_SMP_Node *
_Scheduler_priority_affinity_SMP_Thread_get_node(
  Thread_Control *thread
)
{
  return (Scheduler_priority_affinity_SMP_Node *)
    _Scheduler_Thread_get_node( thread );
}

static Scheduler_priority_affinity_SMP_Node *
_Scheduler_priority_affinity_SMP_Node_downcast(
  Scheduler_Node *node
)
{
  return (Scheduler_priority_affinity_SMP_Node *) node;
}

/*
 * This method initializes the per-thread scheduler node for this
 * scheduler instance.
 */
void _Scheduler_priority_affinity_SMP_Node_initialize(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread
)
{
  Scheduler_priority_affinity_SMP_Node *node =
    _Scheduler_priority_affinity_SMP_Thread_get_node( thread );

  (void) scheduler;

  _Scheduler_SMP_Node_initialize( &node->Base.Base, thread );

  /*
   *  All we add is affinity information to the basic SMP node.
   */
  node->Affinity     = *_CPU_set_Default();
  node->Affinity.set = &node->Affinity.preallocated;
}

/*
 * This method is slightly different from
 * _Scheduler_SMP_Allocate_processor_lazy() in that it does what it is
 * asked to do.  _Scheduler_SMP_Allocate_processor_lazy() attempts to
 * prevent migrations but does not take affinity into account.
 */
static inline void _Scheduler_SMP_Allocate_processor_exact(
  Scheduler_Context *context,
  Thread_Control    *scheduled_thread,
  Thread_Control    *victim_thread
)
{
  Per_CPU_Control *victim_cpu = _Thread_Get_CPU( victim_thread );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();

  (void) context;

  _Thread_Set_CPU( scheduled_thread, victim_cpu );
  _Thread_Dispatch_update_heir( cpu_self, victim_cpu, scheduled_thread );
}

/*
 * This method is unique to this scheduler because it takes affinity
 * into account as it determines the highest ready thread.  Since this
 * is used to pick a new thread to replace the victim, the highest
 * ready thread must have affinity such that it can be executed on the
 * victim's processor.
 */
static Scheduler_Node *_Scheduler_priority_affinity_SMP_Get_highest_ready(
  Scheduler_Context *context,
  Scheduler_Node    *victim
)
{
  Scheduler_priority_SMP_Context       *self =
    _Scheduler_priority_SMP_Get_self( context );
  Priority_Control                      index;
  Scheduler_Node                       *highest = NULL;
  Thread_Control                       *victim_thread;
  uint32_t                              victim_cpu_index;
  Scheduler_priority_affinity_SMP_Node *node;

  /*
   * A NULL victim is passed when the caller only needs the overall
   * highest priority ready thread, without regard to affinity.  This
   * is used by the migration check to see if reevaluations are needed.
   */
  if ( victim == NULL ) {
    node = (Scheduler_priority_affinity_SMP_Node *)
      _Scheduler_priority_Ready_queue_first(
        &self->Bit_map,
        &self->Ready[ 0 ]
      );

    return &node->Base.Base.Base;
  }

  victim_thread = _Scheduler_Node_get_owner( victim );
  victim_cpu_index = _Per_CPU_Get_index( _Thread_Get_CPU( victim_thread ) );

  /**
   * @todo The deterministic priority scheduler structure is optimized
   * for insertion, extraction, and finding the highest priority
   * thread. Scanning the list of ready threads is not a purpose
   * for which it was optimized. There are optimizations to be
   * made in this loop.
   *
   * + by checking the major bit, we could potentially skip entire
   *   groups of 16.
   *
   * When using this scheduler as implemented, the application's
   * choice of numeric priorities and their distribution can have
   * an impact on performance.
   */
  for ( index = _Priority_bit_map_Get_highest( &self->Bit_map ) ;
        index <= PRIORITY_MAXIMUM;
        index++ )
  {
    Chain_Control   *chain = &self->Ready[index];
    Chain_Node      *chain_node;
    for ( chain_node = _Chain_First( chain );
          chain_node != _Chain_Immutable_tail( chain ) ;
          chain_node = _Chain_Next( chain_node ) )
    {
      node = (Scheduler_priority_affinity_SMP_Node *) chain_node;

      /*
       * Can this thread run on this CPU?
       */
      if ( CPU_ISSET( (int) victim_cpu_index, node->Affinity.set ) ) {
        highest = &node->Base.Base.Base;
        break;
      }
    }
    if ( highest )
      break;
  }

  _Assert( highest != NULL );

  return highest;
}

/*
 * This method is very similar to _Scheduler_priority_SMP_Block()
 * but has the difference that it invokes this scheduler's
 * get_highest_ready() support method.
 */
void _Scheduler_priority_affinity_SMP_Block(
  const Scheduler_Control *scheduler,
  Thread_Control *thread
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Block(
    context,
    thread,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Get_highest_ready,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_exact
  );

  /*
   * Since this removed a single thread from the scheduled set
   * and selected the most appropriate thread from the ready
   * set to replace it, there should be no need for thread
   * migrations.
   */
}

/*
 * This method is unique to this scheduler because it must take
 * affinity into account as it searches for the lowest priority
 * scheduled thread.  It ignores those scheduled threads executing on
 * processors for which the filter thread has no affinity, since the
 * filter thread could not replace them there.
 */
static Scheduler_Node * _Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *filter_base,
  Chain_Node_order   order
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Scheduler_Node *lowest_scheduled = NULL;
  Chain_Control   *scheduled = &self->Scheduled;
  Chain_Node      *chain_node;
  Scheduler_priority_affinity_SMP_Node *filter =
    _Scheduler_priority_affinity_SMP_Node_downcast( filter_base );

  for ( chain_node = _Chain_Last( scheduled );
        chain_node != _Chain_Immutable_head( scheduled ) ;
        chain_node = _Chain_Previous( chain_node ) ) {
    Scheduler_priority_affinity_SMP_Node *node;
    Thread_Control                       *thread;
    uint32_t                              cpu_index;

    node = (Scheduler_priority_affinity_SMP_Node *) chain_node;

    /*
     * If we did not find a thread of equal or lower importance than
     * the filter thread, then we cannot schedule the filter thread
     * to execute.
     */
    if ( (*order)( &node->Base.Base.Base.Node, &filter->Base.Base.Base.Node ) )
      break;

    /* cpu_index is the processor number the thread is executing on */
    thread = _Scheduler_Node_get_owner( &node->Base.Base.Base );
    cpu_index = _Per_CPU_Get_index( _Thread_Get_CPU( thread ) );

    if ( CPU_ISSET( (int) cpu_index, filter->Affinity.set ) ) {
      lowest_scheduled = &node->Base.Base.Base;
      break;
    }
  }

  return lowest_scheduled;
}

/*
 * This method is unique to this scheduler because it must pass
 * _Scheduler_priority_affinity_SMP_Get_lowest_scheduled into
 * _Scheduler_SMP_Enqueue_ordered.
 */
static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_fifo(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Thread_Control    *needs_help
)
{
  return _Scheduler_SMP_Enqueue_ordered(
    context,
    node,
    needs_help,
    _Scheduler_priority_affinity_SMP_Insert_priority_fifo_order,
    _Scheduler_priority_SMP_Insert_ready_fifo,
    _Scheduler_SMP_Insert_scheduled_fifo,
    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    _Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
    _Scheduler_SMP_Allocate_processor_exact
  );
}

/*
 * This method is invoked at the end of certain scheduling operations
 * to ensure that the highest priority ready threads are in fact
 * scheduled to execute.  When we schedule with affinity, there is the
 * possibility that a thread must be migrated to another core for that
 * to be the case.
 */
static void _Scheduler_priority_affinity_SMP_Check_for_migrations(
  Scheduler_Context *context
)
{
  Scheduler_Node        *lowest_scheduled;
  Scheduler_Node        *highest_ready;

  while (1) {
    /*
     * Find the absolute highest priority ready thread without
     * considering affinity, then look for a scheduled thread it could
     * displace on a processor it has affinity for.
     */
    highest_ready =
      _Scheduler_priority_affinity_SMP_Get_highest_ready( context, NULL );

    lowest_scheduled =
      _Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
        context,
        highest_ready,
        _Scheduler_SMP_Insert_priority_lifo_order
      );

    /*
     * If we cannot find a thread to displace from the scheduled set,
     * then we have placed all the highest priority threads possible
     * in the scheduled set.
     */
    if ( lowest_scheduled == NULL )
      break;

    /*
     * Otherwise we found a scheduled thread which is lower priority
     * than a ready thread, so swap them.
     */
    _Scheduler_SMP_Node_change_state(
      _Scheduler_SMP_Node_downcast( lowest_scheduled ),
      SCHEDULER_SMP_NODE_READY
    );

    _Scheduler_SMP_Allocate_processor(
      context,
      highest_ready,
      lowest_scheduled,
      _Scheduler_SMP_Allocate_processor_exact
    );

    _Scheduler_priority_SMP_Move_from_ready_to_scheduled(
      context,
      highest_ready
    );

    _Scheduler_priority_SMP_Move_from_scheduled_to_ready(
      context,
      lowest_scheduled
    );
  }
}

/*
 * This is the public scheduler specific Unblock operation.
 */
Thread_Control *_Scheduler_priority_affinity_SMP_Unblock(
  const Scheduler_Control *scheduler,
  Thread_Control *thread
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
  Thread_Control    *needs_help;

  needs_help = _Scheduler_SMP_Unblock(
    context,
    thread,
    _Scheduler_priority_affinity_SMP_Enqueue_fifo
  );

  /*
   * Perform any thread migrations that are needed due to these changes.
   */
  _Scheduler_priority_affinity_SMP_Check_for_migrations( context );

  return needs_help;
}

/*
 *  This is unique to this scheduler because it passes the scheduler
 *  specific get_lowest_scheduled helper to
 *  _Scheduler_SMP_Enqueue_ordered.
 */
static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_ordered(
  Scheduler_Context     *context,
  Scheduler_Node        *node,
  Thread_Control        *needs_help,
  Chain_Node_order       order,
  Scheduler_SMP_Insert   insert_ready,
  Scheduler_SMP_Insert   insert_scheduled
)
{
  return _Scheduler_SMP_Enqueue_ordered(
    context,
    node,
    needs_help,
    order,
    insert_ready,
    insert_scheduled,
    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    _Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
    _Scheduler_SMP_Allocate_processor_exact
  );
}

/*
 *  This is unique to this scheduler because it is on the path
 *  to _Scheduler_priority_affinity_SMP_Enqueue_ordered() which
 *  invokes a scheduler unique get_lowest_scheduled helper.
 */
static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_lifo(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Thread_Control    *needs_help
)
{
  return _Scheduler_priority_affinity_SMP_Enqueue_ordered(
    context,
    node,
    needs_help,
    _Scheduler_priority_affinity_SMP_Insert_priority_lifo_order,
    _Scheduler_priority_SMP_Insert_ready_lifo,
    _Scheduler_SMP_Insert_scheduled_lifo
  );
}

/*
 * This method is unique to this scheduler because it must
 * invoke _Scheduler_SMP_Enqueue_scheduled_ordered() with
 * this scheduler's get_highest_ready() helper.
 */
static Thread_Control *
_Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
  Scheduler_Context    *context,
  Scheduler_Node       *node,
  Chain_Node_order      order,
  Scheduler_SMP_Insert  insert_ready,
  Scheduler_SMP_Insert  insert_scheduled
)
{
  return _Scheduler_SMP_Enqueue_scheduled_ordered(
    context,
    node,
    order,
    _Scheduler_priority_affinity_SMP_Get_highest_ready,
    insert_ready,
    insert_scheduled,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_exact
  );
}

/*
 *  This is unique to this scheduler because it is on the path
 *  to _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered()
 *  which invokes a scheduler unique get_highest_ready helper.
 */
static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_scheduled_lifo(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  return _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
    context,
    node,
    _Scheduler_SMP_Insert_priority_lifo_order,
    _Scheduler_priority_SMP_Insert_ready_lifo,
    _Scheduler_SMP_Insert_scheduled_lifo
  );
}

/*
 *  This is unique to this scheduler because it is on the path
 *  to _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered()
 *  which invokes a scheduler unique get_highest_ready helper.
 */
static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  return _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
    context,
    node,
    _Scheduler_SMP_Insert_priority_fifo_order,
    _Scheduler_priority_SMP_Insert_ready_fifo,
    _Scheduler_SMP_Insert_scheduled_fifo
  );
}

/*
 * This is the public scheduler specific Change Priority operation.
 */
Thread_Control *_Scheduler_priority_affinity_SMP_Change_priority(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Priority_Control         new_priority,
  bool                     prepend_it
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
  Thread_Control    *displaced;

  displaced = _Scheduler_SMP_Change_priority(
    context,
    thread,
    new_priority,
    prepend_it,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_SMP_Do_update,
    _Scheduler_priority_affinity_SMP_Enqueue_fifo,
    _Scheduler_priority_affinity_SMP_Enqueue_lifo,
    _Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo,
    _Scheduler_priority_affinity_SMP_Enqueue_scheduled_lifo
  );

  /*
   * Perform any thread migrations that are needed due to these changes.
   */
  _Scheduler_priority_affinity_SMP_Check_for_migrations( context );

  return displaced;
}

/*
 * This is the public scheduler specific Get Affinity operation.
 */
bool _Scheduler_priority_affinity_SMP_Get_affinity(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  Scheduler_priority_affinity_SMP_Node *node =
    _Scheduler_priority_affinity_SMP_Thread_get_node( thread );

  (void) scheduler;

  if ( node->Affinity.setsize != cpusetsize ) {
    return false;
  }

  CPU_COPY( cpuset, node->Affinity.set );
  return true;
}

/*
 * This is the public scheduler specific Set Affinity operation.
 */
bool _Scheduler_priority_affinity_SMP_Set_affinity(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  size_t                   cpusetsize,
  const cpu_set_t         *cpuset
)
{
  Scheduler_priority_affinity_SMP_Node *node =
    _Scheduler_priority_affinity_SMP_Thread_get_node( thread );

  (void) scheduler;

  /*
   * Validate that the cpuset meets basic requirements.
   */
  if ( !_CPU_set_Is_valid( cpuset, cpusetsize ) ) {
    return false;
  }

  /*
   * If the old and new set are the same, there is no point in
   * doing anything.
   */
  if ( CPU_EQUAL_S( cpusetsize, cpuset, node->Affinity.set ) )
    return true;

  /*
   * Set the thread to the blocking MIGRATING state while the affinity
   * set is updated; clearing the state afterwards lets the scheduler
   * re-evaluate where the thread is allowed to execute.
   */
  _Thread_Set_state( thread, STATES_MIGRATING );
  CPU_COPY( node->Affinity.set, cpuset );
  _Thread_Clear_state( thread, STATES_MIGRATING );

  return true;
}
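
As a hedged usage sketch (not taken from this file), the Classic API
directive below is the typical route into
_Scheduler_priority_affinity_SMP_Set_affinity(); the chosen CPU number
and function name are illustrative assumptions:

  #include <rtems.h>

  /* Pin the calling task to processor 1.  The directive forwards the
     set to the configured scheduler's set-affinity operation. */
  static void pin_self_to_cpu_one( void )
  {
    cpu_set_t cpuset;

    CPU_ZERO( &cpuset );
    CPU_SET( 1, &cpuset );

    (void) rtems_task_set_affinity( RTEMS_SELF, sizeof( cpuset ), &cpuset );
  }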