source: rtems/cpukit/score/src/schedulerpriorityaffinitysmp.c @ 5c3d250

Last change to this file was 5c3d250, checked in by Sebastian Huber <sebastian.huber@…> on 07/04/14 at 12:34:23

score: Implement scheduler helping protocol

The following scheduler operations return a thread in need of help:

  • unblock,
  • change priority, and
  • yield.

A thread in need of help is a thread that encounters a scheduler state
change from scheduled to ready, or a thread that cannot be scheduled in
an unblock operation. Such a thread can ask the threads that depend on
resources it owns for help.

Add a new ask for help scheduler operation. This operation is used by
_Scheduler_Ask_for_help() to help threads in need of help returned by
the operations mentioned above. It is also used by
_Scheduler_Thread_change_resource_root() in case the root of a resource
sub-tree changes, for example on an ownership change of a resource.

If it is not possible to schedule a thread in need of help, then the
corresponding scheduler node is placed into the set of ready scheduler
nodes of the scheduler instance. Once a state change from ready to
scheduled happens for this scheduler node, it may be used to schedule
the thread in need of help.

/**
 * @file
 *
 * @brief Deterministic Priority Affinity SMP Scheduler Implementation
 *
 * @ingroup ScoreSchedulerPriorityAffinitySMP
 */

/*
 *  COPYRIGHT (c) 2014.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
  #include "config.h"
#endif

#include <rtems/score/schedulerpriorityaffinitysmp.h>
#include <rtems/score/schedulerpriorityimpl.h>
#include <rtems/score/schedulersmpimpl.h>
#include <rtems/score/schedulerprioritysmpimpl.h>
#include <rtems/score/wkspace.h>
#include <rtems/score/cpusetimpl.h>

#include <rtems/score/priority.h>

/*
 * The following methods were initially static in schedulerprioritysmp.c.
 * They are now public so that this scheduler can share them:
 *
 *  + _Scheduler_priority_SMP_Get_self
 *  + _Scheduler_priority_SMP_Insert_ready_fifo
 *  + _Scheduler_priority_SMP_Insert_ready_lifo
 *  + _Scheduler_priority_SMP_Thread_get_node
 *  + _Scheduler_priority_SMP_Move_from_scheduled_to_ready
 *  + _Scheduler_priority_SMP_Move_from_ready_to_scheduled
 *  + _Scheduler_priority_SMP_Extract_from_ready
 *  + _Scheduler_priority_SMP_Do_update
 */

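/*
 * These wrappers tolerate a NULL next node, which the SMP enqueue
 * operations may pass when the affinity-aware search for the lowest
 * priority scheduled node finds no suitable victim.
 */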
static bool _Scheduler_priority_affinity_SMP_Insert_priority_lifo_order(
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  return next != NULL
    && _Scheduler_SMP_Insert_priority_lifo_order( to_insert, next );
}

static bool _Scheduler_priority_affinity_SMP_Insert_priority_fifo_order(
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  return next != NULL
    && _Scheduler_SMP_Insert_priority_fifo_order( to_insert, next );
}

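/*
 * This method returns the thread's own scheduler node as a scheduler
 * specific type.
 */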
static Scheduler_priority_affinity_SMP_Node *
_Scheduler_priority_affinity_SMP_Thread_get_own_node(
  Thread_Control *thread
)
{
  return (Scheduler_priority_affinity_SMP_Node *)
    _Scheduler_Thread_get_own_node( thread );
}

/*
 * This method returns the scheduler node for the specified thread
 * as a scheduler specific type.
 */
static Scheduler_priority_affinity_SMP_Node *
_Scheduler_priority_affinity_SMP_Thread_get_node(
  Thread_Control *thread
)
{
  return (Scheduler_priority_affinity_SMP_Node *)
    _Scheduler_Thread_get_node( thread );
}

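/*
 * This method downcasts a generic scheduler node to the scheduler
 * specific node type.
 */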
static Scheduler_priority_affinity_SMP_Node *
_Scheduler_priority_affinity_SMP_Node_downcast(
  Scheduler_Node *node
)
{
  return (Scheduler_priority_affinity_SMP_Node *) node;
}

/*
 * This method initializes the scheduler node of a thread for this
 * scheduler instance.
 */
void _Scheduler_priority_affinity_SMP_Node_initialize(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread
)
{
  Scheduler_priority_affinity_SMP_Node *node =
    _Scheduler_priority_affinity_SMP_Thread_get_own_node( thread );

  (void) scheduler;

  _Scheduler_SMP_Node_initialize( &node->Base.Base, thread );

  /*
   *  All we add is affinity information to the basic SMP node.
   */
  node->Affinity     = *_CPU_set_Default();
  node->Affinity.set = &node->Affinity.preallocated;
}

/*
 * This method differs from _Scheduler_SMP_Allocate_processor_lazy() in
 * that it performs exactly the allocation it is asked to do.
 * _Scheduler_SMP_Allocate_processor_lazy() attempts to prevent migrations
 * but does not take affinity into account.
 */
static inline void _Scheduler_SMP_Allocate_processor_exact(
  Scheduler_Context *context,
  Thread_Control    *scheduled_thread,
  Thread_Control    *victim_thread
)
{
  Per_CPU_Control *victim_cpu = _Thread_Get_CPU( victim_thread );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();

  (void) context;

  _Thread_Set_CPU( scheduled_thread, victim_cpu );
  _Thread_Dispatch_update_heir( cpu_self, victim_cpu, scheduled_thread );
}

/*
 * This method is unique to this scheduler because it takes into
 * account affinity as it determines the highest ready thread.
 * Since this is used to pick a new thread to replace the victim,
 * the highest ready thread must have affinity such that it can
 * be executed on the victim's processor.
 */
static Scheduler_Node *_Scheduler_priority_affinity_SMP_Get_highest_ready(
  Scheduler_Context *context,
  Scheduler_Node    *victim
)
{
  Scheduler_priority_SMP_Context       *self =
    _Scheduler_priority_SMP_Get_self( context );
  Priority_Control                      index;
  Scheduler_Node                       *highest = NULL;
  Thread_Control                       *victim_thread;
  uint32_t                              victim_cpu_index;
  Scheduler_priority_affinity_SMP_Node *node;

  /*
   * A NULL victim indicates that the caller only wants the overall highest
   * priority ready node, without filtering by affinity.  This is used when
   * checking whether migrations are needed.
   */
  if ( victim == NULL ) {
    node = (Scheduler_priority_affinity_SMP_Node *)
      _Scheduler_priority_Ready_queue_first(
        &self->Bit_map,
        &self->Ready[ 0 ]
      );

    return &node->Base.Base.Base;
  }

  victim_thread = _Scheduler_Node_get_owner( victim );
  victim_cpu_index = _Per_CPU_Get_index( _Thread_Get_CPU( victim_thread ) );

  /**
   * @todo The deterministic priority scheduler structure is optimized
   * for insertion, extraction, and finding the highest priority
   * thread. Scanning the list of ready threads is not a purpose
   * for which it was optimized. There are optimizations to be
   * made in this loop.
   *
   * + by checking the major bit, we could potentially skip entire
   *   groups of 16.
   *
   * When using this scheduler as implemented, the application's
   * choice of numeric priorities and their distribution can have
   * an impact on performance.
   */
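  /*
   * Walk the ready queues from the highest priority downward until a
   * node with affinity for the victim's processor is found.
   */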
  for ( index = _Priority_bit_map_Get_highest( &self->Bit_map ) ;
        index <= PRIORITY_MAXIMUM;
        index++ )
  {
    Chain_Control   *chain =  &self->Ready[index];
    Chain_Node      *chain_node;
    for ( chain_node = _Chain_First( chain );
          chain_node != _Chain_Immutable_tail( chain ) ;
          chain_node = _Chain_Next( chain_node ) )
    {
      node = (Scheduler_priority_affinity_SMP_Node *) chain_node;

      /*
       * Can this thread run on this CPU?
       */
      if ( CPU_ISSET( (int) victim_cpu_index, node->Affinity.set ) ) {
        highest = &node->Base.Base.Base;
        break;
      }
    }
    if ( highest )
      break;
  }

  _Assert( highest != NULL );

  return highest;
}

/*
 * This method is very similar to _Scheduler_priority_SMP_Block() but has
 * the difference that it invokes this scheduler's get_highest_ready()
 * support method.
 */
void _Scheduler_priority_affinity_SMP_Block(
  const Scheduler_Control *scheduler,
  Thread_Control *thread
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  _Scheduler_SMP_Block(
    context,
    thread,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Get_highest_ready,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_exact,
    _Scheduler_priority_SMP_Get_idle_thread
  );

  /*
   * Since this removed a single thread from the scheduled set
   * and selected the most appropriate thread from the ready
   * set to replace it, there should be no need for thread
   * migrations.
   */
}

/*
 * This method is unique to this scheduler because it must take affinity
 * into account as it searches for the lowest priority scheduled thread.
 * It ignores potential victims which cannot be replaced because the filter
 * thread does not have affinity for the processor on which they execute.
 */
static Scheduler_Node * _Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *filter_base,
  Chain_Node_order   order
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Scheduler_Node *lowest_scheduled = NULL;
  Chain_Control   *scheduled = &self->Scheduled;
  Chain_Node      *chain_node;
  Scheduler_priority_affinity_SMP_Node *filter =
    _Scheduler_priority_affinity_SMP_Node_downcast( filter_base );

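  /*
   * Scan the scheduled set from the tail, i.e. starting with the lowest
   * priority scheduled node.
   */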
  for ( chain_node = _Chain_Last( scheduled );
        chain_node != _Chain_Immutable_head( scheduled ) ;
        chain_node = _Chain_Previous( chain_node ) ) {
    Scheduler_priority_affinity_SMP_Node *node;
    Thread_Control                       *thread;
    uint32_t                              cpu_index;

    node = (Scheduler_priority_affinity_SMP_Node *) chain_node;

    /*
     * Once we reach a scheduled thread which is more important than the
     * filter thread, no thread of equal or lower importance remains, so
     * the filter thread cannot displace anyone.
     */
    if ( (*order)( &node->Base.Base.Base.Node, &filter->Base.Base.Base.Node ) )
      break;

    /* cpu_index is the processor on which this scheduled thread executes */
    thread = _Scheduler_Node_get_owner( &node->Base.Base.Base );
    cpu_index = _Per_CPU_Get_index( _Thread_Get_CPU( thread ) );

    if ( CPU_ISSET( (int) cpu_index, filter->Affinity.set ) ) {
      lowest_scheduled = &node->Base.Base.Base;
      break;
    }

  }

  return lowest_scheduled;
}

/*
 * This method is unique to this scheduler because it must pass
 * _Scheduler_priority_affinity_SMP_Get_lowest_scheduled into
 * _Scheduler_SMP_Enqueue_ordered.
 */
static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_fifo(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Thread_Control    *needs_help
)
{
  return _Scheduler_SMP_Enqueue_ordered(
    context,
    node,
    needs_help,
    _Scheduler_priority_affinity_SMP_Insert_priority_fifo_order,
    _Scheduler_priority_SMP_Insert_ready_fifo,
    _Scheduler_SMP_Insert_scheduled_fifo,
    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    _Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
    _Scheduler_SMP_Allocate_processor_exact,
    _Scheduler_priority_SMP_Release_idle_thread
  );
}

/*
 * This method is invoked at the end of certain scheduling operations to
 * check whether the highest priority ready threads can be scheduled to
 * execute. When we schedule with affinity, there is the possibility that
 * we need to migrate a thread to another core to ensure that the highest
 * priority ready threads are in fact scheduled.
 */
static void _Scheduler_priority_affinity_SMP_Check_for_migrations(
  Scheduler_Context *context
)
{
  Scheduler_Node        *lowest_scheduled;
  Scheduler_Node        *highest_ready;

  while (1) {
    highest_ready =
      _Scheduler_priority_affinity_SMP_Get_highest_ready( context, NULL );

    lowest_scheduled =
      _Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
        context,
        highest_ready,
        _Scheduler_SMP_Insert_priority_lifo_order
      );

    /*
     * We found the absolute highest priority ready thread without
     * considering affinity.  Its affinity was then taken into account
     * while searching for a scheduled thread to displace.
     *
     * If we can't find a thread to displace from the scheduled set, then
     * we have placed all the highest priority threads possible in the
     * scheduled set and we are done.
     */
    if ( lowest_scheduled == NULL )
      break;

    /*
     * Otherwise the scheduled thread we found is of lower priority than
     * a thread in the ready set, so the two need to be swapped.
     */

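    /*
     * The swap: demote the victim to the ready state, hand its processor
     * to the highest priority ready node, and exchange their positions
     * in the scheduled and ready sets.
     */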
    _Scheduler_SMP_Node_change_state(
      _Scheduler_SMP_Node_downcast( lowest_scheduled ),
      SCHEDULER_SMP_NODE_READY
    );

    _Scheduler_SMP_Allocate_processor(
      context,
      highest_ready,
      lowest_scheduled,
      _Scheduler_SMP_Allocate_processor_exact
    );

    _Scheduler_priority_SMP_Move_from_ready_to_scheduled(
      context,
      highest_ready
    );

    _Scheduler_priority_SMP_Move_from_scheduled_to_ready(
      context,
      lowest_scheduled
    );
  }
}

/*
 * This is the public scheduler specific Unblock operation.
 */
Thread_Control *_Scheduler_priority_affinity_SMP_Unblock(
  const Scheduler_Control *scheduler,
  Thread_Control *thread
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
  Thread_Control    *needs_help;

  needs_help = _Scheduler_SMP_Unblock(
    context,
    thread,
    _Scheduler_priority_affinity_SMP_Enqueue_fifo,
    _Scheduler_priority_SMP_Release_idle_thread
  );

  /*
   * Perform any thread migrations that are needed due to these changes.
   */
  _Scheduler_priority_affinity_SMP_Check_for_migrations( context );

  return needs_help;
}

/*
 *  This is unique to this scheduler because it passes a scheduler specific
 *  get_lowest_scheduled helper to _Scheduler_SMP_Enqueue_ordered.
 */
static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_ordered(
  Scheduler_Context     *context,
  Scheduler_Node        *node,
  Thread_Control        *needs_help,
  Chain_Node_order       order,
  Scheduler_SMP_Insert   insert_ready,
  Scheduler_SMP_Insert   insert_scheduled
)
{
  return _Scheduler_SMP_Enqueue_ordered(
    context,
    node,
    needs_help,
    order,
    insert_ready,
    insert_scheduled,
    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    _Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
    _Scheduler_SMP_Allocate_processor_exact,
    _Scheduler_priority_SMP_Release_idle_thread
  );
}

/*
 *  This is unique to this scheduler because it is on the path
 *  to _Scheduler_priority_affinity_SMP_Enqueue_ordered() which
 *  invokes a scheduler unique get_lowest_scheduled helper.
 */
static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_lifo(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Thread_Control    *needs_help
)
{
  return _Scheduler_priority_affinity_SMP_Enqueue_ordered(
    context,
    node,
    needs_help,
    _Scheduler_priority_affinity_SMP_Insert_priority_lifo_order,
    _Scheduler_priority_SMP_Insert_ready_lifo,
    _Scheduler_SMP_Insert_scheduled_lifo
  );
}

/*
 * This method is unique to this scheduler because it must
 * invoke _Scheduler_SMP_Enqueue_scheduled_ordered() with
 * this scheduler's get_highest_ready() helper.
 */
static Thread_Control *
_Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
  Scheduler_Context    *context,
  Scheduler_Node       *node,
  Chain_Node_order      order,
  Scheduler_SMP_Insert  insert_ready,
  Scheduler_SMP_Insert  insert_scheduled
)
{
  return _Scheduler_SMP_Enqueue_scheduled_ordered(
    context,
    node,
    order,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Get_highest_ready,
    insert_ready,
    insert_scheduled,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_exact,
    _Scheduler_priority_SMP_Get_idle_thread,
    _Scheduler_priority_SMP_Release_idle_thread
  );
}

/*
 *  This is unique to this scheduler because it is on the path
 *  to _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered() which
 *  invokes this scheduler's get_highest_ready() helper.
 */
static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_scheduled_lifo(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  return _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
    context,
    node,
    _Scheduler_SMP_Insert_priority_lifo_order,
    _Scheduler_priority_SMP_Insert_ready_lifo,
    _Scheduler_SMP_Insert_scheduled_lifo
  );
}

/*
 *  This is unique to this scheduler because it is on the path
 *  to _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered() which
 *  invokes this scheduler's get_highest_ready() helper.
 */
static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  return _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
    context,
    node,
    _Scheduler_SMP_Insert_priority_fifo_order,
    _Scheduler_priority_SMP_Insert_ready_fifo,
    _Scheduler_SMP_Insert_scheduled_fifo
  );
}

/*
 * This is the public scheduler specific Change Priority operation.
 */
Thread_Control *_Scheduler_priority_affinity_SMP_Change_priority(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Priority_Control         new_priority,
  bool                     prepend_it
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
  Thread_Control    *displaced;

  displaced = _Scheduler_SMP_Change_priority(
    context,
    thread,
    new_priority,
    prepend_it,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_SMP_Do_update,
    _Scheduler_priority_affinity_SMP_Enqueue_fifo,
    _Scheduler_priority_affinity_SMP_Enqueue_lifo,
    _Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo,
    _Scheduler_priority_affinity_SMP_Enqueue_scheduled_lifo
  );

  /*
   * Perform any thread migrations that are needed due to these changes.
   */
  _Scheduler_priority_affinity_SMP_Check_for_migrations( context );

  return displaced;
}

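/*
 * This is the public scheduler specific Ask for Help operation.
 */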
Thread_Control *_Scheduler_priority_affinity_SMP_Ask_for_help(
  const Scheduler_Control *scheduler,
  Thread_Control          *offers_help,
  Thread_Control          *needs_help
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  needs_help = _Scheduler_SMP_Ask_for_help(
    context,
    offers_help,
    needs_help,
    _Scheduler_priority_affinity_SMP_Enqueue_fifo,
    _Scheduler_priority_SMP_Release_idle_thread
  );

  _Scheduler_priority_affinity_SMP_Check_for_migrations( context );

  return needs_help;
}

/*
 * This is the public scheduler specific Get Affinity operation.
 */
bool _Scheduler_priority_affinity_SMP_Get_affinity(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  Scheduler_priority_affinity_SMP_Node *node =
    _Scheduler_priority_affinity_SMP_Thread_get_node(thread);

  (void) scheduler;

  if ( node->Affinity.setsize != cpusetsize ) {
    return false;
  }

  CPU_COPY( cpuset, node->Affinity.set );
  return true;
}

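/*
 * This is the public scheduler specific Set Affinity operation.
 */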
bool _Scheduler_priority_affinity_SMP_Set_affinity(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  size_t                   cpusetsize,
  const cpu_set_t         *cpuset
)
{
  Scheduler_priority_affinity_SMP_Node *node =
    _Scheduler_priority_affinity_SMP_Thread_get_node(thread);

  (void) scheduler;

  /*
   * Validate that the cpuset meets basic requirements.
   */
  if ( !_CPU_set_Is_valid( cpuset, cpusetsize ) ) {
    return false;
  }

  /*
   * If the old and new sets are the same, there is no point in
   * doing anything.
   */
  if ( CPU_EQUAL_S( cpusetsize, cpuset, node->Affinity.set ) )
    return true;

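  /*
   * Temporarily place the thread in the migrating state while its affinity
   * set is updated, so that the scheduler re-evaluates where it may execute
   * once the state is cleared.
   */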
  _Thread_Set_state( thread, STATES_MIGRATING );
    CPU_COPY( node->Affinity.set, cpuset );
  _Thread_Clear_state( thread, STATES_MIGRATING );

  return true;
}